pax_global_header00006660000000000000000000000064121034413740014511gustar00rootroot0000000000000052 comment=afed8d833fc292d65579cf3d1ac03afeb99000bd nipy-0.3.0/000077500000000000000000000000001210344137400124705ustar00rootroot00000000000000nipy-0.3.0/.gitattributes000066400000000000000000000000421210344137400153570ustar00rootroot00000000000000nipy/COMMIT_INFO.txt export-subst nipy-0.3.0/.gitignore000066400000000000000000000016351210344137400144650ustar00rootroot00000000000000# Editor temporary/working/backup files # ######################################### .#* [#]*# *~ *$ *.bak *.diff *.org .project *.rej .settings/ .*.sw[nop] .sw[nop] *.tmp *.orig # Not sure what the next two are for *.kpf *-stamp # Compiled source # ################### *.a *.com *.class *.dll *.exe *.o *.py[oc] *.so *.pyd # Packages # ############ # it's better to unpack these files and commit the raw source # git has its own built in compression methods *.7z *.bz2 *.bzip2 *.dmg *.gz *.iso *.jar *.rar *.tar *.tbz2 *.tgz *.zip # Python files # ################ MANIFEST build/ _build dist/ *.egg-info .shelf/ # Logs and databases # ###################### *.log *.sql *.sqlite # OS generated files # ###################### .gdb_history .DS_Store? ehthumbs.db Icon? Thumbs.db # Things specific to this project # ################################### __config__.py doc/api/generated doc/build/ doc/manual cythonize.dat nipy-0.3.0/.mailmap000066400000000000000000000052231210344137400141130ustar00rootroot00000000000000Alexis Roche Alexis ROCHE Ariel Rokem arokem Benjamin Thyreau benjamin.thyreau <> Benjamin Thyreau benji2@decideur.info <> Bertrand Thirion Bertrand THIRION Bertrand Thirion bertrand.thirion <> Bertrand Thirion bthirion Christopher Burns Chris Christopher Burns cburns <> Cindee Madison Cindee Madison Cindee Madison cindee.madison <> Cindee Madison cindeem <> Cindee Madison cindeem Eleftherios Garyfallidis Erik Ziegler erikz Fabian Pedregosa Fernando Perez fdo.perez <> Gael Varoquaux Gael varoquaux Gael Varoquaux GaelVaroquaux Gael Varoquaux GaelVaroquaux Gael Varoquaux gvaroquaux Gael Varoquaux varoquau Jarrod Millman Jarrod Millman Jarrod Millman jarrod.millman <> Jean-Baptiste Poline JB Jean-Baptiste Poline jbpoline Jonathan Taylor jonathan.taylor <> Jonathan Taylor jtaylo Martin Bergtholdt Matthew Brett matthew.brett <> Matthew Brett mb312 Matthieu Brucher Merlin Keller Merlin KELLER Merlin Keller keller Tom Waite twaite Virgile Fritsch VirgileFritsch Virgile Fritsch Fritsch # and below the ones to fill out Paris Sprint Account Philippe CIUCIU Thomas VINCENT <20100thomas@gmail.com> alan brian.hawthorne <> davclark <> denis.riviere <> michael.castelle <> mike.trumpis <> sebastien.meriaux <> tim.leslie <> yann.cointepas <> nipy-0.3.0/.travis.yml000066400000000000000000000031221210344137400145770ustar00rootroot00000000000000# vim ft=yaml # travis-ci.org definition for nipy build # # We pretend to be erlang because we need can't use the python support in # travis-ci; it uses virtualenvs, they do not have numpy, scipy, matplotlib, # and it is impractical to build them language: erlang env: # Enable python 2 and python 3 builds. Python3 available in Ubuntu 12.04. - PYTHON=python PYSUF='' - PYTHON=python3 PYSUF=3 install: - sudo apt-get update - sudo apt-get install $PYTHON-dev - sudo apt-get install $PYTHON-numpy - sudo apt-get install $PYTHON-scipy - sudo apt-get install $PYTHON-setuptools - sudo apt-get install $PYTHON-nose # Installing sympy for python3 currently an unpleasant task. 
# The following is an extended virtual line; will be made into one line by # Travis processing. Avoid `-` at the beginning of the line, remember to # add `;` at the end of continuation lines. - if [ "${PYSUF}" == "3" ]; then git clone git://github.com/sympy/sympy.git _sympy ; cd _sympy ; git checkout -b 0.7.2 sympy-0.7.2 ; python3 ./bin/use2to3 ; cd py3k-sympy ; sudo python3 setup.py install ; cd ../.. ; else sudo apt-get install $PYTHON-sympy ; fi - sudo easy_install$PYSUF nibabel # Latest pypi - $PYTHON setup.py build - sudo $PYTHON setup.py install script: # Change into an innocuous directory and find tests from installation - mkdir for_test - cd for_test - $PYTHON ../tools/nipnost -A "not slow" `$PYTHON -c "import os; import nipy; print(os.path.dirname(nipy.__file__))"` nipy-0.3.0/AUTHOR000066400000000000000000000011441210344137400134150ustar00rootroot00000000000000Alexis Roche Bertrand Thirion Brian Hawthrorne Chris Burns Cindee Madison Fernando Perez Gael Varoquaux Jarrod Millman Jean-Baptiste Poline Jonathan Taylor Matthew Brett Merlin Keller Mike Trumpis Tim Leslie Tom Waite Virgile Fritsch Yannick Schwartz Yaroslav Halchenko nipy-0.3.0/Changelog000066400000000000000000000042321210344137400143030ustar00rootroot00000000000000.. -*- mode: rst -*- .. vim:ft=rst .. _changelog: NIPY Changelog -------------- NIPY is not only a module for neuroimaing analysis but an umbrella for other Python neuroimaging-related projects -- see https://github.com/nipy and http://www.nipy.org for more information about their releases. 'Close gh-' statements refer to GitHub issues that are available at:: http://github.com/nipy/nipy/issues The full VCS changelog is available here: http://github.com/nipy/nipy/commits/master Releases ~~~~~~~~ Abbreviated authors are: * MB - Matthew Brett * BT - Bertrand Thirion * AR - Alexis Roche * GV - Gaël Varoquaux * YH - Yarik Halchenko * 0.3.0 (Saturday 2 February 2013) * Bugfix, refactoring and compatibility release * Addition of EM algorithm for mixed effects analysis (BT) * New high-level GLM class interface (BT) * nipy diagnostic scripts save PCA and tsdifana vectors to npz file * Python 3 compatibility through 3.3 (MB) * Fixes for compatibility with upcoming Numpy 1.7 * Fixes to background and axis specification in visualization tools (GV, BT) * Fixes and tests for installed nipy scripts (MB) * Fix to optimization parameters for Realign4D - thanks to `bpinsard` * Fix 0 in affine diagonal for TR=0 in affines by default (MB) * Allow saving of nipy images loaded from nifti files that lack explicit affine (MB) * Allow `slice_order=None` to `FmriRealign4D` when not doing time interpolation (AR); check for valid slice order specification (YR) * Refactoring of quantile routine to move code out of C library (AR) * Fix bug in resampling of unsigned int images (AR) * Custom doctest machinery to work round differences of dtype repr on different platforms, and to skip doctests with optional dependencies (MB) * Script to run examples for testing (MB) * Fix for accidental integer division of frametimes in design matrix generation (Fabian Pedregosa) * Various other fixes and refactorings with thanks from (AR, BT, MB, YR, Yannick Schwartz, Virgile Fritsch) * 0.2.0 (Sunday 22 July 2012) * The first ever official release - > 30 contributors - > 6 years in development - 192 issues closed on github nipy-0.3.0/LICENSE000066400000000000000000000030041210344137400134720ustar00rootroot00000000000000Copyright (c) 2006-2012, NIPY Developers All rights reserved. 
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the NIPY Developers nor the names of any contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. nipy-0.3.0/MANIFEST.in000066400000000000000000000010151210344137400142230ustar00rootroot00000000000000include AUTHOR COPYING Makefile* MANIFEST.in setup* README.* THANKS include Changelog TODO include *.py include site.* recursive-include nipy *.c *.h *.pyx *.pxd recursive-include libcstat *.c *.h *.pyx *.pxd recursive-include scripts * recursive-include tools * # put this stuff back into setup.py (package_data) once I'm enlightened # enough to accomplish this herculean task recursive-include nipy/algorithms/tests/data * include nipy/COMMIT_INFO.txt include LICENSE graft examples graft doc global-exclude *~ *.swp *.pyc nipy-0.3.0/Makefile000066400000000000000000000053421210344137400141340ustar00rootroot00000000000000# Automating common tasks for NIPY development PYTHON = python clean-pyc: find . -regex ".*\.pyc" -exec rm -rf "{}" \; clean: clean-pyc find . -regex ".*\.so" -exec rm -rf "{}" \; find . -regex ".*\.pyd" -exec rm -rf "{}" \; find . -regex ".*~" -exec rm -rf "{}" \; find . -regex ".*#" -exec rm -rf "{}" \; rm -rf build $(MAKE) -C doc clean clean-dev: clean dev distclean: clean -rm MANIFEST -rm $(COVERAGE_REPORT) @find . -name '*.py[co]' \ -o -name '*.a' \ -o -name '*,cover' \ -o -name '.coverage' \ -o -iname '*~' \ -o -iname '*.kcache' \ -o -iname '*.pstats' \ -o -iname '*.prof' \ -o -iname '#*#' | xargs -L10 rm -f -rm -r dist -rm build-stamp -rm -r .tox -git clean -fxd dev: cythonize $(PYTHON) setup.py build_ext --inplace test: cd .. 
&& $(PYTHON) -c 'import nipy; nipy.test()' build: $(PYTHON) setup.py build install: $(PYTHON) setup.py install cythonize: $(PYTHON) tools/nicythize bdist_rpm: $(PYTHON) setup.py bdist_rpm \ --doc-files "doc" \ --packager "nipy authors " --vendor "nipy authors " # build MacOS installer -- depends on patched bdist_mpkg for Leopard bdist_mpkg: $(PYTHON) tools/mpkg_wrapper.py setup.py install # Check for files not installed check-files: $(PYTHON) -c 'from nisext.testers import check_files; check_files("nipy")' # Print out info for possible install methods check-version-info: $(PYTHON) -c 'from nisext.testers import info_from_here; info_from_here("nipy")' # Run tests from installed code installed-tests: $(PYTHON) -c 'from nisext.testers import tests_installed; tests_installed("nipy")' # Run tests from sdist archive of code sdist-tests: $(PYTHON) -c 'from nisext.testers import sdist_tests; sdist_tests("nipy")' # Run tests from bdist egg of code bdist-egg-tests: $(PYTHON) -c 'from nisext.testers import bdist_egg_tests; bdist_egg_tests("nipy")' source-release: distclean python -m compileall . make distclean python setup.py sdist --formats=gztar,zip venv-tests: # I use this for python2.5 because the sdist-tests target doesn't work # (the tester routine uses a 2.6 feature) make distclean - rm -rf $(VIRTUAL_ENV)/lib/python$(PYVER)/site-packages/nipy python setup.py install cd .. && nosetests $(VIRTUAL_ENV)/lib/python$(PYVER)/site-packages/nipy tox-fresh: # tox tests with fresh-installed virtualenvs. Needs network. And # pytox, obviously. tox -c tox.ini tox-stale: # tox tests with MB's already-installed virtualenvs (numpy and nose # installed) tox -e python25,python26,python27,python32,np-1.2.1 recythonize: # Recythonize all pyx files find . -name "*.pyx" -exec cython -I libcstat/wrapper {} \; .PHONY: orig-src pylint nipy-0.3.0/README.rst000066400000000000000000000043111210344137400141560ustar00rootroot00000000000000.. -*- rest -*- .. vim:syntax=rest ==== NIPY ==== Neuroimaging tools for Python. The aim of NIPY is to produce a platform-independent Python environment for the analysis of functional brain imaging data using an open development model. In NIPY we aim to: 1. Provide an open source, mixed language scientific programming environment suitable for rapid development. 2. Create sofware components in this environment to make it easy to develop tools for MRI, EEG, PET and other modalities. 3. Create and maintain a wide base of developers to contribute to this platform. 4. To maintain and develop this framework as a single, easily installable bundle. NIPY is the work of many people. We list the main authors in the file ``AUTHOR`` in the NIPY distribution, and other contributions in ``THANKS``. Website ======= Current information can always be found at the NIPY website:: http://nipy.org/nipy Mailing Lists ============= Please see the developer's list:: http://projects.scipy.org/mailman/listinfo/nipy-devel Code ==== You can find our sources and single-click downloads: * `Main repository`_ on Github. * Documentation_ for all releases and current development tree. * Download as a tar/zip file the `current trunk`_. * Downloads of all `available releases`_. .. _main repository: http://github.com/nipy/nipy .. _Documentation: http://nipy.org/nipy .. _current trunk: http://github.com/nipy/nipy/archives/master .. 
_available releases: http://pypi.python.org/pypi/nipy Dependencies ============ To run NIPY, you will need: * python_ >= 2.5 (tested with 2.5, 2.6, 2.7, 3.2, 3.3) * numpy_ >= 1.2 * scipy_ >= 0.7.0 * sympy_ >= 0.6.6 * nibabel_ >= 1.2 You will probably also like to have: * ipython_ for interactive work * matplotlib_ for 2D plotting * mayavi_ for 3D plotting .. _python: http://python.org .. _numpy: http://numpy.scipy.org .. _scipy: http://www.scipy.org .. _sympy: http://sympy.org .. _nibabel: http://nipy.org/nibabel .. _ipython: http://ipython.scipy.org .. _matplotlib: http://matplotlib.sourceforge.net .. _mayavi: http://code.enthought.com/projects/mayavi/ License ======= We use the 3-clause BSD license; the full license is in the file ``LICENSE`` in the nipy distribution. nipy-0.3.0/THANKS000066400000000000000000000010071210344137400134010ustar00rootroot00000000000000NIPY is an open source project for neuroimaging analysis using Python. It is a community project. Many people have contributed to NIPY, in code development, and they are (mainly) listed in the AUTHOR file. Others have contributed greatly code review, discussion, and financial support. Below is a partial list. If you've been left off, please let us know (nipy-devel at neuroimaging.scipy.org), and we'll add you. Michael Castelle Philippe Ciuciu Dav Clark Yann Cointepas Mark D'Esposito Denis Riviere Karl Young nipy-0.3.0/doc/000077500000000000000000000000001210344137400132355ustar00rootroot00000000000000nipy-0.3.0/doc/.gitignore000066400000000000000000000000201210344137400152150ustar00rootroot00000000000000labs/generated/ nipy-0.3.0/doc/Makefile000066400000000000000000000112171210344137400146770ustar00rootroot00000000000000# Makefile for Sphinx documentation # SF_USER ?= matthewbrett DIST_DIR = dist # You can set these variables from the command line. SPHINXOPTS = #-q # suppress all output but warnings SPHINXBUILD = sphinx-build PAPER = # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d build/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . .PHONY: help clean pdf all dist public_html web web_public htmlonly api html pickle htmlhelp latex changes linkcheck doctest help: @echo "Please use \`make ' where is one of" @echo " html make HTML and API documents" @echo " htmlonly make HTML documents only" @echo " api make API documents only" @echo " latex make LaTeX documents (you can set\ PAPER=a4 or PAPER=letter)" @echo " all make HTML, API and PDF documents" @echo " clean remove all generated documents" @echo @echo " linkcheck check all external links for integrity" @echo " doctest run doctests in reST files" @echo " pdf make and run the PDF generation" @echo " dist make and put results in $DIST_DIR/" @echo " public_html make and rsync with public website" @echo " upload-stable-web make and rsync with public stable website" @echo " upload-devel-web make and rsync with public devel website" @echo " gitwash-update update git workflow from source repo" # Commented these out, wasn't clear if we'd use these targets or not. # @echo " pickle to make pickle files (usable by e.g. 
sphinx-web)" # @echo " htmlhelp to make HTML files and a HTML help project" # @echo " changes to make an overview over all changed/added/deprecated items" clean: -rm -rf build/* $(DIST_DIR)/* *~ api/generated labs/generated -rm -f manual pdf: latex cd build/latex && make all-pdf all: html pdf dist: clean all mkdir -p $(DIST_DIR) ln build/latex/nipy*.pdf $(DIST_DIR) cp -a build/html/* $(DIST_DIR) @echo "Build finished. Final docs are in $(DIST_DIR)" # This one udates for the specific user named at the top of the makefile upload-stable-web: upload-stable-web-$(SF_USER) # This one updates according to passed user e.g # make upload-stable-web-my_own_sf_user upload-stable-web-%: dist rsync -rzhvp --delete --chmod=Dg+s,g+rw $(DIST_DIR)/* \ $*,nipy@web.sourceforge.net:/home/groups/n/ni/nipy/htdocs/nipy/stable # This one udates for the specific user named at the top of the makefile upload-devel-web: upload-devel-web-$(SF_USER) # This one updates according to passed user e.g # make upload-devel-web-my_own_sf_user upload-devel-web-%: dist rsync -rzhvp --delete --chmod=Dg+s,g+rw $(DIST_DIR)/* \ $*,nipy@web.sourceforge.net:/home/groups/n/ni/nipy/htdocs/nipy/devel htmlonly: mkdir -p build/html build/doctrees $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) build/html @echo @echo "Build finished. The HTML pages are in build/html." api: python ../tools/build_modref_templates.py @echo "Build API docs finished." html: api htmlonly -ln -s build manual @echo "Build HTML and API finished." gitwash-update: python ../tools/gitwash_dumper.py devel/guidelines nipy \ --github-user=nipy \ --project-url=http://nipy.org/nipy \ --project-ml-url=http://mail.scipy.org/mailman/listinfo/nipy-devel pickle: mkdir -p build/pickle build/doctrees $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) build/pickle @echo @echo "Build finished; now you can process the pickle files or run" @echo " sphinx-web build/pickle" @echo "to start the sphinx-web server." htmlhelp: mkdir -p build/htmlhelp build/doctrees $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) build/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in build/htmlhelp." latex: api mkdir -p build/latex build/doctrees $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) build/latex # Clear bug for longtable column output in sphinx python ../tools/fix_longtable.py build/latex/nipy.tex @echo @echo "Build finished; the LaTeX files are in build/latex." @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ "run these through (pdf)latex." changes: mkdir -p build/changes build/doctrees $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) build/changes @echo @echo "The overview file is in build/changes." linkcheck: mkdir -p build/linkcheck build/doctrees $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) build/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in build/linkcheck/output.txt." doctest: mkdir -p build/doctest build/doctrees $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) build/doctest @echo @echo "The overview file is in build/doctest." nipy-0.3.0/doc/README.txt000066400000000000000000000034261210344137400147400ustar00rootroot00000000000000==================== Nipy Documentation ==================== This is the top level build directory for the nipy documentation. All of the documentation is written using Sphinx_, a python documentation system built on top of reST_. 
Dependencies ============ In order to build the documentation, you must have: * Sphinx 1.0 or greater * nipy and all its dependencies so that nipy can import * matplotlib * latex (for the PNG mathematics graphics) * graphviz (for the inheritance diagrams) This directory contains: * Makefile - the build script to build the HTML or PDF docs. Type ``make help`` for a list of options. * users - the user documentation. * devel - documentation for developers. * faq - frequently asked questions * api - placeholders to automatically generate the api documentation * www - source files for website only reST documentss which should not go in the generated PDF documentation. * links_names.txt - reST document with hyperlink targets for common links used throughout the documentation * .rst files - some top-level documentation source files * conf.py - the sphinx configuration. * sphinxext - some extensions to sphinx to handle math, ipython syntax highlighting, numpy_ docstring parsing, and autodocs. * _static - used by the sphinx build system. * _templates - used by the sphinx build system. Building the documentation -------------------------- Instructions for building the documentation are in the file: ``devel/guidelines/howto_document.rst`` .. Since this README.txt is not processed by Sphinx during the .. documentation build, I've included the links directly so it is at .. least a valid reST doc. .. _Sphinx: http://sphinx.pocoo.org/ .. _reST: http://docutils.sourceforge.net/rst.html .. _numpy: http://www.scipy.org/NumPy .. vim: ft=rst nipy-0.3.0/doc/_static/000077500000000000000000000000001210344137400146635ustar00rootroot00000000000000nipy-0.3.0/doc/_static/nipy.css000066400000000000000000000177521210344137400163700ustar00rootroot00000000000000/** * Alternate Sphinx design * Originally created by Armin Ronacher for Werkzeug, adapted by Georg Brandl. 
*/ body { font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva', 'Verdana', sans-serif; font-size: 14px; letter-spacing: -0.01em; line-height: 150%; text-align: center; /*background-color: #AFC1C4; */ background-color: #BFD1D4; color: black; padding: 0; border: 1px solid #aaa; margin: 0px 80px 0px 80px; min-width: 740px; } a { color: #CA7900; text-decoration: none; } a:hover { color: #2491CF; } pre { font-family: 'Consolas', 'Deja Vu Sans Mono', 'Bitstream Vera Sans Mono', monospace; font-size: 0.95em; letter-spacing: 0.015em; padding: 0.5em; border: 1px solid #ccc; background-color: #f8f8f8; } td.linenos pre { padding: 0.5em 0; border: 0; background-color: transparent; color: #aaa; } table.highlighttable { margin-left: 0.5em; } table.highlighttable td { padding: 0 0.5em 0 0.5em; } cite, code, tt { font-family: 'Consolas', 'Deja Vu Sans Mono', 'Bitstream Vera Sans Mono', monospace; font-size: 0.95em; letter-spacing: 0.01em; } hr { border: 1px solid #abc; margin: 2em; } tt { background-color: #f2f2f2; border-bottom: 1px solid #ddd; color: #333; } tt.descname { background-color: transparent; font-weight: bold; font-size: 1.2em; border: 0; } tt.descclassname { background-color: transparent; border: 0; } tt.xref { background-color: transparent; font-weight: bold; border: 0; } a tt { background-color: transparent; font-weight: bold; border: 0; color: #CA7900; } a tt:hover { color: #2491CF; } dl { margin-bottom: 15px; } dd p { margin-top: 0px; } dd ul, dd table { margin-bottom: 10px; } dd { margin-top: 3px; margin-bottom: 10px; margin-left: 30px; } .refcount { color: #060; } dt:target, .highlight { background-color: #fbe54e; } dl.class, dl.function { border-top: 2px solid #888; } dl.method, dl.attribute { border-top: 1px solid #aaa; } dl.glossary dt { font-weight: bold; font-size: 1.1em; } pre { line-height: 120%; } pre a { color: inherit; text-decoration: underline; } .first { margin-top: 0 !important; } div.document { background-color: white; text-align: left; background-image: url(contents.png); background-repeat: repeat-x; } /* div.documentwrapper { width: 100%; } */ div.clearer { clear: both; } div.related h3 { display: none; } div.related ul { background-image: url(navigation.png); height: 2em; list-style: none; border-top: 1px solid #ddd; border-bottom: 1px solid #ddd; margin: 0; padding-left: 10px; } div.related ul li { margin: 0; padding: 0; height: 2em; float: left; } div.related ul li.right { float: right; margin-right: 5px; } div.related ul li a { margin: 0; padding: 0 5px 0 5px; line-height: 1.75em; color: #EE9816; } div.related ul li a:hover { color: #3CA8E7; } div.body { margin: 0; padding: 0.5em 20px 20px 20px; } div.bodywrapper { margin: 0 240px 0 0; border-right: 1px solid #ccc; } div.body a { text-decoration: underline; } div.sphinxsidebar { margin: 0; padding: 0.5em 15px 15px 0; width: 210px; float: right; text-align: left; /* margin-left: -100%; */ } div.sphinxsidebar h4, div.sphinxsidebar h3 { margin: 1em 0 0.5em 0; font-size: 0.9em; padding: 0.1em 0 0.1em 0.5em; color: white; border: 1px solid #86989B; background-color: #AFC1C4; } div.sphinxsidebar ul { padding-left: 1.5em; margin-top: 7px; list-style: none; padding: 0; line-height: 130%; } div.sphinxsidebar ul ul { list-style: square; margin-left: 20px; } p { margin: 0.8em 0 0.5em 0; } p.rubric { font-weight: bold; } h1 { margin: 0; padding: 0.7em 0 0.3em 0; font-size: 1.5em; color: #11557C; } h2 { margin: 1.3em 0 0.2em 0; font-size: 1.35em; padding: 0; } h3 { margin: 1em 0 -0.3em 0; font-size: 1.2em; } h1 a, h2 
a, h3 a, h4 a, h5 a, h6 a { color: black!important; } h1 a.anchor, h2 a.anchor, h3 a.anchor, h4 a.anchor, h5 a.anchor, h6 a.anchor { display: none; margin: 0 0 0 0.3em; padding: 0 0.2em 0 0.2em; color: #aaa!important; } h1:hover a.anchor, h2:hover a.anchor, h3:hover a.anchor, h4:hover a.anchor, h5:hover a.anchor, h6:hover a.anchor { display: inline; } h1 a.anchor:hover, h2 a.anchor:hover, h3 a.anchor:hover, h4 a.anchor:hover, h5 a.anchor:hover, h6 a.anchor:hover { color: #777; background-color: #eee; } table { border-collapse: collapse; margin: 0 -0.5em 0 -0.5em; } table td, table th { padding: 0.2em 0.5em 0.2em 0.5em; } div.footer { background-color: #E3EFF1; color: #86989B; padding: 3px 8px 3px 0; clear: both; font-size: 0.8em; text-align: right; } div.footer a { color: #86989B; text-decoration: underline; } div.pagination { margin-top: 2em; padding-top: 0.5em; border-top: 1px solid black; text-align: center; } div.sphinxsidebar ul.toc { margin: 1em 0 1em 0; padding: 0 0 0 0.5em; list-style: none; } div.sphinxsidebar ul.toc li { margin: 0.5em 0 0.5em 0; font-size: 0.9em; line-height: 130%; } div.sphinxsidebar ul.toc li p { margin: 0; padding: 0; } div.sphinxsidebar ul.toc ul { margin: 0.2em 0 0.2em 0; padding: 0 0 0 1.8em; } div.sphinxsidebar ul.toc ul li { padding: 0; } div.admonition, div.warning { font-size: 0.9em; margin: 1em 0 0 0; border: 1px solid #86989B; background-color: #f7f7f7; } div.admonition p, div.warning p { margin: 0.5em 1em 0.5em 1em; padding: 0; } div.admonition pre, div.warning pre { margin: 0.4em 1em 0.4em 1em; } div.admonition p.admonition-title, div.warning p.admonition-title { margin: 0; padding: 0.1em 0 0.1em 0.5em; color: white; border-bottom: 1px solid #86989B; font-weight: bold; background-color: #AFC1C4; } div.warning { border: 1px solid #940000; } div.warning p.admonition-title { background-color: #CF0000; border-bottom-color: #940000; } div.admonition ul, div.admonition ol, div.warning ul, div.warning ol { margin: 0.1em 0.5em 0.5em 3em; padding: 0; } div.versioninfo { margin: 1em 0 0 0; border: 1px solid #ccc; background-color: #DDEAF0; padding: 8px; line-height: 1.3em; font-size: 0.9em; } a.headerlink { color: #c60f0f!important; font-size: 1em; margin-left: 6px; padding: 0 4px 0 4px; text-decoration: none!important; visibility: hidden; } h1:hover > a.headerlink, h2:hover > a.headerlink, h3:hover > a.headerlink, h4:hover > a.headerlink, h5:hover > a.headerlink, h6:hover > a.headerlink, dt:hover > a.headerlink { visibility: visible; } a.headerlink:hover { background-color: #ccc; color: white!important; } table.indextable td { text-align: left; vertical-align: top; } table.indextable dl, table.indextable dd { margin-top: 0; margin-bottom: 0; } table.indextable tr.pcap { height: 10px; } table.indextable tr.cap { margin-top: 10px; background-color: #f2f2f2; } img.toggler { margin-right: 3px; margin-top: 3px; cursor: pointer; } img.inheritance { border: 0px } form.pfform { margin: 10px 0 20px 0; } table.contentstable { width: 90%; } table.contentstable p.biglink { line-height: 150%; } a.biglink { font-size: 1.3em; } span.linkdescr { font-style: italic; padding-top: 5px; font-size: 90%; } ul.search { margin: 10px 0 0 20px; padding: 0; } ul.search li { padding: 5px 0 5px 20px; background-image: url(file.png); background-repeat: no-repeat; background-position: 0 7px; } ul.search li a { font-weight: bold; } ul.search li div.context { color: #888; margin: 2px 0 0 30px; text-align: left; } ul.keywordmatches li.goodmatch a { font-weight: bold; } 
nipy-0.3.0/doc/_static/reggie2.png [binary PNG image data omitted] nipy-0.3.0/doc/_templates/ nipy-0.3.0/doc/_templates/layout.html {% extends "!layout.html" %} {% set title = 'Neuroimaging in Python' %} {% block rootrellink %}
  • NIPY home
  • {% endblock %} {% block extrahead %} {% endblock %} {% block header %} {% endblock %} {# This block gets put at the top of the sidebar #} {% block sidebarlogo %}

    Site Navigation

    NIPY Community

    Github repo

    {% endblock %} {# I had to copy the whole search block just to change the rendered text, so it doesn't mention modules or classes #} {%- block sidebarsearch %} {%- if pagename != "search" %} {%- endif %} {# The sidebarsearch block is the last one available in the default sidebar() macro, so the only way to add something to the bottom of the sidebar is to put it here, at the end of the sidebarsearch block (before it closes). #} {%- endblock %} nipy-0.3.0/doc/api/000077500000000000000000000000001210344137400140065ustar00rootroot00000000000000nipy-0.3.0/doc/api/index.rst000066400000000000000000000001731210344137400156500ustar00rootroot00000000000000.. _api-index: ##### API ##### .. only:: html :Release: |version| :Date: |today| .. include:: generated/gen.rst nipy-0.3.0/doc/bibtex/000077500000000000000000000000001210344137400145125ustar00rootroot00000000000000nipy-0.3.0/doc/bibtex/README.txt000066400000000000000000000016301210344137400162100ustar00rootroot00000000000000.. Using -*- rst -*- (ReST) mode for emacs editing .. We don't expect this file to appear in the output documentation =============== Bibtex folder =============== This folder is for bibtex bibliographies, for citations in NIPY documentation. At the moment there is no standard bibtex mechanism in sphinx_, but we keep be the bibs here, waiting for the time that this is done. They also provide the sources for script conversion to ReST_. For script conversion, we have used: http://code.google.com/p/bibstuff/ For example, let's say in your ReST_ page ``example.rst`` you have something like this:: I here cite the VTK book [VTK4]_ and you've got a bibtex entry starting ``@book{VTK4,`` in a file ``vtk.bib``, then you could run this command:: bib4txt.py -i example.rst vtk.bib which would output, to the terminal, the ReST_ text you could add to the bottom of ``example.rst`` to create the reference. nipy-0.3.0/doc/bibtex/vtk.bib000066400000000000000000000003561210344137400160000ustar00rootroot00000000000000@book{VTK4, author={Will Schroeder and Ken Martin and Bill Lorensen}, title={{The Visualization Toolkit--An Object-Oriented Approach To 3D Graphics}}, publisher={Kitware, Inc.}, edition={Fourth}, year={2006} } nipy-0.3.0/doc/conf.py000066400000000000000000000147461210344137400145500ustar00rootroot00000000000000# emacs: -*- coding: utf-8; mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: # # sampledoc documentation build configuration file, created by # sphinx-quickstart on Tue Jun 3 12:40:24 2008. # # This file is execfile()d with the current directory set to its containing dir. # # The contents of this file are pickled, so don't put values in the namespace # that aren't pickleable (module imports are okay, they're removed automatically). # # All configuration values have a default value; values that are commented out # serve to show the default value. import sys, os # If your extensions are in another directory, add it here. If the directory # is relative to the documentation root, use os.path.abspath to make it # absolute, like shown here. sys.path.append(os.path.abspath('sphinxext')) # Get project related strings. 
Please do not change this line to use # execfile because execfile is not available in Python 3 _info_fname = os.path.join('..', 'nipy', 'info.py') rel = {} exec(open(_info_fname, 'rt').read(), {}, rel) # Import support for ipython console session syntax highlighting (lives # in the sphinxext directory defined above) import ipython_console_highlighting # General configuration # --------------------- # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.pngmath', 'sphinx.ext.autosummary', 'inheritance_diagram', 'numpy_ext.numpydoc', 'matplotlib.sphinxext.plot_directive', 'matplotlib.sphinxext.only_directives', # needed for above ] # Autosummary on autosummary_generate=True # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General substitutions. project = 'nipy' #copyright = ':ref:`2005-2010, Neuroimaging in Python team. `' copyright = '2005-2012, Neuroimaging in Python team' # The default replacements for |version| and |release|, also used in various # other places throughout the built documents. # # The short X.Y version. version = rel['__version__'] # The full version, including alpha/beta/rc tags. release = version # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. unused_docs = [] # List of directories, relative to source directories, that shouldn't # be searched for source files. # exclude_trees = [] # what to put into API doc (just class doc, just init, or both) autoclass_content = 'class' # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # Options for HTML output # ----------------------- # # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. html_theme = 'sphinxdoc' # The style sheet to use for HTML and HTML Help pages. A file of that name # must exist either in Sphinx' static/ path, or in one of the custom paths # given in html_static_path. html_style = 'nipy.css' # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". html_title = 'NIPY Documentation' # The name of an image file (within the static path) to place at the top of # the sidebar. #html_logo = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. 
html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Content template for the index page. html_index = 'index.html' # Custom sidebar templates, maps document names to template names. # html_sidebars = {'index': 'indexsidebar.html'} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_use_modindex = True # If true, the reST sources are included in the HTML build as _sources/. html_copy_source = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = project # Options for LaTeX output # ------------------------ # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, document class # [howto/manual]). latex_documents = [ ('documentation', 'nipy.tex', 'Neuroimaging in Python Documentation', ur'Neuroimaging in Python team.','manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. latex_use_parts = True # Additional stuff for the LaTeX preamble. latex_preamble = """ \usepackage{amsmath} \usepackage{amssymb} % Uncomment these two if needed %\usepackage{amsfonts} %\usepackage{txfonts} """ # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. latex_use_modindex = True nipy-0.3.0/doc/devel/000077500000000000000000000000001210344137400143345ustar00rootroot00000000000000nipy-0.3.0/doc/devel/code_discussions/000077500000000000000000000000001210344137400176745ustar00rootroot00000000000000nipy-0.3.0/doc/devel/code_discussions/brainvisa_repositories.rst000066400000000000000000000211171210344137400252150ustar00rootroot00000000000000.. _brainvisa_repositories: Can NIPY get something interesting from BrainVISA databases? ============================================================ I wrote this document to try to give more information to the NIPY developers about the present and future of :term:`BrainVISA` database system. I hope it will serve the discussion opened by Jarrod Millman about a possible collaboration between the two projects on this topic. Unfortunately, I do not know other projects providing similar features (such as BIRN) so I will only focus on BrainVISA. Yann Cointepas 2006-11-21 Introduction ------------ In BrainVISA, all the database system is home made and written in Python. This system is based on the file system and allows to do requests for both reading and writing (get the name of non existing files). We will change this in the future by defining an API (such the one introduced below) and by using at least two implementations, one relying on a relational database system and one compatible with the actual database system. 
Having one single API will make it possible, for instance, to work on huge databases located on servers and on smaller databases located in a laptop directory (with some synchronization features). This system will be independent from the BrainVISA application, it could be packaged separately. Unfortunately, we cannot say when this work will be done (our developments are slowed because all our lab will move in a new institute in January 2007). Here is a summary describing actual BrainVISA database system and some thoughts of what it may become. What is a database in BrainVISA today? -------------------------------------- A directory is a BrainVISA database if the structure of its sub-directories and the file names in this directory respect a set of rules. These rules make it possible to BrainVISA to scan the whole directory contents and to identify without ambiguity the database elements. These elements are composed of the following information: * *Data type:* identify the contents of a data (image, mesh, functional image, anatomical RM, etc). The data types are organized in hierarchy making it possible to decline a generic type in several specialized types. For example, there is a 4D Image type which is specialized in 3D Image. 3D Image is itself declined in several types of which T1 MRI and Brain mask. * *File format:* Represent the format of files used to record a data. BrainVISA is able to recognize several file formats (for example DICOM, Analyze/SPM, GIS, etc). It is easy to add new data formats and to provide converters to make it possible for existing processes to use these new formats. * *Files:* contains the names of the files (and/or directories) used to record the data. * *Attributes:* an attribute is an association between a name and a value. A set of attributes is associated with each element of BrainVISA database. This set represents all of the characteristics of a data (as the image size, the name of the protocol corresponding to the data or the acquisition parameters). Attributes values are set by BrainVISA during directory scanning (typically protocol, group, subject, etc.). It is possible to completely define the set of rules used to convert a directory in a BrainVISA database. That allows the use of BrainVISA without having to modify an existing file organization. However, the writing of such a system of rules requires very good knowledge of BrainVISA. This is why BrainVISA is provided with a default data organization system that can be used easily. A database can be used for deciding where to write data. The set of rules is used to generate the appropriate file name according to the data type, file format and attributes. This is a key feature that greatly helps the users and allow automation. It is not mandatory to use a database to process data with BrainVISA. However, some important features are not available when you are using data which are not in a database. For example, the BrainVISA ability to construct a default output file name when an input data is selected in a process relies on the database system. Moreover, some processes use the database system to find data; for example, the brain mask viewer tries to find the T1 MRI used to build the brain mask in order to superimpose both images in an Anatomist window. A few thoughts about a possible API for repositories ---------------------------------------------------- I think the most important point for data repositories is to define an user API. 
This API should be independent of data storage and of data organization. Data organization is important because it is very difficult to find a single organization that covers the needs of all users in the long term. In this API, each data item should have an unique identifier (let’s call it an URL). The rest of the API could be divided in two parts: #. An indexation system managing data organization. It defines properties attached to data items (for instance, “group” or “subject” can be seen as properties of an FMRI image) as well as possible user requests on the data. This indexation API could have several implementations (relational database, BIRN, BrainVISA, etc.). #. A data storage system managing the link between the URL of a data item and its representation on a local file system. This system should take into account various file formats and various file storage systems (e.g. on a local file system, on a distant ftp site, as bytes blocks in a relational database). This separation between indexation and storage is important for the design of databases, it makes it possible, for instance, to use distant or local data storage, or to define several indexations (i.e. several data organizations) for the same data. However indexation and data storage are not always independent. For example, they are independent if we use a relational database for indexation and URLs for storage, but they are not if file or directory names give indexation information (like in BrainVISA databases described above). At the user level, things can be simpler because the separation can be hidden in one object: the repository. A repository is composed of one indexation system and one data storage system and manage all the links between them. The user can send requests to the repository and receive a set of data items. Each data item contains indexation information (via the indexation system) and gives access to the data (via the storage system). Here is a sample of what-user-code-could-be to illustrate what I have in mind followed by a few comments: :: # Get an acces to one repository repository = openRepository( repositoryURL ) # Create a request for selection of all the FMRI in the repository request = ‘SELECT * FROM FMRI’ # Iterate on data items in the repository for item in repository.select( request ): print item.url # Item is a directory-like structure for properties access for property in item: print property, ‘=’, item[ property ] # Retrieve the file(s) (and directorie(s) if any) from the data storage system # and convert it to NIFTI format (if necessary). files = item.getLocalFiles( format=’NIFTI’ ) niftiFileName = files[ 0 ] # Read the image and do something with it ... #. I do not yet have a good idea of how to represent requests. Here, I chose to use SQL since it is simple to understand. #. This code does not make any assumption on the properties that are associated to an FMRI image. #. The method getLocalFiles can do nothing more than return a file name if the data item correspond to a local file in NIFTI format. But the same code can be used to acces a DICOM image located in a distant ftp server. In this case, getLocalFiles will manage the transfer of the DICOM file, then the conversion to the required NIFTI format and return name of temporary file(s). #. getLocalFiles cannot always return just one file name because on the long term, there will be many data types (FMRI, diffusion MRI, EEG, MEG, etc.) that are going to be stored in the repositories. These different data will use various file formats. 
Some of these formats can use a combination of files and directories (for instance, CTF MEG raw data are stored in a directory (``*.ds``), the structural sulci format of BrainVISA is composed of a file(``*.arg``) and a directory (``*.data``), NIFTI images can be in one or two files, etc. ). #. The same kind of API can be used for writing data items in a repository. One could build a data item, adds properties and files and call something like repository.update( item ). nipy-0.3.0/doc/devel/code_discussions/comparisons/000077500000000000000000000000001210344137400222315ustar00rootroot00000000000000nipy-0.3.0/doc/devel/code_discussions/comparisons/index.rst000066400000000000000000000002601210344137400240700ustar00rootroot00000000000000.. _comparisons: ================= Software Design ================= .. only:: html :Release: |version| :Date: |today| .. toctree:: :maxdepth: 2 vtk_datasets nipy-0.3.0/doc/devel/code_discussions/comparisons/vtk_datasets.rst000066400000000000000000000135631210344137400254670ustar00rootroot00000000000000============== VTK datasets ============== Here we describe the VTK dataset model, because of some parallels with our own idea of an image object. The document is from the VTK book - [VTK4]_ See also: * http://code.enthought.com/projects/mayavi/docs/development/html/mayavi/data.html#vtk-data-structures * http://code.enthought.com/projects/mayavi/docs/development/html/mayavi/auto/example_datasets.html * http://www.vtk.org/Wiki/VTK/Writing_VTK_files_using_python * http://www.vtk.org/VTK/img/file-formats.pdf * https://svn.enthought.com/enthought/attachment/wiki/MayaVi/tvtk_datasets.pdf?format=raw * http://public.kitware.com/cgi-bin/viewcvs.cgi/*checkout*/Examples/DataManipulation/Python/BuildUGrid.py?root=VTK&content-type=text/plain What is a VTK dataset? ====================== VTK datasets represent discrete spatial data. Datasets consist of two components: * *organizing structure* - the topology and geometry * *data attributes* - data that can be attached to the topology / geometry above. Structure: topology / geometry ------------------------------ The structure part of a dataset is the part that gives the position and connection of points in 3D space. Let us first import *vtk* for our code examples. >>> import vtk An *id* is an index into a given vector --------------------------------------- We introduce *id* to explain the code below. An id is simply an index into a vector, and is therefore an integer. Of course the id identifies the element in the vector; as long as you know which vector the id refers to, you can identify the element. >>> pts = vtk.vtkPoints() >>> id = pts.InsertNextPoint(0, 0, 0) >>> id == 0 True >>> id = pts.InsertNextPoint(0, 1, 0) >>> id == 1 True >>> pts.GetPoint(1) == (0.0, 1.0, 0.0) True A dataset has one or more points ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Points have coordinates in 3 dimensions, in the order ``x``, ``y``, ``z`` - see http://www.vtk.org/doc/release/5.4/html/a00374.html - ``GetPoint()`` >>> pts = vtk.vtkPoints() >>> pts.InsertNextPoint(0, 0) # needs 3 coordinates Traceback (most recent call last): ... TypeError: function takes exactly 3 arguments (2 given) >>> _ = pts.InsertNextPoint(0, 0, 0) # returns point index in point array >>> pts.GetPoint(0) (0.0, 0.0, 0.0) >>> _ = pts.InsertNextPoint(0, 1, 0) >>> _ = pts.InsertNextPoint(0, 0, 1) A dataset has one or more cells ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A cell is a local specification of the connection between points - an atom of topology in VTK. 
A cell has a type, and a list of point ids. The point type determines (by convention) what the connectivity of the list of points should be. For example we can make a cell of type ``vtkTriangle``. The first point starts the triangle, the next point is the next point in the triangle counterclockwise, connected to the first and third, and the third is the remaining point, connected to the first and second. >>> VTK_TRIANGLE = 5 # A VTK constant identifying the triangle type >>> triangle = vtk.vtkTriangle() >>> isinstance(triangle, vtk.vtkCell) True >>> triangle.GetCellType() == VTK_TRIANGLE True >>> pt_ids = triangle.GetPointIds() # these are default (zeros) at the moment >>> [pt_ids.GetId(i) for i in range(pt_ids.GetNumberOfIds())] == [0, 0, 0] True Here we set the ids. The ids refer to the points above. The system does not know this yet, but it will because, later, we are going to associate this cell with the points, in a dataset object. >>> for i in range(pt_ids.GetNumberOfIds()): pt_ids.SetId(i, i) Associating points and cells ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We make the most general possible of VTK datasets - the unstructured grid. >>> ugrid = vtk.vtkUnstructuredGrid() >>> ugrid.Allocate(1, 1) >>> ugrid.SetPoints(pts) >>> id = ugrid.InsertNextCell(VTK_TRIANGLE, pt_ids) Data attributes --------------- So far we have specified a triangle, with 3 points, but no associated data. You can associate data with cells, or with points, or both. Point data associates values (e.g. scalars) with the points in the dataset. Cell data associates values (e.g. scalars) with the cells - in this case one (e.g) scalar value with the whole triangle. >>> pt_data = ugrid.GetPointData() >>> cell_data = ugrid.GetCellData() There are many data attributes that can be set, include scalars, vectors, normals (normalized vectors), texture coordinates and tensors, using (respectively) ``{pt|cell|_data.{Get|Set}{Scalars|Vectors|Normals|TCoords|Tensors}``. For example: >>> pt_data.GetScalars() is None True But we can set the scalar (or other) data: >>> tri_pt_data = vtk.vtkFloatArray() >>> for i in range(3): _ = tri_pt_data.InsertNextValue(i) >>> _ = pt_data.SetScalars(tri_pt_data) To the cells as well, or instead, if we want. Don't forget there is only one cell. >>> tri_cell_data = vtk.vtkFloatArray() >>> _ = tri_cell_data.InsertNextValue(3) >>> _ = cell_data.SetScalars(tri_cell_data) You can set different types of data into the same dataset: >>> tri_pt_vecs = vtk.vtkFloatArray() >>> tri_pt_vecs.SetNumberOfComponents(3) >>> tri_pt_vecs.InsertNextTuple3(1, 1, 1) >>> tri_pt_vecs.InsertNextTuple3(2, 2, 2) >>> tri_pt_vecs.InsertNextTuple3(3, 3, 3) >>> _ = pt_data.SetVectors(tri_pt_vecs) If you want to look at what you have, run this code :: # ..testcode:: when live # make a dataset mapper and actor for our unstructured grid mapper = vtk.vtkDataSetMapper() mapper.SetInput(ugrid) actor = vtk.vtkActor() actor.SetMapper(mapper) # Create the usual rendering stuff. ren = vtk.vtkRenderer() renWin = vtk.vtkRenderWindow() renWin.AddRenderer(ren) iren = vtk.vtkRenderWindowInteractor() iren.SetRenderWindow(renWin) # add the actor ren.AddActor(actor) # Render the scene and start interaction. iren.Initialize() renWin.Render() iren.Start() .. [VTK4] Schroeder, Will, Ken Martin, and Bill Lorensen. (2006) *The Visualization Toolkit--An Object-Oriented Approach To 3D Graphics*. : Kitware, Inc. nipy-0.3.0/doc/devel/code_discussions/coordmap_notes.rst000066400000000000000000001114741210344137400234520ustar00rootroot00000000000000.. 
_coordmap-discussion: ######################################## Some discussion notes on coordinate maps ######################################## These notes contain some email discussion between Jonathan Taylor, Bertrand Thirion and Gael Varoquaux about coordinate maps, coordinate systems and transforms. They are a little bit rough and undigested in their current form, but they might be useful for background. The code and discussion below mentions ideas like ``LPIImage``, ``XYZImage`` and ``AffineImage``. These were image classes that constrained their coordinate maps to have input and output axes in a particular order. We eventually removed these in favor of automated reordering of image axes on save, and explicit reordering of images that needed known axis ordering. .. some working notes :: import sympy i, j, k = sympy.symbols('i, j, k') np.dot(np.array([[0,0,1],[1,0,0],[0,1,0]]), np.array([i,j,k])) kij = CoordinateSystem('kij') ijk_to_kij = AffineTransform(ijk, kij, np.array([[0,0,1,0],[1,0,0,0],[0,1,0,0],[0,0,0,1]])) ijk_to_kij([i,j,k]) kij = CoordinateSystem('kij') ijk_to_kij = AffineTransform(ijk, kij, np.array([[0,0,1,0],[1,0,0,0],[0,1,0,0],[0,0,0,1]])) ijk_to_kij([i,j,k]) kij_to_RAS = compose(ijk_to_kij, ijk_to_RAS) kij_to_RAS = compose(ijk_to_RAS,ijk_to_kij) kij_to_RAS = compose(ijk_to_RAS,ijk_to_kij.inverse()) kij_to_RAS kij = CoordinateSystem('kij') ijk_to_kij = AffineTransform(ijk, kij, np.array([[0,0,1,0],[1,0,0,0],[0,1,0,0],[0,0,0,1]])) # Check that it does the right permutation ijk_to_kij([i,j,k]) # Yup, now let's try to make a kij_to_RAS transform # At first guess, we might try kij_to_RAS = compose(ijk_to_RAS,ijk_to_kij) # but we have a problem, we've asked for a composition that doesn't make sense kij_to_RAS = compose(ijk_to_RAS,ijk_to_kij.inverse()) kij_to_RAS # check that things are working -- I should get the same value at i=20,j=30,k=40 for both mappings, only the arguments are reversed ijk_to_RAS([i,j,k]) kij_to_RAS([k,i,j]) another_kij_to_RAS = ijk_to_RAS.reordered_domain('kij') another_kij_to_RAS([k,i,j]) # rather than finding the permuation matrix your self another_kij_to_RAS = ijk_to_RAS.reordered_domain('kij') another_kij_to_RAS([k,i,j]) >>> ijk = CoordinateSystem('ijk', coord_dtype=np.array(sympy.Symbol('x')).dtype) >>> xyz = CoordinateSystem('xyz', coord_dtype=np.array(sympy.Symbol('x')).dtype) >>> x_start, y_start, z_start = [sympy.Symbol(s) for s in ['x_start', 'y_start', 'z_start']] >>> x_step, y_step, z_step = [sympy.Symbol(s) for s in ['x_step', 'y_step', 'z_step']] >>> i, j, k = [sympy.Symbol(s) for s in 'ijk'] >>> T = np.array([[x_step,0,0,x_start],[0,y_step,0,y_start],[0,0,z_step,z_start],[0,0,0,1]]) >>> T array([[x_step, 0, 0, x_start], [0, y_step, 0, y_start], [0, 0, z_step, z_start], [0, 0, 0, 1]], dtype=object) >>> A = AffineTransform(ijk, xyz, T) >>> A AffineTransform( function_domain=CoordinateSystem(coord_names=('i', 'j', 'k'), name='', coord_dtype=object), function_range=CoordinateSystem(coord_names=('x', 'y', 'z'), name='', coord_dtype=object), affine=array([[x_step, 0, 0, x_start], [0, y_step, 0, y_start], [0, 0, z_step, z_start], [0, 0, 0, 1]], dtype=object) ) >>> A([i,j,k]) array([x_start + i*x_step, y_start + j*y_step, z_start + k*z_step], dtype=object) >>> # this is another >>> A_kij = A.reordered_domain('kij') >>> A_kij AffineTransform( function_domain=CoordinateSystem(coord_names=('k', 'i', 'j'), name='', coord_dtype=object), function_range=CoordinateSystem(coord_names=('x', 'y', 'z'), name='', coord_dtype=object), affine=array([[0, 
x_step, 0, x_start], [0, 0, y_step, y_start], [z_step, 0, 0, z_start], [0.0, 0.0, 0.0, 1.0]], dtype=object) ) >>> >>> A_kij([k,i,j]) array([x_start + i*x_step, y_start + j*y_step, z_start + k*z_step], dtype=object) >>> # let's look at another reordering >>> A_kij_yzx = A_kij.reordered_range('yzx') >>> A_kij_yzx AffineTransform( function_domain=CoordinateSystem(coord_names=('k', 'i', 'j'), name='', coord_dtype=object), function_range=CoordinateSystem(coord_names=('y', 'z', 'x'), name='', coord_dtype=object), affine=array([[0, 0, y_step, y_start], [z_step, 0, 0, z_start], [0, x_step, 0, x_start], [0, 0, 0, 1.00000000000000]], dtype=object) ) >>> A_kij_yzx([k,i,j]) array([y_start + j*y_step, z_start + k*z_step, x_start + i*x_step], dtype=object) >>> class RASTransform(AffineTransform): """ An AffineTransform with output, i.e. range: x: units of 1mm increasing from Right to Left y: units of 1mm increasing from Anterior to Posterior z: units of 1mm increasing from Superior to Inferior """ def reorder_range(self): raise ValueError('not allowed to reorder the "xyz" output coordinates') def to_LPS(self): from copy import copy return AffineTransform(copy(self.function_domain), copy(self.function_range), np.dot(np.diag([-1,-1,1,1], self.affine)) class LPSTransform(AffineTransform): """ An AffineTransform with output, i.e. range: x: units of 1mm increasing from Left to Right y: units of 1mm increasing from Posterior to Anterior z: units of 1mm increasing from Inferior to Superior """ def reorder_range(self): raise ValueError('not allowed to reorder the "xyz" output coordinates') def to_RAS(self): from copy import copy return AffineTransform(copy(self.function_domain), copy(self.function_range), np.dot(np.diag([-1,-1,1,1], self.affine))) class NeuroImage(Image): def __init__(self, data, affine, axis_names, world='world-RAS'): affine_transform = {'LPS':LPSTransform, 'RAS':RAITransform}[world])(axis_names[:3], "xyz", affine} ... LPIImage only forced it to be of one type. Email #1 -------- Excuse the long email but I started writing, and then it started looking like documentation. I will put most of it into doc/users/coordinate_map.rst. Also, I am not sure what this means. The image is in LPI ordering, only if the reference frame of the world space it is pointing to is. I am proposing we enforce the world space to have this frame of reference to be explicit so that you could tell left from right on an image after calling xyz_ordered(). If it is pointing to MNI152 (or Talairach), then x=Left to Right, y=Posterior to Anterior, and z=Inferior to Superior. If not, you are not in MNI152. Moreover, according to the FSL docs, the whole 'anatomical' versus 'neurological' mess that I hear has been a long standing problem has nothing to do with the target frame of reference, but only with the way the data is stored. I think the LPI designation simply specifies "x=Left to Right, y=Posterior to Anterior, and z=Inferior to Superior" so any MNI152 or Tailarach would be in LPI coordinates, that's all I'm trying to specify with the designation "LPI". If MNI152 might imply a certain voxel size, then I would prefer not to use MNI152. If there's a better colour for the bike shed, then I'll let someone else paint it, :) This LPI specification actually makes a difference to the "AffineImage/LPIImage.xyz_ordered" method. 
If, in the interest of being explicit, we would enforce the direction of x,y,z in LPI/Neuro/AffineImage, then the goal of having "xyz_ordered" return an image with an affine that has a diagonal with positive entries, as in the AffineImage specification, means that you might have to call affine_image.get_data()[::-1,::-1] # or some other combination of flips (i.e. you have to change how it is stored in memory). The other way to return an diagonal affine with positive entries is to flip send x to -x, y to -y, i.e. multiply the diagonal matrix by np.diag([-1,-1,1,1]) on the left. But then your AffineImage would now have "x=Right to Left, y=Anterior to Posterior" and we have lost the interpretation of x,y,z as LPI coordinates. By being explicit about the direction of x,y,z we know that if the affine matrix was diagonal and had a negative entry in the first position, then we know that left and right were flipped when viewed with a command like:: >>> pylab.imshow(image.get_data()[:,:,10]) Without specifying the direction of x,y,z we just don't know. You can of course create a new coordinate system describing, for instance the scanner space, where the first coordinnate is not x, and the second not y, ... but I am not sure what this means: x, y, and z, as well as left or right, are just names. The only important information between two coordinate systems is the transform linking them. The sentence: "The only important information between two coordinate systems is the transform linking them." has, in one form or another, often been repeated in NiPy meetings, but no one bothers to define the terms in this sentence. So, I have to ask what is your definition of "transform" and "coordinate system"? I have a precise definition, and the names are part of it. Let's go through that sentence. Mathematically, if a transform is a function, then a transform knows its domain and its range so it knows the what the coordinate systems are. So yes, with transform defined as "function", if I give you a transform between two coordinate systems (mathematical spaces of some kind) the only important information about it is itself. The problem is that, for a 4x4 matrix T, the python function transform_function = lambda v: np.dot(T, np.hstack([v,1])[:3] has a "duck-type" domain that knows nothing about image acquisition and a range inferred by numpy that knows nothing about LPI or MNI152. The string "coord_sys" in AffineImage is meant to imply that its domain and range say it should be interpreted in some way, but it is not explicit in AffineImage. (Somewhere around here, I start veering off into documentation.... sorry). To me, a "coordinate system" is a basis for a vector space (sometimes you might want transforms between integers but ignore them for now). It's not even a description of an affine subspace of a vector space, (see e.g. http://en.wikipedia.org/wiki/Affine_transformation). To describe such an affine subspace, "coordinate system" would need one more piece of information, the "constant" or "displacement" vector of the affine subspace. Because it's a basis, each element in the basis can be identified by a name, so the transform depends on the names because that's how I determine a "coordinate system" and I need "coordinate systems" because they are what the domain and range of my "transform" are going to be. 
For instance, this describes the range "coordinate system" of a "transform" whose output is in LPI coordinates: "x" = a unit vector of length 1mm pointing in the Left to Right direction "y" = a unit vector of length 1mm pointing in the Posterior to Anterior direction "z" = a unit vector of length 1mm pointing in the Inferior to Superior direction OK, so that's my definition of "coordinate system" and the names are an important part of it. Now for the "transform" which I will restrict to be "affine transform". To me, this is an affine function or transformation between two vector spaces (we're not even considering affine transformations between affine spaces). I bring up the distinction because generally affine transforms act on affine spaces rather than vector spaces. A vector space is an affine subspace of itself with "displacement" vector given by its origin, hence it is an affine space and so we can define affine functions on vector spaces. Because it is an affine function, the mathematical image of the domain under this function is an affine subspace of its range (which is a vector space). The "displacement" vector of this affine subspace is represented by the floats in b where A,b = to_matvec(T) (once I have specified a basis for the range of this function). Since my "affine transform" is a function between two vector spaces, it should have a domain that is a vector space, as well. For the "affine transform" associated with an Image, this domain vector space has coordinates that can be interpreted as array coordinates, or coordinates in a "data cube". Depending on the acquisition parameters, these coordinates might have names like "phase", "freq", "slice". Now, I can encode all this information in a tuple: (T=a 4x4 matrix of floats with bottom row [0,0,0,1], ('phase', 'freq', "slice"), ('x','y','z')) >>> import numpy as np >>> from nipy.core.api import CoordinateSystem, AffineTransform >>> acquisition = ('phase', 'freq', 'slice') >>> xyz_world = ('x','y','z') >>> T = np.array([[2,0,0,-91.095],[0,2,0,-129.51],[0,0,2,-73.25],[0,0,0,1]]) >>> AffineTransform(CoordinateSystem(acquisition), CoordinateSystem(xyz_world), T) AffineTransform( function_domain=CoordinateSystem(coord_names=('phase', 'freq', 'slice'), name='', coord_dtype=float64), function_range=CoordinateSystem(coord_names=('x', 'y', 'z'), name='', coord_dtype=float64), affine=array([[ 2. , 0. , 0. , -91.095], [ 0. , 2. , 0. , -129.51 ], [ 0. , 0. , 2. , -73.25 ], [ 0. , 0. , 0. , 1. ]]) ) The float64 appearing above is a way of specifying that the "coordinate systems" are vector spaces over the real numbers, rather than, say the complex numbers. It is specified as an optional argument to CoordinateSystem. Compare this to the way a MINC file is described:: jtaylo@ubuntu:~$ mincinfo data.mnc file: data.mnc image: signed__ short -32768 to 32767 image dimensions: zspace yspace xspace dimension name length step start -------------- ------ ---- ----- zspace 84 2 -73.25 yspace 114 2 -129.51 xspace 92 2 -91.095 jtaylo@ubuntu:~$ jtaylo@ubuntu:~$ mincheader data.mnc netcdf data { dimensions: zspace = 84 ; yspace = 114 ; xspace = 92 ; variables: double zspace ; zspace:varid = "MINC standard variable" ; zspace:vartype = "dimension____" ; zspace:version = "MINC Version 1.0" ; zspace:comments = "Z increases from patient inferior to superior" ; zspace:spacing = "regular__" ; zspace:alignment = "centre" ; zspace:step = 2. 
; zspace:start = -73.25 ; zspace:units = "mm" ; double yspace ; yspace:varid = "MINC standard variable" ; yspace:vartype = "dimension____" ; yspace:version = "MINC Version 1.0" ; yspace:comments = "Y increases from patient posterior to anterior" ; yspace:spacing = "regular__" ; yspace:alignment = "centre" ; yspace:step = 2. ; yspace:start = -129.509994506836 ; yspace:units = "mm" ; double xspace ; xspace:varid = "MINC standard variable" ; xspace:vartype = "dimension____" ; xspace:version = "MINC Version 1.0" ; xspace:comments = "X increases from patient left to right" ; xspace:spacing = "regular__" ; xspace:alignment = "centre" ; xspace:step = 2. ; xspace:start = -91.0950012207031 ; xspace:units = "mm" ; short image(zspace, yspace, xspace) ; image:parent = "rootvariable" ; image:varid = "MINC standard variable" ; image:vartype = "group________" ; image:version = "MINC Version 1.0" ; image:complete = "true_" ; image:signtype = "signed__" ; image:valid_range = -32768., 32767. ; image:image-min = "--->image-min" ; image:image-max = "--->image-max" ; int rootvariable ; rootvariable:varid = "MINC standard variable" ; rootvariable:vartype = "group________" ; rootvariable:version = "MINC Version 1.0" ; rootvariable:parent = "" ; rootvariable:children = "image" ; double image-min ; image-min:varid = "MINC standard variable" ; image-min:vartype = "var_attribute" ; image-min:version = "MINC Version 1.0" ; image-min:_FillValue = 0. ; image-min:parent = "image" ; double image-max ; image-max:varid = "MINC standard variable" ; image-max:vartype = "var_attribute" ; image-max:version = "MINC Version 1.0" ; image-max:_FillValue = 1. ; image-max:parent = "image" ; data: zspace = 0 ; yspace = 0 ; xspace = 0 ; rootvariable = _ ; image-min = -50 ; image-max = 50 ; } I like the MINC description, but the one thing missing in this file is the ability to specify ('phase', 'freq', 'slice'). It may be possible to add it but I'm not sure, it certainly can be added by adding a string to the header. It also mixes the definition of the basis with the affine transformation (look at the output of mincheader which says that yspace has step 2). The NIFTI-1 standard allows limited possibilities to specify ('phase', 'freq', 'slice') this with its dim_info byte but there are pulse sequences for which these names are not appropriate. One might ask: why bother making a "coordinate system" for the voxels. Well, this is part of my definition of "affine transform". More importantly, it separates the notion of world axes ('x','y','z') and voxel indices ('i','j','k'). There is at least one use case, slice timing, a key step in the fMRI pipeline, where we need to know which spatial axis is slice. One solution would be to just add an attribute to AffineImage called "slice_axis" but then, as Gael says, the possibilites for axis names are infinite, what if we want an attribute for "group_axis"? AffineTransform provides an easy way to specify an axis as "slice": >>> unknown_acquisition = ('i','j','k') >>> A = AffineTransform(CoordinateSystem(unknown_acquisition), ... CoordinateSystem(xyz_world), T) After some deliberation, we find out that the third axis is slice... >>> A.renamed_domain({'k':'slice'}) AffineTransform( function_domain=CoordinateSystem(coord_names=('i', 'j', 'slice'), name='', coord_dtype=float64), function_range=CoordinateSystem(coord_names=('x', 'y', 'z'), name='', coord_dtype=float64), affine=array([[ 2. , 0. , 0. , -91.095], [ 0. , 2. , 0. , -129.51 ], [ 0. , 0. , 2. , -73.25 ], [ 0. , 0. , 0. , 1. 
]]) ) Another question one might ask is: why bother allowing non-4x4 affine matrices like: >>> AffineTransform.from_params('ij', 'xyz', np.array([[2,3,1,0],[3,4,5,0],[7,9,3,1]]).T) AffineTransform( function_domain=CoordinateSystem(coord_names=('i', 'j'), name='', coord_dtype=float64), function_range=CoordinateSystem(coord_names=('x', 'y', 'z'), name='', coord_dtype=float64), affine=array([[ 2., 3., 7.], [ 3., 4., 9.], [ 1., 5., 3.], [ 0., 0., 1.]]) ) For one, it allows very clear specification of a 2-dimensional plane (i.e. a 2-dimensional affine subspace of some vector spce) called P, in, say, the LPI "coordinate system". Let's say we want the plane in LPI-world corresponding to "j=30" for im above. (I guess that's coronal?) Make an affine transform that maps (i,k) -> (i,30,k): >>> j30 = AffineTransform(CoordinateSystem('ik'), CoordinateSystem('ijk'), np.array([[1,0,0],[0,0,30],[0,1,0],[0,0,1]])) >>> j30 AffineTransform( function_domain=CoordinateSystem(coord_names=('i', 'k'), name='', coord_dtype=float64), function_range=CoordinateSystem(coord_names=('i', 'j', 'k'), name='', coord_dtype=float64), affine=array([[ 1., 0., 0.], [ 0., 0., 30.], [ 0., 1., 0.], [ 0., 0., 1.]]) ) Its dtype is np.float since we didn't specify np.int in constructing the CoordinateSystems: >>> from nipy.core.api import compose >>> j30_to_XYZ = compose(A, j30) >>> j30_to_XYZ AffineTransform( function_domain=CoordinateSystem(coord_names=('i', 'k'), name='', coord_dtype=float64), function_range=CoordinateSystem(coord_names=('x', 'y', 'z'), name='', coord_dtype=float64), affine=array([[ 2. , 0. , -91.095], [ 0. , 0. , -69.51 ], [ 0. , 2. , -73.25 ], [ 0. , 0. , 1. ]]) ) This could be used to resample any RAS Image on the coronal plane y=-69.51 with voxels of size 2mm x 2mm starting at x=-91.095 and z=-73.25. Of course, this doesn't seem like a very natural slice. The module :mod:`nipy.core.reference.slices` has some convenience functions for specifying slices. >>> from nipy.core.reference.slices import yslice, bounding_box >>> x_spec = ([-92,92], 93) # voxels of size 2 in x, starting at -92, ending at 92 >>> z_spec = ([-70,100], 86) # voxels of size 2 in z, starting at -70, ending at 100 When specifying a *y* slice - we have to know what "y" means. In order for "y" to have meaning, we need to specify the name of an output (range) space that has a defined "y". In this case we use MNI space: >>> y70 = yslice(70, x_spec, z_spec, 'mni') >>> y70 AffineTransform( function_domain=CoordinateSystem(coord_names=('i_x', 'i_z'), name='slice', coord_dtype=float64), function_range=CoordinateSystem(coord_names=('mni-x=L->R', 'mni-y=P->A', 'mni-z=I->S'), name='mni', coord_dtype=float64), affine=array([[ 2., 0., -92.], [ 0., 0., 70.], [ 0., 2., -70.], [ 0., 0., 1.]]) ) >>> bounding_box(y70, (x_spec[1], z_spec[1])) ((-92.0, 92.0), (70.0, 70.0), (-70.0, 100.0)) Maybe these aren't things that "normal human beings" (to steal a quote from Gael) can use, but they're explicit and they are tied to precise mathematical objects. Email #2 --------- I apologize again for the long emails, but I'm glad we. as a group, are having this discussion electronically. Usually, our discussions of CoordinateMap begin with Matthew standing in front of a white board with a marker and asking a newcomer, "Are you familiar with the notion of a transformation, say, from voxel to world?" :) Where they go after that really depends on the kind of day everyone's having... 
:) These last two emails also have the advantage that most of them can go right into doc/users/coordinate_map.rst.

I agree with Gael that LPIImage is an obscure name.

OK. I already know that people often don't agree with names I choose, just ask Matthew. :) I just wanted to choose a name that is as explicit as possible. Since I'm neither a neuroscientist nor an MRI physicist but a statistician, I have no idea what it really means. I found it mentioned in this link below and John Ollinger mentioned LPI in another email thread http://afni.nimh.nih.gov/afni/community/board/read.php?f=1&i=9140&t=9140 I was suggesting we use a well-established term, apparently LPI is not well-established. :)

Does LPS mean (left, posterior, superior)? Doesn't that suggest that LPI means (left, posterior, inferior) and RAI means (right, anterior, inferior)? If so, then good, now I know what LPI means and I'm not a neuroscientist or an MRI physicist, :)

We can call the images RASImages, or at least let's call their AffineTransform RASTransforms, or we could have NeuroImages that can only have RASTransforms or LPSTransforms, i.e. NeuroTransforms that have a ``world`` property, so that NeuroImage raises an exception like this::

    @property
    def world(self):
        return self.affine_transform.function_range

    if (self.world.name not in ['world-RAS', 'world-LPS'] or
        self.world.coord_names != ('x', 'y', 'z')):
        raise ValueError("the output space must be named one of "
                         "['world-RAS','world-LPS'] and "
                         "the axes must be ('x', 'y', 'z')")

    _doc['world'] = "World space, one of ['world-RAS', 'world-LPS']. If it is 'world-LPS', then x increases from patient's left to right, y increases posterior to anterior, z increases superior to inferior. If it is 'world-RAS' then x increases patient's right to left, y increases posterior to anterior, z increases superior to inferior."

I completely abdicate any responsibility for deciding which acronym to choose; someone who can use rope can just change every lpi/LPI to ras/RAS. I just want it explicit. I also want some version of these phrases "x increases from patient's right to left", "y increases from posterior to anterior", "z increases from superior to inferior" somewhere in a docstring for RAS/LPSTransform (see why I feel that "increasing vs. decreasing" is important below). I want the name and its docstring to scream at you what it represents so there is no discussion like on the AFNI list where users are not sure which output of which program (in AFNI) should be flipped (see the other emails in the thread). It should be a subclass of AffineTransform because it has restrictions: namely, its range is 'xyz' and 'xyz' can be interpreted in one of two ways (either RAS or LPS). You can represent any other version of RAS/LPS or (whatever colour your bike shed is, :)) with the same class, it just may have negative values on the diagonal. If it has some rotation applied, then it becomes pretty hard (at least for me) to decide if it's RAS or LPS from the 4x4 matrix of floats. I can't even tell you now when I look at the FIAC data which way left and right go unless I ask Matthew.

For background, you may want to look at what Gordon Kindlmann did for nrrd format where you can declare the space in which your orientation information and other transforms should be interpreted: http://teem.sourceforge.net/nrrd/format.html#space

Or, if that's too flexible for you, you could adopt a standard space.

ITK chose LPS to match DICOM.
For slicer, like nifti, we chose RAS It may be that there is well-established convention for this, but then why does ITK say DICOM=LPS and AFNI say DICOM=RAI? At least MINC is explicit. I favor making it as precise as MINC does. That AFNI discussion I pointed to uses the pairing RAI/DICOM and LPI/SPM. This discrepancy suggests there's some disagreement between using the letters to name the system and whether they mean increasing or decreasing. My guess is that LPI=RAS based on ITK/AFNI's identifications of LPS=DICOM=RAI. But I can't tell if the acronym LPI means "x is increasing L to R, y increasing from P to A, z in increasing from I to S" which would be equivalent to RAS meaning "x decreasing from R to L, y decreasing from A to P, z is decreasing from S to I". That is, I can't tell from the acronyms which of LPI or RAS is using "increasing" and which is "decreasing", i.e. they could have flipped everything so that LPI means "x is decreasing L to R, y is decreasing P to A, z is decreasing I to S" and RAS means "x is increasing R to L, y is increasing A to P, z is increasing S to I". To add more confusion to the mix, the acronym doesn't say if it is the patient's left to right or the technician looking at him, :) For this, I'm sure there's a standard answer, and it's likely the patient, but heck, I'm just a statistician so I don't know the answer. (every volume has an ijkToRAS affine transform). We convert to/from LPS when calling ITK code, e.g., for I/O. How much clearer can you express "ijkToRAS" or "convert to/from LPS" than something like this: >>> T = np.array([[2,0,0,-91.095],[0,2,0,-129.51],[0,0,2,-73.25],[0,0,0,1]]) >>> ijk = CoordinateSystem('ijk', 'voxel') >>> RAS = CoordinateSystem('xyz', 'world-RAS') >>> ijk_to_RAS = AffineTransform(ijk, RAS, T) >>> ijk_to_RAS AffineTransform( function_domain=CoordinateSystem(coord_names=('i', 'j', 'k'), name='voxel', coord_dtype=float64), function_range=CoordinateSystem(coord_names=('x', 'y', 'z'), name='world-RAS', coord_dtype=float64), affine=array([[ 2. , 0. , 0. , -91.095], [ 0. , 2. , 0. , -129.51 ], [ 0. , 0. , 2. , -73.25 ], [ 0. , 0. , 0. , 1. ]]) ) >>> LPS = CoordinateSystem('xyz', 'world-LPS') >>> RAS_to_LPS = AffineTransform(RAS, LPS, np.diag([-1,-1,1,1])) >>> ijk_to_LPS = compose(RAS_to_LPS, ijk_to_RAS) >>> RAS_to_LPS AffineTransform( function_domain=CoordinateSystem(coord_names=('x', 'y', 'z'), name='world-RAS', coord_dtype=float64), function_range=CoordinateSystem(coord_names=('x', 'y', 'z'), name='world-LPS', coord_dtype=float64), affine=array([[-1., 0., 0., 0.], [ 0., -1., 0., 0.], [ 0., 0., 1., 0.], [ 0., 0., 0., 1.]]) ) >>> ijk_to_LPS AffineTransform( function_domain=CoordinateSystem(coord_names=('i', 'j', 'k'), name='voxel', coord_dtype=float64), function_range=CoordinateSystem(coord_names=('x', 'y', 'z'), name='world-LPS', coord_dtype=float64), affine=array([[ -2. , 0. , 0. , 91.095], [ 0. , -2. , 0. , 129.51 ], [ 0. , 0. , 2. , -73.25 ], [ 0. , 0. , 0. , 1. ]]) ) Of course, we shouldn't rely on the names ijk_to_RAS to know that it is an ijk_to_RAS transform, that's why they're in the AffineTransform. I don't think any one wants an attribute named "ijk_to_RAS" for AffineImage/Image/LPIImage. The other problem that LPI/RAI/AffineTransform addresses is that someday you might want to transpose the data in your array and still have what you would call an "image". 
AffineImage allows this explicitly because there is no identifier for the domain of the AffineTransform (the attribute name "coord_sys" implies that it refers to either the domain or the range but not both). (Even those who share the sentiment that "everything that is important about the linking between two coordinate systems is contained in the transform" acknowledge there are two coordinate systems :)) Once you've transposed the array, say >>> data = np.random.normal(size=(10, 12, 14)) # original array >>> newdata = data.transpose([2,0,1]) You shouldn't use something called "ijk_to_RAS" or "ijk_to_LPS" transform. Rather, you should use a "kij_to_RAS" or "kij_to_LPS" transform. >>> ijk = CoordinateSystem('ijk', 'voxel') >>> kij = CoordinateSystem('kij', 'voxel') >>> ijk_to_kij = AffineTransform(ijk, kij, np.array([[0,0,1,0],[1,0,0,0],[0,1,0,0],[0,0,0,1]])) Check that it does the right permutation >>> i, j, k = 10., 20., 40 >>> ijk_to_kij([i, j, k]) array([ 40., 10., 20.]) Yup, now let's try to make a kij_to_RAS transform At first guess, we might try >>> kij_to_RAS = compose(ijk_to_RAS, ijk_to_kij) Traceback (most recent call last): ... ValueError: domains and ranges don't match up correctly We have a problem, we've asked for a composition that doesn't make sense. If you're good with permutation matrices, you wouldn't have to call "compose" above and you can just do matrix multiplication. But here the name of the function tells you that yes, you should do the inverse: "ijk_to_kij" says that the range are "kij" values, but to get a "transform" for your data in "kij" it should have a domain that is "kij". The call to compose raised an exception because it saw you were trying to compose a function with domain="ijk" and range="kji" with a function (on its left) having domain="ijk" and range "kji". This composition just doesn't make sense so it raises an exception. >>> kij_to_ijk = ijk_to_kij.inverse() >>> kij_to_RAS = compose(ijk_to_RAS, kij_to_ijk) >>> kij_to_RAS AffineTransform( function_domain=CoordinateSystem(coord_names=('k', 'i', 'j'), name='voxel', coord_dtype=float64), function_range=CoordinateSystem(coord_names=('x', 'y', 'z'), name='world-RAS', coord_dtype=float64), affine=array([[ 0. , 2. , 0. , -91.095], [ 0. , 0. , 2. , -129.51 ], [ 2. , 0. , 0. , -73.25 ], [ 0. , 0. , 0. , 1. ]]) ) >>> ijk_to_RAS([i,j,k]) array([-71.095, -89.51 , 6.75 ]) >>> kij_to_RAS([k,i,j]) array([-71.095, -89.51 , 6.75 ]) We also shouldn't have to rely on the names of the AffineTransforms, i.e. ijk_to_RAS, to remember what's what (in typing this example, I mixed up kij and kji many times). The objects ijk_to_RAS, kij_to_RAS represent the same "affine transform", as evidenced by their output above. There are lots of representations of the same "affine transform": (6=permutations of i,j,k)*(6=permutations of x,y,z)=36 matrices for one "affine transform". If we throw in ambiguity about the sign in front of the output, there are 36*(8=2^3 possible flips of the x,y,z)=288 matrices possible but there are only really 8 different "affine transforms". If you force the order of the range to be "xyz" then there are 6*8=48 different matrices possible, again only specifying 8 different "affine transforms". For AffineImage, if we were to allow both "LPS" and "RAS" this means two flips are allowed, namely either "LPS"=[-1,-1,1] or "RAS"=[1,1,1], so there are 6*2=12 possible matrices to represent 2 different "affine transforms". 
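To make the matrix counting concrete, here is a small numpy-only sketch (not from the original emails - it uses plain permutation matrices rather than nipy's ``reordered_domain``) that walks through the six reorderings of the 'ijk' domain for one diagonal affine: six different matrices, but each one sends the correspondingly reordered voxel coordinate to the same world point::

    import itertools
    import numpy as np

    T = np.array([[2, 0, 0, -91.095],
                  [0, 2, 0, -129.51],
                  [0, 0, 2, -73.25],
                  [0, 0, 0, 1.]])
    ijk = np.array([10., 20., 40., 1.])   # the voxel (i=10, j=20, k=40), homogeneous form
    reference = T.dot(ijk)                # the world point it maps to

    for perm in itertools.permutations(range(3)):
        P = np.eye(4)[list(perm) + [3]]   # reorders (i, j, k), leaving the trailing 1 alone
        T_perm = T.dot(P.T)               # the matrix for the reordered domain
        # feeding in the reordered voxel coordinates recovers the same world point
        assert np.allclose(T_perm.dot(P.dot(ijk)), reference)

Permuting the 'xyz' range works the same way, with the permutation matrix applied on the left, which is where the 6*6=36 count above comes from.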
Here's another example that uses sympy to show what's going on in the 4x4 matrix as you reorder the 'ijk' and the 'RAS'. (Note that this code won't work in general because I had temporarily disabled a check in CoordinateSystem that enforced the dtype of the array to be a builtin scalar dtype for sanity's sake). To me, each of A, A_kij and A_kij_yzx below represent the same "transform" because if I substitute i=30, j=40, k=50 and I know the order of the 'xyz' in the output then they will all give me the same answer. >>> import sympy >>> ijk = CoordinateSystem('ijk', coord_dtype=np.array(sympy.Symbol('x')).dtype) >>> xyz = CoordinateSystem('xyz', coord_dtype=np.array(sympy.Symbol('x')).dtype) >>> x_start, y_start, z_start = [sympy.Symbol(s) for s in ['x_start', 'y_start', 'z_start']] >>> x_step, y_step, z_step = [sympy.Symbol(s) for s in ['x_step', 'y_step', 'z_step']] >>> i, j, k = [sympy.Symbol(s) for s in 'ijk'] >>> T = np.array([[x_step,0,0,x_start],[0,y_step,0,y_start],[0,0,z_step,z_start],[0,0,0,1]]) >>> T array([[x_step, 0, 0, x_start], [0, y_step, 0, y_start], [0, 0, z_step, z_start], [0, 0, 0, 1]], dtype=object) >>> A = AffineTransform(ijk, xyz, T) >>> A AffineTransform( function_domain=CoordinateSystem(coord_names=('i', 'j', 'k'), name='', coord_dtype=object), function_range=CoordinateSystem(coord_names=('x', 'y', 'z'), name='', coord_dtype=object), affine=array([[x_step, 0, 0, x_start], [0, y_step, 0, y_start], [0, 0, z_step, z_start], [0, 0, 0, 1]], dtype=object) ) >>> A([i,j,k]) == [x_start + i*x_step, y_start + j*y_step, z_start + k*z_step] array([ True, True, True], dtype=bool) This is another >>> A_kij = A.reordered_domain('kij') >>> A_kij AffineTransform( function_domain=CoordinateSystem(coord_names=('k', 'i', 'j'), name='', coord_dtype=object), function_range=CoordinateSystem(coord_names=('x', 'y', 'z'), name='', coord_dtype=object), affine=array([[0, 1.0*x_step, 0, 1.0*x_start], [0, 0, 1.0*y_step, 1.0*y_start], [1.0*z_step, 0, 0, 1.0*z_start], [0.0, 0.0, 0.0, 1.0]], dtype=object) ) >>> A_kij([k,i,j]) array([1.0*i*x_step + 1.0*x_start, 1.0*j*y_step + 1.0*y_start, 1.0*k*z_step + 1.0*z_start], dtype=object) Let's look at another reordering: >>> A_kij_yzx = A_kij.reordered_range('yzx') >>> A_kij_yzx AffineTransform( function_domain=CoordinateSystem(coord_names=('k', 'i', 'j'), name='', coord_dtype=object), function_range=CoordinateSystem(coord_names=('y', 'z', 'x'), name='', coord_dtype=object), affine=array([[0, 0, 1.0*y_step, 1.0*y_start], [1.0*z_step, 0, 0, 1.0*z_start], [0, 1.0*x_step, 0, 1.0*x_start], [0, 0, 0, 1.00000000000000]], dtype=object) ) >>> A_kij_yzx([k,i,j]) array([1.0*j*y_step + 1.0*y_start, 1.0*k*z_step + 1.0*z_start, 1.0*i*x_step + 1.0*x_start], dtype=object) >>> A_kij AffineTransform( function_domain=CoordinateSystem(coord_names=('k', 'i', 'j'), name='', coord_dtype=object), function_range=CoordinateSystem(coord_names=('x', 'y', 'z'), name='', coord_dtype=object), affine=array([[0, 1.0*x_step, 0, 1.0*x_start], [0, 0, 1.0*y_step, 1.0*y_start], [1.0*z_step, 0, 0, 1.0*z_start], [0.0, 0.0, 0.0, 1.0]], dtype=object) ) >>> from nipy.core.reference.coordinate_map import equivalent >>> equivalent(A_kij, A) True >>> equivalent(A_kij, A_kij_yzx) True nipy-0.3.0/doc/devel/code_discussions/image_ordering.rst000066400000000000000000000137711210344137400234120ustar00rootroot00000000000000.. 
_image_ordering: Image index ordering ==================== Background ---------- In general, images - and in particular NIfTI format images, are ordered in memory with the X dimension changing fastest, and the Z dimension changing slowest. Numpy has two different ways of indexing arrays in memory, C and fortran. With C index ordering, the first index into an array indexes the slowest changing dimension, and the last indexes the fastest changing dimension. With fortran ordering, the first index refers to the fastest changing dimension - X in the case of the image mentioned above. C is the default index ordering for arrays in Numpy. For example, let's imagine that we have a binary block of 3D image data, in standard NIfTI / Analyze format, with the X dimension changing fastest, called `my.img`, containing Float32 data. Then we memory map it: :: img_arr = memmap('my.img', dtype=float32) When we index this new array, the first index indexes the Z dimension, and the third indexes X. For example, if I want a voxel X=3, Y=10, Z=20 (zero-based), I have to get this from the array with: :: img_arr[20, 10, 3] The problem ----------- Most potential users of NiPy are likely to have experience of using image arrays in Matlab and SPM. Matlab uses Fortran index ordering. For fortran, the first index is the fastest changing, and the last is the slowest-changing. For example, here is how to get voxel X=3, Y=10, Z=20 (zero-based) using SPM in Matlab: :: img_arr = spm_read_vols(spm_vol('my.img')); img_arr(4, 11, 21) % matlab indexing is one-based This ordering fits better with the way that we talk about coordinates in functional imaging, as we invariably use XYZ ordered coordinates in papers. It is possible to do the same in numpy, by specifying that the image should have fortran index ordering: :: img_arr = memmap('my.img', dtype=float32, order='F') img_arr[3, 10, 20] Native fortran or C indexing for images --------------------------------------- We could change the default ordering of image arrays to fortran, in order to allow XYZ index ordering. So, change the access to the image array in the image class so that, to get the voxel at X=3, Y=10, Z=20 (zero-based): :: img = load_image('my.img') img[3, 10, 20] instead of the current situation, which requires: :: img = load_image('my.img') img[20, 10, 3] For and against fortran ordering ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ For: * Fortran index ordering is more intuitive for functional imaging because of conventional XYZ ordering of spatial coordinates, and Fortran index ordering in packages such as Matlab * Indexing into a raw array is fast, and common in lower-level applications, so it would be useful to implement the more intuitive XYZ ordering at this level rather than via interpolators (see below) * Standardizing to one index ordering (XYZ) would mean users would not have to think about the arrangement of the image in memory Against: * C index ordering is more familiar to C users * C index ordering is the default in numpy * XYZ ordering can be implemented by wrapping by an interpolator Note that there is no performance penalty for either array ordering, as this is dealt with internally by NumPy. 
For example, imagine the following:: arr = np.empty((100,50)) # Indexing is C by default arr2 = arr.transpose() # Now it is fortran # There should be no effective difference in speed for the next two lines b = arr[0] # get first row of data - most discontiguous memory c = arr2[:,0] # gets same data, again most discontiguous memory Potential problems for fortran ordering ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Clash between default ordering of numpy arrays and nipy images ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ C index ordering is the default in numpy, and using fortran ordering for images might be confusing in some circumstances. Consider for example: :: img_obj = load_image('my.img') # Where the Image class has been changed to implement Fortran ordering first_z_slice = img_obj[...,0] # returns a Z slice img_arr = memmap('my.img', dtype=float32) # C ordering, the numpy default img_obj = Image.from_array(img_arr) # this call may not be correct first_z_slice = img_obj[...,0] # in fact returns an X slice I suppose that we could check that arrays are fortran index ordered in the Image __init__ routine. An alternative proposal - XYZ ordering of output coordinates ------------------------------------------------------------ JT: Another thought, that is a compromise between the XYZ coordinates and Fortran ordering. To me, having worked mostly with C-type arrays, when I index an array I think in C terms. But, the Image objects have the "warp" attached to them, which describes the output coordinates. We could insist that the output coordinates are XYZT (or make this an option). So, for instance, if the 4x4 transform was the identity, the following two calls would give something like: :: >>> interp = interpolator(img) >>> img[3,4,5] == interp(5,4,3) True This way, users would be sure in the interpolator of the order of the coordinates, but users who want access to the array would know that they would be using the array order on disk... I see that a lot of users will want to think of the first coordinate as "x", but depending on the sampling the [0] slice of img may be the leftmost or the rightmost. To find out which is which, users will have to look at the 4x4 transform (or equivalently the start and the step). So just knowing the first array coordinate is the "x" coordinate still misses some information, all of which is contained in the transform. MB replied: I agree that the output coordinates are very important - and I think we all agree that this should be XYZ(T)? For the raw array indices - it is very common for people to want to do things to the raw image array - the quickstart examples containing a few - and you usually don't care about which end of X is left in that situation, only which spatial etc dimension the index refers to. nipy-0.3.0/doc/devel/code_discussions/index.rst000066400000000000000000000007061210344137400215400ustar00rootroot00000000000000.. _code-discussions: ================ Code discussions ================ These are some developer discussions about design of code in NIPY. .. only:: html :Release: |version| :Date: |today| .. toctree:: :maxdepth: 2 understanding_affines image_ordering registration_api repository_design brainvisa_repositories repository_api pipelining_api simple_viewer usecases/index refactoring/index comparisons/index nipy-0.3.0/doc/devel/code_discussions/pipelining_api.rst000066400000000000000000000013711210344137400234170ustar00rootroot00000000000000.. 
_pipelining_api: ================================== What would pipelining look like? ================================== Imagine a repository that is a modified version of the one in :ref:`repository_api` Then:: my_repo = SubjectRepository('/some/structured/file/system') my_designmaker = MyDesignParser() # Takes parameters from subject to create design my_pipeline = Pipeline([ realignerfactory('fsl'), slicetimerfactory('nipy', 'linear'), coregisterfactory('fsl', 'flirt'), normalizerfactory('spm'), filterfactory('nipy', 'smooth', 8), designfactory('nipy', my_designmaker), ]) my_analysis = SubjectAnalysis(my_repo, subject_pipeline=my_pipeline) my_analysis.do() my_analysis.archive() nipy-0.3.0/doc/devel/code_discussions/refactoring/000077500000000000000000000000001210344137400221775ustar00rootroot00000000000000nipy-0.3.0/doc/devel/code_discussions/refactoring/imagelists.rst000066400000000000000000000014361210344137400250760ustar00rootroot00000000000000======================== Refactoring imagelists ======================== Usecases for ImageList ====================== Thus far only used in anger in :mod:`nipy.modalities.fmri.fmristat.model`, similarly in :mod:`nipy.modalities.fmri.spm.model`. From that file, an object ``obj`` of class :class:`FmriImageList` must: * return 4D array from ``np.asarray(obj)``, such that the first axis (axis 0) is the axis over which the model is applied * be indexable such that ``obj[0]`` returns an Image instance, with valid ``shape`` and ``coordmap`` attributes for a time-point 3D volume in the 4D time-series. * have an attribute ``volume_start_times`` giving times of the start of each of the volumes in the 4D time series. * Return the number of volumes in the time-series from ``len(obj)`` nipy-0.3.0/doc/devel/code_discussions/refactoring/index.rst000066400000000000000000000002101210344137400240310ustar00rootroot00000000000000.. _refactoring_index: ====================== Defining use cases ====================== .. toctree:: :maxdepth: 2 imagelists nipy-0.3.0/doc/devel/code_discussions/registration_api.rst000066400000000000000000000057041210344137400237770ustar00rootroot00000000000000========================= Registration API Design ========================= This contains design ideas for the end-user api when registering images in nipy. We want to provide a simple api, but with enough flexibility to allow users to changes various components of the pipeline. We will also provide various **Standard** scripts that perform typical pipelines. The pluggable script:: func_img = load_image(filename) anat_img = load_image(filename) interpolator = SplineInterpolator(order=3) metric = NormalizedMutualInformation() optimizer = Powell() strategy = RegistrationStrategy(interpolator, metric, optimizer) w2w = strategy.apply(img_fixed, img_moving) To apply the transform and resample the image:: new_img = resample(img_moving, w2w, interp=interpolator) Or:: new_img = Image(img_moving, w2w*img_moving.coordmap) Transform Multiplication ------------------------ The multiplication order is important and coordinate systems must *make sense*. The *output coordinates* of the mapping on the right-hand of the operator, must match the *input coordinates* of the mapping on the left-hand side of the operator. For example, imageA has a mapping from voxels-to-world (v2w), imageB has a mapping from world-to-world (w2w). So the output of imageA, *world*, maps to the input of imageB, *world*. 
We would compose a new mapping (transform) from these mappings like this::

    new_coordmap = imageB.coordmap * imageA.coordmap

If one tried to compose a mapping in the other order, an error should be raised, as the code would detect a mismatch: trying to map the output coordinates of imageB, *world*, to the input coordinates of imageA, *voxels*::

    new_coordmap = imageA.coordmap * imageB.coordmap
    raise ValueError!!!

Note: We should consider a meaningful error message to help people quickly correct this mistake.

One way to remember this ordering is to think of composing functions. If these were functions, the output of the first function to evaluate (imageA.coordmap) is passed as input to the second function (imageB.coordmap), and therefore they must match::

    new_coordmap = imageB.coordmap(imageA.coordmap())

Matching Coordinate Systems
---------------------------

We need to make sure we can detect mismatched coordinate mappings. The CoordinateSystem class has a check for equality (__eq__ method) based on the axis and name attributes. Long-term this may not be robust enough, but it's a starting place. We should write tests for failing cases of this, if they don't already exist.

CoordinateMap
-------------

Recall the CoordinateMap defines a mapping between two coordinate systems, an input coordinate system and an output coordinate system. One example of this would be a mapping from voxel space to scanner space. In a Nifti1 header we would have an affine transform to apply this mapping. The *input coordinates* would be voxel space, the *output coordinates* would be world space, and the affine transform provides the mapping between them.

nipy-0.3.0/doc/devel/code_discussions/repository_api.rst000066400000000000000000000033021210344137400234740ustar00rootroot00000000000000
.. _repository_api:

Repository API
==============

See also :ref:`repository_design` and :ref:`brainvisa_repositories`

FMRI datasets often have the structure:

* Group (sometimes) e.g. Patients, Controls
* Subject e.g. Subject1, Subject2
* Session e.g. Sess1, Sess2

How about an interface like::

    repo = GSSRepository(
        root_dir = '/home/me/data/experiment1',
        groups = {'patients':
                      {'subjects':
                           {'patient1':
                                {'sess1': {'filter': 'raw*nii'},
                                 'sess2': {'filter': 'raw*nii'}},
                            'patient2':
                                {'sess1': {'filter': 'raw*nii'},
                                 'sess2': {'filter': 'raw*nii'}}}},
                  'controls':
                      {'subjects':
                           {'control1':
                                {'sess1': {'filter': 'raw*nii'},
                                 'sess2': {'filter': 'raw*nii'}},
                            'control2':
                                {'sess1': {'filter': 'raw*nii'},
                                 'sess2': {'filter': 'raw*nii'}}}}})

    for group in repo.groups:
        for subject in group.subjects:
            for session in subject.sessions:
                img = session.image
                # do something with image

We would need to think about adding metadata such as behavioral data from the scanning session, and so on. I suppose this will help us move transparently to using something like HDF5 for data storage.

nipy-0.3.0/doc/devel/code_discussions/repository_design.rst000066400000000000000000000060651210344137400242050ustar00rootroot00000000000000..
_repository_design: =================== Repository design =================== See also :ref:`repository_api` and :ref:`brainvisa_repositories` For the NIPY system, there seems to be interest for the following: * Easy distributed computing * Easy scripting, replicating the same analysis on different data * Flexibility - easy of inter-operation with other brain imaging systems At a minimum, this seems to entail the following requirements for the NIPY repository system: * Unique identifiers of data, which can be abstracted from the most local or convenient data storage * A mechanism for mapping the canonical data model(s) from NIPY to an arbitrary, and potentially even inconsistent repository structure * A set of semantic primitives / metadata slots, enabling for example: * "all scans from this subject" * "the first scan from every subject in the control group" * "V1 localizer scans from all subjects" * "Extract the average timecourse for each subject from the ROI defined by all voxels with t > 0.005 in the V1 localizer scan for that subject" These problems are not unique to the problem of brain imaging data, and in many cases have been treated in the domains of database design, geospatial and space telescope data, and the semantic web. Technologies of particular interest include: * HDF5 - the basis of MINC 2.0 (and potentially NIFTII 2), the most recent development in the more general CDF / HDF series (and very highly regarded). There are excellent python binding available in `PyTables `_. * Relational database design - it would be nice to efficiently select data based on any arbitrary subset of attributes associated with that data. * The notion of `URI `_ developed under the guidance of the w3c. Briefly, a URI consists of: * An authority (i.e. a domain name controlled by a particular entity) * A path - a particular resource specified by that authority * Abstraction from storage (as opposed to a URL) - a URI does not necessarily include the information necessary for retrieving the data referred to, though it may. * Ways of dealing with hierarchical data as developed in the XML field (though these strategies could be implemented potentially in other hierarchical data formats - even filesystems). Note that incorporation of any of the above ideas does not require the use of the actual technology referenced. For example, relational queries can be made in PyTables in many cases **more efficiently** than in a relational database by storing everything in a single denormalized table. This data structure tends to be more efficient than the equivalent normalized relational database format in the cases where a single data field is much larger than the others (as is the case with the data array in brain imaging data). That said, adherance to standards allows us to leverage existing code which may be tuned to a degree that would be beyond the scope of this project (for example, fast Xpath query libraries, as made available via lxml in Python). nipy-0.3.0/doc/devel/code_discussions/simple_viewer.rst000066400000000000000000000003101210344137400232720ustar00rootroot00000000000000Simple image viewer ------------------- Other attempts -------------- http://biomag.wikidot.com/mri-tools http://code.google.com/p/dicompyler https://cirl.berkeley.edu/svn/cburns/trunk/nifti_viewer nipy-0.3.0/doc/devel/code_discussions/understanding_affines.rst000066400000000000000000000225641210344137400247770ustar00rootroot00000000000000.. 
_understanding_affines: ============================================= Understanding voxel and real world mappings ============================================= Voxel coordinates and real-world coordinates ---------------------------------------------- A point can be represented by coordinates relative to specified axes. coordinates are (almost always) numbers - see `coordinate systems `_ For example, a map grid reference gives a coordinate (a pair of numbers) to a point on the map. The numbers give the respective positions on the horizontal (``x``) and vertical (``y``) axes of the map. A coordinate system is defined by a set of axes. In the example above, the axes are the ``x`` and ``y`` axes. Axes for coordinates are usually orthogonal - for example, moving one unit up on the ``x`` axis on the map causes no change in the ``y`` coordinate - because the axes are at 90 degrees. In this discussion we'll concentrate on the three dimensional case. Having three dimensions means that we have a three axis coordinate system, and coordinates have three values. The meaning of the values depend on what the axes are. Voxel coordinates ````````````````` Array indexing is one example of using a coordinate system. Let's say we have a three dimensional array:: A = np.arange(24).reshape((2,3,4)) The value ``0`` is at array coordinate ``0,0,0``:: assert A[0,0,0] == 0 and the value ``23`` is at array coordinate ``1,2,3``:: assert A[1,2,3] == 23 (remembering python's zero-based indexing). If we now say that our array is a 3D volume element array - an array of voxels, then the array coordinate is also a voxel coordinate. If we want to use ``numpy`` to index our array, then we need integer voxel coordinates, but if we use a resampling scheme, we can also imagine non-integer voxel coordinates for ``A``, such as ``(0.6,1.2,1.9)``, and we could use resampling to estimate the value at such a coordinate, given the actual data in the surrounding (integer) points. Array / voxel coordinates refer to the array axes. Without any further information, they do not tell us about where the point is in the real world - the world we can measure with a ruler. We refer to array / voxel coordinates with indices ``i, j, k``, where ``i`` is the first value in the 3 value coordinate tuple. For example, if array / voxel point ``(1,2,3)`` has ``i=1, j=2, k=3``. We'll be careful only to use ``i, j, k`` rather than ``x, y, z``, because we are going to use ``x, y, z`` to refer to real-world coordinates. Real-world coordinates `````````````````````` Real-world coordinates are coordinates where the values refer to real-world axes. A real-world axis is an axis that refers to some real physical space, like low to high position in an MRI scanner, or the position in terms of the subject's head. Here we'll use the usual neuroimaging convention, and that is to label our axes relative to the subject's head: * ``x`` has negative values for left and positive values for right * ``y`` has negative values for posterior (back of head) and positive values for anterior (front of head) * ``z`` has negative values for the inferior (towards the neck) and postive values for superior (towards the highest point of the head, when standing) Image index ordering -------------------- Background `````````` In general, images - and in particular NIfTI format images, are ordered in memory with the X dimension changing fastest, and the Z dimension changing slowest. Numpy has two different ways of indexing arrays in memory, C and fortran. 
With C index ordering, the first index into an array indexes the slowest changing dimension, and the last indexes the fastest changing dimension. With fortran ordering, the first index refers to the fastest changing dimension - X in the case of the image mentioned above. C is the default index ordering for arrays in Numpy. For example, let's imagine that we have a binary block of 3D image data, in standard NIfTI / Analyze format, with the X dimension changing fastest, called `my.img`, containing Float32 data. Then we memory map it:: img_arr = memmap('my.img', dtype=float32) When we index this new array, the first index indexes the Z dimension, and the third indexes X. For example, if I want a voxel X=3, Y=10, Z=20 (zero-based), I have to get this from the array with:: img_arr[20, 10, 3] The problem ``````````` Most potential users of NiPy are likely to have experience of using image arrays in Matlab and SPM. Matlab uses Fortran index ordering. For fortran, the first index is the fastest changing, and the last is the slowest-changing. For example, here is how to get voxel X=3, Y=10, Z=20 (zero-based) using SPM in Matlab:: img_arr = spm_read_vols(spm_vol('my.img')); img_arr(4, 11, 21) % matlab indexing is one-based This ordering fits better with the way that we talk about coordinates in functional imaging, as we invariably use XYZ ordered coordinates in papers. It is possible to do the same in numpy, by specifying that the image should have fortran index ordering:: img_arr = memmap('my.img', dtype=float32, order='F') img_arr[3, 10, 20] The proposal ```````````` Change the default ordering of image arrays to fortran, in order to allow XYZ index ordering. So, change the access to the image array in the image class so that, to get the voxel at X=3, Y=10, Z=20 (zero-based):: img = Image('my.img') img[3, 10, 20] instead of the current situation, which requires:: img = Image('my.img') img[20, 10, 3] Summary of discussion ````````````````````` For: * Fortran index ordering is more intuitive for functional imaging because of conventional XYZ ordering of spatial coordinates, and Fortran index ordering in packages such as Matlab * Indexing into a raw array is fast, and common in lower-level applications, so it would be useful to implement the more intuitive XYZ ordering at this level rather than via interpolators (see below) * Standardizing to one index ordering (XYZ) would mean users would not have to think about the arrangement of the image in memory Against: * C index ordering is more familiar to C users * C index ordering is the default in numpy * XYZ ordering can be implemented by wrapping by an interpolator Potential problems `````````````````` Performance penalties ^^^^^^^^^^^^^^^^^^^^^ KY commented:: This seems like a good idea to me but I have no knowledge of numpy internals (and even less than none after the numeric/numarray integration). Does anyone know if this will (or definitely will not) incur any kind of obvious performance penalties re. array operations (sans arcane problems like stride issues in huge arrays)? MB replied: Note that, we are not proposing to change the memory layout of the image, which is fixed by the image format in e.g NIfTI, but only to index it XYZ instead of ZYX. As far as I am aware, there are no significant performance differences between:: img_arr = memmap('my.img', dtype=float32, order='C') img_arr[5,4,3] and:: img_arr = memmap('my.img', dtype=float32, order='F') img_arr[3,4,5] Happy to be corrected though. 
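As a quick check of that claim, here is a small self-contained sketch (not part of the original exchange) that builds two views of the same flat buffer - one C-ordered and indexed as [z, y, x], the other Fortran-ordered and indexed as [x, y, z] - and times a single voxel lookup in each; the shape and the voxel picked are arbitrary::

    import numpy as np
    from timeit import timeit

    nx, ny, nz = 91, 109, 91
    raw = np.arange(nx * ny * nz, dtype=np.float32)  # stands in for the bytes of 'my.img'
    c_arr = raw.reshape((nz, ny, nx))                # C (default) order: index as [z, y, x]
    f_arr = raw.reshape((nx, ny, nz), order='F')     # Fortran order: index as [x, y, z]

    # Both views address the same voxel in the same underlying buffer
    assert c_arr[20, 10, 3] == f_arr[3, 10, 20]

    # The two lookups should take essentially the same time
    print(timeit(lambda: c_arr[20, 10, 3], number=100000))
    print(timeit(lambda: f_arr[3, 10, 20], number=100000))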
Clash between default ordering of numpy arrays and nipy images ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ C index ordering is the default in numpy, and using fortran ordering for images might be confusing in some circumstances. Consider for example: img_obj = Image('my.img') # Where the Image class has been changed to implement Fortran ordering first_z_slice = img_obj[...,0] # returns a Z slice img_arr = memmap('my.img', dtype=float32) # C ordering, the numpy default img_obj = Image(img_arr) first_z_slice = img_obj[...,0] # in fact returns an X slice I suppose that we could check that arrays are fortran index ordered in the Image __init__ routine. An alternative proposal - XYZ ordering of output coordinates ```````````````````````````````````````````````````````````` JT: Another thought, that is a compromise between the XYZ coordinates and Fortran ordering. To me, having worked mostly with C-type arrays, when I index an array I think in C terms. But, the Image objects have the "warp" attached to them, which describes the output coordinates. We could insist that the output coordinates are XYZT (or make this an option). So, for instance, if the 4x4 transform was the identity, the following two calls would give something like:: interp = interpolator(img) img[3,4,5] == interp(5,4,3) This way, users would be sure in the interpolator of the order of the coordinates, but users who want access to the array would know that they would be using the array order on disk... I see that a lot of users will want to think of the first coordinate as "x", but depending on the sampling the [0] slice of img may be the leftmost or the rightmost. To find out which is which, users will have to look at the 4x4 transform (or equivalently the start and the step). So just knowing the first array coordinate is the "x" coordinate still misses some information, all of which is contained in the transform. MB replied: I agree that the output coordinates are very important - and I think we all agree that this should be XYZ(T)? For the raw array indices - it is very common for people to want to do things to the raw image array - the quickstart examples containing a few - and you usually don't care about which end of X is left in that situation, only which spatial etc dimension the index refers to. nipy-0.3.0/doc/devel/code_discussions/usecases/000077500000000000000000000000001210344137400215075ustar00rootroot00000000000000nipy-0.3.0/doc/devel/code_discussions/usecases/batching.rst000066400000000000000000000004711210344137400240220ustar00rootroot00000000000000.. _batching: ================== Batching use cases ================== Using the nipy_ framework for creating scripts to process whole datasets, for example movement correction, coregistration of functional to structural (intermodality), smoothing, statistics, inference. .. include:: ../../../links_names.txt nipy-0.3.0/doc/devel/code_discussions/usecases/images.rst000066400000000000000000000117611210344137400235140ustar00rootroot00000000000000.. _image_usecases: ======================= Image model use cases ======================= In which we lay out the various things that users and developers may want to do to images. 
See also :ref:`resampling` Taking a mean over a 4D image ============================= We could do this much more simply than below, this is just an example of reducing over a particular axis:: # take mean of 4D image from glob import glob import numpy as np import nipy as ni fname = 'some4d.nii' img_list = ni.load_list(fname, axis=3) vol0 = img_list[0] arr = vol0.array[:] for vol in img_list[1:]: arr += vol.array mean_img = ni.Image(arr, vol0.coordmap) ni.save(mean_img, 'mean_some4d.nii') Taking mean over series of 3D images ==================================== Just to show how this works with a list of images:: # take mean of some PCA volumes fnames = glob('some3d*.nii') vol0 = ni.load(fnames[0]) arr = vol0.array[:] for fname in fnames[1:]: vol = ni.load(fname) arr += vol.array mean_img = ni.Image(arr, vol0.coordmap) ni.save(mean_img, 'mean_some3ds.nii') Simple motion correction ======================== This is an example of how the 4D -> list of 3D interface works:: # motion correction img_list = ni.load_list(fname, axis=3) reggie = ni.interfaces.fsl.Register(tol=0.1) vol0 = img_list[0] mocod = [] # unresliced rmocod = [] # resliced for vol in img_list[1:]: rcoord_map = reggie.run(moving=vol, fixed=vol0) cmap = ni.ref.compose(rcoord_map, vol.coordmap) mocovol = ni.Image(vol.array, cmap) # But... try: a_vol = ni.Image(vol.array, rcoord_map) except CoordmapError as msg: assert msg == 'need coordmap with voxel input' mocod.append(mocovol) rmocovol = ni.reslice(mocovol, vol0) rmocod.append(rmocovol) rmocod_img = ni.list_to_image(rmocod) ni.save(rmocod_img, 'rsome4d.nii') try: mocod_img = ni.list_to_image(mocod) except ImageListError: print('That is what I thought; the transforms were not the same') Slice timing ============ Here, putting a 3D image into an image list, and back into a 4D image / array:: # slice timing img_list = ni.load_list(fname, axis=2) slicetimer = ni.interfaces.fsl.SliceTime(algorithm='linear') vol0 = img_list[0] try: vol0.timestamp except AttributeError: print('we do not have a timestamp') try: vol0.slicetimes except AttributeError: print('we do not have slicetimes') try: st_list = slicetimer.run(img) except SliceTimeError as msg: assert msg == 'no timestamp for volume' TR = 2.0 slicetime = 0.15 sliceaxis = 2 nslices = vol0.array.shape[sliceaxis] slicetimes = np.arange(nslices) * slicetime timestamps = np.arange(len(img_list)) * TR # Either the images are in a simple list for i, img in enumerate(img_list): img.timestamp = timestamps[i] img.slicetimes = slicetimes img.axis['slice'] = sliceaxis # note setting of voxel axis meaning # if the sliceaxes do not match, error when run img_list[0].axis['slice'] = 1 try: st_list = slicetimer.run(img) except SliceTimeError as msg: assert msg == 'images do not have the same sliceaxes' # Or - with ImageList object img_list.timestamps = timestamps img_list.slicetimes = slicetimes img_list.axis['slice'] = sliceaxis # Either way, we run and save st_list = slicetimer.run(img) ni.save(ni.list_to_image(st_list), 'stsome4d.nii') Creating an image given data and affine ======================================= Showing how we would like the image creation API to look:: # making an image from an affine data = img.array affine = np.eye(4) scanner_img = ni.Image(data, ni.ref.voxel2scanner(affine)) mni_img = ni.Image(data, ni.ref.voxel2mni(affine)) Coregistration / normalization ============================== Demonstrating coordinate maps and non-linear resampling:: # coregistration and normalization anat_img = ni.load_image('anatomical.nii') func_img = 
ni.load_image('epi4d.nii') template = ni.load_image('mni152T1.nii') # coreg coreger = ni.interfaces.fsl.flirt(tol=0.2) coreg_cmap = coreger.run(fixed=func_img, moving=anat_img) c_anat_img = ni.Image(anat_img.data, coreg_cmap.compose_with(anat_img.cmap)) # calculate normalization parameters template_cmap = template.coordmap template_dims = template.data.shape c_anat_cmap = c_anat_img.coordmap normalizer = ni.interfaces.fsl.fnirt(param=3) norm_cmap = normalizer.run(moving=template, fixed=c_anat_img) # resample anatomical using calculated coordinate map full_cmap = norm_cmap.composed_with(template_cmap) w_anat_data = img.resliced_to_grid(full_cmap, template_dims) w_anat_img = ni.Image(w_anat_data, template.coordmap) # resample functionals with calculated coordinate map w_func_list = [] for img in ni.image_list(func_img, axis=3): w_img_data = img.resliced_to_grid(full_cmap, template_dims) w_func_list.append(ni.Image(w_img_data, template_cmap)) ni.save(ni.list_to_image(w_func_list), 'stsome4d.nii') nipy-0.3.0/doc/devel/code_discussions/usecases/index.rst000066400000000000000000000002571210344137400233540ustar00rootroot00000000000000.. _usecases_index: ====================== Defining use cases ====================== .. toctree:: :maxdepth: 2 transformations images resampling batching nipy-0.3.0/doc/devel/code_discussions/usecases/resampling.rst000066400000000000000000000002211210344137400243750ustar00rootroot00000000000000.. _resampling: ======================= Resampling use cases ======================= Use cases for image resampling. See also :ref:`images`. nipy-0.3.0/doc/devel/code_discussions/usecases/transformations.rst000066400000000000000000000175751210344137400255110ustar00rootroot00000000000000.. _transformations: ========================== Transformation use cases ========================== Use cases for defining and using transforms on images. We should be very careful to only use the terms ``x, y, z`` to refer to physical space. For voxels, we should use ``i, j, k``, or ``i', j', k'`` (i prime, j prime k prime). I have an image *Img*. Image Orientation ----------------- I would like to know what the voxel sizes are. I would like to determine whether it was acquired axially, coronally or sagittally. What is the brain orientation in relation to the voxels? Has it been acquired at an oblique angle? What are the voxel dimensions?:: img = load_image(file) cm = img.coordmap print cm input_coords axis_i: axis_j: axis_k: effective pixel dimensions axis_i: 4mm axis_j: 2mm axis_k: 2mm input/output mapping x y z ------------ i| 90 90 0 j| 90 0 90 k| 180 90 90 input axis_i maps exactly to output axis_z input axis_j maps exactly to output axis_y input axis_k maps exactly to output axis_x flipped 180 output_coords axis0: Left -> Right axis1: Posterior -> Anterior axis2: Inferior -> Superior In the case of a mapping that does not exactly align the input and output axes, something like:: ... input/output mapping input axis0 maps closest to output axis2 input axis1 maps closest to output axis1 input axis2 maps closest to output axis0 ... If the best matching axis is reversed compared to input axis:: ... input axis0 maps [closest|exactly] to negative output axis2 and so on. Creating transformations / co-ordinate maps ------------------------------------------- I have an array *pixelarray* that represents voxels in an image and have a matrix/transform *mat* which represents the relation between the voxel coordinates and the coordinates in scanner space (world coordinates). 
I want to associate the array with the matrix:: img = load_image(infile) pixelarray = np.asarray(img) (*pixelarray* is an array and does not have a coordinate map.):: pixelarray.shape (40,256,256) So, now I have some arbitrary transformation matrix:: mat = np.zeros((4,4)) mat[0,2] = 2 # giving x mm scaling mat[1,1] = 2 # giving y mm scaling mat[2,0] = 4 # giving z mm scaling mat[3,3] = 1 # because it must be so # Note inverse diagonal for zyx->xyz coordinate flip I want to make an ``Image`` with these two:: coordmap = voxel2mm(pixelarray.shape, mat) img = Image(pixelarray, coordmap) The ``voxel2mm`` function allows separation of the image *array* from the size of the array, e.g.:: coordmap = voxel2mm((40,256,256), mat) We could have another way of constructing image which allows passing of *mat* directly:: img = Image(pixelarray, mat=mat) or:: img = Image.from_data_and_mat(pixelarray, mat) but there should be "only one (obvious) way to do it". Composing transforms '''''''''''''''''''' I have two images, *img1* and *img2*. Each image has a voxel-to-world transform associated with it. (The "world" for these two transforms could be similar or even identical in the case of an fmri series.) I would like to get from voxel coordinates in *img1* to voxel coordinates in *img2*, for resampling:: imgA = load_image(infile_A) vx2mmA = imgA.coordmap imgB = load_image(infile_B) vx2mmB = imgB.coordmap mm2vxB = vx2mmB.inverse # I want to first apply transform implied in # cmA, then the inverse of transform implied in # cmB. If these are matrices then this would be # np.dot(mm2vxB, vx2mmA) voxA_to_voxB = mm2vxB.composewith(vx2mmA) The (matrix) multiply version of this syntax would be:: voxA_to_voxB = mm2vxB * vx2mmA Composition should be of form ``Second.composewith(First)`` - as in ``voxA_to_voxB = mm2vxB.composewith(vx2mmA)`` above. The alternative is ``First.composewith(Second)``, as in ``voxA_to_voxB = vx2mmA.composewith(mm2vxB)``. We choose ``Second.composewith(First)`` on the basis that people need to understand the mathematics of function composition to some degree - see wikipedia_function_composition_. .. _wikipedia_function_composition: http://en.wikipedia.org/wiki/Function_composition Real world to real world transform '''''''''''''''''''''''''''''''''' We remind each other that a mapping is a function (callable) that takes coordinates as input and returns coordinates as output. So, if *M* is a mapping then:: [i',j',k'] = M(i, j, k) where the *i, j, k* tuple is a coordinate, and the *i', j', k'* tuple is a transformed coordinate. Let us imagine we have somehow come by a mapping *T* that relates a coordinate in a world space (mm) to other coordinates in a world space. A registration may return such a real-world to real-world mapping. Let us say that *V* is a useful mapping matching the voxel coordinates in *img1* to voxel coordinates in *img2*. 
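As a purely illustrative aside, here is what the matrix form of this composition - the ``np.dot(mm2vxB, vx2mmA)`` mentioned above - works out to for two images with different voxel sizes; the 4x4 affine values below are invented for the example::

    import numpy as np

    # Invented voxel->mm affines: 2x2x4 mm voxels for imgA, 3x3x3 mm for imgB
    vx2mmA = np.diag([2., 2., 4., 1.])
    vx2mmB = np.diag([3., 3., 3., 1.])
    mm2vxB = np.linalg.inv(vx2mmB)

    # Second.composewith(First): apply vx2mmA first, then mm2vxB
    voxA_to_voxB = np.dot(mm2vxB, vx2mmA)

    # Voxel (3, 10, 20) in imgA, as a homogeneous coordinate
    ijk_A = np.array([3., 10., 20., 1.])
    print(np.dot(voxA_to_voxB, ijk_A)[:3])  # approximately [2., 6.67, 26.67]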
If *img1* has a voxel to mm mapping *M1* and *img2* has a mm to voxel mapping of *inv_M2*, as in the previous example (repeated here):: imgA = load_image(infile_A) vx2mmA = imgA.coordmap imgB = load_image(infile_B) vx2mmB = imgB.coordmap mm2vxB = vx2mmB.inverse then the registration may return the some coordinate map, *T* such that the intended mapping *V* from voxels in *img1* to voxels in *img2* is:: mm2vxB_map = mm2vxB.mapping vx2mmA_map = vx2mmA.mapping V = mm2vxB_map.composewith(T.composedwith(vx2mmA_map)) To support this, there should be a CoordinateMap constructor that looks like this:: T_coordmap = mm2mm(T) where *T* is a mapping, so that:: V_coordmap = mm2vxB.composewith(T_coordmap.composedwith(vx2mmA)) I have done a coregistration between two images, *img1* and *img2*. This has given me a voxel-to-voxel transformation and I want to store this transformation in such a way that I can use this transform to resample *img1* to *img2*. :ref:`resampling` I have done a coregistration between two images, *img1* and *img2*. I may want this to give me a worldA-to-worldB transformation, where worldA is the world of voxel-to-world for *img1*, and worldB is the world of voxel-to-world of *img2*. My *img1* has a voxel to world transformation. This transformation may (for example) have come from the scanner that acquired the image - so telling me how the voxel positions in *img1* correspond to physical coordinates in terms of the magnet isocenter and millimeters in terms of the primary gradient orientations (x, y and z). I have the same for *img2*. For example, I might choose to display this image resampled so each voxel is a 1mm cube. Now I have these transformations: ST(*img1*-V2W), and ST(*img2*-V2W) (where ST is *scanner tranform* as above, and *V2W* is voxel to world). I have now done a coregistration between *img1* and *img2* (somehow) - giving me, in addition to *img1* and *img2*, a transformation that registers *img1* and *img2*. Let's call this tranformation V2V(*img1*, *img2*), where V2V is voxel-to-voxel. In actuality *img2* can be an array of images, such as series of fMRI images and I want to align all the *img2* series to *img1* and then take these voxel-to-voxel aligned images (the *img1* and *img2* array) and remap them to the world space (voxel-to-world). Since remapping is an interpolation operation I can generate errors in the resampled pixel values. If I do more than one resampling, error will accumulate. I want to do only a single resampling. To avoid the errors associated with resampling I will build a *composite transformation* that will chain the separate voxel-to-voxel and voxel-to-world transformations into a single transformation function (such as an affine matrix that is the result of multiplying the several affine matrices together). With this single *composite transformatio* I now resample *img1* and *img2* and put them into the world coordinate system from which I can make measurements. nipy-0.3.0/doc/devel/development_quickstart.rst000066400000000000000000000052331210344137400216650ustar00rootroot00000000000000.. _development-quickstart: ======================== Development quickstart ======================== Source Code =========== NIPY uses github_ for our code hosting. For immediate access to the source code, see the `nipy github`_ site. Checking out the latest version =============================== To check out the latest version of nipy you need git_:: git clone git://github.com/nipy/nipy.git There are two methods to install a development version of nipy. 
For both methods, build the extensions in place:: python setup.py build_ext --inplace Then you can either: #. Create a symbolic link in your *site-packages* directory to the inplace build of your source. The advantage of this method is it does not require any modifications of your PYTHONPATH. #. Place the source directory in your PYTHONPATH. With either method, all of the modifications made to your source tree will be picked up when nipy is imported. Getting data files ================== See :ref:`data_files`. Guidelines ========== We have adopted many developer guidelines in an effort to make development easy, and the source code readable, consistent and robust. Many of our guidelines are adopted from the scipy_ / numpy_ community. We welcome new developers to the effort, if you're interested in developing code or documentation please join the `nipy mailing list`_ and introduce yourself. If you plan to do any code development, we ask that you take a look at the following guidelines. We do our best to follow these guidelines ourselves: * :ref:`howto_document` : Documentation is critical. This document describes the documentation style, syntax, and tools we use. * `Numpy/Scipy Coding Style Guidelines: `_ This is the coding style we strive to maintain. * :ref:`development-workflow` : This describes our process for version control. * :ref:`testing` : We've adopted a rigorous testing framework. * :ref:`optimization`: "premature optimization is the root of all evil." .. _trunk_download: Submitting a patch ================== The preferred method to submit a patch is to create a branch of nipy on your machine, modify the code and make a patch or patches. Then email the `nipy mailing list`_ and we will review your code and hopefully apply (merge) your patch. See the instructions for :ref:`making-patches`. If you do not wish to use git and github, please feel free to file a bug report and submit a patch or email the `nipy mailing list`_. Bug reports =========== If you find a bug in nipy, please submit a bug report at the `nipy bugs`_ github site so that we can fix it. .. include:: ../links_names.txt nipy-0.3.0/doc/devel/doctests_preprocessor.rst000066400000000000000000000104151210344137400215250ustar00rootroot00000000000000#################### Doctest preprocessor #################### We oftentimes have output from doctests that we don't want to test explicitly against the output string. An obvious example is decimal precision. For example:: >>> import numpy as np >>> np.sqrt(2) 1.4142135623730951 where the last few digits depend on the CPU and math libraries on the particular platform. Another example are sympy tests, because the order of symbols in an expression can be difficult to predict:: >>> from sympy import symbols, cos >>> a, b = symbols('a, b') >>> cos(b) + a a + cos(b) >>> c, d = symbols('c, d') >>> cos(c) + d d + cos(c) It looks like the order is predictable for particular versions of sympy, but not across versions. A third is the classic of output dtype specific to byte ordering:: >>> np.zeros((1,), dtype=[('f1', 'i')]) array([(0,)], dtype=[('f1', '>> import numpy as np >>> np.sqrt(2) #doctest +ELLIPSIS 1.414213562373... and:: >>> np.zeros((1,), dtype=[('f1', 'i')]) array([(0,)], dtype=[('f1', '...i4')]) So - ugly - because the reader can't easily guess what you've elided, and so the examples are hard to read. And, it can't easily deal with the sympy case above. In the cases below, I found the doctest machinery by looking for ``OutputChecker`` or ``DoctestParser``. 
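For orientation on what such a preprocessor might look like, a checker that tolerates differences in floating point precision could subclass the standard library's ``doctest.OutputChecker`` and normalize float literals before comparing. The sketch below is only an illustration of the idea - the class name and the rounding rule are invented, and this is not the checker that numpy, sympy or nipy actually ship::

    import doctest
    import re

    FLOAT_RE = re.compile(r'-?\d+\.\d+(?:[eE][+-]?\d+)?')

    class RoundingOutputChecker(doctest.OutputChecker):
        # Hypothetical checker: if the exact comparison fails, retry with
        # every float literal in both strings rounded to 6 significant digits
        def check_output(self, want, got, optionflags):
            if doctest.OutputChecker.check_output(self, want, got, optionflags):
                return True
            round6 = lambda match: '%.6g' % float(match.group())
            return doctest.OutputChecker.check_output(
                self, FLOAT_RE.sub(round6, want),
                FLOAT_RE.sub(round6, got), optionflags)

A test runner would then have to install a checker like this in place of the default one, which is roughly what the numpy machinery described next does with its own ``NumpyOutputChecker``.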
Numpy doctest machinery ======================= We clearly depend on Numpy. The doctest machinery is in ``numpy/testing/noseclasses.py``. Around line 232 in my version, we see:: test.globs = {'__builtins__':__builtins__, '__file__':'__main__', '__name__':'__main__', 'np':numpy} and following. Here the normal module execution context gets overwritten by this minimal namespace, and hence, if we used this stuff directly, we'd have to import a load of module level stuff in every doctest - which looks messy and makes the examples harder to read. Notice around line 144, there is the ``NumpyOutputChecker`` - there is a hackish check for the string ``#random`` that appears to result in the check being short-cut as passed, and attempts to deal with the byte order and 'i4' 'i8' default integer for 32 and 64 bit. The whole is rather difficult to work with because the class names are hard-coded into the various method calls. In summary, ``numpy.testing.nosetester.NoseTester.test`` initializes then calls ``NumpyTestProgram``. ``NumpyTestProgram`` pulls the ``doctest`` nose plugin out of the plugin list. Meanwhile, in ``numpy.testing.nosetester.NoseTester.prepare_test_args`` (called from the ``test`` method), ``--with-doctest`` becomes ``--with-numpydoctest``, and the method stuffs the ``NumpyDoctest`` and ``KnownFailure`` plugins into the list of plugins. So, overriding this stuff means subclassing or otherwise replacing ``NumpyDoctest`` with our own doctest plugin. Options are - rewrite for ourselves, or generalize numpy's machinery, propose pull, and meanwhile use the rewrite for our own purposes. Sympy doctest machinery ======================= We depend on sympy, but not as fundamentally as we depend on numpy. Sympy's test machinery is in ``sympy/utilities/runtest.py``. They don't use nose_. Sympy also clears the context so all names have to be imported specifically in the doctests. However, names imported in one doctest are available in the others. They initialize printing with:: def setup_pprint(): from sympy import pprint_use_unicode, init_printing # force pprint to be in ascii mode in doctests pprint_use_unicode(False) # hook our nice, hash-stable strprinter init_printing(pretty_print=False) in that same file. Quick tests suggested that the usual result of this is to output strings via ``sympy.printing.sstrrepr``. This doesn't seem to affect the order of symbol output so doesn't solve our problem above. IPython doctest machinery ========================= ``IPython/testing/plugin/ipdoctest.py`` This looks very similar to the numpy machinery. Again, it's a nose plugin that inherits from the nose ``Doctest`` class. .. include:: ../links_names.txt nipy-0.3.0/doc/devel/guidelines/000077500000000000000000000000001210344137400164645ustar00rootroot00000000000000nipy-0.3.0/doc/devel/guidelines/changelog.rst000066400000000000000000000032741210344137400211530ustar00rootroot00000000000000.. _changelog: =============== The ChangeLog =============== **NOTE:** We have not kepted up with our ChangeLog. This is here for future reference. We will be more diligent with this when we have regular software releases. If you are a developer with commit access, **please** fill a proper ChangeLog entry per significant change. The SVN commit messages may be shorter (though a brief summary is appreciated), but a detailed ChangeLog is critical. 
It gives us a history of what has happened, allows us to write release notes at each new release, and is often the only way to backtrack on the rationale for a change (as the diff will only show the change, not **why** it happened). Please skim the existing ChangeLog for an idea of the proper level of detail (you don't have to write a novel about a patch). The existing ChangeLog is generated using (X)Emacs' fantastic ChangeLog mode: all you have to do is position the cursor in the function/method where the change was made, and hit 'C-x 4 a'. XEmacs automatically opens the ChangeLog file, mark a dated/named point, and creates an entry pre-titled with the file and function name. It doesn't get any better than this. If you are not using (X)Emacs, please try to follow the same convention so we have a readable, organized ChangeLog. To get your name in the ChangeLog, set this in your .emacs file: (setq user-full-name "Your Name") (setq user-mail-address "youradddress@domain.com") Feel free to obfuscate or omit the address, but at least leave your name in. For user contributions, try to give credit by name on patches or significant ideas, but please do an @ -> -AT- replacement in the email addresses (users have asked for this in the past). nipy-0.3.0/doc/devel/guidelines/commit_codes.rst000066400000000000000000000033461210344137400216710ustar00rootroot00000000000000.. _commit-codes: Commit message codes --------------------- Please prefix all commit summaries with one (or more) of the following labels. This should help others to easily classify the commits into meaningful categories: * *BF* : bug fix * *RF* : refactoring * *ENH* : new feature or extended functionality * *BW* : addresses backward-compatibility * *OPT* : optimization * *BK* : breaks something and/or tests fail * *DOC*: for all kinds of documentation related commits * *TEST* : for adding or changing tests * *STY* : PEP8 conformance, whitespace changes etc that do not affect function. * *WIP* : Work in progress; please try and avoid using this one, and rebase incomplete changes into functional units using e.g. ``git rebase -i`` So your commit message might look something like this:: TEST: relax test threshold slightly Attempted fix for failure on windows test run when arrays are in fact very close (within 6 dp). Keeping up a habit of doing this is useful because it makes it much easier to see at a glance which changes are likely to be important when you are looking for sources of bugs, fixes, large refactorings or new features. Pull request codes ------------------ When you submit a pull request to github, github will ask you for a summary. If your code is not ready to merge, but you want to get feedback, please consider using ``WIP - me working on image design`` or similar for the title of your pull request. That way we will all know that it's not yet ready to merge and that you may be interested in more fundamental comments about design. When you think the pull request is ready to merge, change the title (using the *Edit* button) to something like ``MRG - my work on image design``. 
nipy-0.3.0/doc/devel/guidelines/compiling_windows.rst000066400000000000000000000012301210344137400227450ustar00rootroot00000000000000Some notes on compiling on windows with Visual Studio ----------------------------------------------------- I followed instructions here: http://wiki.cython.org/64BitCythonExtensionsOnWindows First I downloaded and installed from here: http://download.microsoft.com/download/2/E/9/2E911956-F90F-4BFB-8231-E292A7B6F287/GRMSDKX_EN_DVD.iso via here: http://www.microsoft.com/en-us/download/details.aspx?id=18950#instructions Then I got Visual Studio 2008 from here: http://www.microsoft.com/en-us/download/details.aspx?id=14597 (file ``vcsetup.exe``) with hints from here: http://docs.python.org/devguide/setup.html#windows http://bugs.python.org/issue16161 nipy-0.3.0/doc/devel/guidelines/coverage_testing.rst000066400000000000000000000060131210344137400225460ustar00rootroot00000000000000 Coverage Testing ---------------- Coverage testing is a technique used to see how much of the code is exercised by the unit tests. It is important to remember that a high level of coverage is a necessary but not sufficient condition for having effective tests. Coverage testing can be useful for identifying whole functions or classes which are not tested, or for finding certain conditions which are never tested. This is an excellent task for nose_ - the automated test runner we are using. Nose can run the `python coverage tester`_. First make sure you have the coverage tester installed on your system. Download the tarball from the link, extract and install ``python setup.py install``. Or on Ubuntu you can install from apt-get: ``sudo apt-get install python-coverage``. Run nose with coverage testing arguments:: nosetests -sv --with-coverage path_to_code For example, this command:: nosetests -sv --with-coverage test_coordinate_map.py will report the following:: Name Stmts Exec Cover Missing ----------------------------------------------------------------------------- nipy 21 14 66% 70-74, 88-89 nipy.core 4 4 100% nipy.core.reference 8 8 100% nipy.core.reference.array_coords 100 90 90% 133-134, 148-151, 220, 222, 235, 242 nipy.core.reference.coordinate_map 188 187 99% 738 nipy.core.reference.coordinate_system 61 61 100% nipy.core.reference.slices 34 34 100% nipy.core.transforms 0 0 100% nipy.core.transforms.affines 14 14 100% The coverage report will cover any python source module imported after the start of the test. This can be noisy and difficult to focus on the specific module for which you are writing nosetests. For instance, the above report also included coverage of most of ``numpy``. To focus the coverage report, you can provide nose with the specific package you would like output from using the ``--cover-package``. For example, in writing tests for the coordinate_map module:: nosetests --with-coverage --cover-package=nipy.core.reference.coordinate_map test_coordinate_map.py Since that's a lot to type, I wrote a tool called ``sneeze`` to that simplifies coverage testing with nose. Sneeze ^^^^^^ Sneeze runs nose with coverage testing and reports only the package the test module is testing. It requires the test module follow a simple naming convention: #. Prefix ``test_`` #. The package name you are testing #. Suffix ``.py`` For example, the test module for the ``coordinate_map`` module is named ``test_coordinate_map.py``. Then testing coverage is as simple as:: sneeze.py test_coordinate_map.py Sneeze is included in the ``tools`` directory in the nipy_ source. 
Simply run the ``setup.py`` to install sneeze in your local bin directory. .. include:: ../../links_names.txt nipy-0.3.0/doc/devel/guidelines/debugging.rst000066400000000000000000000024011210344137400211460ustar00rootroot00000000000000=========== Debugging =========== Some options are: Run in ipython -------------- As in:: In [1]: run mymodule.py ... (somecrash) In [2]: %debug Then diagnose, using the workspace that comes up, which has the context of the crash. You can also do:: In [1]: %pdb on In [2]: run mymodule.py ... (somecrash) At that point you will be automatically dropped into the workspace in the context of the error. This is very similar to the matlab ``dbstop if error`` command. See the `ipython manual`_, and `debugging in ipython`_ for more detail. Embed ipython in crashing code ------------------------------ Often it is not possible to run the code directly from ipython using the ``run`` command. For example, the code may be called from some other system such as sphinx_. In that case you can embed. At the point that you want ipython to open with the context available for introspection, add:: from IPython.Shell import IPShellEmbed ipshell = IPShellEmbed() ipshell() See `embedding ipython`_ for more detail. .. include:: ../../links_names.txt nipy-0.3.0/doc/devel/guidelines/elegant.py000066400000000000000000000003161210344137400204550ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import matplotlib.pyplot as plt plt.plot([1,2,3], [4,5,6]) plt.ylabel('some more numbers') nipy-0.3.0/doc/devel/guidelines/gitwash/000077500000000000000000000000001210344137400201325ustar00rootroot00000000000000nipy-0.3.0/doc/devel/guidelines/gitwash/branch_list.png000066400000000000000000000320611210344137400231320ustar00rootroot00000000000000[binary PNG image data for branch_list.png omitted]
nipy-0.3.0/doc/devel/guidelines/gitwash/branch_list_compare.png000066400000000000000000000246671210344137400246510ustar00rootroot00000000000000[binary PNG image data for branch_list_compare.png omitted]
nipy-0.3.0/doc/devel/guidelines/gitwash/configure_git.rst000066400000000000000000000057751210344137400235110ustar00rootroot00000000000000.. _configure-git: =============== Configure git =============== .. 
_git-config-basic: Overview ======== Your personal git_ configurations are saved in the ``.gitconfig`` file in your home directory. Here is an example ``.gitconfig`` file:: [user] name = Your Name email = you@yourdomain.example.com [alias] ci = commit -a co = checkout st = status -a stat = status -a br = branch wdiff = diff --color-words [core] editor = vim [merge] summary = true You can edit this file directly or you can use the ``git config --global`` command:: git config --global user.name "Your Name" git config --global user.email you@yourdomain.example.com git config --global alias.ci "commit -a" git config --global alias.co checkout git config --global alias.st "status -a" git config --global alias.stat "status -a" git config --global alias.br branch git config --global alias.wdiff "diff --color-words" git config --global core.editor vim git config --global merge.summary true To set up on another computer, you can copy your ``~/.gitconfig`` file, or run the commands above. In detail ========= user.name and user.email ------------------------ It is good practice to tell git_ who you are, for labeling any changes you make to the code. The simplest way to do this is from the command line:: git config --global user.name "Your Name" git config --global user.email you@yourdomain.example.com This will write the settings into your git configuration file, which should now contain a user section with your name and email:: [user] name = Your Name email = you@yourdomain.example.com Of course you'll need to replace ``Your Name`` and ``you@yourdomain.example.com`` with your actual name and email address. Aliases ------- You might well benefit from some aliases to common commands. For example, you might well want to be able to shorten ``git checkout`` to ``git co``. Or you may want to alias ``git diff --color-words`` (which gives a nicely formatted output of the diff) to ``git wdiff`` The following ``git config --global`` commands:: git config --global alias.ci "commit -a" git config --global alias.co checkout git config --global alias.st "status -a" git config --global alias.stat "status -a" git config --global alias.br branch git config --global alias.wdiff "diff --color-words" will create an ``alias`` section in your ``.gitconfig`` file with contents like this:: [alias] ci = commit -a co = checkout st = status -a stat = status -a br = branch wdiff = diff --color-words Editor ------ You may also want to make sure that your editor of choice is used :: git config --global core.editor vim Merging ------- To enforce summaries when doing merges (``~/.gitconfig`` file again):: [merge] log = true Or from the command line:: git config --global merge.log true .. include:: git_links.inc nipy-0.3.0/doc/devel/guidelines/gitwash/development_workflow.rst000066400000000000000000000161201210344137400251400ustar00rootroot00000000000000.. _development-workflow: ==================== Development workflow ==================== You already have your own forked copy of the nipy_ repository, by following :ref:`forking`, :ref:`set-up-fork`, and you have configured git_ by following :ref:`configure-git`. Workflow summary ================ * Keep your ``master`` branch clean of edits that have not been merged to the main nipy_ development repo. Your ``master`` then will follow the main nipy_ repository. * Start a new *feature branch* for each set of edits that you do. * If you can avoid it, try not to merge other branches into your feature branch while you are working. * Ask for review! 
This way of working really helps to keep work well organized, and in keeping history as clear as possible. See - for example - `linux git workflow`_. Making a new feature branch =========================== :: git branch my-new-feature git checkout my-new-feature Generally, you will want to keep this also on your public github_ fork of nipy_. To do this, you `git push`_ this new branch up to your github_ repo. Generally (if you followed the instructions in these pages, and by default), git will have a link to your github_ repo, called ``origin``. You push up to your own repo on github_ with:: git push origin my-new-feature In git >1.7 you can ensure that the link is correctly set by using the ``--set-upstream`` option:: git push --set-upstream origin my-new-feature From now on git_ will know that ``my-new-feature`` is related to the ``my-new-feature`` branch in the github_ repo. The editing workflow ==================== Overview -------- :: # hack hack git add my_new_file git commit -am 'NF - some message' git push In more detail -------------- #. Make some changes #. See which files have changed with ``git status`` (see `git status`_). You'll see a listing like this one:: # On branch ny-new-feature # Changed but not updated: # (use "git add ..." to update what will be committed) # (use "git checkout -- ..." to discard changes in working directory) # # modified: README # # Untracked files: # (use "git add ..." to include in what will be committed) # # INSTALL no changes added to commit (use "git add" and/or "git commit -a") #. Check what the actual changes are with ``git diff`` (`git diff`_). #. Add any new files to version control ``git add new_file_name`` (see `git add`_). #. To commit all modified files into the local copy of your repo,, do ``git commit -am 'A commit message'``. Note the ``-am`` options to ``commit``. The ``m`` flag just signals that you're going to type a message on the command line. The ``a`` flag - you can just take on faith - or see `why the -a flag?`_ - and the helpful use-case description in the `tangled working copy problem`_. The `git commit`_ manual page might also be useful. #. To push the changes up to your forked repo on github_, do a ``git push`` (see `git push`). Asking for code review ====================== #. Go to your repo URL - e.g. ``http://github.com/your-user-name/nipy``. #. Click on the *Branch list* button: .. image:: branch_list.png #. Click on the *Compare* button for your feature branch - here ``my-new-feature``: .. image:: branch_list_compare.png #. If asked, select the *base* and *comparison* branch names you want to compare. Usually these will be ``master`` and ``my-new-feature`` (where that is your feature branch name). #. At this point you should get a nice summary of the changes. Copy the URL for this, and post it to the `nipy mailing list`_, asking for review. The URL will look something like: ``http://github.com/your-user-name/nipy/compare/master...my-new-feature``. There's an example at http://github.com/matthew-brett/nipy/compare/master...find-install-data See: http://github.com/blog/612-introducing-github-compare-view for more detail. The generated comparison, is between your feature branch ``my-new-feature``, and the place in ``master`` from which you branched ``my-new-feature``. In other words, you can keep updating ``master`` without interfering with the output from the comparison. More detail? Note the three dots in the URL above (``master...my-new-feature``) and see :ref:`dot2-dot3`. 
Asking for your changes to be merged with the main repo ======================================================= When you are ready to ask for the merge of your code: #. Go to the URL of your forked repo, say ``http://github.com/your-user-name/nipy.git``. #. Click on the 'Pull request' button: .. image:: pull_button.png Enter a message; we suggest you select only ``nipy`` as the recipient. The message will go to the `nipy mailing list`_. Please feel free to add others from the list as you like. Merging from trunk ================== This updates your code from the upstream `nipy github`_ repo. Overview -------- :: # go to your master branch git checkout master # pull changes from github git fetch upstream # merge from upstream git merge upstream/master In detail --------- We suggest that you do this only for your ``master`` branch, and leave your 'feature' branches unmerged, to keep their history as clean as possible. This makes code review easier:: git checkout master Make sure you have done :ref:`linking-to-upstream`. Merge the upstream code into your current development by first pulling the upstream repo to a copy on your local machine:: git fetch upstream then merging into your current branch:: git merge upstream/master Deleting a branch on github_ ============================ :: git checkout master # delete branch locally git branch -D my-unwanted-branch # delete branch on github git push origin :my-unwanted-branch (Note the colon ``:`` before ``test-branch``. See also: http://github.com/guides/remove-a-remote-branch Several people sharing a single repository ========================================== If you want to work on some stuff with other people, where you are all committing into the same repository, or even the same branch, then just share it via github_. First fork nipy into your account, as from :ref:`forking`. Then, go to your forked repository github page, say ``http://github.com/your-user-name/nipy`` Click on the 'Admin' button, and add anyone else to the repo as a collaborator: .. image:: pull_button.png Now all those people can do:: git clone git@githhub.com:your-user-name/nipy.git Remember that links starting with ``git@`` use the ssh protocol and are read-write; links starting with ``git://`` are read-only. Your collaborators can then commit directly into that repo with the usual:: git commit -am 'ENH - much better code' git push origin master # pushes directly into your repo Exploring your repository ========================= To see a graphical representation of the repository branches and commits:: gitk --all To see a linear list of commits for this branch:: git log You can also look at the `network graph visualizer`_ for your github_ repo. .. include:: git_links.inc nipy-0.3.0/doc/devel/guidelines/gitwash/dot2_dot3.rst000066400000000000000000000012511210344137400224640ustar00rootroot00000000000000.. _dot2-dot3: ======================================== Two and three dots in difference specs ======================================== Thanks to Yarik Halchenko for this explanation. Imagine a series of commits A, B, C, D... Imagine that there are two branches, *topic* and *master*. You branched *topic* off *master* when *master* was at commit 'E'. The graph of the commits looks like this:: A---B---C topic / D---E---F---G master Then:: git diff master..topic will output the difference from G to C (i.e. with effects of F and G), while:: git diff master...topic would output just differences in the topic branch (i.e. only A, B, and C). 
nipy-0.3.0/doc/devel/guidelines/gitwash/following_latest.rst000066400000000000000000000014701210344137400242420ustar00rootroot00000000000000.. _following-latest: ============================= Following the latest source ============================= These are the instructions if you just want to follow the latest *nipy* source, but you don't need to do any development for now. The steps are: * :ref:`install-git` * get local copy of the git repository from github_ * update local copy from time to time Get the local copy of the code ============================== From the command line:: git clone git://github.com/nipy/nipy.git You now have a copy of the code tree in the new ``nipy`` directory. Updating the code ================= From time to time you may want to pull down the latest code. Do this with:: cd nipy git pull The tree in ``nipy`` will now have the latest changes from the initial repository. .. include:: git_links.inc nipy-0.3.0/doc/devel/guidelines/gitwash/forking_button.png000066400000000000000000000314441210344137400237000ustar00rootroot00000000000000[binary PNG image data for forking_button.png omitted]
nipy-0.3.0/doc/devel/guidelines/gitwash/forking_hell.rst000066400000000000000000000021151210344137400233260ustar00rootroot00000000000000.. 
_forking: ========================================== Making your own copy (fork) of nipy ========================================== You need to do this only once. The instructions here are very similar to the instructions at http://help.github.com/forking/ - please see that page for more detail. We're repeating some of it here just to give the specifics for the nipy_ project, and to suggest some default names. Set up and configure a github_ account ====================================== If you don't have a github_ account, go to the github_ page, and make one. You then need to configure your account to allow write access - see the ``Generating SSH keys`` help on `github help`_. Create your own forked copy of nipy_ ========================================= #. Log into your github_ account. #. Go to the nipy_ github home at `nipy github`_. #. Click on the *fork* button: .. image:: forking_button.png Now, after a short pause and some 'Hardcore forking action', you should find yourself at the home page for your own forked copy of nipy_. .. include:: git_links.inc nipy-0.3.0/doc/devel/guidelines/gitwash/git_development.rst000066400000000000000000000003151210344137400240500ustar00rootroot00000000000000.. _git-development: ===================== Git for development ===================== Contents: .. toctree:: :maxdepth: 2 forking_hell set_up_fork configure_git development_workflow nipy-0.3.0/doc/devel/guidelines/gitwash/git_install.rst000066400000000000000000000011151210344137400231730ustar00rootroot00000000000000.. _install-git: ============= Install git ============= Overview ======== ================ ============= Debian / Ubuntu ``sudo apt-get install git-core`` Fedora ``sudo yum install git-core`` Windows Download and install msysGit_ OS X Use the git-osx-installer_ ================ ============= In detail ========= See the git_ page for the most recent information. Have a look at the github_ install help pages available from `github help`_ There are good instructions here: http://book.git-scm.com/2_installing_git.html .. include:: git_links.inc nipy-0.3.0/doc/devel/guidelines/gitwash/git_intro.rst000066400000000000000000000010361210344137400226620ustar00rootroot00000000000000============== Introduction ============== These pages describe a git_ and github_ workflow for the nipy_ project. There are several different workflows here, for different ways of working with *nipy*. This is not a comprehensive git_ reference, it's just a workflow for our own project. It's tailored to the github_ hosting service. You may well find better or quicker ways of getting stuff done with git_, but these should get you started. For general resources for learning git_ see :ref:`git-resources`. .. include:: git_links.inc nipy-0.3.0/doc/devel/guidelines/gitwash/git_links.inc000066400000000000000000000101521210344137400226070ustar00rootroot00000000000000.. This (-*- rst -*-) format file contains commonly used link targets and name substitutions. It may be included in many files, therefore it should only contain link targets and name substitutions. Try grepping for "^\.\. _" to find plausible candidates for this list. .. NOTE: reST targets are __not_case_sensitive__, so only one target definition is needed for nipy, NIPY, Nipy, etc... .. PROJECTNAME placeholders .. _PROJECTNAME: http://neuroimaging.scipy.org .. _`PROJECTNAME github`: http://github.com/nipy .. _`PROJECTNAME mailing list`: http://projects.scipy.org/mailman/listinfo/nipy-devel .. nipy .. _nipy: http://nipy.org/nipy .. 
_`nipy github`: http://github.com/nipy/nipy .. _`nipy mailing list`: http://mail.scipy.org/mailman/listinfo/nipy-devel .. ipython .. _ipython: http://ipython.scipy.org .. _`ipython github`: http://github.com/ipython/ipython .. _`ipython mailing list`: http://mail.scipy.org/mailman/listinfo/IPython-dev .. dipy .. _dipy: http://nipy.org/dipy .. _`dipy github`: http://github.com/Garyfallidis/dipy .. _`dipy mailing list`: http://mail.scipy.org/mailman/listinfo/nipy-devel .. nibabel .. _nibabel: http://nipy.org/nibabel .. _`nibabel github`: http://github.com/nipy/nibabel .. _`nibabel mailing list`: http://mail.scipy.org/mailman/listinfo/nipy-devel .. marsbar .. _marsbar: http://marsbar.sourceforge.net .. _`marsbar github`: http://github.com/matthew-brett/marsbar .. _`MarsBaR mailing list`: https://lists.sourceforge.net/lists/listinfo/marsbar-users .. git stuff .. _git: http://git-scm.com/ .. _github: http://github.com .. _github help: http://help.github.com .. _msysgit: http://code.google.com/p/msysgit/downloads/list .. _git-osx-installer: http://code.google.com/p/git-osx-installer/downloads/list .. _subversion: http://subversion.tigris.org/ .. _git cheat sheet: http://github.com/guides/git-cheat-sheet .. _pro git book: http://progit.org/ .. _git svn crash course: http://git-scm.com/course/svn.html .. _learn.github: http://learn.github.com/ .. _network graph visualizer: http://github.com/blog/39-say-hello-to-the-network-graph-visualizer .. _git user manual: http://www.kernel.org/pub/software/scm/git/docs/user-manual.html .. _git tutorial: http://www.kernel.org/pub/software/scm/git/docs/gittutorial.html .. _git community book: http://book.git-scm.com/ .. _git ready: http://www.gitready.com/ .. _git casts: http://www.gitcasts.com/ .. _Fernando's git page: http://www.fperez.org/py4science/git.html .. _git magic: http://www-cs-students.stanford.edu/~blynn/gitmagic/index.html .. _git concepts: http://www.eecs.harvard.edu/~cduan/technical/git/ .. _git clone: http://www.kernel.org/pub/software/scm/git/docs/git-clone.html .. _git checkout: http://www.kernel.org/pub/software/scm/git/docs/git-checkout.html .. _git commit: http://www.kernel.org/pub/software/scm/git/docs/git-commit.html .. _git push: http://www.kernel.org/pub/software/scm/git/docs/git-push.html .. _git pull: http://www.kernel.org/pub/software/scm/git/docs/git-pull.html .. _git add: http://www.kernel.org/pub/software/scm/git/docs/git-add.html .. _git status: http://www.kernel.org/pub/software/scm/git/docs/git-status.html .. _git diff: http://www.kernel.org/pub/software/scm/git/docs/git-diff.html .. _git log: http://www.kernel.org/pub/software/scm/git/docs/git-log.html .. _git branch: http://www.kernel.org/pub/software/scm/git/docs/git-branch.html .. _git remote: http://www.kernel.org/pub/software/scm/git/docs/git-remote.html .. _git config: http://www.kernel.org/pub/software/scm/git/docs/git-config.html .. _why the -a flag?: http://www.gitready.com/beginner/2009/01/18/the-staging-area.html .. _git staging area: http://www.gitready.com/beginner/2009/01/18/the-staging-area.html .. _tangled working copy problem: http://tomayko.com/writings/the-thing-about-git .. _git management: http://kerneltrap.org/Linux/Git_Management .. _linux git workflow: http://www.mail-archive.com/dri-devel@lists.sourceforge.net/msg39091.html .. _git parable: http://tom.preston-werner.com/2009/05/19/the-git-parable.html .. _git foundation: http://matthew-brett.github.com/pydagogue/foundation.html .. other stuff .. 
_python: http://www.python.org nipy-0.3.0/doc/devel/guidelines/gitwash/git_resources.rst000066400000000000000000000034031210344137400235410ustar00rootroot00000000000000.. _git-resources: ================ git_ resources ================ Tutorials and summaries ======================= * `github help`_ has an excellent series of how-to guides. * `learn.github`_ has an excellent series of tutorials * The `pro git book`_ is a good in-depth book on git. * A `git cheat sheet`_ is a page giving summaries of common commands. * The `git user manual`_ * The `git tutorial`_ * The `git community book`_ * `git ready`_ - a nice series of tutorials * `git casts`_ - video snippets giving git how-tos. * `git magic`_ - extended introduction with intermediate detail * The `git parable`_ is an easy read explaining the concepts behind git. * Our own `git foundation`_ expands on the `git parable`_. * Fernando Perez' git page - `Fernando's git page`_ - many links and tips * A good but technical page on `git concepts`_ * `git svn crash course`_: git_ for those of us used to subversion_ Advanced git workflow ===================== There are many ways of working with git_; here are some posts on the rules of thumb that other projects have come up with: * Linus Torvalds on `git management`_ * Linus Torvalds on `linux git workflow`_ . Summary; use the git tools to make the history of your edits as clean as possible; merge from upstream edits as little as possible in branches where you are doing active development. Manual pages online =================== You can get these on your own machine with (e.g) ``git help push`` or (same thing) ``git push --help``, but, for convenience, here are the online manual pages for some common commands: * `git add`_ * `git branch`_ * `git checkout`_ * `git clone`_ * `git commit`_ * `git config`_ * `git diff`_ * `git log`_ * `git pull`_ * `git push`_ * `git remote`_ * `git status`_ .. include:: git_links.inc nipy-0.3.0/doc/devel/guidelines/gitwash/index.rst000066400000000000000000000003431210344137400217730ustar00rootroot00000000000000.. _using-git: Working with *nipy* source code ====================================== Contents: .. toctree:: :maxdepth: 2 git_intro git_install following_latest patching git_development git_resources nipy-0.3.0/doc/devel/guidelines/gitwash/patching.rst000066400000000000000000000075431210344137400224720ustar00rootroot00000000000000================ Making a patch ================ You've discovered a bug or something else you want to change in nipy_ - excellent! You've worked out a way to fix it - even better! You want to tell us about it - best of all! The easiest way is to make a *patch* or set of patches. Here we explain how. Making a patch is the simplest and quickest, but if you're going to be doing anything more than simple quick things, please consider following the :ref:`git-development` model instead. .. 
_making-patches: Making patches ============== Overview -------- :: # tell git who you are git config --global user.email you@yourdomain.example.com git config --global user.name "Your Name Comes Here" # get the repository if you don't have it git clone git://github.com/nipy/nipy.git # make a branch for your patching cd nipy git branch the-fix-im-thinking-of git checkout the-fix-im-thinking-of # hack, hack, hack # Tell git about any new files you've made git add somewhere/tests/test_my_bug.py # commit work in progress as you go git commit -am 'BF - added tests for Funny bug' # hack hack, hack git commit -am 'BF - added fix for Funny bug' # make the patch files git format-patch -M -C master Then, send the generated patch files to the `nipy mailing list`_ - where we will thank you warmly. In detail --------- #. Tell git_ who you are so it can label the commits you've made:: git config --global user.email you@yourdomain.example.com git config --global user.name "Your Name Comes Here" #. If you don't already have one, clone a copy of the nipy_ repository:: git clone git://github.com/nipy/nipy.git cd nipy #. Make a 'feature branch'. This will be where you work on your bug fix. It's nice and safe and leaves you with access to an unmodified copy of the code in the main branch:: git branch the-fix-im-thinking-of git checkout the-fix-im-thinking-of #. Do some edits, and commit them as you go:: # hack, hack, hack # Tell git about any new files you've made git add somewhere/tests/test_my_bug.py # commit work in progress as you go git commit -am 'BF - added tests for Funny bug' # hack hack, hack git commit -am 'BF - added fix for Funny bug' Note the ``-am`` options to ``commit``. The ``m`` flag just signals that you're going to type a message on the command line. The ``a`` flag - you can just take on faith - or see `why the -a flag?`_. #. When you have finished, check you have committed all your changes:: git status #. Finally, make your commits into patches. You want all the commits since you branched from the ``master`` branch:: git format-patch -M -C master You will now have several files named for the commits:: 0001-BF-added-tests-for-Funny-bug.patch 0002-BF-added-fix-for-Funny-bug.patch Send these files to the `nipy mailing list`_. When you are done, to switch back to the main copy of the code, just return to the ``master`` branch:: git checkout master Moving from patching to development =================================== If you find you have done some patches, and you have one or more feature branches, you will probably want to switch to development mode. You can do this with the repository you have. Fork the nipy_ repository on github_ - :ref:`forking`. Then:: # checkout and refresh master branch from main repo git checkout master git pull origin master # rename pointer to main repository to 'upstream' git remote rename origin upstream # point your repo to default read / write to your fork on github git remote add origin git@github.com:your-user-name/nipy.git # push up any branches you've made and want to keep git push origin the-fix-im-thinking-of Then you can, if you want, follow the :ref:`development-workflow`. .. include:: git_links.inc nipy-0.3.0/doc/devel/guidelines/gitwash/pull_button.png000066400000000000000000000311351210344137400232120ustar00rootroot00000000000000
nipy-0.3.0/doc/devel/guidelines/gitwash/set_up_fork.rst000066400000000000000000000036501210344137400232100ustar00rootroot00000000000000.. 
_set-up-fork: ================== Set up your fork ================== First you follow the instructions for :ref:`forking`. Overview ======== :: git clone git@github.com:your-user-name/nipy.git cd nipy git remote add upstream git://github.com/nipy/nipy.git In detail ========= Clone your fork --------------- #. Clone your fork to the local computer with ``git clone git@github.com:your-user-name/nipy.git`` #. Investigate. Change directory to your new repo: ``cd nipy``. Then ``git branch -a`` to show you all branches. You'll get something like:: * master remotes/origin/master This tells you that you are currently on the ``master`` branch, and that you also have a ``remote`` connection to ``origin/master``. What remote repository is ``remote/origin``? Try ``git remote -v`` to see the URLs for the remote. They will point to your github_ fork. Now you want to connect to the upstream `nipy github`_ repository, so you can merge in changes from trunk. .. _linking-to-upstream: Linking your repository to the upstream repo -------------------------------------------- :: cd nipy git remote add upstream git://github.com/nipy/nipy.git ``upstream`` here is just the arbitrary name we're using to refer to the main nipy_ repository at `nipy github`_. Note that we've used ``git://`` for the URL rather than ``git@``. The ``git://`` URL is read only. This means we that we can't accidentally (or deliberately) write to the upstream repo, and we are only going to use it to merge into our own code. Just for your own satisfaction, show yourself that you now have a new 'remote', with ``git remote -v show``, giving you something like:: upstream git://github.com/nipy/nipy.git (fetch) upstream git://github.com/nipy/nipy.git (push) origin git@github.com:your-user-name/nipy.git (fetch) origin git@github.com:your-user-name/nipy.git (push) .. include:: git_links.inc nipy-0.3.0/doc/devel/guidelines/howto_document.rst000066400000000000000000000047371210344137400222670ustar00rootroot00000000000000.. _howto_document: ============================ How to write documentation ============================ Nipy_ uses the Sphinx_ documentation generating tool. Sphinx translates reST_ formatted documents into html and pdf documents. All our documents and docstrings are in reST format, this allows us to have both human-readable docstrings when viewed in ipython_, and web and print quality documentation. Building the documentation -------------------------- You need to have Sphinx_ (version 0.6.2 or above) and graphviz_ (version 2.20 or greater). The ``Makefile`` (in the top-level doc directory) automates the generation of the documents. To make the HTML documents:: make html For PDF documentation do:: make pdf The built documentation is then placed in a ``build/html`` or ``build/latex`` subdirectories. For more options, type:: make help Viewing the documentation ------------------------- We also build our website using sphinx_. All of the documentation in the ``docs`` directory is included on the website. There are a few files that are website only and these are placed in the ``www`` directory. The easiest way to view the documentation while editing is to build the website and open the local build in your browser:: make web Then open ``www/build/html/index.html`` in your browser. Syntax ------ Please have a look at our :ref:`sphinx_helpers` for examples on using Sphinx_ and reST_ in our documentation. The Sphinx website also has an excellent `sphinx rest`_ primer. 
Additional reST references:: - `reST primer `_ - `reST quick reference `_ Consider using emacs for editing rst files - see :ref:`rst_emacs` Style ----- Nipy has adopted the numpy_ documentation standards. The `numpy coding style guideline`_ is the main reference for how to format the documentation in your code. It's also useful to look at the `source reST file `_ that generates the coding style guideline. Numpy has a `detailed example `_ for writing docstrings. .. _`numpy coding style guideline`: http://scipy.org/scipy/numpy/wiki/CodingStyleGuidelines Documentation Problems ---------------------- See our :ref:`documentation_faq` if you are having problems building or writing the documentation. .. include:: ../../links_names.txt nipy-0.3.0/doc/devel/guidelines/index.rst000066400000000000000000000005371210344137400203320ustar00rootroot00000000000000.. _development_guidelines: ======================== Development Guidelines ======================== .. only:: html :Release: |version| :Date: |today| .. toctree:: :maxdepth: 2 howto_document sphinx_helpers gitwash/index commit_codes testing debugging optimization open_source_devel make_release changelog nipy-0.3.0/doc/devel/guidelines/make_release.rst000066400000000000000000000231601210344137400216350ustar00rootroot00000000000000.. _release-guide: *********************************** A guide to making a nipy release *********************************** A guide for developers who are doing a nipy release .. _release-tools: Release tools ============= There are some release utilities that come with nibabel_. nibabel should install these as the ``nisext`` package, and the testing stuff is understandably in the ``testers`` module of that package. nipy has Makefile targets for their use. The relevant targets are:: make check-version-info make check-files make sdist-tests The first installs the code from a git archive, from the repository, and for in-place use, and runs the ``get_info()`` function to confirm that installation is working and information parameters are set correctly. The second (``sdist-tests``) makes an sdist source distribution archive, installs it to a temporary directory, and runs the tests of that install. If you have a version of nipy trunk past February 11th 2011, there will also be a functional make target:: make bdist-egg-tests This builds an egg (which is a zip file), hatches it (unzips the egg) and runs the tests from the resulting directory. .. _release-checklist: Release checklist ================= * Review the open list of `nipy issues`_. Check whether there are outstanding issues that can be closed, and whether there are any issues that should delay the release. Label them ! * Review and update the release notes. Review and update the :file:`Changelog` file. Get a partial list of contributors with something like:: git log 0.2.0.. | grep '^Author' | cut -d' ' -f 2- | sort | uniq where ``0.2.0`` was the last release tag name. Then manually go over ``git shortlog 0.2.0..`` to make sure the release notes are as complete as possible and that every contributor was recognized. * Use the opportunity to update the ``.mailmap`` file if there are any duplicate authors listed from ``git shortlog``. * Check the examples in python 2 and python 3, by running something like:: cd .. ./nipy/tools/run_log_examples.py nipy/examples --log-path=~/tmp/eg_logs in a python 2 and python 3 virtualenv. Review the output in (e.g.) ``~/tmp/eg_logs``. 
The output file ``summary.txt`` will have the pass file printout that the ``run_log_examples.py`` script puts onto stdout while running. * Check the ``long_description`` in ``nipy/info.py``. Check it matches the ``README`` in the root directory, maybe with ``vim`` ``diffthis`` command. * Do a final check on the `nipy buildbot`_ * If you have travis-ci_ building set up you might want to push the code in its current state to a branch that will build, e.g:: git branch -D pre-release-test # in case branch already exists git co -b pre-release-test git push origin pre-release-test * Make sure all the ``.c`` generated files are up to date with Cython sources with:: ./tools/nicythize Release checking - buildbots ============================ * Check all the buildbots pass * Run the builder and review the possibly green output from http://nipy.bic.berkeley.edu/builders/nipy-release-checks This runs all of:: make distclean python -m compileall . make sdist-tests make bdist-egg-tests make check-version-info make check-files * You need to review the outputs for errors; at the moment this buildbot builder does not check whether these tests passed or failed. * ``make bdist-egg-tests`` may well fail because of a problem with the script tests; if you have a recent (>= Jan 15 2013) nibabel ``nisext`` package, you could try instead doing:: python -c 'from nisext.testers import bdist_egg_tests; bdist_egg_tests("nipy", label="not slow and not script_test")' Eventually we should update the ``bdist-egg-tests`` makefile target. * ``make check-version-info`` checks how the commit hash is stored in the installed files. You should see something like this:: {'sys_version': '2.6.6 (r266:84374, Aug 31 2010, 11:00:51) \n[GCC 4.0.1 (Apple Inc. build 5493)]', 'commit_source': 'archive substitution', 'np_version': '1.5.0', 'commit_hash': '25b4125', 'pkg_path': '/var/folders/jg/jgfZ12ZXHwGSFKD85xLpLk+++TI/-Tmp-/tmpGPiD3E/pylib/nipy', 'sys_executable': '/Library/Frameworks/Python.framework/Versions/2.6/Resources/Python.app/Contents/MacOS/Python', 'sys_platform': 'darwin'} /var/folders/jg/jgfZ12ZXHwGSFKD85xLpLk+++TI/-Tmp-/tmpGPiD3E/pylib/nipy/__init__.pyc {'sys_version': '2.6.6 (r266:84374, Aug 31 2010, 11:00:51) \n[GCC 4.0.1 (Apple Inc. build 5493)]', 'commit_source': 'installation', 'np_version': '1.5.0', 'commit_hash': '25b4125', 'pkg_path': '/var/folders/jg/jgfZ12ZXHwGSFKD85xLpLk+++TI/-Tmp-/tmpGPiD3E/pylib/nipy', 'sys_executable': '/Library/Frameworks/Python.framework/Versions/2.6/Resources/Python.app/Contents/MacOS/Python', 'sys_platform': 'darwin'} /Users/mb312/dev_trees/nipy/nipy/__init__.pyc {'sys_version': '2.6.6 (r266:84374, Aug 31 2010, 11:00:51) \n[GCC 4.0.1 (Apple Inc. build 5493)]', 'commit_source': 'repository', 'np_version': '1.5.0', 'commit_hash': '25b4125', 'pkg_path': '/Users/mb312/dev_trees/nipy/nipy', 'sys_executable': '/Library/Frameworks/Python.framework/Versions/2.6/Resources/Python.app/Contents/MacOS/Python', 'sys_platform': 'darwin'} * ``make check-files`` checks if the source distribution is picking up all the library and script files. Look for output at the end about missed files, such as:: Missed script files: /Users/mb312/dev_trees/nipy/bin/nib-dicomfs, /Users/mb312/dev_trees/nipy/bin/nifti1_diagnose.py Fix ``setup.py`` to carry across any files that should be in the distribution. * Check the documentation doctests pass from http://nipy.bic.berkeley.edu/builders/nipy-doc-builder * You may have virtualenvs for different python versions. Check the tests pass for different configurations. 
If you have pytox_ and a network connection, and lots of pythons installed, you might be able to do:: tox and get tests for python 2.5, 2.6, 2.7, 3.2. I (MB) have my own set of virtualenvs installed and I've set them up to run with:: tox -e python25,python26,python27,python32,np-1.2.1 The trick was only to define these ``testenv`` sections in ``tox.ini``. These two above run with:: make tox-fresh make tox-stale respectively. The long-hand not-tox way looks like this:: workon python26 make sdist-tests deactivate etc for the different virtualenvs. Doing the release ================= * The release should now be ready. * Edit :file:`nipy/info.py` to set ``_version_extra`` to ``''``; commit. Then:: make source-release * Once everything looks good, you are ready to upload the source release to PyPi. See `setuptools intro`_. Make sure you have a file ``\$HOME/.pypirc``, of form:: [distutils] index-servers = pypi [pypi] username:your.pypi.username password:your-password [server-login] username:your.pypi.username password:your-password * Once everything looks good, upload the source release to PyPi. See `setuptools intro`_:: python setup.py register python setup.py sdist --formats=gztar,zip upload * Trigger binary builds for Windows from the buildbots. See builders ``nipy-bdist32-26``, ``nipy-bdist32-27``, ``nipy-bdist32-32``. The ``exe`` builds will appear in http://nipy.bic.berkeley.edu/nipy-dist . Download the builds and upload to pypi. * Trigger binary builds for OSX from the buildbots ``nipy-bdist-mpkg-2.6``, ``nipy-bdist-mpkg-2.7``, ``nipy-bdist-mpkg-3.3``. ``egg`` and ``mpkg`` builds will appear in http://nipy.bic.berkeley.edu/nipy-dist . Download the eggs and upload to pypi. * Download the ``mpkg`` builds, maybe with:: scp -r buildbot@nipy.bic.berkeley.edu:nibotmi/public_html/nipy-dist/*.mpkg . Make sure you have `github bdist_mpkg`_ installed, for the root user. For each ``mpkg`` directory, run:: sudo reown_mpkg nipy-0.3.0.dev-py2.6-macosx10.6.mpkg root admin zip -r nipy-0.3.0.dev-py2.6-macosx10.6.mpkg.zip nipy-0.3.0.dev-py2.6-macosx10.6.mpkg Upload the ``mpkg.zip`` files. (At the moment, these don't seem to store the scripts - needs more work) * Tag the release with tag of form ``0.3.0``:: git tag -am 'Second main release' 0.3.0 * Now the version number is OK, push the docs to sourceforge with:: cd doc make upload-stable-web-mysfusername where ``mysfusername`` is obviously your own sourceforge username. * Set up maintenance / development branches If this is this is a full release you need to set up two branches, one for further substantial development (often called 'trunk') and another for maintenance releases. * Branch to maintenance:: git co -b maint/0.2.x Set ``_version_extra`` back to ``.dev`` and bump ``_version_micro`` by 1. Thus the maintenance series will have version numbers like - say - '0.2.1.dev' until the next maintenance release - say '0.2.1'. Commit. Don't forget to push upstream with something like:: git push upstream maint/0.2.x --set-upstream * Start next development series:: git co main-master then restore ``.dev`` to ``_version_extra``, and bump ``_version_minor`` by 1. Thus the development series ('trunk') will have a version number here of '0.3.0.dev' and the next full release will be '0.3.0'. If this is just a maintenance release from ``maint/0.2.x`` or similar, just tag and set the version number to - say - ``0.2.1.dev``. * Push tags:: git push --tags * Announce to the mailing lists. .. _pytox: http://codespeak.net/tox .. 
_setuptools intro: http://packages.python.org/an_example_pypi_project/setuptools.html .. _travis-ci: http://travis-ci.org .. include:: ../../links_names.txt nipy-0.3.0/doc/devel/guidelines/open_source_devel.rst000066400000000000000000000007551210344137400227250ustar00rootroot00000000000000.. _open_source_devel: ========================= Open Source Development ========================= For those interested in more info about contributing to an open source project, Here are some links I've found. They are probably no better or worse than other similar documents: * `Software Release Practice HOWTO `_ * `Contributing to Open Source Projects HOWTO `_ nipy-0.3.0/doc/devel/guidelines/optimization.rst000066400000000000000000000030671210344137400217520ustar00rootroot00000000000000.. _optimization: ============== Optimization ============== In the early stages of NIPY development, we are focusing on functionality and usability. In regards to optimization, we benefit **significantly** from the optimized routines in scipy_ and numpy_. As NIPY progresses it is likely we will spend more energy on optimizing critical functions. In our `py4science group at UC Berkeley `_ we've had several meetings on the various optimization options including ctypes, weave and blitz, and cython. It's clear there are many good options, including standard C-extensions. However, optimized code tends to be less readable and more difficult to debug and maintain. When we do optimize our code we will first profile the code to determine the offending sections, then optimize those sections. Until that need arises, we will follow the great advice from these fellow programmers: Kent Beck: "First make it work. Then make it right. Then make it fast." `Donald Knuth on optimization `_: "We should forget about small efficiencies, say about 97% of the time: premature optimization is the root of all evil." Tim Hochberg, from the Numpy list:: 0. Think about your algorithm. 1. Vectorize your inner loop. 2. Eliminate temporaries 3. Ask for help 4. Recode in C. 5. Accept that your code will never be fast. Step zero should probably be repeated after every other step ;) .. include:: ../../links_names.txt nipy-0.3.0/doc/devel/guidelines/sphinx_helpers.rst000066400000000000000000000151211210344137400222510ustar00rootroot00000000000000.. _sphinx_helpers: ==================== Sphinx Cheat Sheet ==================== Wherein I show by example how to do some things in Sphinx (you can see a literal version of this file below in :ref:`sphinx_literal`) .. _making_a_list: Making a list ------------- It is easy to make lists in rest Bullet points ^^^^^^^^^^^^^ This is a subsection making bullet points * point A * point B * point C Enumerated points ^^^^^^^^^^^^^^^^^ This is a subsection making numbered points #. point A #. point B #. point C .. _making_a_table: Making a table -------------- This shows you how to make a table -- if you only want to make a list see :ref:`making_a_list`. ================== ============ Name Age ================== ============ John D Hunter 40 Cast of Thousands 41 And Still More 42 ================== ============ .. _making_links: Making links ------------ Cross-references sections and documents ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Use reST labels to cross-reference sections and other documents. The mechanism for referencing another reST document or a subsection in any document, including within a document are identical. Place a *reference label* above the section heading, like this:: .. 
_sphinx_helpers: ==================== Sphinx Cheat Sheet ==================== Note the blank line between the *reference label* and the section heading is important! Then refer to the *reference label* in another document like this:: :ref:`sphinx_helpers` The reference is replaced with the section title when Sphinx builds the document while maintaining the linking mechanism. For example, the above reference will appear as :ref:`sphinx_helpers`. As the documentation grows there are many references to keep track of. For documents, please use a *reference label* that matches the file name. For sections, please try and make the *refence label* something meaningful and try to keep abbreviations limited. Along these lines, we are using *underscores* for multiple-word *reference labels* instead of hyphens. Sphinx documentation on `Cross-referencing arbitrary locations `_ has more details. External links ^^^^^^^^^^^^^^ For external links you are likely to use only once, simple include the like in the text. This link to `google `_ was made like this:: `google `_ For external links you will reference frequently, we have created a ``links_names.txt`` file. These links can then be used throughout the documentation. Links in the ``links_names.txt`` file are created using the `reST reference `_ syntax:: .. _targetname: http://www.external_website.org To refer to the reference in a separate reST file, include the ``links_names.txt`` file and refer to the link through it's target name. For example, put this include at the bottom of your reST document:: .. include:: ../links_names.txt and refer to the hyperlink target:: blah blah blah targetname_ more blah Links to classes, modules and functions ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ You can also reference classes, modules, functions, etc that are documented using the sphinx `autodoc `_ facilites. For example, see the module :mod:`matplotlib.backend_bases` documentation, or the class :class:`~matplotlib.backend_bases.LocationEvent`, or the method :meth:`~matplotlib.backend_bases.FigureCanvasBase.mpl_connect`. .. _ipython_highlighting: ipython sessions ---------------- Michael Droettboom contributed a sphinx extension which does pygments syntax highlighting on ipython sessions .. sourcecode:: ipython In [69]: lines = plot([1,2,3]) In [70]: setp(lines) alpha: float animated: [True | False] antialiased or aa: [True | False] ...snip This support is included in this template, but will also be included in a future version of Pygments by default. .. _formatting_text: Formatting text --------------- You use inline markup to make text *italics*, **bold**, or ``monotype``. You can represent code blocks fairly easily:: import numpy as np x = np.random.rand(12) Or literally include code: .. literalinclude:: elegant.py .. _using_math: Using math ---------- In sphinx you can include inline math :math:`x\leftarrow y\ x\forall y\ x-y` or display math .. math:: W^{3\beta}_{\delta_1 \rho_1 \sigma_2} = U^{3\beta}_{\delta_1 \rho_1} + \frac{1}{8 \pi 2} \int^{\alpha_2}_{\alpha_2} d \alpha^\prime_2 \left[\frac{ U^{2\beta}_{\delta_1 \rho_1} - \alpha^\prime_2U^{1\beta}_{\rho_1 \sigma_2} }{U^{0\beta}_{\rho_1 \sigma_2}}\right] This documentation framework includes a Sphinx extension, :file:`sphinxext/mathmpl.py`, that uses matplotlib to render math equations when generating HTML, and LaTeX itself when generating a PDF. This can be useful on systems that have matplotlib, but not LaTeX, installed. To use it, add ``mathpng`` to the list of extensions in :file:`conf.py`. 
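For example, the ``extensions`` list in :file:`conf.py` might then look something like this (the other entries shown here are only illustrative - check the project's actual :file:`conf.py` for the real list)::

    # conf.py -- Sphinx configuration (illustrative snippet only)
    extensions = ['sphinx.ext.autodoc',  # pull documentation from docstrings
                  'mathpng']             # matplotlib-based math rendering
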
Current SVN versions of Sphinx now include built-in support for math. There are two flavors: - pngmath: uses dvipng to render the equation - jsmath: renders the math in the browser using Javascript To use these extensions instead, add ``sphinx.ext.pngmath`` or ``sphinx.ext.jsmath`` to the list of extensions in :file:`conf.py`. All three of these options for math are designed to behave in the same way. Inserting matplotlib plots -------------------------- Inserting automatically-generated plots is easy. Simply put the script to generate the plot in any directory you want, and refer to it using the ``plot`` directive. All paths are considered relative to the top-level of the documentation tree. To include the source code for the plot in the document, pass the ``include-source`` parameter:: .. plot:: devel/guidelines/elegant.py :include-source: In the HTML version of the document, the plot includes links to the original source code, a high-resolution PNG and a PDF. In the PDF version of the document, the plot is included as a scalable PDF. .. plot:: devel/guidelines/elegant.py :include-source: Emacs helpers ------------- See :ref:`rst_emacs` Inheritance diagrams -------------------- Inheritance diagrams can be inserted directly into the document by providing a list of class or module names to the ``inheritance-diagram`` directive. For example:: .. inheritance-diagram:: codecs produces: .. inheritance-diagram:: codecs .. _sphinx_literal: This file --------- .. literalinclude:: sphinx_helpers.rst nipy-0.3.0/doc/devel/guidelines/testing.rst000066400000000000000000000131601210344137400206740ustar00rootroot00000000000000.. _testing: ========= Testing ========= Nipy uses the Numpy_ test framework which is based on nose_. If you plan to do much development you should familiarize yourself with nose and read through the `numpy testing guidelines `_. Writing tests ------------- Test files ^^^^^^^^^^ The numpy testing framework and nipy extensions are imported with one line in your test module:: from nipy.testing import * This imports all the ``assert_*`` functions you need like ``assert_equal``, ``assert_raises``, ``assert_array_almost_equal`` etc..., numpy's ``rand`` function, and the numpy test decorators: ``knownfailure``, ``slow``, ``skipif``, etc... Please name your test file with the *test_* prefix followed by the module name it tests. This makes it obvious for other developers which modules are tested, where to add tests, etc... An example test file and module pairing:: nipy/core/reference/coordinate_system.py nipy/core/reference/tests/test_coordinate_system.py All tests go in a test subdirectory for each package. Temporary files ^^^^^^^^^^^^^^^ If you need to create a temporary file during your testing, you could use one of these three methods, in order of convenience: #. `StringIO `_ StringIO creates an in memory file-like object. The memory buffer is freed when the file is closed. This is the preferred method for temporary files in tests. #. `nibabel.tmpdirs.InTemporaryDirectory` context manager. This is a convenient way of putting you into a temporary directory so you can save anything you like into the current directory, and feel fine about it after. Like this:: from ..tmpdirs import InTemporaryDirectory with InTemporaryDirectory(): f = open('myfile', 'wt') f.write('Anything at all') f.close() One thing to be careful of is that you may need to delete objects holding onto the file before you exit the ``with`` statement, otherwise Windows may refuse to delete the file. #. 
`tempfile.mkstemp <http://docs.python.org/library/tempfile.html#tempfile.mkstemp>`_ This will create a temporary file which can be used during testing. There are parameters for specifying the filename *prefix* and *suffix*. .. Note:: The tempfile module includes a convenience function *NamedTemporaryFile* which deletes the file automatically when it is closed. However, whether the files can be opened a second time varies across platforms and there are problems using this function on *Windows*. Example:: import os from tempfile import mkstemp try: fd, name = mkstemp(suffix='.nii.gz') tmpfile = open(name) save_image(fake_image, tmpfile.name) tmpfile.close() finally: os.unlink(name) # This deletes the temp file Please don't just create a file in the test directory and then remove it with a call to ``os.remove``. For various reasons, sometimes ``os.remove`` doesn't get called and temp files get left around. Many tests in one test function ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ To keep tests organized, it's best to have one test function correspond to one class method or module-level function. Often though, you need many individual tests to thoroughly cover (100% coverage) the method/function. This calls for a `generator function <http://docs.python.org/tutorial/classes.html#generators>`_. Use a ``yield`` statement to run each individual test, independently of the other tests. This prevents the case where the first test fails and as a result the following tests don't get run. This test function executes four independent tests:: def test_index(): cs = CoordinateSystem('ijk') yield assert_equal, cs.index('i'), 0 yield assert_equal, cs.index('j'), 1 yield assert_equal, cs.index('k'), 2 yield assert_raises, ValueError, cs.index, 'x' Suppress *warnings* on test output ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ In order to reduce noise when running the tests, consider suppressing *warnings* in your test modules. This can be done in the module-level setup and teardown functions:: import warnings ... def setup(): # Suppress warnings during tests to reduce noise warnings.simplefilter("ignore") def teardown(): # Clear list of warning filters warnings.resetwarnings() Running tests ------------- Running the full test suite ^^^^^^^^^^^^^^^^^^^^^^^^^^^ For our tests, we have collected a set of fmri imaging data which are required for the tests to run. To do this, download the latest example data and template package files from `NIPY data packages`_. See :ref:`data-files`. Running individual tests ^^^^^^^^^^^^^^^^^^^^^^^^ You can also run nose from the command line with a variety of options. To test an individual module:: nosetests test_image.py To test an individual function:: nosetests test_module:test_function To test a class:: nosetests test_module:TestClass To test a class method:: nosetests test_module:TestClass.test_method Verbose mode (*-v* option) will print out the function names as they are executed. Standard output is normally suppressed by nose; to see any print statements you must include the *-s* option. In order to get a "full verbose" output, call nose like this:: nosetests -sv test_module.py To include doctests in the nose test:: nosetests -sv --with-doctest test_module.py For details on all the command line options:: nosetests --help .. _coverage: .. include:: ./coverage_testing.rst .. include:: ../../links_names.txt nipy-0.3.0/doc/devel/images.rst000066400000000000000000000026131210344137400163350ustar00rootroot00000000000000=================== Describing images =================== Here we set out what we think an image is and how it should work in our code. We are largely following the nifti_ standard. What is an image? 
================= An image is the association of a block (array) of spatial data with the relationship of the position of that data to some continuous space. Therefore an image contains: * an array * a spatial transformation describing the position of the data in the array relative to some space. An image always has 3 spatial dimensions. It can have other dimensions, such as time. A slice from a 3D image is also a 3D image, but with one dimension of the image having length 1. The transformation is spatial and refers to exactly three dimensions. :: import numpy as np import neuroimaging as ni img = ni.load_image('example3d.img') arr = img.get_data() assert isinstance(arr, np.ndarray) xform = img.get_transform() voxel_position = [0, 0, 0] world_position = xform.apply(voxel_position) assert world_position.shape == (3,) An image has an array. The first 3 axes (dimensions) of that array are spatial. Further dimensions can have various meanings. The most common meaning of the 4th axis is time. The relationship of the first three dimensions to any particular orientation in space is only known from the image transform. .. include:: ../links_names.txt nipy-0.3.0/doc/devel/index.rst000066400000000000000000000004451210344137400162000ustar00rootroot00000000000000.. _developers-guide-index: ================= Developer Guide ================= .. only:: html :Release: |version| :Date: |today| .. toctree:: :maxdepth: 2 development_quickstart install/index guidelines/index planning/index code_discussions/index tools/index nipy-0.3.0/doc/devel/install/000077500000000000000000000000001210344137400160025ustar00rootroot00000000000000nipy-0.3.0/doc/devel/install/debian.rst000066400000000000000000000024241210344137400177600ustar00rootroot00000000000000=================================== Debian / Ubuntu developer install =================================== Dependencies ------------ See :ref:`installation` for the installation instructions. Since NiPy is provided within the stock distributions (the ``main`` component of Debian, and ``universe`` of Ubuntu), to install all necessary requirements it is enough to:: sudo apt-get build-dep python-nipy .. note:: The above invocation assumes that you have references to the ``Source`` repository listed with ``deb-src`` prefixes in your apt .list files. Otherwise, you can revert to manual installation with:: sudo apt-get install build-essential sudo apt-get install python-dev sudo apt-get install python-numpy python-numpy-dev python-scipy sudo apt-get install liblapack-dev sudo apt-get install python-sympy Useful additions ---------------- Some functionality in NiPy requires additional modules:: sudo apt-get install ipython sudo apt-get install python-matplotlib sudo apt-get install mayavi2 For getting the code via version control:: sudo apt-get install git-core Then follow the instructions at :ref:`trunk_download`. And for easier control of multiple Python module installations (e.g. different versions of IPython):: sudo apt-get install virtualenvwrapper nipy-0.3.0/doc/devel/install/fedora.rst000066400000000000000000000012701210344137400177740ustar00rootroot00000000000000========================== Fedora developer install ========================== See :ref:`installation` This assumes a recent Fedora (>=10) version. It may work for earlier versions - see :ref:`installation` for requirements. This page may also hold for Fedora-based distributions such as Mandriva and CentOS. Run all the ``yum install`` commands as root. 
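Once the packages listed below are installed, a quick way to confirm that the numerical stack is importable is to run something like this from Python (a minimal sanity check only, not part of the distribution instructions; the versions printed are whatever your distribution ships)::

    # Confirm the scientific Python stack imports and report versions
    import numpy
    import scipy
    import sympy
    print(numpy.__version__)
    print(scipy.__version__)
    print(sympy.__version__)
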
Requirements:: yum install gcc-c++ yum install python-devel yum install numpy scipy yum install sympy yum install atlas-devel Options:: yum install ipython yum install python-matplotlib For getting the code via version control:: yum install git-core Then follow the instructions at :ref:`trunk_download` nipy-0.3.0/doc/devel/install/index.rst000066400000000000000000000004471210344137400176500ustar00rootroot00000000000000.. _distribution-installs: ================================================ Developer installs for different distributions ================================================ .. only:: html :Release: |version| :Date: |today| .. toctree:: :maxdepth: 2 debian fedora windows nipy-0.3.0/doc/devel/install/windows.rst000066400000000000000000000053001210344137400202240ustar00rootroot00000000000000================================ Development install on windows ================================ The easy way - a super-package ------------------------------ The easiest way to get the dependencies is to install PythonXY_ or the `Enthought Tool Suite`_ . This gives you MinGW_, Python_, Numpy_, Scipy_, ipython_ and matplotlib_ (and much more). The hard way - by components ---------------------------- If instead you want to do it by component, try the instructions below. Requirements: * Download and install MinGW_ * Download and install the windows binary for Python_ * Download and install the Numpy_ and Scipy_ binaries * Download and install Sympy_ Options: * Download and install ipython_, being careful to follow the windows installation instructions * Download and install matplotlib_ Alternatively, if you are very brave, you may want to install numpy / scipy from source - see our maybe out of date :ref:`windows_scipy_build` for details. Getting and installing NIPY --------------------------- You will next need to get the NIPY code via version control: * Download and install the windows binary for git_ * Go to the windows menu, find the ``git`` menu, and run ``git`` in a windows terminal. You should now be able to follow the instructions in :ref:`trunk_download`, but with the following modifications: Running the build / install --------------------------- Here we assume that you do *not* have the Microsoft visual C tools, you did not use the ETS_ package (which sets the compiler for you) and *are* using a version of MinGW_ to compile NIPY. First, for the ``python setup.py`` steps, you will need to add the ``--compiler=mingw32`` flag, like this:: python setup.py build --compiler=mingw32 install Note that, with this setup you cannot do inplace (developer) installs (like ``python setup.py build_ext --inplace``) because of a six-legged python packaging feature that does not allow the compiler options (here ``--compiler=mingw32``) to be passed from the ``build_ext`` command. If you want to be able to do that, add these lines to your ``distutils.cfg`` file :: [build] compiler=mingw32 [config] compiler = mingw32 See http://docs.python.org/install/#inst-config-files for details on this file. After you've done this, you can run the standard ``python setup.py build_ext --inplace`` command. The command line from Windows ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The default windows XP command line ``cmd`` is very basic. You might consider using the Cygwin_ bash shell, or you may want to use the ipython_ shell to work in. For system commands use the ``!`` escape, like this, from the ipython prompt:: !python setup.py build --compiler=mingw32 .. 
include:: ../../links_names.txt nipy-0.3.0/doc/devel/install/windows_scipy_build.rst000066400000000000000000000173541210344137400226260ustar00rootroot00000000000000.. _windows_scipy_build: Building Scipy/Numpy on Windows with Optimized Numerical Libraries ================================================================== This involves compiling several libraries (ATLAS, LAPACK, FFTW and UMFPACK) and then building `numpy `_ and `scipy `_ from SVN source. But as with most things Windows, this turns out to be a slightly tricky affair. The following has been tested on Windows Vista Enterprise 32bit only, but should theoretically work on other Windows platforms. It also used Python 2.5. Ideally, a big chunk of this page should move to the scipy/numpy site. And also ideally should become a single script. But it's also good to know exactly how you got there. Prerequisites ~~~~~~~~~~~~~ * You need Windows Vista enterprise/ultimate with `SUA `_ enabled and installed or Windows (others, including other Vista variants) with `Cygwin `_ installed. You cannot install the SUA package on a non enterprise or ultimate Vista edition. * MinGW (`installer `_) with gcc 3.4.5 (choose the candidate option when installing) and the `msys `_ environment installed. You will need to download the following packages for msys: * bzip2-1.0.3-MSYS-1.0.11-snapshot.tar.bz2 * coreutils-5.97-MSYS-1.0.11-snapshot.tar.bz2 * diffutils-2.8.7-MSYS-1.0.11-snapshot.tar.bz2 * gawk-3.1.5-MSYS-1.0.11-snapshot.tar.bz2 * make-3.81-MSYS-1.0.11-snapshot.tar.bz2 * msysCORE-1.0.11-2007.01.19-1.tar.bz2 * binutils-2.17.50-20070129-1.tar.gz Just unpack all the package contents in a single directory and copy them over to the MinGW installation directory. You may want to add the following to the system path: :: set PATH=[PATH TO]\MinGW;[PATH TO]\MinGW\libexec\gcc\mingw32\3.4.5;%PATH% * Numerical Libraries * `ATLAS latest developer version `_ * LAPACK `lapack 3.1 scroll down to Available software `_ * FFTW `fftw-3.1.2 `_ * UMFPACK `download UMFPACK, UFConfig, AMD `_ Installation ~~~~~~~~~~~~ * Create a directory called BUILDS, BUILDS/lib, BUILDS/include * Unpack all the numerical library files in BUILDS * Create subversion check out directories for scipy and numpy in BUILDS * Start SUA c-shell or cygwin shell * Start msys.bat:: PATH=/mingw/libexec/gcc/mingw32/3.4.5:$PATH; export PATH * Change directory to location of BUILDS. (/dev/fs/driveletter/... in SUA, /cygdrive/driveletter/... in cygwin, /driveletter/... in msys) Compiling ATLAS ^^^^^^^^^^^^^^^ * This is done in the SUA/Cygwin shell. 
In Cygwin you probably want to follow the instructions at `Installing Scipy on Windows `_ * ``cd ATLAS; mkdir build; cd build`` * Run `../configure` (This will probably fail but will leave you with xconfig) * Run `./xconfig --help` (to see all options) * Run `../configure -O 8 -A 16 -m 3189 -b 32` (replacing the values with your machine configuration) * Edit Make.inc to provide correct L2SIZE * Run `make` (leave your computer and go do something else for about an hour) Compiling LAPACK ^^^^^^^^^^^^^^^^ * This is done in the msys shell * `cd lapack_XX` * Copy make.inc.example to make.inc * Edit the following lines in make.inc:: PLAT = _NT OPTS = -funroll-all-loops -O3 -malign-double -msse2 BLASLIB = -L/driveletter/[PATH TO]/BUILDS/ATLAS/build/lib -lf77blas -latlas * Run `make lib` Combining LAPACK and ATLAS ^^^^^^^^^^^^^^^^^^^^^^^^^^ * Stay in the msys shell after compiling LAPACK * Go to the ATLAS/build/lib directory * Execute the following commands:: mkdir tmp; cd tmp cp ../liblapack.a ../liblapack_ATLAS.a ar -x ../liblapack.a cp [PATH TO]/lapack_NT.a ../liblapack.a ar -r ../liblapack.a *.o rm *.o ar -x ../liblapack.a xerbla.o ar -r ../libf77blas.a xerbla.o * Copy liblapack.a, libf77blas.a, libcblas.a, libatlas.a to BUILDS/lib * Copy the ATLAS/include to BUILDS/include/ATLAS Compiling UMFPACK ^^^^^^^^^^^^^^^^^ * Stay in msys shell * Goto UFconfig * Edit UFConfig/UFconfig.mk:: BLAS = -L/driveletter/[PATH TO]/BUILDS/lib -llapack -lf77blas -lcblas -latlas -lg2c LAPACK = -L/driveletter/[PATH TO]/BUILDS/lib -llapack -lf77blas -lcblas -latlas -lg2c XERBLA = * Run the following commands:: cd ..\AMD make cd ..\UMFPACK make * Copy libamd.a (from AMD), libumfpack.a (from UMFPACK) to BUILDS/lib * Copy UMFPACK/include to BUILDS/include/UMFPACK * Copy UFconfig/ufconfig.h to BUILDS/include * Copy AMD/include/amd.h to BUILDS/include Compiling fftw ^^^^^^^^^^^^^^ .. note:: The latest versions of scipy do not link to FFTW, so this step is no longer useful for scipy * Stay in msys shell * Goto fftw_XX * `mkdir build; cd build` * Run the following command:: ../configure --prefix=/c/DOWNLOADS/BUILDS/ --enable-sse2 --disable-dependency-tracking --enable-threads --with-our-malloc16 --with-windows-f77-mangling --with-combined-threads * Run `make` OR `make -j 4` if you have multiple processors (it'll make things go faster. This build on msys in vista takes a while) * Copy `.libs/libfftw3.a` to BUILDS/lib * Copy fftw_XX/api/fftw3.h to BUILDS/include Compling numpy/scipy ^^^^^^^^^^^^^^^^^^^^ .. note:: As above, note that the FFTW linking here is no longer useful for the scipy install * Open a Windows cmd window and make sure you can execute python. * Make a copy of each of the libs in BUILDS/lib and rename them from libname.a to name.lib * Rename lapack.lib to flapack.lib * rename site.cfg.example to site.cfg * Edit site.cfg in the numpy directory. 
Replace the blas_opt and lapack_opt section with:: [atlas] libraries = f77blas, cblas, atlas, g2c library_dirs = driveletter:\[PATH TO]\MinGW\lib;driveletter:\[PATH TO]\BUILDS\lib include_dirs = driveletter:\[PATH TO]\BUILDS\include\ATLAS [lapack] libraries = flapack, f77blas, cblas, atlas library_dirs = driveletter:\[PATH TO]\MinGW\lib;driveletter:\[PATH TO]\BUILDS\lib [amd] library_dirs = driveletter:\[PATH TO]\MinGW\lib;driveletter:\[PATH TO]\BUILDS\lib include_dirs = driveletter:\[PATH TO]\BUILDS\include libraries = amd [umfpack] library_dirs = driveletter:\[PATH TO]\MinGW\lib;driveletter:\[PATH TO]\BUILDS\lib include_dirs = driveletter:\[PATH TO]\BUILDS\include\UMFPACK libraries = umfpack [fftw3] library_dirs = driveletter:\[PATH TO]\MinGW\lib;driveletter:\[PATH TO]\BUILDS\lib include_dirs = driveletter:\[PATH TO]\BUILDS\include libraries = fftw3 * Edit numpy/distutils/fcompiler/gnu.py. Find the line that says `opt.append('gcc')` and comment it `# opt.append('gcc')`. This is probably a Vista SUA thing and perhaps won't be required when using Cygwin to compile ATLAS. * Copy site.cfg to ../scipy/site.cfg * Compile numpy:: cd numpy python setup.py config --compiler=mingw32 build --compiler=mingw32 bdist_wininst * Install numpy from the numpy/dist folder * Compile scipy:: cd scipy python setup.py config --compiler=mingw32 build --compiler=mingw32 bdist_wininst * Install scipy from the scipy/dist folder * Test installations. In python run:: import numpy import scipy numpy.test() scipy.test() numpy.show_config() scipy.show_config() nipy-0.3.0/doc/devel/planning/000077500000000000000000000000001210344137400161425ustar00rootroot00000000000000nipy-0.3.0/doc/devel/planning/TODO.rst000066400000000000000000000076061210344137400174520ustar00rootroot00000000000000.. _todo: =========================== TODO for nipy development =========================== This document will serve to organize current development work on nipy. It will include current sprint items, future feature ideas, and design discussions, etc... Documentation ============= * Create NIPY sidebar with links to all project related websites. * Create a Best Practices document. * Create a rst doc for *Request a review* process. Tutorials --------- Tutorials are an excellent way to document and test the software. Some ideas for tutorials to write in our Sphinx documentation (in no specific order): * Slice timing * Image resampling * Image IO * Registration using SPM/FSL * FMRI analysis * Making one 4D image from many 3D images, and vice versa. Document ImageList and FmriImageList. * Apply SPM registration .mat to a NIPY image. * Create working example out of this TRAC `pca `_ page. Should also be a rest document. * Add analysis pipeline(s) blueprint. Bugs ==== These should be moved to the nipy_ bug section on github. Placed here until they can be input. * Fix possible precision error in fixes.scipy.ndimage.test_registration function test_autoalign_nmi_value_2. See FIXME. * Fix error in test_segment test_texture2 functions (fixes.scipy.ndimage). See FIXME. * import nipy.algorithms is very slow! Find and fix. The shared library is slow. * base class for all new-style classes should be *object*; preliminary search with ``grin "class +[a-zA-Z0-9]+ *:"`` Refactorings ============ * image.save function should accept filename or file-like object. If I have an open file I would like to be able to pass that in also, instead of fp.name. Happens in test code a lot. 
* image._open function should accept Image objects in addition to ndarrays and filenames. Currently the save function has to call np.asarray(img) to get the data array out of the image and pass them to _open in order to create the output image. * Add dtype options when saving. When saving images it uses the native dtype for the system. Should be able to specify this. in the test_file_roundtrip, self.img is a uint8, but is saved to tmpfile as float64. Adding this would allow us to save images without the scaling being applied. * In image._open(url, ...), should we test if the "url" is a PyNiftiIO object already? This was in the tests from 'old code' and passed:: new = Image(self.img._data, self.img.grid) img._data is a PyNIftiIO object. It works, but we should verify it's harmless otherwise prevent it from happening. * Look at image.merge_image function. Is it still needed? Does it fit into the current api? * FmriImageList.emptycopy() - Is there a better way to do this? Matthew proposed possibly implementing Gael's dress/undress metadata example. * Verify documentation of the image generators. Create a simple example using them. * Use python 2.5 feature of being able to reset the generator? * Add test data where volumes contain intensity ramps. Slice with generator and test ramp values. * Implement `fmriimagelist blueprint `_. Code Design Thoughts ==================== A central location to dump thoughts that could be shared by the developers and tracked easily. Future Features =============== Put ideas here for features nipy should have but are not part of our current development. These features will eventually be added to a weekly sprint log. * Auto backup script for nipy repos to run as weekly cron job. We should setup a machine to perform regular branch builds and tests. This would also provide an on-site backup. * See if we can add bz2 support to nifticlib. * Should image.load have an optional squeeze keyword to squeeze a 4D image with one frame into a 3D image? .. include:: ../../links_names.txt nipy-0.3.0/doc/devel/planning/index.rst000066400000000000000000000003241210344137400200020ustar00rootroot00000000000000.. _development_planning: ====================== Development Planning ====================== .. only:: html :Release: |version| :Date: |today| .. toctree:: :maxdepth: 2 roadmap.rst TODO.rst nipy-0.3.0/doc/devel/planning/roadmap.rst000066400000000000000000000013711210344137400203210ustar00rootroot00000000000000.. _roadmap: ============== Nipy roadmap ============== We plan to release a protoype of NIPY_ by the Summer of 2009. This will include a full FMRI analysis, 2D visualization, and integration with other packages for spatial processing (SPM_ and FSL_). We will continue to improve our documentation and tutorials with the aim of providing a full introduction to neuroimaging analysis. We will also extend our collaborations with other neuroimaging groups, integrating more functionality into NIPY and providing better interoperability with other packages. This will include the design and implementation of a pipeline/batching system, integration of registration algorithms, and improved 2D and 3D visualization. .. include:: ../../links_names.txt nipy-0.3.0/doc/devel/tools/000077500000000000000000000000001210344137400154745ustar00rootroot00000000000000nipy-0.3.0/doc/devel/tools/index.rst000066400000000000000000000003151210344137400173340ustar00rootroot00000000000000.. _developer_tools: ================= Developer Tools ================= .. 
only:: html :Release: |version| :Date: |today| .. toctree:: :maxdepth: 2 tricked_out_emacs virtualenv-tutor nipy-0.3.0/doc/devel/tools/tricked_out_emacs.rst000066400000000000000000000135241210344137400217170ustar00rootroot00000000000000.. _tricked_out_emacs: =================================== Tricked out emacs for python coding =================================== Various ways to configure your emacs that you might find useful. See emacs_python_mode_ for a good summary. .. _rst_emacs: ReST mode --------- For editing ReST documents like this one. You may need a recent version of the rst.el_ file from the docutils_ site. .. _rst.el: http://docutils.sourceforge.net/tools/editors/emacs/rst.el ``rst`` mode automates many important ReST tasks like building and updating table-of-contents, and promoting or demoting section headings. Here is the basic ``.emacs`` configuration:: (require 'rst) (setq auto-mode-alist (append '(("\\.txt$" . rst-mode) ("\\.rst$" . rst-mode) ("\\.rest$" . rst-mode)) auto-mode-alist)) Some helpful functions:: C-c TAB - rst-toc-insert Insert table of contents at point C-c C-u - rst-toc-update Update the table of contents at point C-c C-l rst-shift-region-left Shift region to the left C-c C-r rst-shift-region-right Shift region to the right .. note:: On older Debian-based releases, the default ``M-x rst-compile`` command uses ``rst2html.py`` whereas the command installed is ``rst2html``. Symlink was required as a quick fix. doctest mode ------------- This useful mode for writing doctests (``doctest-mode.el``) cames with ``python-mode`` package on Debian-based systems. Otherwise see doctest-mode_ project page. code checkers ------------- Code checkers within emacs can be useful to check code for errors, unused variables, imports and so on. Alternatives are pychecker_, pylint_ and pyflakes_. Note that rope_ (below) also does some code checking. pylint_ and pyflakes_ work best with emacs flymake_, which usually comes with emacs. pychecker_ `````````` This appears to be plumbed in with ``python-mode``, just do ``M-x py-pychecker-run``. If you try this, and pychecker_ is not installed, you will get an error. You can install it using your package manager (``pychecker`` on Debian-based systems) or from the pychecker_ webpage. pylint_ ``````` Install pylint_. Debian packages pylint_ as ``pylint``. Put the `flymake .emacs snippet`_ in your ``.emacs`` file. You will see, in the emacs_python_mode_ page, that you will need to save this:: #!/usr/bin/env python import re import sys from subprocess import * p = Popen("pylint -f parseable -r n --disable-msg-cat=C,R %s" % sys.argv[1], shell = True, stdout = PIPE).stdout for line in p.readlines(): match = re.search("\\[([WE])(, (.+?))?\\]", line) if match: kind = match.group(1) func = match.group(3) if kind == "W": msg = "Warning" else: msg = "Error" if func: line = re.sub("\\[([WE])(, (.+?))?\\]", "%s (%s):" % (msg, func), line) else: line = re.sub("\\[([WE])?\\]", "%s:" % msg, line) print line, p.close() as ``epylint`` somewhere on your system path, and test that ``epylint somepyfile.py`` works. pyflakes ```````` Install pyflakes_. Maybe your package manager again? (``apt-get install pyflakes``). Install the `flymake .emacs snippet`_ in your ``.emacs`` file. 
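As a quick sanity check that the checker itself is installed and working, independently of the emacs wiring, you can run it by hand on a throwaway file containing problems it should flag. The file below is purely illustrative (the file name and contents are made up for this example)::

    # check_me.py - a deliberately lint-unclean module for exercising pyflakes
    import os          # pyflakes should flag: 'os' imported but unused
    import sys

    def greet(who):
        unused = 42    # pyflakes should flag: local variable 'unused' is
                       # assigned to but never used
        return "hello, %s" % who

    if __name__ == '__main__':
        sys.stdout.write(greet('nipy') + "\n")

If ``pyflakes check_me.py`` reports the unused import and the unused local variable, the flymake configuration below only needs to be able to find the ``pyflakes`` executable on your ``PATH``.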
flymake .emacs snippet `````````````````````` Add this to your .emacs file:: ;; code checking via flymake ;; set code checker here from "epylint", "pyflakes" (setq pycodechecker "pyflakes") (when (load "flymake" t) (defun flymake-pycodecheck-init () (let* ((temp-file (flymake-init-create-temp-buffer-copy 'flymake-create-temp-inplace)) (local-file (file-relative-name temp-file (file-name-directory buffer-file-name)))) (list pycodechecker (list local-file)))) (add-to-list 'flymake-allowed-file-name-masks '("\\.py\\'" flymake-pycodecheck-init))) and set which of pylint_ ("epylint") or pyflakes_ ("pyflakes") you want to use. You may also consider using the ``flymake-cursor`` functions, see the ``pyflakes`` section of the emacs_python_mode_ page for details. ropemacs_ --------- rope_ is a python refactoring library, and ropemacs_ is an emacs interface to it, that uses pymacs_. pymacs_ is an interface between emacs lisp and python that allows emacs to call into python and python to call back into emacs. Install ```````` - rope_ - by downloading from the link, and running ``python setup.py install`` in the usual way. - pymacs_ - probably via your package manager - for example ``apt-get install pymacs`` - ropemacs_ - download from link, ``python setup.py install`` You may need to make sure your gnome etc sessions have the correct python path settings - for example settings in ``.gnomerc`` as well as the usual ``.bashrc``. Make sure you can `import ropemacs` from python (which should drop you into something lispey). Add these lines somewhere in your `.emacs` file:: (require 'pymacs) (pymacs-load "ropemacs" "rope-") and restart emacs. When you open a python file, you should have a ``rope`` menu. Note `C-c g` - the excellent `goto-definition` command. Switching between modes ----------------------- You may well find it useful to be able to switch fluidly between python mode, doctest mode, ReST mode and flymake mode (pylint_). You can attach these modes to function keys in your ``.emacs`` file with something like:: (global-set-key [f8] 'flymake-mode) (global-set-key [f9] 'python-mode) (global-set-key [f10] 'doctest-mode) (global-set-key [f11] 'rst-mode) emacs code browser ------------------ Not really python specific, but a rather nice set of windows for browsing code directories, and code - see the ECB_ page. Again, your package manager may help you (``apt-get install ecb``). .. include:: ../../links_names.txt nipy-0.3.0/doc/devel/tools/virtualenv-tutor.rst000066400000000000000000000142671210344137400216120ustar00rootroot00000000000000Setting up virtualenv ===================== .. Contents:: Overview -------- virtualenv_ is a tool that allows you to install python packages in isolated environments. In this way you can have multiple versions of the same package without interference. I started using this to easily switch between multiple versions of numpy without having to constantly reinstall and update my symlinks. I also did this as a way to install software for Scipy2008_, like the Enthought Tool Suite (ETS_), in a way that would not effect my current development environment. This tutorial is based heavily on a blog entry from Prabhu_. I've extended his shell script to make switching between virtual environments a one-command operation. 
(Few others who should be credited for encouraging me to use virtualenv_: Gael_, Jarrod_, Fernando_) Installing ---------- Download and install the tarball for virtualenv_:: tar xzf virtualenv-1.1.tar.gz cd virtualenv-1.1 python setup.py install --prefix=$HOME/local Note: I install in a local directory, your install location may differ. Setup virtualenv ---------------- Setup a base virtualenv directory. I create this in a local directory, you can do this in a place of your choosing. All virtual environments will be installed as subdirectories in here.:: cd ~/local mkdir -p virtualenv Create a virtualenv ------------------- Create a virtual environment. Here I change into my virtualenv directory and create a virtual environment for my numpy-1.1.1 install:: cd virtualenv/ virtualenv numpy-1.1.1 Activate a virtualenv --------------------- Set the numpy-1.1.1 as the *active* virtual environment:: ln -s numpy-1.1.1/bin/activate . We *enable* the numpy-1.1.1 virtual environment by sourcing it's activate script. This will prepend our `PATH` with the currently active virtual environment.:: # note: still in the ~/local/virtualenv directory source activate We can see our `PATH` with the numpy-1.1.1 virtual environment at the beginning. Also not the label of the virtual environment prepends our prompt.:: (numpy-1.1.1)cburns@~ 20:23:54 $ echo $PATH /Users/cburns/local/virtualenv/numpy-1.1.1/bin: /Library/Frameworks/Python.framework/Versions/Current/bin: /Users/cburns/local/bin: /usr/bin:/bin:/usr/sbin:/sbin:/usr/local/bin:/usr/X11/bin:/usr/local/git/bin Install packages into a virtualenv ---------------------------------- Then we install numpy-1.1.1 into the virtual environment. In order to install packages in the virtual environment, you need to use the *python* or *easy_install* from that virtualenv.:: ~/local/virtualenv/numpy-1.1.1/bin/python setup.py install At this point any package I install in this virtual environment will only be used when the environment is active. Pragmatic virtualenv -------------------- There are a few more manual steps in the above process then I wanted, so I extended the shell script that Prabhu_ wrote to make this a simple one-command operation. One still needs to manually create each virtual environment, and install packages, but this script simplifies activating and deactivating them. The `venv_switch.sh` script will: * Activate the selected virtual environment. (Or issue an error if it doesn't exist.) * Launch a new bash shell using the ~/.virtualenvrc file which sources the virtualenv/activate script. * The activate script modifies the PATH and prepends the bash prompt with the virtualenv label. `venv_switch.sh`:: #!/bin/sh # venv_switch.sh # switch between different virtual environments # verify a virtualenv is passed in if [ $# -ne 1 ] then echo 'Usage: venv_switch venv-label' exit -1 fi # verify the virtualenv exists VENV_PATH=~/local/virtualenv/$1 # activate env script ACTIVATE_ENV=~/local/virtualenv/activate echo $VENV_PATH if [ -e $VENV_PATH ] then echo 'Switching to virtualenv' $VENV_PATH echo "Starting new bash shell. Simply 'exit' to return to previous shell" else echo 'Error: virtualenv' $VENV_PATH 'does not exist!' 
exit -1 fi rm $ACTIVATE_ENV ln -s ~/local/virtualenv/$1/bin/activate $ACTIVATE_ENV # Launch new terminal bash --rcfile ~/.virtualenvrc Now to activate our numpy-1.1.1 virtual environment, we simply do:: venv_switch.sh numpy-1.1.1 To deactivate the virtual environment and go back to your original environment, just exit the bash shell:: exit The rcfile used to source the activate script. I first source my .profile to setup my environment and custom prompt, then source the virtual environment. `.virtualenvrc`:: # rc file to initialize bash environment for virtualenv sessions # first source the bash_profile source ~/.bash_profile # source the virtualenv source ~/local/virtualenv/activate Installing ETS 3.0.0 -------------------- As another example, I installed ETS_ 3.0.0 for the Tutorial sessions at Scipy2008_. (Note the prerequisites_.) Set up an ets-3.0.0 virtualenv:: cburns@virtualenv 15:23:50 $ pwd /Users/cburns/local/virtualenv cburns@virtualenv 15:23:50 $ virtualenv ets-3.0.0 New python executable in ets-3.0.0/bin/python Installing setuptools............done. cburns@virtualenv 15:24:29 $ ls activate ets-3.0.0 numpy-1.1.1 numpy-1.2.0b2 Switch into my ets-3.0.0 virtualenv using the `venv_switch.sh` script:: cburns@~ 15:29:12 $ venv_switch.sh ets-3.0.0 /Users/cburns/local/virtualenv/ets-3.0.0 Switching to virtualenv /Users/cburns/local/virtualenv/ets-3.0.0 Starting new bash shell. Simply 'exit' to return to previous shell Install ETS_ using easy_install. Note we need to use the easy_install from our ets-3.0.0 virtual environment:: (ets-3.0.0)cburns@~ 15:31:41 $ which easy_install /Users/cburns/local/virtualenv/ets-3.0.0/bin/easy_install (ets-3.0.0)cburns@~ 15:31:48 $ easy_install ETS .. include:: ../../links_names.txt .. _Prabhu: http://prabhuramachandran.blogspot.com/2008/03/using-virtualenv-under-linux.html .. _Gael: http://gael-varoquaux.info/blog/ .. _Jarrod: http://jarrodmillman.blogspot.com/ .. _Fernando: http://fdoperez.blogspot.com/search/label/scipy .. _Scipy2008: http://conference.scipy.org/ .. _prerequisites: https://svn.enthought.com/enthought/wiki/Install nipy-0.3.0/doc/documentation.rst000066400000000000000000000006361210344137400166450ustar00rootroot00000000000000.. _documentation-main: ==================== NIPY documentation ==================== .. only:: html :Release: |version| :Date: |today| Download `PDF `_ Contents: .. toctree:: :maxdepth: 2 users/index.rst labs/index.rst devel/index.rst faq/index.rst api/index.rst publications license .. only:: html * :ref:`genindex` * :ref:`modindex` * :ref:`search` nipy-0.3.0/doc/faq/000077500000000000000000000000001210344137400140045ustar00rootroot00000000000000nipy-0.3.0/doc/faq/documentation_faq.rst000066400000000000000000000042201210344137400202340ustar00rootroot00000000000000.. _documentation_faq: =================== Documentation FAQ =================== .. _installing_graphviz_on_OSX: Installing graphviz on OSX -------------------------- The easiest way I found to do this was using MacPorts_, all other methods caused python exceptions when attempting to write out the pngs in the inheritance_diagram.py functions. Just do:: sudo port install graphviz And make sure your macports directory (``/opt/local/bin``) is in your PATH. Error writing output on OSX --------------------------- If you are getting an error during the **writing output...** phase of the documentation build you may have a problem with your graphviz_ install. 
The error may look something like:: **writing output...** about api/generated/gen api/generated/nipy api/generated/nipy.algorithms.fwhm Format: "png" not recognized. Use one of: canon cmap cmapx cmapx_np dia dot eps fig hpgl imap imap_np ismap mif mp pcl pic plain plain-ext ps ps2 svg svgz tk vml vmlz vtx xdot ... Exception occurred: File "/Users/cburns/src/nipy-repo/trunk-dev/doc/sphinxext/ inheritance_diagram.py", line 238, in generate_dot (name, self._format_node_options(this_node_options))) IOError: [Errno 32] Broken pipe Try installing graphviz using MacPorts_. See the :ref:`installing_graphviz_on_OSX` for instructions. Sphinx and reST gotchas ----------------------- Docstrings ^^^^^^^^^^ Sphinx_ and reST_ can be very picky about whitespace. For example, in the docstring below the *Parameters* section will render correctly, where the *Returns* section will not. By correctly I mean Sphinx will insert a link to the CoordinateSystem class in place of the cross-reference *:class:`CoordinateSystem`*. The *Returns* section will be rendered exactly as shown below with the *:class:* identifier and the backticks around CoordinateSystem. This section fails because of the missing whitespace between ``product_coord_system`` and the colon ``:``. :: Parameters ---------- coord_systems : sequence of :class:`CoordinateSystem` Returns ------- product_coord_system: :class:`CoordinateSystem` .. include:: ../links_names.txt nipy-0.3.0/doc/faq/index.rst000066400000000000000000000003151210344137400156440ustar00rootroot00000000000000.. _faq-index: ===== FAQ ===== .. only:: html :Release: |version| :Date: |today| Frequently asked questions about nipy .. toctree:: :maxdepth: 2 why licensing documentation_faq nipy-0.3.0/doc/faq/johns_bsd_pitch.rst000066400000000000000000000147351210344137400177100ustar00rootroot00000000000000.. _johns-bsd-pitch: Why we should be using BSD ========================== John Hunter - 16 Dec 2004 I'll start by summarizing what many of you already know about open source licenses. I believe this discussion is broadly correct, though it is not a legal document and if you want legally precise statements you should reference the original licenses cited here. The `Open-Source-Initiative `_ is a clearing house for OS licenses, so you can read more there. The two dominant license variants in the wild are GPL-style and BSD-style. There are countless other licenses that place specific restrictions on code reuse, but the purpose of this document is to discuss the differences between the GPL and BSD variants, specifically in regards to my experience developing matplotlib_ and in my discussions with other developers about licensing issues. The best known and perhaps most widely used license is the :term:`GPL`, which in addition to granting you full rights to the source code including redistribution, carries with it an extra obligation. If you use GPL code in your own code, or link with it, your product must be released under a GPL compatible license. Ie you are required to give the source code to other people and give them the right to redistribute it as well. Many of the most famous and widely used open source projects are released under the GPL, including linux, gcc and emacs. The second major class are the :term:`BSD` and BSD-style licenses (which includes MIT and the python PSF license). These basically allow you to do whatever you want with the code: ignore it, include it in your own open source project, include it in your proprietary product, sell it, whatever. 
python itself is released under a BSD compatible license, in the sense that, now quoting form the PSF license page There is no GPL-like "copyleft" restriction. Distributing binary-only versions of Python, modified or not, is allowed. There is no requirement to release any of your source code. You can also write extension modules for Python and provide them only in binary form. Famous projects released under a BSD-style license in the permissive sense of the last paragraph are the BSD operating system, python and !TeX.h I believe the choice of license is an important one, and I advocate a BSD-style license. In my experience, the most important commodity an open source project needs to succeed is users. Of course, doing something useful is a prerequisite to getting users, but I also believe users are something of a prerequisite to doing something useful. It is very difficult to design in a vacuum, and users drive good software by suggesting features and finding bugs. If you satisfy the needs of some users, you will inadvertently end up satisfying the needs of a large class of users. And users become developers, especially if they have some skills and find a feature they need implemented, or if they have a thesis to write. Once you have a lot of users and a number of developers, a network effect kicks in, exponentially increasing your users and developers. In open source parlance, this is called competing for mind share. So I believe the number one (or at least number two) commodity an open source project can possess is mind share, and mind share means you want as many damned users using your software as you can get. Even though you are giving it away for free, you have to market your software, promote it, and support it as if you were getting paid for it. Now, how does this relate to licensing, you are asking? Most software companies will not use GPL'd code in their own software, even those that are highly committed to open source development, such as enthought_, out of legitimate concern that use of the GPL will "infect" their code base by its viral nature. In effect, they want to retain the right to release some proprietary code. And in my experience, companies make for some of the best developers, because they have the resources to get a job done, even a boring one, if they need it in their code. Two of the matplotlib backends (FLTK and WX) were contributed by private sector companies who are using matplotlib either internally or in a commercial product -- I doubt these companies would have been using matplotlib if the code were GPL. In my experience, the benefits of collaborating with the private sector are real, whereas the fear that some private company will "steal" your product and sell it in a proprietary application leaving you with nothing is not. There is a lot of GPLd code in the world, and it is a constant reality in the development of matplotlib that when we want to reuse some algorithm, we have to go on a hunt for a non-GPLd version. Most recently this occurred in a search for a good contouring algorithm. I worry that the "license wars", the effect of which are starting to be felt on many projects, have a potential to do real harm to open source software development. There are two unpalatable options. 1) Go with GPL and lose the mind-share of the private sector 2) Forgo GPLd code and retain the contribution of the private sector. 
This is a very tough decision because their is a lot of very high quality software that is GPLd and we need to use it; they don't call the license `viral `_ for nothing. The third option, which is what is motivating me to write this, is to convince people who have released code under the GPL to re-release it under a BSD compatible license. Package authors retain the copyright to their software and have discretion to re-release it under a license of their choosing. Many people choose the GPL when releasing a package because it is the most famous open source license, and did not consider issues such as those raised here when choosing a license. When asked, these developers will often be amenable to re-releasing their code under a more permissive license. Fernando Perez did this with ipython, which was released under the :term:`LGPL` and then re-released under a BSD license to ease integration with scipy and other enthought products. The LGPL is more permissive than the GPL, allowing you to link with it non-virally, but many companies are still loath to use it out of legal concerns, and you cannot reuse LGPL code in a proprietary product. So I encourage you to release your code under a BSD compatible license, and when you encounter an open source developer whose code you want to use, to do the same. Feel free to forward this document on them. .. include:: ../links_names.txt nipy-0.3.0/doc/faq/licensing.rst000066400000000000000000000057531210344137400165230ustar00rootroot00000000000000.. _licensing: =========== Licensing =========== How do you spell licence? ------------------------- If you are British you spell it differently from Americans, sometimes: http://www.tiscali.co.uk/reference/dictionaries/english/data/d0082350.html As usual the American spelling rule (always use *s*) was less painful and arbitrary, so I (MB) went for that. Why did you choose BSD? ----------------------- We have chosen BSD licensing, for compatibility with SciPy, and to increase input from developers in industry. Wherever possible we will keep packages that can have BSD licensing separate from packages needing a GPL license. Our choices were between: * :term:`BSD` * :term:`GPL` John Hunter made the argument for the BSD license in :ref:`johns-bsd-pitch`, and we agree. Richard Stallman makes the case for the GPL here: http://www.gnu.org/licenses/why-not-lgpl.html How does the BSD license affect our relationship to other projects? ------------------------------------------------------------------- The BSD license allows other projects with virtually any license, including GPL, to use our code. BSD makes it more likely that we will attract support from companies, including open-source software companies, such as Enthought_ and Kitware_. Any part of our code that uses (links to) GPL code, should be in a separable package. Note that we do not have this problem with :term:`LGPL`, which allows us to link without ourselves having a GPL. What license does the NIH prefer? --------------------------------- The NIH asks that software written with NIH money can be commercialized. Quoting from: `NIH NATIONAL CENTERS FOR BIOMEDICAL COMPUTING `_ grant application document: A software dissemination plan must be included in the application. There is no prescribed single license for software produced in this project. However NIH does have goals for software dissemination, and reviewers will be instructed to evaluate the dissemination plan relative to these goals: 1. 
The software should be freely available to biomedical researchers and educators in the non-profit sector, such as institutions of education, research institutes, and government laboratories. 2. The terms of software availability should permit the commercialization of enhanced or customized versions of the software, or incorporation of the software or pieces of it into other software packages. There is more discussion of licensing in this `na-mic presentation `_. See also these links (from the presentation): * http://www.rosenlaw.com/oslbook.htm * http://www.opensource.org * http://wiki.na-mic.org/Wiki/index.php/NAMIC_Wiki:Community_Licensing So far this might suggest that the NIH would prefer at least a BSD-like license, but the NIH has supported several GPL'd projects in imaging, :term:`AFNI` being the most obvious example. .. include:: ../links_names.txt nipy-0.3.0/doc/faq/why.rst000066400000000000000000000143751210344137400153570ustar00rootroot00000000000000.. _why-faq: ========= Why ... ========= Why nipy? --------- We are writing NIPY because we hope that it will solve several problems in the field at the moment. We are concentrating on FMRI analysis, so we'll put the case for that part of neuroimaging for now. There are several good FMRI analysis packages already - for example :term:`SPM`, :term:`FSL` and :term:`AFNI`. For each of these you can download the source code. Like SPM, AFNI and FSL, we think source code is essential for understanding and development. With these packages you can do many analyses. Some problems are that: * The packages don't mix easily. You'll have to write your own scripts to mix between them; this is time-consuming and error-prone, because you will need good understanding of each package * Because they don't mix, researchers usually don't try and search out the best algorithm for their task - instead they rely on the software that they are used to * Each package has its own user community, so it's a little more difficult to share software and ideas * The core development of each language belongs in a single lab. Another, more general problem, is planning for the future. We need a platform that can be the basis for large scale shared development. For various reasons, it isn't obvious to us that any of these three is a good choice for common, shared development. In particular, we think that Python is the obvious choice for a large open-source software project. By comparison, matlab is not sufficiently general or well-designed as a programming language, and C / C++ are too hard and slow for scientific programmers to read or write. See why-python_ for this argument in more detail. We started NIPY because we want to be able to: * support an open collaborative development environment. To do this, we will have to make our code very easy to understand, modify and extend. If make our code available, but we are the only people who write or extend it, in practice, that is closed sofware. * make the tools that allow developers to pick up basic building blocks for common tasks such as registration and statistics, and build new tools on top. * write a scripting interface that allows you to mix in routines from the other packages that you like or that you think are better than the ones we have. * design ways of interacting with the data and analysis stream that help you organize both. That way you can more easily keep track of your analyses. We also hope this will make analyses easier to run in parallel, and therefore much faster. .. _why-python: Why python? 
----------- The choice of programming language has many scientific and practical consequences. Matlab is an example of a high-level language. Languages are considered high level if they are able to express a large amount of functionality per line of code; other examples of high level languages are Python, Perl, Octave, R and IDL. In contrast, C is a low-level language. Low level languages can achieve higher execution speed, but at the cost of code that is considerably more difficult to read. C++ and Java occupy the middle ground sharing the advantages and the disadvantages of both levels. Low level languages are a particularly ill-suited for exploratory scientific computing, because they present a high barrier to access by scientists that are not specialist programmers. Low-level code is difficult to read and write, which slows development ([Prechelt2000ECS]_, [boehm1981]_, [Walston1977MPM]_) and makes it more difficult to understand the implementation of analysis algorithms. Ultimately this makes it less likely that scientists will use these languages for development, as their time for learning a new language or code base is at a premium. Low level languages do not usually offer an interactive command line, making data exploration much more rigid. Finally, applications written in low level languages tend to have more bugs, as bugs per line of code is approximately constant across many languages [brooks78]. In contrast, interpreted, high-level languages tend to have easy-to-read syntax and the native ability to interact with data structures and objects with a wide range of built-in functionality. High level code is designed to be closer to the level of the ideas we are trying to implement, so the developer spends more time thinking about what the code does rather than how to write it. This is particularly important as it is researchers and scientists who will serve as the main developers of scientific analysis software. The fast development time of high-level programs makes it much easier to test new ideas with prototypes. Their interactive nature allows researchers flexible ways to explore their data. SPM is written in Matlab, which is a high-level language specialized for matrix algebra. Matlab code can be quick to develop and is relatively easy to read. However, Matlab is not suitable as a basis for a large-scale common development environment. The language is proprietary and the source code is not available, so researchers do not have access to core algorithms making bugs in the core very difficult to find and fix. Many scientific developers prefer to write code that can be freely used on any computer and avoid proprietary languages. Matlab has structural deficiencies for large projects: it lacks scalability and is poor at managing complex data structures needed for neuroimaging research. While it has the ability to integrate with other languages (e.g., C/C++ and FORTRAN) this feature is quite impoverished. Furthermore, its memory handling is weak and it lacks pointers - a major problem for dealing with the very large data structures that are often needed in neuroimaging. Matlab is also a poor choice for many applications such as system tasks, database programming, web interaction, and parallel computing. Finally, Matlab has weak GUI tools, which are crucial to researchers for productive interactions with their data. .. [boehm1981] Boehm, Barry W. (1981) *Software Engineering Economics*. Englewood Cliffs, NJ: Prentice-Hall. .. [Prechelt2000ECS] Prechelt, Lutz. 2000. 
An Empirical Comparison of Seven Programming Languages. *IEEE Computer* 33, 23--29. .. [Walston1977MPM] Walston, C E, and C P Felix. 1977. A Method of Programming Measurement and Estimation. *IBM Syst J* 16, 54-73. nipy-0.3.0/doc/glossary.rst000066400000000000000000000213621210344137400156360ustar00rootroot00000000000000========== Glossary ========== .. glossary:: AFNI AFNI_ is a functional imaging analysis package. It is funded by the NIMH, based in Bethesda, Maryland, and directed by Robert Cox. Like :term:`FSL`, it is written in C, and it's very common to use shell scripting of AFNI command line utilities to automate analyses. Users often describe liking AFNI's scriptability, and image visualization. It uses the :term:`GPL` license. BSD Berkeley software distribution license. The BSD_ license is permissive, in that it allows you to modify and use the code without requiring that you use the same license. It allows you to distribute closed-source binaries. BOLD Contrast that is blood oxygen level dependent. When a brain area becomes active, blood flow increases to that area. It turns out that, with the blood flow increase, there is a change in the relative concentrations of oxygenated and deoxygenated hemoglobin. Oxy- and deoxy- hemoglobin have different magnetic properties. This in turn leads to a change in MRI signal that can be detected by collecting suitably sensitive MRI images at regular short intervals during the blood flow chance. See the the `wikipedia FMRI`_ article for more detail. BrainVisa BrainVISA_ is a sister project to NIPY. It also uses Python, and provides a carefully designed framework and automatic GUI for defining imaging processing workflows. It has tools to integrate command line and other utilities into these workflows. Its particular strength is anatomical image processing but it also supports FMRI and other imaging modalities. BrainVISA is based in NeuroSpin, outside Paris. DTI Diffusion tensor imaging. DTI is rather poorly named, because it is a model of the diffusion signal, and an analysis method, rather than an imaging method. The simplest and most common diffusion tensor model assumes that diffusion direction and velocity at every voxel can be modeled by a single tensor - that is, by an ellipse of regular shape, fully described by the length and orientation of its three orthogonal axes. This model can easily fail in fairly common situations, such as white-matter fiber track crossings. DWI Diffusion-weighted imaging. DWI is the general term for MRI imaging designed to image diffusion processes. Sometimes reseachers use :term:`DTI` to have the same meaning, but :term:`DTI` is a common DWI signal model and analysis method. EEGlab The most widely-used open-source package for analyzing electrophysiological data. EEGlab_ is written in :term:`matlab` and uses a :term:`GPL` license. FMRI Functional magnetic resonance imaging! It refers to MRI image acquisitions and analysis designed to look at brain function rather than structure. Most people use FMRI to refer to :term:`BOLD` imaging in particular. See the `wikipedia FMRI`_ article for more detail. FSL FSL_ is the FMRIB_ software library, written by the FMRIB_ analysis group, and directed by Steve Smith. Like :term:`AFNI`, it is a large collection of C / C++ command line utilities that can be scripted with a custom GUI / batch system, or using shell scripting. 
Its particular strength is analysis of :term:`DWI` data, and :term:`ICA` functional data analysis, although it has strong tools for the standard :term:`SPM approach` to FMRI. It is free for academic use, and open-source, but not free for commercial use. GPL The GPL_ is the GNU general public license. It is one of the most commonly-used open-source software licenses. The distinctive feature of the GPL license is that it requires that any code derived from GPL code also uses a GPL license. It also requires that any code that is statically or dynamically linked to GPL code has a GPL-compatible license. See: http://en.wikipedia.org/wiki/GNU_General_Public_License and http://www.gnu.org/licenses/gpl-faq.html ICA Independent component analysis is a multivariate technique related to :term:`PCA`, to estimate independent components of signal from multiple sensors. In functional imaging, this usually means detecting underlying spatial and temporal components within the brain, where the brain voxels can be considered to be different sensors of the signal. See the `wikipedia ICA`_ page. LGPL The lesser GNU public license. LGPL_ differs from the :term:`GPL` in that you can link to LGPL code from non-LGPL code without having to adopt a GPL-compatible license. However, if you modify the code (create a "derivative work"), that modification has to be released under the LGPL. See `wikipedia LGPL `_ for more discussion. Matlab matlab_ began as a high-level programming language for working with matrices. Over time it has expanded to become a fairly general-purpose language. See also: http://en.wikipedia.org/wiki/MATLAB. It has good numerical algorithms, 2D graphics, and documentation. There are several large neuroscience software projects written in matlab, including :term:`SPM software` and :term:`EEGlab`. PCA Principal component analysis is a multivariate technique to determine orthogonal components across multiple sources (or sensors). See :term:`ICA` and the `wikipedia PCA`_ page. PET Positron emission tomography is a method of detecting the spatial distributions of certain radiolabeled compounds - usually in the brain. The scanner detectors pick up the spatial distribution of emitted radiation from within the body. From this pattern, it is possible to reconstruct the distribution of radioactivity in the body, using techniques such as filtered back projection. PET was the first mainstream technique used for detecting regional changes in blood flow as an index of which brain areas were active when the subject is doing various tasks, or at rest. These studies nearly all used :term:`water activation PET`. See the `wikipedia PET`_ entry. SPM SPM (statistical parametric mapping) refers either to the :term:`SPM approach` to analysis or the :term:`SPM software` package. SPM approach Statistical parametric mapping is a way of analyzing data that involves creating an image (the *map*) containing statistics, and then doing tests on this statistic image. For example, we often create a t statistic image where each :term:`voxel` contains a t statistic value for the time-series from that voxel. The :term:`SPM software` package implements this approach - as do several others, including :term:`FSL` and :term:`AFNI`. SPM software SPM_ (statistical parametric mapping) is the name of the matlab_ based package written by John Ashburner, Karl Friston and others at the `Functional Imaging Laboratory`_ in London. More people use the SPM package to analyze :term:`FMRI` and :term:`PET` data than any other.
It has good lab and community support, and the :term:`matlab` source code is available under the :term:`GPL` license. VoxBo Quoting from the Voxbo_ webpage - "VoxBo is a software package for the processing, analysis, and display of data from functional neuroimaging experiments". Like :term:`SPM`, :term:`FSL` and :term:`AFNI`, VoxBo provides algorithms for a full FMRI analysis, including statistics. It also provides software for lesion-symptom analysis, and has a parallel scripting engine. VoxBo has a :term:`GPL` license. Dan Kimberg leads development. voxel Voxels are volumetric pixels - that is, they are values in a regular grid in three-dimensional space - see http://en.wikipedia.org/wiki/Voxel water activation PET A :term:`PET` technique to detect regional changes in blood flow. Before each scan, we inject the subject with radiolabeled water. The radiolabeled water reaches the arterial blood, and then distributes (to some extent) in the brain. The concentration of radioactive water increases in brain areas with higher blood flow. Thus, the image of estimated counts in the brain has an intensity that is influenced by blood flow. This use has been almost completely replaced by the less invasive :term:`BOLD` :term:`FMRI` technique. .. include:: links_names.txt nipy-0.3.0/doc/history.rst000066400000000000000000000024701210344137400154730ustar00rootroot00000000000000=================== A history of NIPY =================== Sometime around 2002, Jonathan Taylor started writing BrainSTAT, a Python version of Keith Worsley's FmriSTAT package. In 2004, Jarrod Millman and Matthew Brett decided that they wanted to write a grant to build a new neuroimaging analysis package in Python. Soon afterwards, they found that Jonathan had already started, and merged efforts. At first we called this project *BrainPy*. Later we changed the name to NIPY. In 2005, Jarrod, Matthew and Jonathan, along with Mark D'Esposito, Fernando Perez, John Hunter, Jean-Baptiste Poline, and Tom Nichols, submitted the first NIPY grant to the NIH. It was not successful. In 2006, Jarrod and Mark submitted a second grant, based on the first. The NIH gave us 3 years of funding for two programmers. We hired two programmers in 2007 - Christopher Burns and Tom Waite - and began work on refactoring the code. Meanwhile, the team at Neurospin, Paris, started to refactor their FFF code to work better with python and NIPY. This work was done by Alexis Roche, Bertrand Thirion, and Benjamin Thyreau, with some help and advice from Fernando Perez. In 2008, Fernando Perez and Matthew Brett started work full-time at the UC Berkeley `Brain Imaging Center `_. Matthew in particular came to work on NIPY. nipy-0.3.0/doc/index.rst000066400000000000000000000006631210344137400151030ustar00rootroot00000000000000.. _about_nipy: ==== NIPY ==== NIPY is a python project for analysis of structural and functional neuroimaging data. Please see our :ref:`documentation-main` and feel free to hold us to the high ideals of :ref:`nipy-mission`. *The NIPY team* .. We need the following toctree directive to include the documentation .. in the document hierarchy - see http://sphinx.pocoo.org/concepts.html ..
toctree:: :hidden: documentation nipy-0.3.0/doc/labs/000077500000000000000000000000001210344137400141565ustar00rootroot00000000000000nipy-0.3.0/doc/labs/datasets.rst000066400000000000000000000127171210344137400165300ustar00rootroot00000000000000 ============================= Volumetric data structures ============================= Volumetric data structures expose numerical values embedded in a world space. For instance, a volume could expose the T1 intensity, as acquired in scanner space, or the BOLD signal in MNI152 template space. The values can be multi-dimensional: in the case of a BOLD signal, the fMRI signal would correspond to a time series at each position in world space. .. currentmodule:: nipy.labs.datasets.volumes.volume_img The image structure: :class:`VolumeImg` ======================================= The structure most often used in neuroimaging is the :class:`VolumeImg`. It corresponds, for instance, to the structure used in Nifti files. This structure stores data as an n-dimensional array, with n being at least 3, alongside the necessary information to map it to world space. :definition: A volume-image (class: :class:`VolumeImg`) is a volumetric data structure given by data points lying on a regular grid: this structure is a generalization of an image in 3D. The voxels, vertices of the grid, are mapped to coordinates by an affine transformation. As a result, the grid is regular and evenly-spaced, but may not be orthogonal, and the spacing may differ in the 3 directions. .. image:: datasets/volume_img.jpg The data is exposed in a multi-dimensional array, with the first 3 axes corresponding to spatial directions. A complete description of this object can be found on the page: :class:`VolumeImg`. Useful methods on volume structures ==================================== .. currentmodule:: nipy.labs.datasets.volumes.volume_field Any general volume structure will implement methods for querying the values and changing world space (see the :class:`VolumeField` documentation for more details): .. autosummary:: :toctree: generated VolumeField.values_in_world VolumeField.composed_with_transform Also, as volume structures may describe the spatial data in various ways, you can easily convert to a :class:`VolumeImg`, i.e. a regular grid, for instance to implement an algorithm on the grid such as spatial smoothing: .. autosummary:: :toctree: generated VolumeField.as_volume_img Finally, different structures can embed the data differently in the same world space, for instance with different resolutions. You can resample one structure on another using: .. autosummary:: :toctree: generated VolumeField.resampled_to_img **FIXME:** Examples would be good here, but first we need io and template data to be wired with datasets. More general data structures =============================== .. currentmodule:: nipy.labs.datasets.volumes.volume_img The :class:`VolumeImg` is the most commonly found volume structure, and the simplest to understand; however, volumetric data can be described in more generic terms, and for performance reasons it might be interesting to use other objects. Here, we give a list of the nipy volumetric data structures, from most specific to most general. When you deal with volume structures in your algorithms, depending on which volume structure class you are taking as an input, you can assume different properties of the data.
You can always use :meth:`VolumeImg.as_volume_img` to cast the volume structure to a :class:`VolumeImg` that is simple to understand and easy to work with, but it may not be necessary. Implemented classes -------------------- Implemented classes (or `concrete` classes) are structures that you can readily use directly from nipy. .. currentmodule:: nipy.labs.datasets.volumes.volume_grid :class:`VolumeGrid` In a :class:`VolumeGrid`, the data points are sampled on a 3D grid, but unlike for a :class:`VolumeImg`, the grid may not be regular. For instance, it can be a grid that has been warped by a non-affine transformation. As with the :class:`VolumeImg`, the data is exposed in a multi-dimensional array, with the first 3 axes corresponding to spatial directions. .. image:: datasets/volume_grid.jpg Abstract classes ------------------ .. currentmodule:: nipy.labs.datasets.volumes.volume_data Abstract classes cannot be used directly because they are incompletely implemented. They serve to define the interface: the type of objects that you can use, or how you can extend nipy by exposing the same set of methods and attributes (the `interface`). :class:`VolumeData` In this volumetric structure, the data is sampled for some points in the world space. The object knows how to interpolate between these points. The underlying values are stored in a multidimensional array-like object that can be indexed and sliced. .. image:: datasets/volume_data.jpg This is an abstract base class: it defines an interface, but is not fully functional, and can be used only via its child classes (such as :class:`VolumeGrid` or :class:`VolumeImg`). .. currentmodule:: nipy.labs.datasets.volumes.volume_field :class:`VolumeField` This is the most general volumetric structure (base class): all the nipy volumes expose this interface. This structure does not make any assumptions about how the values are internally represented; they may, for instance, be represented as a function, rather than as data points, or as a data structure that is not an array, such as a graph. .. image:: datasets/volume_field.jpg This is also an abstract base class: it defines the core nipy volumetric data structure interface: you can rely on all the methods documented for this class in any nipy data structure.
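To make the interface described above more concrete, here is a minimal sketch of how the methods documented on this page fit together. It is illustrative only: the array sizes, affines and world-space label are made up, and the exact :class:`VolumeImg` constructor arguments used here (data array, 4x4 affine, world-space identifier) are an assumption to be checked against the class docstring::

    import numpy as np

    from nipy.labs.datasets.volumes.volume_img import VolumeImg

    # A toy 3D block with 2 mm isotropic voxels (constructor arguments are
    # assumed for illustration: data, affine, world-space identifier).
    data = np.random.random((10, 12, 14))
    img = VolumeImg(data, np.diag([2., 2., 2., 1.]), 'toy-world')

    # Query values at a few world-space positions, using the image's
    # interpolation to read off the grid.
    values = img.values_in_world([3.5, 10.0], [4.0, 6.0], [5.5, 7.0])

    # Resample onto the grid of another volume structure that lives in the
    # same world space, but with a coarser resolution.
    target = VolumeImg(np.zeros((5, 6, 7)), np.diag([4., 4., 4., 1.]),
                       'toy-world')
    resampled = img.resampled_to_img(target)

    # Any volume structure can be cast back to a regular-grid image.
    regular = resampled.as_volume_img()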
nipy-0.3.0/doc/labs/datasets/000077500000000000000000000000001210344137400157665ustar00rootroot00000000000000nipy-0.3.0/doc/labs/datasets/viz_volume_data.py000066400000000000000000000012301210344137400215240ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Use Mayavi to visualize the structure of a VolumeData """ from enthought.mayavi import mlab import numpy as np x, y, z, s = np.random.random((4, 20)) mlab.figure(1, fgcolor=(0, 0, 0), bgcolor=(1, 1, 1)) mlab.clf() src = mlab.pipeline.scalar_scatter(x, y, z, s) sgrid = mlab.pipeline.delaunay3d(src) mlab.pipeline.surface(sgrid, opacity=0.4) mlab.pipeline.surface(mlab.pipeline.extract_edges(sgrid), color=(0, 0, 0)) mlab.pipeline.glyph(sgrid, mode='cube', scale_factor=0.05, scale_mode='none') mlab.savefig('volume_data.jpg') mlab.show() nipy-0.3.0/doc/labs/datasets/viz_volume_field.py000066400000000000000000000012171210344137400217030ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Use Mayavi to visualize the structure of a VolumeData """ from enthought.mayavi import mlab import numpy as np s = np.random.random((5, 5, 5)) # Put the side at 0 s[0, ...] = 0 s[-1, ...] = 0 s[:, 0, :] = 0 s[:, -1, :] = 0 s[..., 0] = 0 s[..., -1] = 0 mlab.figure(1, fgcolor=(0, 0, 0), bgcolor=(1, 1, 1)) mlab.clf() src = mlab.pipeline.scalar_field(s) mlab.pipeline.volume(src, vmin=0, vmax=0.9) # We save as a different filename than the one used, as we modify the # curves. mlab.savefig('volume_field_raw.jpg') mlab.show() nipy-0.3.0/doc/labs/datasets/viz_volume_grid.py000066400000000000000000000017661210344137400215560ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Use Mayavi to visualize the structure of a VolumeGrid """ from enthought.mayavi import mlab import numpy as np from enthought.tvtk.api import tvtk dims = (4, 4, 4) x, y, z = np.mgrid[0.:dims[0], 0:dims[1], 0:dims[2]] x = np.reshape(x.T, (-1,)) y = np.reshape(y.T, (-1,)) z = np.reshape(z.T, (-1,)) y += 0.3*np.sin(x) z += 0.4*np.cos(x) x += 0.05*y**3 sgrid = tvtk.StructuredGrid(dimensions=(dims[0], dims[1], dims[2])) sgrid.points = np.c_[x, y, z] s = np.random.random((dims[0]*dims[1]*dims[2])) sgrid.point_data.scalars = np.ravel(s.copy()) sgrid.point_data.scalars.name = 'scalars' mlab.figure(1, fgcolor=(0, 0, 0), bgcolor=(1, 1, 1)) mlab.clf() mlab.pipeline.surface(sgrid, opacity=0.4) mlab.pipeline.surface(mlab.pipeline.extract_edges(sgrid), color=(0, 0, 0)) mlab.pipeline.glyph(sgrid, mode='cube', scale_factor=0.2, scale_mode='none') mlab.savefig('volume_grid.jpg') mlab.show() nipy-0.3.0/doc/labs/datasets/viz_volume_img.py000066400000000000000000000013021210344137400213670ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Use Mayavi to visualize the structure of a VolumeImg """ from enthought.mayavi import mlab import numpy as np rand = np.random.RandomState(1) data = rand.random_sample((5, 4, 4)) mlab.figure(1, fgcolor=(0, 0, 0), bgcolor=(1, 1, 1)) mlab.clf() src = mlab.pipeline.scalar_field(data) src.image_data.spacing = (0.5, 1, 0.7) src.image_data.update_data() mlab.pipeline.surface(src, opacity=0.4) mlab.pipeline.surface(mlab.pipeline.extract_edges(src), color=(0, 0, 0)) mlab.pipeline.glyph(src, mode='cube', 
scale_factor=0.2, scale_mode='none')
mlab.savefig('volume_img.jpg')
mlab.show()
nipy-0.3.0/doc/labs/datasets/volume_data.jpg [binary JPEG image data omitted: figure referenced as ``datasets/volume_data.jpg`` in doc/labs/datasets.rst]
nipy-0.3.0/doc/labs/datasets/volume_grid.jpg [binary JPEG image data omitted: figure referenced as ``datasets/volume_grid.jpg`` in doc/labs/datasets.rst]
'$;`l"AzHDf 7"b\@oK^aLzͿIR4VLj)`;m$͸(N;Tn`J ~I"˰'p*F9Xė*-'M>KK-yM!1]E^,\,$^N5T`mGKM\^x0(pa 7K49EaOcd](kIn[`IPHmE*@!w?$?T&*]-DvhKFF(+NT>i.M4#.+QyK^:l;Jb@,f8Z"`.'\b#A`b3cy%\lkqq,b{Zab|@DѐC)jH`bفd:^8B];9"'C \)26(QܡidʳuP ccny:5Ԕ9*NE[cF,Ht~xh{]+`Pf78Hl\ aCM+"@hFіd[&j1T+=+4CXV{Kv,7 C"X/:u!,&yĭצC2yXHaN~aа1p&P1!,"1$BTQs^FՓ6KH1ݿ#D*skh(tL $>̗@ɉ=?)ih BsEKxpĵ,סנħY2 =i@Q[x;cÒ'>1[=u$\p|sJN% Nk<N49rY8 vɝA,@ ILKВA8*謐&rG7x2mcr,PE*Bd=n%g%& i0VFUJ 77%XT] $,Xwi \h@8ǨJ88 _VMM;4aM.fr#iFsIrrBӖop9XEN7VMIR*=~$p% !{)!VUZgkL r("/j"D"B˫f鿷 Yv5!DP9\Bgyy'6ALijH\5k `~[B^_qXfAaupUB;H86l#*6m$3eĔ"ʓ͑:{l^e@SBڄyIC܁9"_>`$̐ݱon,ę;a1Rh<,y1@KD0Y(JЮ<%#{WSVH@sz,pAPPj#ֿ )'u8+HMIK3O .GLnP N;`j91Cf, BG7 ?X - K"Z['JZ!tRC'np@=>-#珼ЧM Q3T9\!,Eo"iNxJ=[o6 Xcg 2@Um` RɅG֛h`*->O} AY0kI(S0~R]0WG񊼲GEl_U/0B*-AqMq9PM#6Q˺ (I 8v@`܆5{^%8u\ZH_@FvF A%6 eck, P0`4$'aUnICR#ѱv_,jr$Bƽel+ *UٚPzYt1( XI$6G̤"# *+HtPFJ{\+)!q$C4 j`!Q,$n@*y4;".1(bL\rqxL Z6"0N)@c1/ztYd'D[u˗-8a 8:܀+HaJ-".n%/--$A7 N[>msR)#Jlڞ@<weg_g&/K $)DnN YE"eUז8Q1y?,R&=QFu X_)oAk>X:m " j֌k )*Ze!0z Fo8ͥAJتxuP!:'FEI;VtgPHXrw dE P K V m~. |lmD,$ǘ"4;}O,N{AKPځ%6X dOy&Ezq[d@%`R):(4yrYA'[!kmf0hJ;P-I'3#vP0×/׀j ;Ɋ,}`-<<OA2?|n;s)'|)i~BxB Ao5v",wXM!OL`eVA'ZĖr RP(.#X-ELW AqKWW^\KD‘ $|d"RQ%ݾ^NlޱP+eQP`sEnGsGLI䑬ˌ#3,i@:׉ 8AP60ADLb Tx.i @Ę+ 0l"Ñv<_,`XJ `1sf'1f0poL.>۾300׌:] zt^&+ 0_ K~Hjg7*(p*C#c0DpJ&𝆠C\.bτ Dtye?fB<}ϸ>4TB/Dۯ.YִUhGvdr:SUҺiU3] "s881 xgѴ55_Sяx2-FԺb1`hv$z罹~E,@p"Ԗ! fCU*j~U$h>(.ڪ_(!1AQaq0@P?ƫ,L~n (|cgט8, Ɍ\mL(gx0R(Nܽ8S[*dJ}7ŠȂĹ5 uڼ+͐'b(mQUtt<ny߁%}*J.(RT6` Vf2&XƆDy $ƕ۾"C- }7LZ?2zs$ABSҢL9ⳍ6l@mA2!8态D*Tt8A9a&yph\mT-Y@\r磕gob.nSZ!PC!!`E$ s%|`~Br}CJjL/h'*+\75Wa{p(yM$.Qi\0ZKJ5 eZ\j*_Ǡ`crOو HyCF qz:"qփs_?xEE9UFFbG SEZ WX]aTR% 7 (#b" 7Pո_}v&<ĉ{xxSn1=CîiDabq V 9WO^YLM:{3Ŕ9;z[ W)AEۏD]1PrN'(@ڇ!䨞Dc4GMMWgyp^TT,mYeqٚJ|7>)we\TU5\U # hnarZb&DzO(@.3 rM% zgYQd=֚dP~q#UT.n̬WW`ϗ5iWhy|̂q&UvӀ@_+߮$X @8/zRl2?g$WHm&ƕߛǑ śhS'U0 ؉P}!:ix>p`BRL'G\F<"B*FLcdL$ݪx ' Lَ^JϱEZ*02yI@),N. ہS+p}mke) :<_ /Ljl@4kV}zc /}o'!1AQa@Pq`?g(6ch V=4Kq`/9xyNpf2Ql9A 17_&8N8pcR"= r*DC<*^(<ڒO͈N߼:_:ɦIX"(_pDd99BRYyn{L=} FI(X됄 xPŒa8ReEOe+>!BCS`1Pq ы97H%I'?( 0>.0QCI-PJh8cxQȓ˘Jg牢dX &:0U =l0FZzHy0y %@s' &HA[F@a^/ڠcuԔ~8֙Y(! He1@3m P+{?"vr f8#n$ bTGcѵYnܔ()b4A"(>`tZS4S<K<=Zn8<J GQ/ F@EB@ylmQDBcт*qtE]*3?%Nvw"b)&zYDP9bo{ĀBxxC`dR`9*4{>':{rX}rt P@xRPPeU^Ȫ: Q Ņ)6醃QTq8bNA 5W @ RdIM(JC(#v[3Ԁ`ƒA hzi…5$EHV"F~j1/ i)> xfY Z#*=chĤbP若 a\/gB |)xRr"EzOί[pX6 B#>@ 6xOsmpULKa#jCTQ  @02ՠ>A ㋁N_X}QCteBLڙ11խIWg$V,{rLӜv"W@ 7A5hK@"x?[lgc 8=q=<!DFhB  qA. 
AGhmBa?'3R7%:DlkQu<F< Y~H 8^wIfcjKCKY[/h^Jy:"PK >% ([/[&7xR`zi $kS_CcĒLi!A`BhCzcEǓʖ2%- &Hӕq G 0ۧ.k-:$=T=!& KW^a$"T!c tblP!pTV8)29  " rPYyLL4Xj֖dMǤrCNnGIh0=K.nKrh%x D[jbvJ79XC~%A"Az{PӟԤ3 (w }iT?T:kL_Kiv}dj(Bov]höy;BT_VM3izj$WXHi3ѤҰ3k'Z**8u=0 HiH$plң67ŏqU;ׁ+Oʋ~PWcH|ti\,ykGYd$cnnQ%bu$@'p8hEI*RQf|.$$Q)|6Hp6j_H@Qp mQzz1g;BA8JIֈ ũIA{ZGCU/mOW`>=+ܵS]s⏝-^Bނ%+%H#]~."{P2 (vG\D2'?K=-<`HEx"1qUP'< %(9 #}O3,SWQTW5p TNRvq*3X)` i= _} #|bFF-ըFkJ^:Né”h 7\i ԇ?K|AоWE۲LJg%n]$Ex3P9YaDr ȢH‡E Zi4EgCj#xiRbSW~8~g]{s)-9`[{-gO$bFt/ƅ,C8ڔZ8X+B~g}5$[+7ӏͬ24[a;$E8䝇~ƐxKN| AOSL,T]b*5%i,#XQI޶&b`L7/X[_&0rRb=xl2FKxle>eEṿJB(Qr\R}"$$rK +y`+c@SzϪ҃EJD A=A 'BD\L&/REڏ-}2&?+J NbfG=/8|9:RR.Sg}0nc݉ '#0K>ЛP~h聖)0$`H 4 zn+~8GT J &r"6}i) FKXh 4K,dOKN'eť1T cX/1cr䣡(ο8JCҋË́Ö4ݚFcr UU{ǯ@p4=3 Vu|GX(5z8ؓXHVy % c&JgŐF;YYr4@d .L ؉w)-V @aA\WiS@j5PnbM0Se+}#SW4et,vT E,<%E6:$u Ejnipy-0.3.0/doc/labs/datasets/volume_img.jpg000066400000000000000000001267121210344137400206440ustar00rootroot00000000000000JFIFC     C  >"  pG?z7Yr Fy>9 *܇`),7:2Q쳨3h[G'ܟU]q8狶)磰`21= wg;@yu^ygS1nn_d~_sZO;M }G2{K&cvW26۪V_wgLb]!jX t~wzsd,z~{3* smW.ZXg 'v #pJ[i*ɌFB;:\0`.V{IƱ;IW#fkZmiqnGeFVj]9ԫ/%5yս9Den*%nZ*uX7#\M6U|ekhv.p,_QKZms!|%Ծ=%Dw[#05\{lDqC@ֵt.2#}RjD4'U6AI}saI+^pn, YgU3r!ˇEg雫R=hS|g!5YG춍D.s`UU>V~7&{#Z\[`3Vǹ3*/gV+MBh 9-oO*-\Tm ,ZekQO귦J{蟪 #:{hU== ƱVܴwUF_ʷys]dxrskCKg'BuZ_6vy)rSR[eʗ_H5W'Y˥|y9E=3_LЫ/(v^{j~vܪf=0nڍ6~OsTw6x29Қ۷eKohχfwxezfeWYQb\0Y}Ӏ!MBl/k$Z#pWNZ_/w6\k'5nX?7d3vKYe?b[گnzx$ƙ]7*Y*hϖ5^黥ʮf$׊V^k|>}}Zr/c,婗{>5OMޙkujv嬈r<2qWv'rYt:!к7feVU;"?k^AIU${ܮӍf캯Ŭ~W>fUdh<?OOHu+gȬ/I**չguI-uyWr/d˖z3t[C&qfӯ=ivm]TВ6ՐQK7 /_O-٣M΀0@P!0$&15#37pO t+-U1b6ح&ЈyRU0bzW[myc]ZsP齺s3{#+ Yu'4MҞhI:>=f dPXFy_)hcڽ7K~TmzdH^ko"O4l6q+^VʁcCXXBrI]T’& omBv+rk]bP$$c!W6t F@4]j~5kA_,;(^-skZbM}cE%.Z>2V-0k4v4*BV]+@,s}KLBe'X[!^6y˘YԎ}a\^3't@ojM(e\o*(=*Rz[pok/kq~Әz"z-vk pF_dnkEО{_1XbR&FY(w0ޕN;ʐپ.aCe֬}][]s$ud~꩔PnQBp&2hRK&޺^WH ?>_@aPU.+hHÙ7kS f\ތ.OIЊk.İ/'k"< ?Z{52`tI_sdmʹ~LH['I?!ztUnFRAkVv~~^eOIs\%cOlT?'^Z QK9$#ioN ?߽1pT0S|KD~StP-w`F7t5#:nQ\|ҫF@,`lIʴ]-zMHmU ڏj3eAk cݯqP3CWe;zU2;]ˮH^ pK.|%Rɶ h>&mȄaYKaT-Qu|* J~r=l),v?l#ր_i.kh0W[ˈ}*vس^3u`BP \| kx$\ UBj%~cʼAk{,3eoFt 5x_ىʹBehMݶ4c[W0LUV# Zc[,~ܵI19rK/_//,{t_X*lkyrƬNAMk0DAфZ;-j-Q= kWM}Lu]n,hcHpMHm0]P&7DfUcU}uM~Gâc6𵖠5t駸K;M!$ `)xWdpĺAQ; L sXQea_JH6~UxuKkg'eRϱk`;͢.8&;dfl5_S>\gESsGUZR#-^-*O4XDm},/y^Fr$,4DRU(ZصiSG̩7Z\O#D5CW]o[h=VɏXkq]6.: TQvH/^Fue0[~d",ߑuuԜ?S^ DE*ly=7AfJKkGDc^rK8*h»hJviSrX֗̅*Z1}Eh# W-nv^Ѹcr`"ݛo""1bXr(U{۩&J:|VuRW$P[VVASׄ0box~r?i "I!KzczˉunڷUNe~-*[s,VZwfA]k\#E\ǶurU-h^Լkg R++$MJwJ7ፚ/H5"zAm\Z?8]ng~{3?ڵoPA-rYYe"KR ^U['w@bɂ?VgK`!l$ǯ`0cU}W]ϟcY`ZF Eb =95T&C5b&0@$ˮ$*b- ZzPr5Zp# 8!4Db9п~g 3 Cpשv|@yc|2EFѭ.dvE!9Z#AiU ~&1YYc>48A]{iKqAigk<μ =l1BaBsFUEm 4uXֵ:8p V|H͔!VIk֣Xg)+5͡O\lRiv{9m$ο;ms'3h>PL%!/`֩CET[fCcY`] MXE3|{?nOiv8&O|pYPWVV ۄ9bֻ{9?|.Gz M `} poili\l9{+ ī+/K_ #}t)\, C6.wY@Ęm$4cʘl>SEo_^g^!>!1AQa"q02#@BRb$P?m fjbZjZqH#~s9i* FO+B,NlǺjj}nnm {vx(\~?kY2\{+`|lu;XAyݲtĭ2SJ-y{X]+tZr+tt mMShgeZYe vp6D {W|y̗U͞Z;/=,$ y*F\&p~p8bLK!WYXl-Jӕ&7 Vi)THQF]{<YaPg2O.N6 ۺ{mMF_ +M O!!k ,AaÔlD|lo.r+e%f$[|LМPp+"F;fuٳ筽x3uyF*[M:&gwJKÊl4 lsP]*VRQRTꋁѼ,-|teʔ t0w7| ?XB'Ҭ!a(\/Cb-t1%va&pA οzU?-<:e2a }8 uڻ#1--8\62Dvr7HZ1e6PO{G?KO*J6:` cj3R}bVb0,XSH1/Y3+KSǴNí9(گ}!R,FtY'U7-dmD+t^Һ9CTf޵|}v:v|{RK޿Xx[F=%EaHuĂ@Q<94}a"*)yECxzOg:d1|17Sh3P(kmтmQc aʹj?.trΦH5Ry|@N@G 7&qK}i=+J$,Sj "~x<-V&$u8npUǟ1 -25rn,xiD-$&BB핑ȪHx[FvsSQXC[{۲ű/hl䘝iQׇَ/t(Fh0U;k.nX^}_JK%{)i_X#"n#v~ R>ǭ!Jt& tB̃%G^6whdJ:%r¤\!.#rȩ Xws]]uԓ1OMнAuYIZeί2Gpld㪸:Zq\@^ϿPJoKD=JI ܛq+E}}!eդ jGln6Cki϶q{c爷?8KVG>'oy 7g6>}'#k,GuYt^ѳuWeUEJߒoITP1,U)n7ÿWtf )J62 - *ZPC}bq;.%Ŵ >$Q:(V h ?2L=)+0S5 lQa78fBNZXK4Ʉ+;,[u73B:_럖So pJo{8'C/qJE>I<,p1R|TI=IǁM"3SnVc}S%)R 
l>'5")Νt>1X6[vJZ|=!1AQa"q02@RBbPr?N~]Sߤ~Q%CHt87bY HMHmE2e_^$?3eʸCUFINyʔI=0P1{kʘN.ߕYl8KcM~4rryz&?y~C|o1ST 6UVChwS ڰ()927̍<;kəgx[k8mV#j?@x!gs\ϠnIMt,&Jj< ETB'k+Ma<>UP |so7S%^g%ԀND^c/O/8Z[?5QbXЛxӼD `y>Gi?l7t!qh4ME<Ϻ` mV*ʺz5òI /mT\q"Et,ѬP49Tq8vHZ}46e$ko>G-`5p̂RF6:DHL\+E­a&%2m<9*TK/07yFD™/lG sqpA濦ZK _h&k@ydyNto`C+,WF_4l=<- S*\QN$IО9jPou:T-u>Vb\0?6mQM\Tī*-JP8U ]f%sIZemj.Xt ai=NzVxUޝhd)v-MSh]B LjF$}ڥ0IN󷮱ԪV4HP$V[)%Hi9Iܢv=+QJRIp)MT((jnYr=).>wv>?NP9ļ 2d h EBdmN+wqC[%BͮFw/(-&A V˟yDĥqԐ{Q'192WYńxMZݟifG,YbQtiMm 撜b4RgޱFL /v]af?Sڕ) kgX&]1R 5PMKq9?Q1M,%RܪM76v4Ha&Ѝ;s. &4X[`go぀ Ǹ>askey\$dekI 'Z,4O1/c\ى2e?yro潮5#<}Tj"M.K(u>-y6Nt%g_!ֺ#;Qer] ZQR*3C m9xx+V)n9eptEraì8^-7}?Wm j[WFU+wN֞K2q2 9ıW.4vҮى&1ro-nwyphsӄVq2xY)N'[u#ҡy| JNrf:1`\饅Α=6OGm~NL-aFQrJnG~\;fQJSm/UG(su)e]G#D?#-C,ZȌZMĒ->1H\n,qqTp8p 2mE>ze8[FZXe%^߯K 𚼶>b~~;"CdBEN֦zA|&(. m1.N$"t*Jprrzr^)5 N/X)#'cφxZS(x_؉T0XRO ,Z)fug.Fy*:WQK9*mIsv\ǟ"qBYwϹ00AݿQ):•0V\wB[= |Mqw6nRa@fA=xnvjqܘ Jw{#gbGuˇ FKi-';')q+˄=u:1v[=;sq7ɸ\ 1-Mqd&D@˘j#hiA JRHPqS¤gLYfO͞𾣉+$(J]s7*p qE:Yo9C2 KH:Å!"h|7}"-m *7?g̺fYFT$Z18䃺OTNQ ƫ_ҟp" 7m$%V_Ihva.8W+1tDAN)LIJ%S؃oKF6y"F^3Se-:{TSw/i>$\+~׊UirN!>=?3Q|Vifl(0.e(PR  !1"AQ2aq#BR3r$@CSbPs%4DE 0Tcup?_hmi[}Q. RAZgRM*-!P8K87]o> +Ux٥Ѕ~ɬp')kivƗl 6Hӥc")n ^St,l\ 7/nmISnGT ݣW38jmKiTw@=!srr3M273#{k[?jbb90lZǺ%7Ekl{?T"R&%&j\MJa!B&7* ¬T3 vMݼr?'HKn)8 )Egug{Xf`MS] VŁX~j#< u9Gb7_j9T)+h7o1}"*u]UnƕpB?!'F7*-߼m:L41PAn xk{`Ibf'v 3lH+ZWCsrq!h+ 0ÔjLx0'SlL}N@,irc-T\Tnvwt~?(\3u)6;4%BAJn}/o)E*6a%-W(`; #I˾5MmۉҖCJlGv?'^x1#?w%f&ۼz&C6({Ok. KK+ˎ&)4]LS͒JGuibG8U;n[HfHg6HI<?g%|z9UZ=dۀ[,G$ 8K~aJjBՊYwki]AiG#FPjm|s$=NuS<L'_U=% ^5Ŵ4&cztꟄ>#5&%z{h/vJʹG}Vh}=c#HOKUiuLs@W?ETdژe/6yɥVi]x'B}>]"Va|9ہ.wTo3pj9A&:aCag-&Liʬ;H&>kWu"W%f'7-\ǀ9_!VfnY#l. q !դ^Sj]<9r&]с''pjlJ7$$k/)tPD'fY`8uB+hv=p~(_uB񽞚Kc~;2w>L?C YrK_lj ۩t>1+rTR5 R0gV| *GnAH^cNg>B9,_wc8):IpkTLSՁjcEom6uԻlqY捲.59!͘J%7a˛woAyKOzVi8f8.aFJ Go6#є-a/N#3E>m6ԳH!z(IDelXm]]Lzi^TiR,>\Igz=ii',?F*Zi5V®7WlS?>zlZ>ϑLZ,\'6Fe%ˊ,?f|$JLRVe\Ҏtk^.W^h*vP nY {NZGj55Nd 2u'+ܥ@chKRR2ni۫^ݰSIwn(:."nn>䠽!|M*3(i$_p0ċ{ 󀴛PmJ)}I2MڣMsvWǔκKVn"CS_0HoЈܻ,rn[+ܰ 9]fN4e,զKi|77#OzB6 7W9[P~F 0֥O|GyPӧ}$e]ŗǴtf&kxT3O/[gۤӥhn@z7Fl:{B7md~6 (W('g4P <.d}oO/5ުSg%|[m: (2htCmts!\'֯4a3lZ5;wޟGjedB+I=륡K29SY8Lu*&UEn)%1lte>PO}I›RQ٪0ސw~C lj֔l_5$4r9 BV^\jM3%5PmwtC̻wl'8NO,llBM :ÔY}hnITM/%6+6RO;A?IqKNHY) h|P}'*?c~FQhUfS+>i9ɉJXҴtX_(jԦmĕUanei@$gLX¯2Ed c5R5 ӛF,¤vzUʣQ)iۤmn?gm>1b2!m(o"B&'ɖR>neYy rƐZfnNWֶ] p(UXWڴ:9s/6m{=e!+1[ e]Qe sM u))AQiҰ9c%ò敻”>;COqyz4~3UE%Ϻ#Œ Jh-<.7rK@^ZB-n]%QCw\8Tf\>j"0.'mM/̄ͷދß8Sh ӬD6vUt٣s=cDהT4"5rc%'Gv c3_-;<:E*'X\}I7hMmïȵ9:V8d++]nqJ8_}:( yZHRM8KL2&Rӷ C Y)fm=F>73V|OudN S.4w[wGc=k'YgUCCU6ukx؉\hO,B5QvSW~jL /zuOI=r,Ir/0/~y[T _aI)HxKtp󀒬΂$듊#xK"?|cҴ֦F NozB}R`U'$&kzcxZ{Ѿ_N+Cgr]=p `eUZ~/j[zIRܫŷZY$~0j̃[SK5MǾUKh:M ~զ~rjA\R06&:,N=yUZR?S '+U6jRމܺӍ[.onREI,%)+B]Ze*$a;*仛CIƷ_M(r2YpcMЈL˘B.pN"AI: ɰ?81&'l%uYgCmA>%QE%B%ĴnUʲ<#넠2㿫11?Arw[l*]'%?('VNI;}8o^ V-Do4(\40a'a*D9%x*?xujlՕtDVr@x1 j_N&Yekp%E'̤uB5Rl퍡6뽩A)QSx8Jb:L̰OTX%]A<>>F ō7Hjes|l=uZ7m#.&76_K7PD11\[MKo`[l ZZPoϟwC8%ZaInm7uf(.*V!9^?ĎӜK iDa'4Ji+ ٲRy}"O2vkBVW m|홄Si\U`Ȣ,t!e##\aIN_\ v, ۮa EZRrFgeMgq yt7LH2uf_$~0h2ԝZCҨ`aU( ~Q a#źs:3ɼq<.[hQ+0rbe;fң-X Wڪ~Je֩ WaâؐkiimKT,zmd2zaP -L2߼w"!%psc(Dat%k>%X2ujq SjY$VaXdɿ྄U.n)Df"H%O4BuȿF\F`[ RLRdR eu}s7:9ႊz7'SNvXKMīr;:R+HQL*Pu,wRv*Cm6MOUiiZu#1;zU+5M'&1X!2 uBWB6ΙJ,~V9==%L2P9ńۻA6BY4(yciWwe*fDm3hkz-UaYqenRGx L;OqJF!CoC!.MzCdj,]fQ[+%j`\|Y>jat*7PvimnlĬ}wYZW"وrU!u2(id$Y]!bKm5$MC˫bCEE R((e6mi^| /zyĮ *C]G%CV:\Q%H[R&8ͧzf382 `"^!yӴ^jIyȃ)5e zBa]wxe?>>e\ _`oxRl,>k^^&)R7^Q~a=VgAăi* 0˦Yk Ҷ2nڦnX1}(T;-\/e:>WPJʮ7쎞&v[]ؓFeV\¶_Fd_W%*ȤfiXKB1|p$pbj=ۨzwn 8TLՀU#|R]rlđ)^Vcb\LE[EyK~,gJ^HΌ.#N_t*+BjaԗOfqtICE^89^*sIw tVo6zHKnJhk, Sؚv%ZN69+>JQX_k\sNg噇k 
fZl",~_f%e&N CU0.OԴrHT/W)wOǜzj%err4hS pw[B%vr(h45K~aR-wwhqiyKE ~pҎhu:ó'jM\SR36KI.ۗ|yz(Jq[nRCQ>Vj[9}wd̛rK˾r=s @Uq KpC^!0m-&Pòs)uYJIIFg(bAZf1:ʰ6Mo`*Sܬ@M}eR;P B_RFbDTƅTx_G} $B^1?ugܾ%FyXhAnO3 ٙ%v4Ĵ^u9}rN[ZjOx-=D4{8ZSV!N:å$L eg/(6bxbSi^m)fQhW[ :^97I)Z]JwQ22GFUdջ zaQ-\ć;,|.$$9o/Rp,vv~4N,[beɄ4AN8qmLT4e yi:1K~}{2Bqk}`m:IK=Dۼ/`,}*nTJfBEΞÆ\q1d݈6 NP~S^pgiPqi aHc`XiˊV"4YuҤ A uɫH8]++\ZJT<"An?vp~l NĐpE%kbA\:Cݞ{; +p>ꆗ)tbeg3(0<ۘOy/筢(I8?XA풋W$Mb: 8&UÌ mƥK2 IJu[)IS(Sđq1T 'wm]aSڻ) j_sחHr[dL@e+{gxe'%l%@ZǞF8sW5L::}SqGU)mn)+™WRbIz#hiJ5By2P!>PrNYmKLĥ r=YyI: M3?e]z 8U*.T+ZUe%V2u֜P 3 |찿DX R>´Sjڃĥ9aq pg*;UVl"6ޫpJu:Ĥ%TMȼe}ۺ|9B %DÓ* O8SzQ j S(SJQy&wvRj¤0̖ѴBRfKcZk))oP37kJI*HJ#A-:MV^.lZYCxC˷22~W!:ÉHݸ8w9XڙOh]F57,-k-8"^`]lB=99f%\m Q$xZU MTcU&=zmxVz,d"C*[|]J6Uvjk+5/4 =&]8ių{9zsVX^8/k`![5TҪFAGچN'#ЃNN#%aUzߑL>V Сp7<ѱ Mԏ8ۨɪJzVt=̖ G_zP.s*)HMu:Fȥ%+LVȌ-'aɩݭ;/PoCKl_<ȩU|+aKyN#%}^2R3RO"̵9.Fd}J#^&ix ȏMH΁e8SMפ+f7.5)fH=[R~RCEِ%[$/hX -ac":E[h6V[M*VQW uӠe҆0±0aPQaqyo[qW*la^qu"j/%6~Ħe'8__pCl UZ;HXr?do.y*A\4pW&y+W)ry7%/&Z;rysts~Ϯa7$ڒ(s{QZfi,kYk꾽b[.Kֲ !`W(5)jq6i˖zV$T n$ ͺC5*UE5t-6s(&U&YW8rc76u搼M1e+V\*2)V(b6BJjji}~Ӥvueǂ<oJӧQF=xqG=P;*ҿA#ֻs:\P>0QmᖰX ZJTݺL0 Hdct) E7-(-&b^Q}DfX¡puS.M%͎3Nd\s11Af %VO9|>J){?| F4"L΋~Q/~^]7HiFQ媬q.6[XRI 2zek 36gl^',m0 +®?k#dG3κt?\zcpC;[R}%$fZi(i7H<-nJ6uS|W>2Y-&K[O}8C;&3.X9s[(dk w L#O,A峩G!pnqJZO =uNi#6tg?+Ef|txx\ { Ү(9)LBsei)3ad^ZiX)_R{=rj}/ZK XLԉ/u_/y_%1xmzYzĈonN)wtz9ˀC.*d(vœ?twԋr&;d3N Mm̌bNJ9wҝA _iqY ԎpfPJe7@zU1Z6;eoYEHݵ?^pr@J^P9}0X R겈~]ƜL2Q /RIōVe8ڽGf©!XD [ID}y7NK̸ipG_f?ΩOGDܓO)*J^l(^vT&f~ZgR4ֈtԄlJRke+A(FRGp& ]mZB.ޖK \q٦]±~/C4uZIt:͸ qn%Rʰ& DvU-s۽tPa idv `,pubAU82C3HȰlYO/CZq8 !M99_e=˽P $wS2(ZuJ [{K*␝l1FYIAIKGOz]nmB_8bBI_OOtfg J:u8w'm`6bU9LL8NPv{bЗ楔Dyo}>:\ï>cykW_iڹIqvj8Cq&s¦}iķ%)xb/L8﫩=.Gm0wPuז{\?;T:RH9Bh:rRR!.%ڥ00-KeKI?l%RL񾐹Ku]T/krA/_ze Wk3mS+ Jzm");}wcFotÎ&,PC4daY{UؖJAQH˥Ɩ8б{ÔjZN*B&p-4^QIVR~k+rA+.!r,IK7 PD9UH}ĭxY_[N%&Q(F(\ BJ+w}b]M$) 9r?&}8ӆGQZ6ux9U+l&]oL-k2NJ5Eop pkaU}qWLݐ}ۺ|4쇟[x_tfx}PYk*ٗ_ؿ,O͛ܔskXS.J3,p. RnY8 )-ͼJR1[vX|e]y{CÔʳ 90հ̶Sw]ZAß(nFZIUNe%B]UC_X.꯰x(ZAX&mJ6KRnlwhwM쌸 h(,>Q_nmV7ܸ_[_W$Tä@<^̢̼=n-NIcpw=RyD*vJNmr06c!M.Hc09\D9C!BLW8E?h{)O KVB>0Y0U,!|4ة`Mh'uO"T & M+BF#]C6Nmv=g8Bmx.0'.?jEQ Q%"@J|~Fo շz/fFP㟉XTp@RՐiGnzzq 0f0ԍ2In[^r5F"HH4 MH`+2Nt ]QY@;˼K ':XOB>n8VD,fJlOL+E]9A!@dF:[u`9*%+8n3W9/J+?Q",RFu H9#~Y&R oIa5?-E%P߯0v 3e:-S Ĥhh\KÌ/'ISm}JjBDMZ"<vSy@zh G8$JsKkp5֗Q > [IpC4c GQ9pz 7nYE+ ߝyY}jn~2JH5#).em%hbkbwɟFl+ JR.. |1PD %- a<=C 6r41!B 9$<Yt xm*m9_T|t'dVXn$l.&\Wby l4lD2j @ XAOi-(%>0Ͽ = ĞF0ek > E t NSyN5Ѡ4F>mrS2Is&Uv0*wrC8nr,kQPdX`SӡnQ'<^Лό':. s@V T@}~ GXL}?BHBvP5IwT{5->!\&0잰/#y0b|~z$@0/ p %Ъ,|zZ7d/c 86eDk MǾzM;XhSbC:vx:YtAzeVv 0t9iK0nd^q4 s[>3rP*]*M3Hԏdl 4jUQBi=l0qPY$d!md>k"&t`)1^P<. 
e8ԉISvl0<.v)3&.'[qkm} HzŇ){0HlȄb4̌ a@43 SьGCɇu8#•G&v`gf)yǬ"NLjŌu0 + ]J 5K& 7X@3)9|z`yAy#iiIȪ9S_nXMXٔ#K# 5M( tRBXHr۾j럱ẳxB`#q@BdrEIɄX~r3q3 ^.fEN1P䰬wu;&z-t2mIbQ+TDf1 4 Ha$ }Ns8b/B- 7 hc\bHɸVF o 7)43&~Gpa:'i UF>6X=|,۫pk5`?1pw@Qw{vAJ#L!,oV«5uy"SX-=6X]J6_X#ЃF<``*\WBʨbIGKO33RqSg ;A((a5DVCt`?-p<":|s)nF-rUVvŚ- F؉]ѣ<S%@N 1 uæB% pFcPUI㷶U)%?b{4%>󧱹2*NpUeZԉ9$>0SxE%V %U9g 8:eabuSxk`y1{yFi:R&)lTQ4P/nJ!0HC:9k46"imt+ %< /H kȴ }aLIgP$*y8k;vc)/ڃb~1}_%0LaLI2rϛ'_$e:!= * ;,#` NUk0qbvY zqjWZ\OHqf-~O3Ƴ|MdKL=^%@*OSHェ&>[V%S90K RʁH *' -?v>2fDDmsNxDi\ПEU 0S"V+I '(hz p$KUDy4A q) l9>G+ sXQY^ 4.);嚫uG ^kB}-E8ko+&A;Prx1d3ATZt2`hJ31Tc#dj%f΄ $ O o¥@W 5&B@efPF2nY0)5Pqe0:SWX1GcN?D$1CByfx:`XaO|eO')GͰhd 2K1Ƣ"Pĭ8+%HrA''ɏ)ʁvT~ N65 q L46jsuh2ymPab"y{W)ic :}PnFK*Iw3t{{JٌtbB(k>CENȗ~ jъh[.C#A2,&LO[\W9äOYdPt!kN+& V:>|q$Ha ӅM5+M.Q:zɀ?$gZq%AeDLd <\j 0 A+{)I\.D;kڌrܚ/# E4XvGΜ}U9 6DjYsKdtFԚ'_~] U7h)6nh5LIl2[.ĻkB$GAF d)v-~NUfg*9Io+MĿ[2\hȉJ-ܨ.Or5 I?LJ62) jeٴg"eKքO1zI RT5 W@Y&l 9yE$P$Wtl0AOW3A$ه:>Sdxg@D:fF D2Dp`ؓ3G!5fBiEtƲePT @vA*{#PbPu:|Œu0ebJ0HCYT@ _zww@|8B {`%ঔOشJ1~ %`*0ZOdʄcLtטJRojiLjz]SY(^(@"y>ћPhul.Kh*@RDZ1@%$87ffNEZlxb|HD`IܿŠ`˩,ؘnI}p>dH#Ry*:KsPʑXH4&4I`eUX e %51[0 ~F JBoxBiCHq;@˝G8D+婇Ј)(? 4HlNuO | n@mpfsX[[!3,y[BͰ2<-,g26Sϳs<+ Fi.sl/LZQ0 K+Ax8{~yAK~252b8?U F@o\@1ZSύ'+`F5^bރh K ctTr Qa%xcm4i#,7D71;J+hɆXlv.!e+l{vLlBujR6<:&"Pd_o408]ΓBap:{JY0TFlspP7d H2;=" `=[y4D@(l9u$ h:DPpG&wdK# 0&ΈU-z@h08D-/fIhWlMQ¨;{G 7? Ǟ 9>pbX" Rxb,89(,Rır3o:6V~n1r|pJs7Ɠ"=TADI!'H< 8l)=2s/>Uf]+˯cC5$R?+8)|BHVDYTBAB_5iy6.9A"eh$Hu@r49g*C۟Y"`*sʒfĄ$Ԕ䮵*h Nqy=D H]-Xӣp5NnIdPiUUmUmƝΣ\#Ѥ IR]iJ}:D霨uDD H!H|]reA_ҿoxxAdd9uEb(uGx0)7snpJ &j *! N9܉|<P<%SNQO88u9&r x;upёG;bF ,}R*&AC"Xg5yDlLf-!j._P <1KɨC0(D?+Gy[[~ ,$@=чjKliIq =xsGP1qe9SmQTׄ}Zc oY _?E?hPhB9VC䆀7TwICS|+3l㵙hW(Z~R;5*p  ӳܐ#d!`@Y(!1AQaq0@P?p /O٧<6j%$iy8 Vģ 15 xwy L\E*I ?:~1B|_|@fJT&Y;hzjf_B KOOu:I)|q`PD+3k'Ҩ4C =T2rs)bɳQ*#rYwi#{$:Ziזe ec+:s*דryЃ^y_/d #Oh&v>!ާ U|ErHb8zs2Ѵ`[^ p; cˆW}Ք5 \z?gSxg|S ƪR.tntH:9Y!ɯ9``L>5!}<m!Z.y1*l#8(]P14#u=G?840\ӰxbI2/N}aٔ|_KìO|Q/gT̨RjļuA2-X3O=q#tն@h"B8B |pG¦J*/AKhɍx<2!FWl*=W*UW ^g%xfbihB0c !FlN*.#V ܊Pk]kF2sa Q>2L$N 01rPWLt<:|{[X5ָ;:DpD !:c)X aj٠cG׆|lxEd bOq0;;w-\/'>uvo&9k5rp`t6s^a#ǗLLՑa_)!1AQaq0@ P?B e .Cҥz。=xn7YR <}}k:捇r'#$Nn"P+1!/xF2>@rH@:pcO39^  1 BJ:<~ݰ3Ey:ika@ l>qŰ[WgD\?g<; fY W:Bn 9W_x4&|rNSA(ȩhR=st \9oAb, s8"F(vS3))dz!>ޞ,`zJLHX?ql !)h|_NJ?J_@sBDy5m`;,0DX6̖BɑNP(>Ir A۬H#aΜ;)HL=~D U 6ةqt,(o(ˆ fP>K3*4|L됯uuc^H!r> pME+`~딠͠[ucRq!%[prgW޹q|0c®|%UNT!JDEUhSV|ɯ=RsW-Ѹ"=.YvB!!FGO }ۧ 8%Tn=ҩxjR0ʅ0hJI0&TbLWf}# #&c<Cݲd3.q^-Lq+dն1M!=QЀW՝ noy^Lu.#JMj={:qd1G81v|ro댚`]p*JYz`UT08θ( |QZ5BS1ݍ'\$4" BQO4Q7 ` H 8?;ǿxU(=#'6y4]T =ܥC  qt]k uC3X2ʮQ)xUBcB\ffpLOݨk~4GN\_ugB*',U4dGY<_LaIgd 6u6=.H{0bU OU$f\-# .w@+x QTGniՠ1H8h'`m%+GUZ"scU0_A*"8iiU,lhWaaɥMcLJ]nZ0@n?D#F8 `K-b Bm:o|94 7H9!x8@EL".(,h ? !ޚĎ} "`غȊz,'ے9ۡRb8H1 ;Ci ݉i 1UXKkj†%\Ys8L5ev˝HA`zvU { 76^r|̼p! /HxM癅43ap"AH$NY3m{$1x/|G!BOrƜQ#Q@ph)r<E*ߤp%wُY=1imсa9KK^9jICSݜB= 04\b`ȯ ۷PqKi{#]ܙ#0GLpRKV!ҁ!TUmKv '/֊0Zz1clrTQM x* `q V;LԤ [x" dJ^TU *Tc gp8k($7H%B>$.j]Ȭ9TFE⚾E. 8 1T 3Rx+ Z/`K Bv DC*0#]ByR%u ,?8Tj^ye33 !2X a}Fت(Ջc\ )& 0o]k 5T`=jG4 v=HžSFUxyxED<:aOO!sLh#b&0@كnhh"?֘mLWH#?SyY ib{d)o1O+2-4z6,( ?x&6* "(`Vis Ƈ;l@g ?~ľfSkV@UC;>@^2?f,8YΝdAQ*xUH u0Ȅķ`]|Pܐ!}aDW/4|B3Zdm3,N$!G)OO=lkK"aA(GQDr yNUEҖ@VP:'WUl7T{їe)IO,Q+j|jnE|L*C E Rſ+τF@^YӎkL!e]OAF8ֶМVAy~" #nso!`'r pȴ DY)ꕨ8 @D}@eaD}3>F/44xȃH:k "sG=llHӟ!/+ z )eCQf!iQ' (Ag{-d\pB(CB/ E8*#\25ᦼѮT<$a^=sO)G6*1h#^drKh,2mJ>)y@.pxKE S`AAIĹs F B`J8ԢbV"%Q,TAm! 
JڬJu\7x-8>~RЍU3T[ ϲ>~uap@xU Q}_t(O`tcI{:">cDtq */._ATBYw=٢D`B}0P*&At|xZ&(XTHT(|ME}<<o//MvP %e,3C[P=3j$Cgsр0)u(3:Sj#p`;H ~ER0/UDIqƐ<QU 4ժ\(oD{ ~ xN<_B?}Tk\1z6% D-ͻ:DO!j.1=??ޝW"s؄mS/KL _+^kW<҇I-Z &&$KPIX}rc!C`V22`)9P,v ",?t괩$b!.].AU+iA"8A񰠂xGo@l`zFrYTR^o-^:r*! i9U!q81k伪!Qj`gq&PG’̆+4fprAE@ ȝ~Wxs%pz_糱z2M7+$ LQDA%T0Q:ɯko :ӊ,̣d+g؀>X:]a#zPx>QkL5r_(G5maLB]ty DgQGZP01՘8_ 8E!@63{B>- (tf񳑵 ]֧NlAdp&7UyE,v 8*hDҮ@V<>e NPKi,(0W/z)BPG8œ )|x` Czӛql>59:yIƨڀ8MmQ$B xZOth 08nd0cph&@Q4BQ:% E ?}9 !jwfθ3 >#H@d<$$չ;!}UUYޛ3?}|ǐ%Eρ A'~A7y&ZNrKP/\s\w?pXCSIQ HCT<2e@5$KtQe:'C[aeA\ |Ԅ'= ithbX4*i?`"qφ}:kWKA[#"'/@ W72@_ FUShnfW jCQ/E$=:/sP\*W(B>Ŝ~ DФ8zl2Z"%IB5Pqʨe騥F!$Z>Y-h  $ G^|E&(6/|=D9!~,*M 1 UG d0[A_vS@6P.Ҩv,>$x('U$ -bR=*)VH%8A`rE'?_;#Ăc|GFA& ԥ?Al#- =ZŬU$l) !t! )[+Ad:& D B(|C;N84ȁj"Ym*G7Esʉ'YeN` &2kQPG80ZL Ɵ=~c29N^rlQA%RuAPExxҀNaH·,an+͔Q[ǮLh( tuV DD <$[T4wVPSTg^H*R'~1 ,hhq-P0I>1!BA#p }?y_eJ4`- <R3Bl4p8}@*熀 CgWk%z_#2MfܨViѐF,-,tHv!D__1&? ?E`(+."nk&+Xrzi"X ~P( AJ>V8e +vA\)2x$ث%BE*òFQ3 brӘS? th@#`K5%-6"Ƞ%4HN%(3d* )Qk_# ҜBRrtFp0KF $Vslik'U**A  +]QF2QTʰh#mU8Q(?3@~Ef2&^xeRSB̥-u"$%ŋ)Rxak&jR{0[,RX USaqT_08y39!(4E`CuXsan|)PVkbA&r=ZcRd ɻJ1 jCDzѣ'Tg ?9,+jj9)J Uj7L2~̔Y.?$OuGPczYèQևghR"$ފ5̏ȅP0i:YdŪq' sj# &|6D & âgҬ|NV=EmDIPPk#%AuDDbJW%)>u )׽ atR2k)ZNH3qQ`9Ʈ$րp!%)FKΠ&" 1Hp֯e %i. Yao?*(љPE78X&늠drI 㻷P\=Azk*v,S6PhtK4s$s*V~&NGG]Ta:}O"οC#yxT@^VaБ((v D} ߜ娴"!r8 }'faDg)* W  <vH!~UX?YIRt3)ߐ( )Ap<$ImM'JU[{OR:I)*u>U.tEȱЪޗW%ܢ^ ߷H6*_Gz3\=u'p (ɺ{¡|_A:|GUy=>g/3lkaL}B-,돽jBV1R*h] ??OxvO£^ZӏXQ Vr{6xZWWח* gaQ0>!^(X,$iIý@x5 Bh zTo(1B3EAy4mo{[zT t===fd2!$HZ::fxmF>\uUfmI'S p=੧ Uǧz R)$t:RBL&jPQV166QR^ϐJBbX,a@.C>G>d* kjT"IL&W===;9|HRd2Ǖ+J˱h"r9$I̙3'Tt$$T]*b^GWs@\*fߏ~ DLiZjcYXt:Ixj믿Z '|2J#R 6lܹs妻8ƒ%KՅ-[qL| 0V+IOU(Ij&͝H$ݍ|>>r@H&t-r[5MdYTU$Iر7p <>φ pYgauAO?FVBP@>nj3B2v /1zfyjH&zzzP(P,Q,8t:\.!J(Qz9%J^=v)/xG166={ e/{!W,T*ٳgsUW]uHK(- m428ҵV3%J\3LH LtYk qZڴi~_"|;w.x\qXl֯_|>H G,lقH$Cooo8w7վF&=n3lZm.qrC9sSQTUѕl6fl&ˬ :${bO't!f~zfmoCWUr-?~H ׯŋ1::&r8RG.ff4}JIfD.;OwJ]H?o6a=XSɢBwIݻp9SO=΀g9d#DRΝ;wq6n܈l6Z*BuDPW%VqζdTɪ%P`˖-YfkxBqCރ>_ݺ%Sԏ*r<Vrl6Cw ]ݖD q5Ü^vJ{.\zZJD??3Έ{*;\W^y%N8| qQ?DOO"f͚d2fB \Ch$nwHQtS: B]$mtZVkLRnzUj;jdYdYj0f[n=܃d2~陒ַ5ioSr~<8c,[ vB,O"S4J] PzxxxxxB,YO=T\p/݋ \.cƌ f< I?˶VE*m)DYىLzL!jMf'T&I(0jY7LƱYT,m!59=00/_Wp0|* !.͛Vxqxqc۶m *3 9$m$z=[cRVHDˎ ֆK,$ɬϥTRUZ]]]B]zR\<%ZTm۶F2ĩ:i2/~q /rJC/_|t#D(˸{ca̙epx`ppp˜K.\rL&Gy( qfkJfBooodD0N^G\vkE}խʥ%\G!n6fg~K1T]I:T*7K淉H*6TŕW^}k8e9(xBqcݸ;l6ߏp?n&$1vRa_lMhkGZ/ D|j둗JSi@3Jku5L?sjǐ3d^ɯtM8s$PJcvŠ+bƌm1$]nf?gJ]RHcV+$|QYky"^#Hĺv&'xAiZ|CkhDvn݊gy%Q̟?dk֬9J%Y&\XBĞ={b_Jeʮj:^/,h_x_O~xgxScylZpŋn#$4֍8F+NPsmh&y$T*T*rE\VEH4K&ZV\W׳nAIB2kUY$ @׬r/_| 1JӁ#Vܰa:,^:\X-[pW-kx;O|bJ{n|k_Ćh[[N:)ta;w.x\qXl֯_|>?cԢVꫯFɤ%]\[wzq{تvp¥{J_ZE\nS%-9S5lFTAd]{`T*!zRRI˺&$rZ\ @PuV\zđUc< -Z;\q+[l^oo&E Em67ER<;/K a̙ #-8C f_Wq-4a׮];P*044I_ҵ}#uq\S((JaC=q';Cj5Z rlf͊^U߼y3x  ̘1ByϩT vQA8I.xöm۰f| RҤ.i%Gf3[Ri8RsGd]IeкWJWj*%fUU\iW(/7kiRGf˖-ÛfO(vhu]>iS F:SrrиRQbll 7|sVPT*(Jf|5kVۢ:,(!Q'?!cfX]vYd\.7 N֬Y qhh(ܛ[טtx$t+fzN%phmIEH¦vTɦRtTf+RMBRh"{ZǖP *gzA\ÑO(w.\/̜93T8PBi;oD>G.C6E*޽{#AԔ,044nJu,4G?@drJ\y8xqذxb^|>btqZBY@TTҤQbdJ%Тdv *ty-^xߺ#d͒MmxYZ}-9hypM7sj:O(;^nC\t"ƍCbȌ,vaj6Z;>1Yhp5%N3f zzz$tWgv6de tF j5p lj'>{ʷO;4|G8ԨzV lmj68b+u%94fo"kHHڨZ` yS%ֵIh6m?kJrH&(nMoP4O(cP.fGZ,I…as\d( vf%iJt6 $yJ2@d2\. 
P(nn(Lir u2v0u=lݺ7nDP^"Ngvb޼y 7܀[/p\.[n ےOsC Rw'B;D*t;ɐ|1@ySTh'iY^ _W1j/ƮƩƶx=yE~ǩhJO(A;^?O#j !vY6 o.,쳡(dl6Cgdd?q%O6/'R A\.ev>H&h|O9h.ɟ e\.P& d2<:#he f8OJVAl9]w.i7 K_-[6e8زe .a``\.DUUdYBʢvMފ-ThKBH1-ko`^- SsY )'P(2&ILo&nv ޯ3U8drɒ%xꩧ> s.{ضmnvڵ+$K:d2G;#՘ 1)ԗ%$M"@:]*>g^g@Nkc@6 U< TŭT*سg-[F>{2p+_ .\|RC yf,_bgac̿_Js%H'5KQf|$X"F]>ӨʧCo{qz+}jǓd+^|EX===xx>/2w7o)2՗= 4 <UVa͚5d2tfbcK\q#lbC瞜ƪQg!(PCPk{_%;5Iu`R9uβ=3عs'͙3lvJe/~W_}5.r\x 8TXt)mۆ9sD”JҔemPZbdU@i]\u{%aT*㚍J\ҖS,$hH4\%6NSCIK(v؁g;w.z{{90P^|Ÿ⋧SH=?3bތc6lG7>XUBsct1$T(UR+ZI+rn E5ؙݹs'?#BV$>Oo|cLlK.%\.ɄQzxxxr1cF\ng7+8JCc]o$RwG qDR E8kTcjZ)]#Uyz'|N,#ˣ$UmxDoooB] eBerhh(Tpk2bͺ$sY_:;Ev(CtY{~olCTWϩ$UP%8.׆p˯T*E{ >sfqԣX,[oŞ={ڒpt6K]VUr8` %h5V7QeUmGe c^Wɵ݈Dxt:ڶj SUu|`Wx\õDϙ6mBS[g{ԕ)nC;#gکlա*-mXR%Zw / Rv%XI=8Ͷef'_ ]V6YӄRҥKq}ahh(0GII@P,Jrm3Amt:"kpJB;HI`mY$iLXvϘ1if ~_X,sΙpP߶Mu8SOa}syx? ߏk׶mm1?qW1Tpyuom%rJ\.W7=UJr9Nݵq ܌CCtTtIQ7*16UU=v=R>_-il޼7x#.C;f O?e˖a۶m =uY;Ɨ) a#ADTj6JkbSuYffول)Yxc9}rYJ%,]s' 0<<%K' UIŇ|h]]8&茌:q uk#8]3*mxtOpLdbM\]'\.]ڈl;R"nଳ?A{7qfݻwo7n܈o1t6{l?{- `pp͡6c:YꊄxnE]G7U,qlt:6NddRATBTjT=+V*CͥST$~e S38㐵cP.[ ׯP%T"i nMG#FYY.t&b9W:FV =u(YMva"jZ8pm-]&Ⱥ骠YfLNb qn~T*?!of:4cZk.,X +1cF[ouqpe}r0Y|jF/zK/@˗/GB]S+1M'TT{J8aJllw%|iL$UF].t]&v$RAU>U2Z.LD*Y'^CE'3itB%wApFkbF?"kҵJ*kcfw.k)P$% T,%6MU@80immЮDD"΄ll"xN*kkgj6زe ~~wqattׯ͛iӦj§mݒA :RgNp4JO "vn̙3}}}xk_JÆM6u{]ئ5ť41*ʤ^$NZ+t%!f](aMtiIꤵ{q=Pdu:r*N$:t&z7nO~ d$ɍ륙TF3LH*It!KBgIT\pDtsKϣȝyVQ'5GϧF \n6Xr%*>яP(xc~  Y7|sD 9sfd, c>Rh93| Y+X=%XihZmJKhuB+˻:~N㫉@bcjq9z]ƚѸsU[X6Wv˗>Vk'Ŀ]u__d3gv!r.kd՘Y:5?;ktS\uUj? .0ԤV "7W:*T5P=}%*∗ ɲe0,quw.uAdK($ۥHZ6]WuNn UD"q-S kl;ֿ>JkGp'Fq@_>JBרn.H&mn>`g!p 1CC3}[jvjfя~tpW1V۷@[&d\@%H~AhKw %u*Gޓ:׸]U.H~ש\ݷlۉD###x۞T$Ac=.Ocw U$Uj D4VRVͲ\/0RNYwٙ*jI9ս*>qs 0Z?O9c*A͛7ctt?<֭[n 0Pl;[Bɶ*lvsdn~%ɶɨ*T{&Z-ٳk׮T*N: 3f̈W~QuIl^ۡ.3 #tǹl&_$paELXi =&U7Z04V 0Ѻкe})T7+{"B磭fe;0k֬u*p 066x$=h3;!i[b V #. 5tDHlvGI#LuRcU|OxՅ###'?\.wHf]jaݸk{ny̚5+LP.K/폪P*3Bi)|Dj;!k6T*ql$-w}{8駟p,ԥܬPuq~i%cc̺K K~gAO5~#uJ`]*u|҉O W\.E6=. JWS2<<WU̙3gJmQG(Ylm8)6e_ z 4.$Sj5<~guv@ũj UQE6TJ8TeYY+go%T < P(=۶mw]i̞=;Ldd4|]]].nH&V*ܱ$W2ˢ3 M5Mw*> VXJ|+>dã#CñW=Uxe`LN,16nɭ֎5;U N"Ku~u|н9YK RD"8aNRX`;&p:XfQE]i@4rmD`w $gB]Q`}qma`` l{044t;v`ݺu ۝N̕XƅyYo:vH,nNH$mտ8~hX'XTJxjTqjN5ԆٺcJ%./fZ!N6ٱc^xP>ظq#"4J;U%Uع?NR>3K(-tGgp2x^WIׂæÒJi S*k"0{)=T*xᇱl2$Icƌ%M4[@ۼ T*vEƌk 4]5:lOj{zz& ]sυr9̝;t1xY'x"jD0.n3cYb DIhU3;)z]+hKx,bC^&cC:ZdZ5fwOd7GOX5FGGQTd^itt4\c-8!YMu0l2 *.;qIv6*lX"6iqMu\&YBe:QCL&q}T*᳟ch4k ~i0"Bh{;C *v9&Z4ƖLBQ*]~)%Ͳl)B6,^L*q }}}r'Zl'T힄&>Yq\ (rǴh`ѢEشiS$Owa֐хfa1v}<Ʉ}TEFKam'./N=cYt$N,T' S(_k(/P!EJG<,Jo~-[ JE2Uep %UlK($ܒ7SDD6 -!+)vdqr8biݖr W>SWh66oތxއ cҨXx1y0JJFHCs5Uu @lذ8eU񏺨+k] mJ]VUuZ |śmF>>c@EuS,aiaL&$xZ*>rDUKGD .;T4:qq#5| ˡRGżyz#P[?xhl D%L*֥dU#&X6]Z2i݁v䗝x^&n")]궄_:K2%5Asxx~;>lO(r֭ҥKmNǬq6d҆Op6S"A7Jʾй,ֵjϫJ:I&+J%}\۷MEj2.]jR^b_ !Z-m`<,PAEg݆ѥ";aI"@&  v VDy\֣?~R-esl~gI3jO{zzP*0|?#^W`pʻ|+#JpzX:8vR\$ÁI^Pe B4;VgȣUS5j.Rig6Җg|O"15h6Xz5n E8dR ,!.@lDB'6xn6pN^k": m.Ed-\r%y~#A . Sqta˖-/~-m WPX7W:avpL f^{rh{+X"l8W\}.wkks:O" rgS#Pڌa׬ߕe ɕhms%y0NhŧƸ ുkg*s:+.L̆2zZ-,x!GXp!VZ&hB%OIIN ,t }RO pW?״NWNFXƂnt:%K`ǎKH*Ⴣmxk=OcBۣ8kx/?=g'j%n pyddZc փgmH1ౌ_d wUz$G$lZXn`\q{X,W#ʚFGI $.<(tӥPYBRbQ!Aޏ˽ogv0nlv"`o\*vm8jK⡇B@oooZbPu$nYہ_$ϯ!'gDN{E*m5mR5,]*? 
.D6k_ڃv[yL݋͛7qɪ.zWD*ȕ8Xe.r-~ǺQu""WNc^w-tJ|̙3=Bl6b 8 D6>٘zH0m,JM8OӡzCx\ó C)Ad2HYjyTk c Qq,@4~ s#^z hw}x+_ 1~{tMp]j.7xBTDNT;] $gۚfSɪorԃb:@%IE~hepK7|3z!/o[Eۏ%u|UI\$m>7j}OǩΒ#.ZՏbr?׽>Ӝ [NPs3);òT)혽8e_Ic 9O$QkL$௄WU%vcDbT t}Ǹ Z-|rnrl66Z5d<E(5s9Ʒ0|d΄L=[RWX b`` 20)J.\}5pI؁52W-)T*H$Z:]q~jDضU}<֮]ZgQrxS5 v5}]I6̒,1%Wq%Ǫú\WK*mq^&:gnÆٗKZ:΃ g;Pqrk0<< `||RoUy>zԀnϯ,3K^TҲ$r$Mvͪb8QEBs-tRI$2{Q,E(_c0wuك~2y[o144l6 =8ε"D@lɛk'u.kP8ޕj鲞1ރu5 ,'7x#vލ=y NF~0 w788$hhNq+|.N>>*ٺdefmXҙdSOaʕxӛt@1G,شiSډP{+98.R"Iu-;,O˥,R]đLs@,ͮ٧n8B3 J(iP]K;k >N->3~fLS+ [vڅ+V/#42?C"@?t]K_Vqk'd;Ygq4Eث1p;;pbI.cʩNd[};J%<d2x[N:=,Z- 'i IDATѕ-4_=+&g2P8Po.*d)Nj`?-b\~U.y.)Lؾ};n& o8 eT͛+54 p@$\@\mdr|]G%R4 a;o5H(5Zc(.,G#Bbh Uw؈4B\[l97:+d#)֑5wƯku2J=N̜9}}} DH\$N['"ډ!оNF'*JEo,ۏ=à g&ikU#mɥNj\:7z4FFFpM7a``yk|L1|k_ٳ1{쎄5mڡmE@ o㈭In*@[R+4Ӿq!u[+(\|*q՝^ǝ7nB)tqD(BʺpwD*ePls`o" 6%qڪNhB>@imvXCٙP@Jպ]]O 'RjZQi6CjEΆ;)M_;ɥ$UŲepwN7d2A._T p)<#;jR=zvͅ[%p SS\.G&qV=qMpuPVAeg4'nEG?'> _oG udE(jU ʗ%]D@@;sod]ϵUEO\*LTF^.&S9F'qIc_qh'Ӂ z]w}QK2u)kFhWw67+t66J-mTZ^Ow(J!=YK"*Jhf)VR,gc7HI֫J}<=oly4E]׿n 000Jv@vϜ] Nl&>S’;Sm?ukhPynKJ3N{]/\a vIMr%؟N,Y=)qiw` Cqj@, p .g#۽X@ y )|WA]UOGװbl]^8>dsc f>(ސVʱ$Y6AHZg#-?1ڷ.|>:˻$]\nKGNxL 򺖐LV|!eߪ: /zϴud2|or ;t_1\5ٺ\@=.jѸqKHq8=0턲h^ɚ4 Ut)tߺ\qVúFB#FBb,abx.+ UVcST^ 縆nucH+ h_ڞ߆bz/">cΜ9x|lb֭o~D"JDa׳ڟKmDȝ#'R/yNKry%;ѱD:U7YZp `uzo.066  cܹT!$\ U'/@ qdq+MLm?8٘򸈗V@w"<ގC.u!36גK;0wШb4Nĥh'`.(uAv>K*]ʘ"ΘNFBǙK>h}'[r14j]WWW$fk| z󨚓L&J;H\tF*׿+8wٲe f`81Bc\$Ψة*4{%g@ :Foy]xC$Hܦ.콸A '͸ԐUStL5KY}?qExǤڊǡs=n @TRO _mk,%sʭ}ςmk"7Fq̞51b]uxAp.ܭ\c hKk,Ґ$WlJY T*G7>pmܯTHB\p2#a}#XRc{ RADTI.*V"Ovuj\޳Sgj\}m ''4<ƊH$- gg@ĒkXH*|@hbmָI()SOg4 N>ZJu\mNL6(Gs=˗\.}o,8ضm~n^vL Lc{e{]/mqJ6l"$lzDPq/kW& w8"CܽZAޯW 8pLs]ϒqk'u8BIQ.Q,Q.Qj,V˺uV]6T`H$rU(<؄s+ cfL$Q#cݎdih<ui-Tʼnw|,IKPtk%RāEPRA*իq '8},cXn]m־/%V,J#ڷ$նȾ1 ۀ~ο-́] Z. nn.%! &EBIL# ~tt+V@?8p8cYri4u*&yάz l ymyS Xn ~z@5Ҿ&Ce<}:ɵЉ{hYtc290&T'wPL+\x1V\&dp砩 cv Hm \̥!(ijgҸåMcVujɫUK카n ~w8V`Y@TxAq2sRUz]]]袋b طUwމM6a̙k,v;Mh `Tlz>gJ,iT"v&id&<$ %⹵jUBFBwK΋":twwu{_|j"\h>'Jf;7-omɜY!&9>2=eK u'l6Ct\|>L2J"lޟ:5T/qCDz'8ix%r9_VjKVw|38S'|Fi%T*I  \Fjo(U]v"5\DδhUS6ut߳Qֆg]tvFzMvAƒU-< KVjEI]CIukoJo|hZ1clVo* QE^L\m6Zsf[3jN䊰=O3z5O.mX֑NTyjk9_cx{{MjB6P'lz=\64+}Bq!ƒ- A;>w0/M%DV%M.H;$v"TNFV ffk)_&]l<:kFQƥ{N;nD-.nf7&0.ƺntuXFO<.\?"txo>.1D'BJl2L8೽2!@T{j%2p nYbk,[?º{`h\^z-kBbUVhsuC0ْm=/nwэ#nvbIdr"Ae?O$I1;2Jڗv:Mo3zqR%h4d3`ݺuG{ׇ!4Ѩ<`ƸWX5Q#Ձ[c8+0 ZBw QU<$l]~1lWj4v74궵dzݹs'V^s='R k׮Ewwwx]&H dkvC㉕kv)4].eew@ )'HڬbqejU]!]uUHX7YCʉU+6CMCl4|X,b_Hf t =jNI ´[]"R)tRhuomi%xACDHFrJL82*+"wuR^"dTŎ5:>Ij6Z֑kC+TTUl6dD磘6 4~f^eH"dl :ťa25l5UpI kh wdPט v%Fjh{pX# Qw3h@ +&M5lҍ.2+Ϻ}줁U-C-pфM6kŜ9sڶyz#\&;p(]uՑ I] L ZuFZIV1Ȫ޻ܹ&}jƂb)fL1vگy~>gօN(FGGqupg,5~iǶC(ʕ rAd"z"^ xo"b'?X~7H?{1Axޗ}f|rMؗX͎VmhݓV"TL>4ѹ;NsqaX"  T~xF7ow|G.ަeH$( a2~$D}X${nrVM~6Wa qXǀaPq倯iPK )pu8=^˪i64W$["JޯJ,.CR q饗UzY H$axzs НN%=VTw2?g׉?Y]^Kުv*> jl{>A[["ֶ}~b$5~uۺeLnFѶ㖍PЧ*%?o~_|\pױ+V`׽v]v-J0׿/eutu>j^վ[xuWհDX{-Nv"VYԱO\Pɰ!nLޤ;XoF.n.ua'۶mâEBdR9%:xkCX%bl^ݝd(TRa۰ԨXcP劝 G˫jU}htlCw)xOFղSqK U]/;d Ӄ;wF&)"8IC|NjP2#b /I ꄓD+ v<`ĉ+?a%tSꄍdNjvYfG~u*If9NIO׾jw^YD{4ؒ,c_T*CTB*/1hP(-{ qdq"PT*0661T*$k>jޛfvR]֪_jݶ&J0\0NK(i9.Q³ϢV:|s-O?l (d2Z>V K7zF闌Hw*Ɖ[B^FHJRh$H&H Ha]" ]tIrˈ G̓NBK$$PV}gjhUPdZNJv;+e}XuZHZgl~hhzq,cشiSH&]n >~ҪUJB AI/M\jU*콰h#,&q5 .kDCRFlV #/CBe@W/Fj`/!T /gٳg-Z3f}x2.;#=8FZ nڟ+ FGGW"o龾p@t:/s0XVC2Y*PVՅ\.0FC81p.[nkE(TYgWu2P $XH&5\ @H݋իWwBuݴH ᥿O~;ԅ9Hދ?+¥㪈*AzְpBlذgv[G-[2J30F$II#)q]=;}W;2B`n'9Jz$6Z* ño| K.J%?w%z?_A* _7h;jkNw:7]I"xL1Iw|E\&pVЈF)QU(su8C#mQWIKw%1ZJ4޵^I]ۚR4eYqؾ};?xGH$DH0IW Ju3G6秉dƻȱG }֍OQiju{T®"D7 V1JjmH8&χkpDj&Tu4$fgD $HqKB^G㥾_۴8J$FW&KІpra=;%c.O%>D˯^)=c8U* oރދ=s0sEKc+FRjn%vӸ'xfI'{_Ǟ<*Qdg&1V#,,IW9gJl6 85񷮎ADuV,_oy[v܉{\.2 ,:PZW.Ejᚱ;mFHE؏tPԄ4PIx}} h_%b>H$:y+{|P(D^]uJ= 
leZŦMpBŬY&lhd,WH 'KT u*j[7n}.~13fkxNIxW}(lmGmqP322={`ll,il.ifUBGMuT&FJTUR`k;ӆ[vg? e4,:8vNk#j<# |FXT6`~tJlUX͐s͂`d;M;SfaGvRΝ;` o8 ƍtR̞=;@d\6V5B:[bzI5We]IVuj5FSI0N(uVFcM"D*qJNӉ}MD(!E`|w\YtFǝŋSNr?~ӟbΝ3gNdohUrJ;}DڃdUlkup.mȎN}9&i޷HzU~U}[_b|gL! Bx#H PNH9yp$ J"DQ)RpCb86챙a3]V_w=-ߗַ[N:G\Bݗ.]&,A3T{;ON\+R:=goTi0D]&L'OYQlGD*/ S2(ԓ"vH9I4W &3Oɠ҅99pʃ29ZsDשR+z9gO6ˀrdtS'CW^: 4&r ;Ͳ}zi\&^+xP@l _髠-dcv#wqn n('''Q\`2~v6_&s>n=)wG *dqםR\XXH` _^b @o?nâb~Y>G~4#e+^a9pQ`޻#y2Km+ʅE2d5h"V`ksu*GGG L N:UڗʞPPV*0(I}l/i#u@OǑ/C`)#^ ʈnrJU45̈́ԏ^$bbYXJ{M4Y:>GGGbjj x;V,syrCޚĽu?__qh969ױP8r@6i؛C-T>ʰL.--%rqqv^/Eh8>ljQI\#+,UPT1+T/iee%Pr62;e?h`uT`xTTjt"kTr\]]-L`YP*G.dkz4^e0ٳZʂ&xhMWVHL@MADŽװ^.t,ȼP^s N*J#OuqZdXQW)!?w:m>Ǵ^XߢV ,qqBYET6ksp((PJ- :yٶd&F:^/ۢu}&v`'Pi @Y4R?{`2*ȫLD6:;yFzYkKKX(ʔsj.Xo-}666 +S'''j2$ jZս*#P3 R-\9W DgJXOOt{94V_2 8}4fggn -P,{Ǜ}JLj~fT1`xn/?\MT%FTe|TlpHwS %KV o))z֫uD:嘳-rQn??c=w][/K{Q_o=Xp,4J Eeޝ]ȁ:8"0`1^Ȣru3 p8q"η-xGJ뮀6$vrii 'N>@^ǡCR?{l,}[}\Qvp'䒃_Mw$naO\R-JW`~׮tn\OJWfYJKҍ=u%/+>Gƽe0hoL90Β:)N@s@ysc;?"4g]vǏkWVVE[e;k򗿌 .Tb֘謖/`a qoKzA9d@ֺbTEIy6YFZV@ Sg {{X_2EAQYʮ҅AʶpzXYYǏIr8?S}e 4?JA3hpjWW{JWfSkZҧ Θ9l(%*,ND`7ǺҥKC*:e*NN'ٿnmh,ϵ]P\hm^W9ԫ=-=s V W>/z+M{p ݙgϢhŋP]o455@ry^)jNZrq:fggwl4iz5*)2Js4,oМ7JT t6*tn6p42 ~fsssi)Kd_zN#MY׳f\`k*Ǣhl\b!bUzRg:-P]pQGirr')>.szCS˜:JzjgΜ|ӟFjJ^ETqLMMcnO177Ĵ6Up{n;w ziQfVr'uPd8EK{_~$I^֑?O}Sx3Jŕ- \"VL4`4!Yv(0*w])S &] :U6 gAQŗ3?,*j[XmKexM>?15_ΐ:畷m|soou @?ϭUs:~rjs^8̅ķ+e׹|j[N>SN_]]Źslus;̣K/C0G,q4w3r:.TRҝmP=MPSNam->& řA]yGzxrr3"Ο?SNazz333AJ Y6.h`C 766"&fPw&Oz^KDМpD4fg=4y-v/O~hs +9O_K&L%A%ǐ&P;(Kqpʈ`IY/PvNW7G2c{x_YQXَى; fzzB>iX^/\qZ SSSOIwpSU8َ5Eʊ`0q١kA (B$,zɓ'$,//W^]wݵ-X@o_O+sDϢT2KF4=)pJ%"|߹ǏrP^ FWNH9|bz^:8pRXAΝ;/?1^z6Vxb!1==Vy֜7IAz b)#651S 7J2./oOk5dnSOa߾}hZ^bQoA`P8 i*K`MmjjK.gi>k4 au-44BTr*>+b }8A<};?k/k~`zzF(fYcVWW199d[^qnHZgޝ=e5G$9G]T> Jvy`_)3nOٶjvs=9'Q#b/ȣ;*])\xʹ^iqLkkk|#xG (/C2Ы Xb<я~}Hfߏ`zzO~cǎܹs|;wƑ#GR.l @T8Zf-9 $Evy=R|Ieؗ@q5Te=քInXngud63wG8뜱װYJ͵,_GWw* AދFD鞫c5rO x z)`vv6wݴMDž H)wC EEgu`) elh>;a =l[Ʋ6E)e6ί^^)kX^LLH2튼 #[и Ǔ?Aݻ]/" CaZ I033)\ND eACB&Odtk͋݃%1U<,3T*^]P/wnhL#gjY9\ϯm"0#HVЭ̙cʾWЭse~G @^VM0: Ur{zÜ?} 2l2$<ب(Nj:]uH}AD+;p>䈓2ZTFәV&e?؊V tFqPaEsܘ+Cϲ>t'˅ pxѣx'qܹ0QvttfE:'WO̭oKG N0={sCs 2Rʾ)>Sv (2S@ظJRX<5{dbuFb@g ~UIk$0JFi_P]]PF8DŁȔw;y?[睧 4# us EE K)ii`(0+++qy8q"|Ι3gpQ4MT*:u ǎktt4mE|là׍+#-r2Aj<}C/Q}"q8zuZkPvбVֽZX|{^& W)๔V9)Q~(ON3Džm =m&Y QGFFD4ዣ\Ƶm؁A+4t~ib',QM44jpxȌD ٕACj8Z< $$L*#ը z B  t8|UtVFЈ׫<[ e9l =6.ŕJ|Vմu1` )IȻaKe\2FoH5}<3H qT!fd)$xpWtہMCY:a;M 8sLȚnmWtG+=/s̰22#[3@r4"Ot'7e)j XJ}?2ߏGy?|w Bل;𶷽p^a9QVfD+~`O#LkغuּN jCe&H'WmV%J ۷OU:'-9=D3)>.;en!@`ȑƝS<< p:h^l_Z,ryҎ v:\tu9P6ŋػwoP*EItP!r":b^MC-?76r=ztF6W(aX:sۿE̞=RRbONt̠F|3>!DF [0w/@Q"RB{}݇W^y%z߲r-;Hdu7k&T@@ݚF@&{Nv{9 <W IDATikp<սR GVA|EAʈ* {Μ9T@vVkkkX^^Ṇ@PQ/PT4q/pov7h}umh @Ee(Yg1Sc|2yߩ.f9;|u|^k'RQ8)/kkkxWo~;٭~}^+Ǐۗ G2"Զ)G $zQqwӣ~$;waϞ=4ːLA姫+zp's'[r3tTsI<Z}ۀv9J}}l*N"ܳQF3!P`_S{p~aS=9;]kZIѩ2L^C&YW2quu5:8Џ$Pd4o ԍ^ʘ>yZM R6*Abɾ*~ 9JxH9#mke}{Nn=x{ELutW'I$rT܊`+tъȹxѦ9#r@@I?Ȉo#TmM63%׋N.ý;njp㈏?{mm-\z@Lum\+U7{^0kJ {-;c^:^Zg kyByDTH֍\S0I\1* xiH>hSC̿*ѠF\!n*2vV +DR)@ :kO>U<H\ s 38d 4̤́`0(yȰ(lzi!evee%mz:;N苂XWe4S5RJUl/QkBn.9}2T- 666_%8p5]pر bQ珀27j5m'W(%2'0|:=Evmenn ӷgGGGqmayycccY@ l٪ H#pk{_o(OIvO=,b~4@*9+DV =yѠ;PpN&=ajRVOn||zTH"XulϛTcޙۢ:;B@1!IȾgBߞɔ+9gɮu@髱!P@9*9OYp0!`UgB se .Չpg9Q ɼ[m/b̲+Asx}}=-R,rf{?rs>wTyu*G>:7pNt|_?C]6Y˫{ NǏ}N 9\qg OW9 9)Cs_uDkD8`P8(97jHۢTTѳ6iT028({ Rڨ2 yz-=2#P9h@!_*z_U9ƤV-oy .]B_n79*vލݑuAY?W+TV(/^_NъpokIi^ vPA"s6222Tn~jH#`ϺhQvye jPB#nl?CVZ;UTRPnz+4ZT]qέh##C 2:dʃ_x0R`rrrHp9ҝY.ҢLY,j{d9.#0Ay=} AC>,+IXf!)N~]d+-ֈ%W"ҝt/o*1r pGvRBݑW69N8QR=sdx׻ޅUBhRrq㋡oOA!nP/g٢ `HWg7Ք WO4Zsz s=# :~nhTU1uE`"4h]h}j5wdiB,3zc'R <@oT.йz[^zNT~P /fn喂=QƺSFJ3^$<+c }7zYz8 
(=6)|BO?C.:XwQډʮ2$/ ׬ ZJrDWb&(T,з:eZ]RBYp (/ᥗ^J Ӵg!`yg+) k8+TCj׫̺u}Z>Rf2UR*[z ۨ S"?eGۭ55@~duV!b4KCޚ.FD?|3&wOxE uA@1/?wJyԱSD9j )|z=+8yd3N9"RYIٳ/^ 2^|EwFMN*ֈiK_?E$7uhfyS4~7Or"#01w }pnpNDGM}c dts~c7:drLOǙ,u V.Y("ԙxegggq<ӸpB@DxH赑E|@ўSؗlUxKzQDuQyS~aT uOΈX gP^{ ۬l}θh]{7d`&y >3gJ/bC\54T"Pc)_WWWqwa^%S 򊔵3VJ%C*g ǖ}iݢ:s<˗ʂ]Ny,a: {B.tgr}mrݢL)CL:e`uQhZ1ʹ3OC:dU3o{#_F܌J^-t|ܩ"0; 0@X뤈0P:icv1ћ(3XhWՕz6}ZZ։~yfn=q5j$`^-s {y<#oݘo*;v5TTdP&(;\cBAQZ-0b,4\sH÷tH\ ,!|!dT4M=hmI$u:UpR FsO#:::nyee%O<_4=xk:c 5r3Qs'V__(<Pp;)Յ;e)O-"(yP.c4#N4q>Uu5iZ"߽{7LNNQ޿ $9WƉRn~KdrvQyJV(;±fNP999Y#r MۥEtGC^K5X<9 bb`#cƾW>U}GJwkʊUUz81F (^|E>}t hGtnp(SjϨHhXdF;3Kd;*8{,;$# P=99zZ z,IsWVV:*Wr W'I]ʇ〜M^ʊ7 PV*:tKKK8vVVV ޕ2&;F(iX#õyzv*f4NyddFsssINw)ջt4߀ޛ@RMYqs+Z \:՘yEP*XeB"@yDZg4MuT*s=X[[??`ii)Y ;)eJS?qLi.CepEXe`A//mo{[mWs<̨tƀ-0 U$.Rǂy9E5. j&62Zu-GcDzDu,4"X!UE]Tc?D}߮o¹E>p| mS7zonVII(LLL^llױ{n9st ϟ?Gbvv333ʹ$sE?ǎPP8̱頟y72@NЎ({u[etn-vO}''h Oרwj0;;[8jax׻ޅ .`ccFf Pt¥Ou CԶ<7g8;cV8^Tzt({CnnvSZ5v:Yoll$l*ߏҁvmm-ysdܙXlW޼G\X7W(Gi'Z?~zjԅ'uMM=T>c\Ăk_8H}zUM%#kS c.EA_n/ܹsvutto}[k׮6*8vp>7Vp3zn̙3{-,@Ԉ7&w&NSFcWMRo+ mK/s9x &vq[ ` T"Dsg'u] D=9RT[Tϕm,R&''{noӧRވW&:L:Sk"}«Eu8+󯞾Ae jX%LP]eddo~C_49r bddXT*1awhT Pq!F xhˍ=`TGN'[Hcc+٣_zJuKcCo>$Xs8+P7C"@ e Zb~~333jṚ9"'\~(PRFBcY5FM` תRTcii \S=XdOItb Qq oq E˟ʵHC+} .PcsثZYYIγ.^R 2#y:2sN]7:N2>ɦ.ipi9s.l;*e"jHJ3Ǿl(TG ~VD_k+3r~q"{gΗMHqjA%25]C=΋کpҍ{||˅ƱŅ ?>ǔ vtҮC9_;@Yh ס_Wp |ӟH]}5vr/=:_wRhk9{vcǎԩSbQ>V+T066Z`zq̙p 58u1==Bw~z9@: @\Ps#C_D[:A q~ØɌxNֲP7zgI7f2 z?$rqj5LOO~НV9k=jT4J@ǂjyyyM+RZ*W RB<ͱ–:4"i 4tsr!u*ܹT~@Y qWŪrLmpe}FŢFԙwtTh)ʹq`0(::.^_fDR ZYY΄-~"ˇ%#SEgD3?C (tyTr:inx(o~{:V%0ɵd'ua`>tr/(3LG}{1lMd47999䌔`H4g<ԫ %lSDc ;$StJVS (:qy.T ;y q642@'I!P.)}{'ӈȁދ/Q6%X"Q@tV_e/uhF rav-}SmzRCYaJFVfffځ)żՉicfȩ0>>cǎ ڵ F#"@Q&A~ńIQct>PRQVoJ;ޏt*~!|(Ҍ IDATJ*kSul+egFGGSR772MV566Vv3? ϣ9]>?ek#̡:$*|؆U&kteELYeN(xz~F@蠋u/4 +AR޵/X'd%W?>2FQ(y9OcNU70u}!nMɱ1JN8U\A@Iy&4<ɓNU`2Qίbwl)DU\T4P':EٽI`+WTFϪT* =, z429`ms h' RPdxod^%PT#wφ ? EVU- +9uĹCdVա ˖;3ns:%/I: 5Lϗ`\\\b%5\E}TΙI3{]ř^3Xl;X$RH: pRS|01;^?SӧOO6'W= 077i4mdYh('| 휚za2rJ.qQ}$v]}PG$nrEyhXlccϟ߬O+̜ َfP{xʨ`~GNic~9"ܫuPBžrBAhٟ*x3Y5ZT@ES bcc#`4xʘkT*R1SCG,)F'NĀ-p >eME,òM:=>) OCI\?9/J}6sJ$*+ 8^ =oll (c/3zuǢG{ Wo9Ehl2 8X+e8wlG(󿴴/baaԑ೹"{~~sssiemG 0zIr0M,O)|k_K ّh>H/S919g+$U$ PCz$WG^%Jn&eWT%)E[QpDwE2&9JZ^5Rۍ!vR$ @bN?/ ;A+=m/c[l2GvX( rrca޽a2 RYm+h(Ι*r'G@7@?Q~G{TUygDl6N묬DsKCqoQ7r,A 1ϖS۲p=zWy6u]سg:tq/N4q*:' as-P,"TTqf:QŋxKe^ǣ>{sssiFtydko^ [^T=2WVVկ~ ՓeFTTz$ՙh᡿c'YFty%O*rE "P155r%~ʙb.gMS` l#gJe0wƨ|Jntt4)4) 2P܈ZCL4V7Wlq\nw{`3O|"yrMqd! 
q _3":,5HLX}@KIKw QC9SR?zpS8l{=aTh`ff&9QZ_b0^WHQ}e]r&K|O],z2b)H3킬~ 8Ί,c]WB:ŋپwߍ+833f9'۫ͶqJ$bQ8{,N>gb׮]AUdlp~s1Um1GFP:>:#OgZe{ieA )YacG(X3'er҅M_BH~9A cTG TY RQ$>Z=L#bjh(1F '*bҁR~ .V stL{-z/;5H4#g{(8rTqZeYVUsZ N+XSݧlouFb;x/T* 8:`%V!ʗ:SѼS V(43_>% ..ٵk|A޽;-nϛq aAM>33[nNJ}ӧOرcΚc%~9I:mMQ..QR>eQv8X]]1??JG ,يhu7SrRPWpT|B42 9L"c DpB{;^ڇLEɅϋ+PsxIsJOWoUv jNZ'B_RJUہ;P\X(G ʌ18NakʛedTW֕8{B(tU2Z`[Ɔ}DG@/'p{ %"qv%5#VIFT)K`:?SR!qˍ]Zp+N??_ѣ)}c޽N瞧`hllT@Qژo+?2us@ҏձT*)ti~z1SG!f211lv`0H!pmVcĉspEctDYIo(8b6YV w`"Np0̑I>UcM/!33&d`?xN3[9|p:;GBR%b3܀X;S`h#Fsjtܤ^QNY `PZn|PhS8k/m:FU"euw80u$_z_5n> n-ZqY#!ʤGs"ǫ4!yX*P`ly±g{VVVpÇC%frzzd<9o Io7jft:hi`|y\xh6CiZouZ)T&ciLj:LRf}V0Dw~D+D,:>NS (/^o}[xߎ+++iP|$34%uH#=*%׾{+(:~7 (0vwOΙ(0z>I8`LMgG"E>99BˁD>gUYR/WUxp7'u:ҙCW[]]o}݇|#x' I0&s&W[Iw:nH=Nh_,cLLL$嬹EbȁPvQ7X_U(;ľzs-nynXf;R'v'?t\(7t:Ǣ(3ߑAS| 7wXSV5%nciipDnTZᮻJ;UQXR^Xqtv)8tO?wGYj*?ɎSu >]uKuuȁ,e.8|0^z%?~|($Jd||DFFFҹӗl1]%y jL9h $r/v{ Be,NfK\)FM40e ̃`i24F7rW/&;N iHX)EqQeee~;4_ezz?8N:zI1([J KawK)zÈ (vF&RRIG{>怔?{ȟs1D Ry?'uZMeDU*pwM;C7W}Mk8tz衝 5^;Ns:E]h.*;T9^[[ l%odJ 2ϗ||ojUʞ+9(.kll z333t:t| +O,X#Wp2eXԦ"@Ψm~Q0^e6;;K{8_}(iݰPAU:tU`^O9 3: *5t 'Ͱ`\j:h@zI}\D!bO*#䆐uzE}l#|3GlnEVj>p4. US_ϕ~Q@N 0)3dȜ!O,& ]tKuAG2;eW2>9nD8g''')6@?A=ӧ~6o̩kZIT{RjC֠(Rpw'|$|mk>\{i{N>SNeáC…8ֿZ_َd}U/qnjZ%;v .] ˴!;WM]A:@wpJV[v%oB7Ŧy>MOGQirtpq"xDzءzd5dWdu|*hem.eXX͛b!S"H{6b[e鹒nhH^KKKX\\DM QC8Z͛~qfRCr Q\c\ȇ_円8{ jhk8EU̸:  )Xe95TpNТsйƾ0Ο[M^lӝwމO}SVh4xϞ=?T*|_ǓO>t2Zk0$%N :M|s4dۯg:ח=ceG.qFl-l+Z sssFZbvvw{ERSO=ga,v}Q}<D Y,OFb3=]J1y4-*Z صkWʝtvR >W";8ZV!uȑ#wM#oxEnt_'"@߫,y+X@~;NccGI߈_umY*(<;ڏդ7}`s/_N(+j?ؽ{7c cԎIu'b>TҎ:|aTg^Qn'r퍮ԺU (`~~w_CV͓=]7V6L4, T\^e0Ή˼/ wntt6DacWY;o|mUYuVB'#3v^`03&P QAH[hqÀp)9Ůyw9Gt]+}%FBtu%v9}ο猔ny_]EJg# :jxUzdIXd}vڅ{Z}ȑ#g΢V%z$盶>F tZ\\I$5ԏ:I?Umf|/;?sLK1U(+| *kqЊ+xp= .8v<9n7 GmsQ; $ۡ[pz+JʍT0GE+Kl=j=@{G'nNn4,pX7.ʢJ7bQel3VEPAYt8 SiVf^V6ǜ˔;cMo^[w<圉:f2FE;새Q^su# ^{v>esRy{E?k|Ug'LzG?}[n]j 2@Yt4:F,{k*y,e^/sss) \s{@]"B'b7u+ (=4N;I@Lt)Pj` (nYl6 yUh=YP@'U2\:P*8UjGM|3]Vd*S,.H (51BF,H /ܨCiۏX$:+Hg {1T˱jxԸyz׿[\8&b4 z {9:T4zxUFQ|a֗DIԏyQP-Y޼eh::?V އ2D[NHއӕ;H8NLL`nní:&n}Ngndd`/4|}h`nn.e||n7apa4ѯQl}m1u}U\R7r<,JP|xF8efE t 7p *@V +UF"NcІ lS7c7S|<<9WEg-=7TsӜUdrZ3FT)&g{#mcE /ʄ!wH:tDHXLV855:F.%#N`Ɉ;0 .UR+ IDATʢ<m {}37W"R`ox?}gQʗiXg$tZfgg155Jvܽ{wZP-,-J0t(r+ (&ohVfv?бR6ыCXn0%gwN"Dα*@qV-qpe&&&xPxV:@᫫jC!pT5@suSZ}P^ʦFDCtJmuoG3ľǟJBJm4@ɭxL0|ԙQQIϱm*<|0{ #|4E7P]t~&lXh#}I#ew~U*Ti,olqSʝUYt#yú͞2}gBwp4Ҟ 2drKKK ;0Lh4 {vh: IW?::N icv%؆\_DH6J]u:!*?*C|1֪\j^ *+^Wh;(r!Ji@XhT9"x0PDpr6Ġ0Q܋zeN?J(1\ۦ ',20C z$xD 9R_ wjHYkcĘrj"BV] J^s,#&Rr>U/HCFwg5Y8Zz!NjhT ACv"'E6Db%yJ摆fSSSf0-,,ҥKXZZJWWW V%t_ ~FxzNY 귝I_ }{<E$JU:ӤJj8vWAq:P= UYP:2zOH}z=RC ๧Cl\3r*zZ!C (U/V&zx{=z'N(`Di. ʔeʸu.΀*B0G:Nbzޥy޴mnll, FFF:hyE9u]mtR@,)ҭe2r0rSWa=&Z/Os]p{o嬲\I`$*x 6Ka)GcHV:PdU"G J`Q(9*MӅ.h*]_F@,Ys͙E8*;dȚ:bLh8GC )Nzv^ 4# ſU&֝2r>|MWpY&#pmqgK =Sh8T*t:N`t{4r9i,y &FwRoXJekOB Ոx&|s;>( VN.Z-p\Wd[ ZO=(F$5uJ#][s _NS mf;QG@:xuz_b+kFS𽱱 2WP@# `Et7!VWS'- CN|nu [$^qh~}}nwh^W~F@ Rߜz.; ʈ1# l6h4pQt(wTLe7r!`M}J%I,"qc} N+22pt,Ya]\kCO}m&Cg,Uu~Prvy~ƖȩpRE玒 G̩Դ .iDDrs:;'m`0(})(nUW( Υ/GQh{]7e`="@kP~?VHG=(` AU(7G[ 4PD+FFFR9UGً&u'BJ{cH< a0$@=n繣)U'A=ݐs&3?U_4f:VқFIQ2Q3 si@Tw{e,o^t{ R#hTs, F$?GOvV 6$(H"h{ Z-1;R}\F}NE.ө(#ۭ:UP*`H}]yRx8d 8_\ZIGQ0=fyyjFfvznC>t:i{$.Xl6^^^FNGB~6W;+ȉlpNEeAYM_gt0 \ :>7K|O?6nFOfۂp)wQ⊎E 7`@̞P):\Gܰ@N$g|mll$PI}㲮 XՐFTEeqT󺺺x.,EAHhϜq䋻V@˒NΪ9Ty(:I ݲs>Ou9/gQ_z[vR~+3I).|g V"U^Jaff&tN @+'$&AC.J%m# ˜\l)]Mld4< ls! 
SB\ݚnȩXZZŋqE|c;BÇT*ַg&U -(=H'5V :AVdͤ" $ WvTTУTe\xϟ (G+ S9ўΚDE!@w(폲^7q}>?H'l:ם'AT\ԉBp293337vYW.XS 9^nFe,+V8 7_V/y .2em(>\{EƉ2}ދw@1Jw𽲝g8k} Y+4]I:i;g7a\^^FRw `s@ϢRN\\\ę3gtGM磲LNNO׾W^yN'TPZwP*?(g$9Z#}t 6*+ Dt?C7fINs,*cR mQcx?U| 7 y?=qjНԕ W]%_|$ *5bgjp8ǔ9pC r,7/YsG`߹ԝh8rv:s%P줎,Η(&:, iG@<>ʟ]6~2}zLK9IEm:X.ԉX[[CiE(޽oV*uv8'cߎO|0W_EN"`eeuUu{o^77I4BB郦žx@m`yeQ22:33,kea]tJ-( e( ]PH{MGڄZgsgL&L&Ѐ)SkO<}&nݺPկbϞ=ZuM~٨&U_Zuy.ulj*L,IQeɣL&v0ߚ-K%;,5Yv5 s 6l$z<ˬּZ..e4ާ|K(鞢g2=N())ܗ#1NXDMܮs % w.s]ծ]Rڦr-ɎYlC6Pzq9?kcڪ$mmU۟R!ֱO&ETb jrfIM $0Zj ä*feY'?I7.5vww p(L5j(ym۶ᩧBIIIP> U[n%xo~vNIDR\5E𔨰ˮfn]}>{nlar d+r*%}^۪JNɛdIt}[^ҒTZ2I7:zlGtCєMt@z'rvdY4:,\{Y E$Aii)s NZz/{9᳿SeJ2|> )o] ˥H-l2k[U'.\pI| N`1q+qSj%66YJ*VYP δ:ddEEEH$th;f$균2bժU8Ss$LсƀVUU.㝦F%bѢEbxDz[FG" P7[ZZYEb z$J4CT4ִJReP̚:Bbۊ+;vd̪:`nXKFZv=:yIIU]j ˫} tP]'u,:xJ:5o>8bX~=lق7x#P:I=N*WO;j+})Υں\IW!H'[UEBu8g櫤_5炒ZӾH֝>K25i =-No\'AN8Ĕ)Sp>|nMּdIo}|:hyyy7o^9%Y%Nc8쳏n+**pwG455JJJd`2]u kj2i|U5%H%յ^s߫ϦU>luwtww#Ja(W[qJm'6]M5d}]mu8T>;`zP`.Uds)!jaJ{-%J޵y^Ux=o|hhhf|ޚU\itgNYT5ӈO:Zҫ-Y">RP45#a^uRwLWԩS{Fwww0Np\gUC}1RUVRbJS8PMV{/.˄0:YEޢc~&vׄNV-M9r$O~N8$ eg\l@UTrɗ O<>]kZABĀ AX$SM])oaږ{g]d@@-X֗c2[^%nM\m\ ^?}uZn;PRj`%\e/2{2ma? Pv䆑X,T*8ܯI?XB;vl$(nμNTэD"BD|%ɐ Ʉ%ɤ\*ΚXL'\Yk.viY U( :E! / Db!Sx<ǒ%K>&MI& oy睠$M5-jٙE(a;Sn}/`݁w$aɒ%b8<̚5 xׂDYAfM˗RI|U/"IJ5pUN+(h]abdI3,dP'ߩ𠁸yq.rjۺR K'v}QbLB伵'Oy|Đ ̓#W_ &^WQU*|aڿmŊ/**Z/H#GDyyyMQ$'|a蟡5d2hiiA* JZI"|l*62ύP[ ,kܹXta%lb;M*yH9EZ\XXuʊ gM&]iD] GKN,W8j_DIIIHPXҠbd*|~{^%$@/@'LR\ZU mX@h} PYIdhz:ʦZcz,;iӶ*~cmknAk 8]aĉ}HJV$eJR91.Ҩ߻VuT9րH$J̘1#T&{ǽ/))˿ѣq^ ->ѭANʦRbT7٘v CVɾ?dˌRUy:UM>^ϧ_ IDATYz5.455L%%%(,,̲TZ-WIFPWb^%ɭsN1e,_BI!wrB'7Қh'l?Yhf3QJRy?.25оRdQZS]\$Tro8ymmmAZ0$%,v,K&A%cCSXN I3<|#F@UUU^G+ՃYYF8Sj*A,ghVa#u)LhCtQX[[[>`J7lGf.%= L祓=m ;9˖-O? D" *:] 'TUp7/_ s0DeeqKݍd2@,qH #;{R(]ff pH lSrZo@N55NőH$j?غ}hgyL_$۪gGX7= ;P9DuDF'T'4j܁!F(tl44/YtRgzVo>HT``lC`0h4s=8l6\/;>#HV~4l;3tUuC65/چkݜa1%KPSSs鳊2R=VQMd](A2hV_E[^|Z+OX뻣u (NXF'I7n@CP.\UUUя~# WxB,^+,8pLUEQ'E 7&FY6T++ꚡiJ*sE"RTbb7&ojjVx>gO ;ݗkU`A]]***|tN3|798. 
ۃAvBA*R$:677!b,[ oZZZ_J>S%J4%v$*]:ˤ..˄;Gh9϶9KI8~]$jWHU8U&[/,Y3 >tYl^{fɵ)Ak_k۬ /:8#u*TMH"C*p|R5V'{ EGSފ!:VTN ZjY.UK*v?`6ѣGc9CP=fŋc۶mqUb*g.<}>@o>LM]*p!l4/UoտR6TςGh %7\]uttUUU8qɄH$jtE$le{m{x'0wB(=vX>d2曡~5IAХjjv^?[Uﮮ.,ZCIIV!O>$v??DXAsZIŌ$-.BR'x₶i5!t.|4 JUgjc -.A=jPm|^&l*k­80,oIqkv466ۇ,++êU_ FjUD+KeI$_HZ O&ʷ&0a.d2ipr;̒NS#q9IPj}.k!tgY#GJKKQTT}w0ϵY1QeYP|17e"TeECp=۩+tM] %}Ǐ+W&# 555x嗃TrT'-~/fl: TQ&;fU~b&cuY5e[s1Mȶh %Y.h>U.y]{*Is8eW7T :yg[!G(ff$0Qԇ`;Rm?Krtx-%C-YKYP SO=F!I]Iנ𮮮ٵ9,@u653h[J ,^8lÉB]]*++qfS(Yv<d%wݧ5]`}kVҁrt\R)̚5 _|;l0Lsss@&V>wʝLtoɑbL]*$.iv@cgҒH[OڧٺpU_g_nqϿsUc\2uuCKQgg'ZZZp¬eÐ$yyy+p}9>dU\z[_\K0zd=5*wwwC~~>fΜ9do> u֗{֗[:sɿ dV\iB<FQRR&(e'POkdr(7~8z~U>omF]BlrԩH$Yu?J]s" AkoK\ՍsdpW8{%%%Xf ꫯ"L\;٨ dv}n]]]2D0gg?ZZZ#,K~JN>;::uٹۃz5j)`/A2_h>kFiݖP*%zڮ9WDzcx%{Ze(`YbhuQ%ʪsYQm]ZBl۱E*Ji] f.>1cƌ#_<ƻYZ=vV@ak%~oN;1fr-w>MZ:VE*Wf]qVKrEO4Q]]kx8{ H7plق_z"%OE~Lteee1rsiӦ7n J>G}]}UTyd2hkk 3ޠ6lذz*^|E|_<q(..5\JWYeO* +L]նՄL n,a %ϡʖώ/J(_ ]r "%W놿gdR}^CPVTTছn?ClpƥjϑaY>dn1vs::V9I42ٍ̪/v>*]JmihnnƔ)SpBd#7" xbࡇ M”T pϞL%2E$#lpI4Ř1cvZl۶  zJ(8ugikkkR ,3D0nܸx \uUgyB)~l '\6Кu T2.Œ.K\c-;=ߩoɜU7})VRB-Bz@rͳd?(Ӭ4y]YQtsycĉKbL f9t~Ae3'dt#%]*[ӒNaX`k (fϞ۷#Jȝ&6vH"T>U]*8cǎł eeeXh}]`hllDWWWumgY:9rH X,ɓ'#J駟FCCDh("`;mM҂'"xג%K{H"#G V#!хYN/ Zi3ݤ`+Ӱ|jQ-Z9}ӺuuudP*׶cU$yYS>,\)o.KNS yB T\BW@$ &8 DU5ZE&Pvtt`ɸ꫇JŰ|rb1lݺ5Pށ^߰PLTJ4UYY N|A x;vK*L tS"X .IJeˎ1f͚D"=67GPLRhoo_ In}%?o$#"iEu 1I(7%???~,N)SRi uG'nk~ӟ@b:Jpꩧӟ;ɑ,(NCc&.;ݱoՔx}ZLLӥ&<Qö6kUAv&H$)@QÞSB h&k*"%W}W.>gOE5uYr]zQgEV2//%%%Ɗ+_ |@lڰ}nB*/e2;I&FdFQI֭lLZҪ&,ZB"80~xÞ={B޾DT馼1}!=b@ qYgk>hĈxGs9fʹ#UTtsP_FX!ZlBr%@ϥ]j}G<[@.4ZZZ/$"%K=+|M8p 0Jhkvcǎĉ(4'ƭ'َO}dع:|L0!0nXX,: x'w^d2٩D&߭¹瞋s Auuu7pcǢ)!L9ܬx$pJKxin ~*YʕOݢ(H(R3= ѪqurJ d.+XȊ)RXdBmO亽---\pLa|&L:GŃ>OT]th4n3Dee%~Y&jfqQfY_v.&L5kBdݫ;'{.F;B;^ mbXp~!ǏGKK PXX+l#sܱV*v,i!JB-wkZZAuR^rTO%Z~2s 0ǻqT:RM_DbJau[κƪ,Uv1%K6 %0pRqE }k.|[ H2iBL4 W\qEpnl0\D0j(chjjBGGG$ɸ Jie2|#0'!'HvnV{hmm OԶc;뮻ӦM'wFQQQ$M*PZ2inV+CU¬5 /SSQ4 Qj d0^W8(`I5glxEEE(** |n_=[9DрZ5ﲊ8,TvvvNOd (((8fذ'"HnR"'ag@hkjjshX`o㗿e0$t+P7 ,N}g'\N6aXf {1Y&vݨ 0mڴa?8ՅPjJB5kk%PגVTgmɪͥ @hmo%.koPi* x<бHMJ@I( 4kn禹B:iӘ{d"j׉'zf9%MN2|RYGH38;w fwցp͛uh43fO=ThPi{e#ΝT*{&P'/@X1J;.BnJhooH&@J V%JLmd5{W9rSPP(D׶8cX@&ҧ<ƦRe2WIB % UTQ%Y.{NCュ MMM矏cԄPRR 6bx 1̚5 f8O&=tKP-2+)'xo⧦oyI[ɣKW삒I(-_IJÀ'.`ĉG?{' 3hkkb4/U$C$\$%xkXbkJl z>M$NP6kHޗLTSR(m 6ϿQYYٳgUJ!s!)z1y>Ϫ.n$YGPU(,ԥ%՗SɕRPPI*$ 5Ky*++L&C>}@6!)k:II#л/TץERT6&$٣y#FPM{vJ7^9-P&}]TWWoF"87 0@,l2o`z*|V|vUt&2vrcjj -K-usm@۔w}HR+y-v%ƶA+6xԈfF9FG1+ռH+5~gdRɝ6ڶpri]3 r#A7ܴ\T&&L8/`{?0{l,]?s)?@}}=>_:^xqӟ͛7vQ=<<<<<(FGAcccV U&]DH6*T-RE\u]XEA=LP $QK!~`V)9(uF' "ߵ~ ٳgN=T\1bD>A'> ~;K.miL6 ոyf\z饃]\a"뺩Y.J޴uڊ,_'Oph"̚5 s֬Yooù ,ߓK/f̘>??SN {xxbӦM:ujzqH$xG|/2]dAԩSQZZ|;Y9մk`9 K:5ȄDȥtZ++Ve4Zjfh iZ}_J7&s%r+mt=ڊbɸ袋 Po `Æ 8s>,~{kݺu:( 曇}N::,̞=; _M/^N|_Ǘ%3xc0f\{غu+8".L&D@HfbXVj KTvȵͷt:D*i-/@DZO]u>0ඌ,ZԵBb,ڊF̝;so`ڵg>{{/uЇ}CUU ?>$yك3g3֭[C9Ŏ%sEyy{Nw 0ԩSse)X SLAss3y晀Te4 : a\7W@%bT72oQ>@hu\z%G4w<9iesĞJى &`޼y3ȦGq]wk_zk MWVVꍌ9Ѐ W~B<3g֭[|LW_}5?a7f 8IԊ+o>Ǘ%L>mݺCc`(..S3gd$IP_~Vy*%M6jyhNfT5IЛN\TS'TCz, Ih]h`zF"YN+#$:]vЄc`馛|wZ[[Յɛ/bhى?Okw}CqW͛qꩧ8g̘1җ磴;ve̟?oqg-[r ***v gqnfs=PZ }u(i "c*H$ Q?C%$Ѯm%l]D%t|Jz+[R};I"ۑJ؈YcƌW\qo"N@(}MCMM 6mڄ*D*իG+O<nVd2?.]?cUMvR˖-òe˂͛+Wbԩ3+? {]X,qkE&]ꫯň!k%b$V$? 
Uy%uD%iܯ&n’K]}` $ʹ$&&5b,J/SP&!l@TJVUUꫯFwwXBH;wĮ]=z4n\,~~M6aڴi6m>O೟,,XsH.\K,9眃bK򗿌h4o=8W^ҥKDpW^ UUUǬ\CH$H%H$Nka(**fTr|&f5;벃4#O$ iNjL&yռ2[©|\c.$\drĉXx{o?Y:$R1nWW݋d2&;Ad8:Get5E \2D*BEEO'iD"A]]Z[[s@Ydj燢Cؒɚ`št::Zz2 D;/ʕ+1a??۷oGuu5M7݄z,ZD>(կbڵشiӐ4ٳwygniǒdA-NI%uL!sJ677QӫVB]]݀ЀtB>L%F6P:VIRH; V+xB1طoN9w/f̘oBIII(8nVu]صkW=dG|뭷qF$ s8]nQ}%i 2$mذgqFV]n$I|+*]H3QQQ2koLo&LӑݻwqYD3g1EBFC*駟k9$8m۶Í^2I?GK.Yy$CvKKKq嗣!~Z𚅅=z{:JH0Гz_^h^Fhll;w,))1cdH$PVVH$;F"L0&L8syM]]]W9lڴ ֭u>d2y桥 زe /_[n_>q>+WĕW^<֛6<0馛p}Geu}5ڰj*<3ضmΝ?<.B}ضm[({hll /..F<?%8Rx;mlܸ1 @Oʜ#>o{{;>ⷿ-~>۱l2L2/ޓdq 9r.1,شi/n_`@E{{;.2`_ .. autosummary:: :toctree: generated affine_img_src nipy-0.3.0/doc/license.rst000066400000000000000000000012461210344137400154140ustar00rootroot00000000000000.. _nipy-license: ======================== NIPY License Information ======================== .. _nipy-software-license: Software License ----------------- Except where otherwise noted, all NIPY software is licensed under a `revised BSD license `_. See our :ref:`licensing` page for more details. .. _nipy-documentation-license: Documentation License --------------------- Except where otherwise noted, all NIPY documentation is licensed under a `Creative Commons Attribution 3.0 License `_. All code fragments in the documentation are licensed under our software license. nipy-0.3.0/doc/links_names.txt000066400000000000000000000146761210344137400163170ustar00rootroot00000000000000.. -*- rst -*- .. vim: ft=rst .. This rst format file contains commonly used link targets and name substitutions. It may be included in many files, therefore it should only contain link targets and name substitutions. Try grepping for "^\.\. _" to find plausible candidates for this list. .. NOTE: reST targets are __not_case_sensitive__, so only one target definition is needed for nipy, NIPY, Nipy, etc... .. nipy .. _nipy: http://nipy.org/nipy .. _`NIPY developer resources`: http://nipy.sourceforge.net/devel .. _`NIPY data packages`: http://nipy.sourceforge.net/data-packages .. _`nipy github`: http://github.com/nipy/nipy .. _`nipy trunk`: http://github.com/nipy/nipy .. _`nipy mailing list`: http://mail.scipy.org/mailman/listinfo/nipy-devel .. _nipy pypi: http://pypi.python.org/pypi/nipy .. _nipy issues: http://github.com/nipy/nipy/issues .. _`nipy bugs`: http://github.com/nipy/nipy/issues .. _`nipy sourceforge`: http://nipy.sourceforge.net/ .. _`nipy launchpad`: https://launchpad.net/nipy .. other related projects .. _nipy community: http://nipy.org .. _dipy: http://nipy.org/dipy .. _`dipy github`: http://github.com/Garyfallidis/dipy .. _nibabel: http://nipy.org/nibabel .. _`nibabel github`: http://github.com/nipy/nibabel .. _nipy development guidelines: http://nipy.org/devel .. _nipy buildbot: http://nipy.bic.berkeley.edu .. Documentation tools .. _graphviz: http://www.graphviz.org/ .. _Sphinx: http://sphinx.pocoo.org/ .. _`Sphinx reST`: http://sphinx.pocoo.org/rest.html .. _reST: http://docutils.sourceforge.net/rst.html .. _docutils: http://docutils.sourceforge.net .. Licenses .. _GPL: http://www.gnu.org/licenses/gpl.html .. _BSD: http://www.opensource.org/licenses/bsd-license.php .. _LGPL: http://www.gnu.org/copyleft/lesser.html .. _MIT License: http://www.opensource.org/licenses/mit-license.php .. Operating systems and distributions .. _Debian: http://www.debian.org .. _NeuroDebian: http://neuro.debian.net .. _Ubuntu: http://www.ubuntu.com .. _MacPorts: http://www.macports.org/ .. Working process .. _pynifti: http://niftilib.sourceforge.net/pynifti/ .. _nifticlibs: http://nifti.nimh.nih.gov .. 
.. _nifti: http://nifti.nimh.nih.gov
.. _sourceforge: http://nipy.sourceforge.net/
.. _github: http://github.com
.. _launchpad: https://launchpad.net/

.. Python packaging
.. _distutils: http://docs.python.org/2/library/distutils.html
.. _setuptools: http://pypi.python.org/pypi/setuptools
.. _distribute: http://pypi.python.org/pypi/distribute
.. _pip: http://pypi.python.org/pypi/pip
.. _old and new python versions: https://launchpad.net/%7Efkrull/+archive/deadsnakes
.. _pypi: http://pypi.python.org
.. _example pypi: http://packages.python.org/an_example_pypi_project/setuptools.html#intermezzo-pypirc-file-and-gpg
.. _github bdist_mpkg: https://github.com/matthew-brett/bdist_mpkg

.. Code support stuff
.. _pychecker: http://pychecker.sourceforge.net/
.. _pylint: http://www.logilab.org/project/pylint
.. _pyflakes: http://divmod.org/trac/wiki/DivmodPyflakes
.. _virtualenv: http://pypi.python.org/pypi/virtualenv
.. _git: http://git.or.cz/
.. _flymake: http://flymake.sourceforge.net/
.. _rope: http://rope.sourceforge.net/
.. _pymacs: http://pymacs.progiciels-bpi.ca/pymacs.html
.. _ropemacs: http://rope.sourceforge.net/ropemacs.html
.. _ECB: http://ecb.sourceforge.net/
.. _emacs_python_mode: http://www.emacswiki.org/cgi-bin/wiki/PythonMode
.. _doctest-mode: http://www.cis.upenn.edu/~edloper/projects/doctestmode/
.. _bazaar: http://bazaar-vcs.org/
.. _subversion: http://subversion.tigris.org/
.. _nose: http://somethingaboutorange.com/mrl/projects/nose
.. _`python coverage tester`: http://nedbatchelder.com/code/modules/coverage.html

.. Other python projects
.. _numpy: http://numpy.scipy.org
.. _scipy: http://www.scipy.org
.. _cython: http://www.cython.org/
.. _ipython: http://ipython.org
.. _`ipython manual`: http://ipython.org/ipython-doc/stable/index.html
.. _matplotlib: http://matplotlib.sourceforge.net
.. _ETS: http://code.enthought.com/projects/tool-suite.php
.. _`Enthought Tool Suite`: http://code.enthought.com/projects/tool-suite.php
.. _python: http://www.python.org
.. _mayavi: http://code.enthought.com/projects/mayavi/
.. _sympy: http://sympy.org
.. _nibabel: http://nipy.org/nibabel
.. _networkx: http://networkx.lanl.gov/
.. _pythonxy: http://www.pythonxy.com/
.. _python (x, y): http://www.pythonxy.com/
.. _EPD: http://www.enthought.com/products/epd.php
.. _EPD free: http://www.enthought.com/products/epd_free.php
.. _Anaconda CE: https://store.continuum.io/
.. _Unofficial Windows binaries: http://www.lfd.uci.edu/~gohlke/pythonlibs

.. Python imaging projects
.. _PyMVPA: http://www.pymvpa.org
.. _BrainVISA: http://brainvisa.info
.. _anatomist: http://brainvisa.info

.. Not so python imaging projects
.. _matlab: http://www.mathworks.com
.. _spm: http://www.fil.ion.ucl.ac.uk/spm
.. _eeglab: http://sccn.ucsd.edu/eeglab
.. _AFNI: http://afni.nimh.nih.gov/afni
.. _FSL: http://www.fmrib.ox.ac.uk/fsl
.. _FreeSurfer: http://surfer.nmr.mgh.harvard.edu
.. _voxbo: http://www.voxbo.org
.. _fmristat: http://www.math.mcgill.ca/keith/fmristat

.. Visualization
.. _vtk: http://www.vtk.org/

.. General software
.. _gcc: http://gcc.gnu.org
.. _xcode: http://developer.apple.com/TOOLS/xcode
.. _mingw: http://www.mingw.org
.. _cygwin: http://cygwin.com
.. _macports: http://www.macports.org/
.. _VTK: http://www.vtk.org/
.. _ITK: http://www.itk.org/
.. _swig: http://www.swig.org

.. Functional imaging labs
.. _`Brain Imaging Center`: http://bic.berkeley.edu/
.. _`functional imaging laboratory`: http://www.fil.ion.ucl.ac.uk
.. _FMRIB: http://www.fmrib.ox.ac.uk

.. Other organizations
.. _enthought:
.. _kitware: http://www.kitware.com

.. General information links
.. _`wikipedia FMRI`: http://en.wikipedia.org/wiki/Functional_magnetic_resonance_imaging
.. _`wikipedia PET`: http://en.wikipedia.org/wiki/Positron_emission_tomography
.. _ANALYZE: http://www.grahamwideman.com/gw/brain/analyze/formatdoc.htm
.. _NIfTI1: http://nifti.nimh.nih.gov/nifti-1/
.. _MINC: http://wiki.bic.mni.mcgill.ca/index.php/MINC

.. Mathematical methods
.. _`wikipedia ICA`: http://en.wikipedia.org/wiki/Independent_component_analysis
.. _`wikipedia PCA`: http://en.wikipedia.org/wiki/Principal_component_analysis

.. People
.. _Matthew Brett: https://matthew.dynevor.org
.. _Yaroslav O. Halchenko: http://www.onerussian.com
.. _Michael Hanke: http://apsy.gse.uni-magdeburg.de/hanke
.. _Gaël Varoquaux: http://gael-varoquaux.info/
.. _Keith Worsley: http://www.math.mcgill.ca/keith

nipy-0.3.0/doc/mission.rst

.. _nipy-mission:

===================
 What is NIPY for?
===================

.. include:: mission.txt

*The NIPY team*

nipy-0.3.0/doc/mission.txt

The purpose of NIPY is to make it easier to do better brain imaging research.
We believe that neuroscience ideas and analysis ideas develop together.  Good
ideas come from understanding; understanding comes from clarity, and clarity
must come from well-designed teaching materials and well-designed software.
The software must be designed as a natural extension of the underlying ideas.

We aim to build software that is:

* clearly written
* clearly explained
* a good fit for the underlying ideas
* a natural home for collaboration

We hope that, if we fail to do this, you will let us know.  We will try and
make it better.

nipy-0.3.0/doc/publications.rst

============
Publications
============

Peer-reviewed Publications
--------------------------

K. Jarrod Millman, M. Brett, `"Analysis of Functional Magnetic Resonance
Imaging in Python," `_ Computing in Science and Engineering, vol. 9, no. 3,
pp. 52-55, May/June, 2007.

Posters
-------

Taylor JE, Worsley K, Brett M, Cointepas Y, Hunter J, Millman KJ, Poline J-B,
Perez F. “BrainPy: an open source environment for the analysis and
visualization of human brain data.”  Meeting of the Organization for Human
Brain Mapping, 2005.

See the :ref:`BrainPy HBM abstract `.

nipy-0.3.0/doc/references/

nipy-0.3.0/doc/references/brainpy_abstract.rst

.. _brainpy-hbm-abstract:

============================
 BrainPy HBM abstract, 2005
============================

This is the abstract describing the BrainPy / NIPY project from the
`HBM2005 `_ conference.

BrainPy: an open source environment for the analysis and visualization of human brain data
===========================================================================================

Jonathan Taylor (1), Keith Worsley (2), Matthew Brett (3), Yann Cointepas (4),
John Hunter (5), Jarrod Millman (3), Jean-Baptiste Poline (4), Fernando Perez (6)

1. Dept. of Statistics, Stanford University, U.S.A.
2. Dept. of Mathematics and Statistics, McGill University, Canada
3. Department of Neuroscience, University of California, Berkeley, U.S.A.
4. Service Hospitalier Frédéric Joliot, France
5. Complex Systems Laboratory, University of Chicago, U.S.A.
6. Department of Applied Mathematics, University of Colorado at Boulder, U.S.A.

Objective
---------

What follows are the goals of BrainPy, a multi-center project to provide an
open source environment for the analysis and visualization of human brain
data built on top of python.  While the project is still in its initial
stages, packages for file I/O and script support, as well as single subject
fMRI and random effects group comparison models, are currently available.

Methods
-------

Scientific computing has evolved over the last two decades in two broad
directions.  One, there has been a movement to the use of high-level
interface languages that glue existing high-performance libraries into an
accessible, scripted, interactive environment, e.g. IDL and matlab.  Two,
there has been a shift to open algorithms and software, because this
development process leads to better code, and because it is more consistent
with the scientific method.

Results & Discussion
--------------------

The proposed environment includes the following:

* We intend to provide users with an open source environment which is
  interoperable with current packages such as SPM and AFNI, both at a file
  I/O level and, where possible, interactively (e.g. pymat -- calling
  matlab/SPM from python).
* Read/write/conversion support for all major imaging formats and packages
  (SPM/ANALYZE, :term:`FSL`, :term:`AFNI`, MINC, NIFTI, and :term:`VoxBo`).
* Low-level access to data through an interactive shell, which is important
  for developing new analysis methods, as well as high-level access through
  GUIs for specialized tasks using standard python tools.
* Visualization of results using pre-existing tools such as
  :term:`BrainVisa`, as well as support for development of new tools using
  VTK.
* Support for MATLAB style numeric packages (Numarray) and plotting
  (matplotlib_).
* Support for EEG analysis including EEG/MEG/fMRI fusion analysis.
* Support for spatio-temporal wavelet analysis (`PhiWave `_).

Conclusions
-----------

BrainPy is an open-source environment for the analysis and visualization of
neuroimaging data built on top of python.

.. include:: ../links_names.txt

nipy-0.3.0/doc/sphinxext/

nipy-0.3.0/doc/sphinxext/README.txt

===================
 Sphinx Extensions
===================

These are a few sphinx extensions we are using to build the nipy
documentation.  In this file we list where they each come from, since we
intend to always push back upstream any modifications or improvements we
make to them.

* From matplotlib:

  * inheritance_diagram.py

* From numpy:

  * numpy_ext

* From ipython:

  * ipython_console_highlighting
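As a rough illustrative sketch only (this fragment is not part of the README
or of the nipy build; the ``sphinxext`` path and the ``numpy_ext.numpydoc``
module name are assumptions based on the list above): extensions like these
are typically made importable and enabled from a Sphinx ``conf.py`` along the
following lines::

    # Hypothetical conf.py fragment enabling the bundled extensions.
    import os, sys

    # Make the bundled extension modules importable by Sphinx.
    sys.path.insert(0, os.path.abspath('sphinxext'))

    extensions = ['sphinx.ext.autodoc',            # standard Sphinx extensions
                  'sphinx.ext.autosummary',
                  'inheritance_diagram',           # from matplotlib (see above)
                  'numpy_ext.numpydoc',            # from numpy (module path assumed)
                  'ipython_console_highlighting']  # from ipython

In a real build the extension list and paths would be taken from the
project's own ``conf.py``.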
Example Makefile rule:: generate: ./ext/autosummary_generate.py -o source/generated source/*.rst """ import glob, re, inspect, os, optparse, pydoc from autosummary import import_by_name try: from phantom_import import import_phantom_module except ImportError: import_phantom_module = lambda x: x def main(): p = optparse.OptionParser(__doc__.strip()) p.add_option("-p", "--phantom", action="store", type="string", dest="phantom", default=None, help="Phantom import modules from a file") p.add_option("-o", "--output-dir", action="store", type="string", dest="output_dir", default=None, help=("Write all output files to the given directory (instead " "of writing them as specified in the autosummary:: " "directives)")) options, args = p.parse_args() if len(args) == 0: p.error("wrong number of arguments") if options.phantom and os.path.isfile(options.phantom): import_phantom_module(options.phantom) # read names = {} for name, loc in get_documented(args).items(): for (filename, sec_title, keyword, toctree) in loc: if toctree is not None: path = os.path.join(os.path.dirname(filename), toctree) names[name] = os.path.abspath(path) # write for name, path in sorted(names.items()): if options.output_dir is not None: path = options.output_dir if not os.path.isdir(path): os.makedirs(path) try: obj, name = import_by_name(name) except ImportError, e: print "Failed to import '%s': %s" % (name, e) continue fn = os.path.join(path, '%s.rst' % name) if os.path.exists(fn): # skip continue f = open(fn, 'w') try: f.write('%s\n%s\n\n' % (name, '='*len(name))) if inspect.isclass(obj): if issubclass(obj, Exception): f.write(format_modulemember(name, 'autoexception')) else: f.write(format_modulemember(name, 'autoclass')) elif inspect.ismodule(obj): f.write(format_modulemember(name, 'automodule')) elif inspect.ismethod(obj) or inspect.ismethoddescriptor(obj): f.write(format_classmember(name, 'automethod')) elif callable(obj): f.write(format_modulemember(name, 'autofunction')) elif hasattr(obj, '__get__'): f.write(format_classmember(name, 'autoattribute')) else: f.write(format_modulemember(name, 'autofunction')) finally: f.close() def format_modulemember(name, directive): parts = name.split('.') mod, name = '.'.join(parts[:-1]), parts[-1] return ".. currentmodule:: %s\n\n.. %s:: %s\n" % (mod, directive, name) def format_classmember(name, directive): parts = name.split('.') mod, name = '.'.join(parts[:-2]), '.'.join(parts[-2:]) return ".. currentmodule:: %s\n\n.. %s:: %s\n" % (mod, directive, name) def get_documented(filenames): """ Find out what items are documented in source/*.rst See `get_documented_in_lines`. """ documented = {} for filename in filenames: f = open(filename, 'r') lines = f.read().splitlines() documented.update(get_documented_in_lines(lines, filename=filename)) f.close() return documented def get_documented_in_docstring(name, module=None, filename=None): """ Find out what items are documented in the given object's docstring. See `get_documented_in_lines`. """ try: obj, real_name = import_by_name(name) lines = pydoc.getdoc(obj).splitlines() return get_documented_in_lines(lines, module=name, filename=filename) except AttributeError: pass except ImportError, e: print "Failed to import '%s': %s" % (name, e) return {} def get_documented_in_lines(lines, module=None, filename=None): """ Find out what items are documented in the given lines Returns ------- documented : dict of list of (filename, title, keyword, toctree) Dictionary whose keys are documented names of objects. 
The value is a list of locations where the object was documented. Each location is a tuple of filename, the current section title, the name of the directive, and the value of the :toctree: argument (if present) of the directive. """ title_underline_re = re.compile("^[-=*_^#]{3,}\s*$") autodoc_re = re.compile(".. auto(function|method|attribute|class|exception|module)::\s*([A-Za-z0-9_.]+)\s*$") autosummary_re = re.compile(r'^\.\.\s+autosummary::\s*') module_re = re.compile(r'^\.\.\s+(current)?module::\s*([a-zA-Z0-9_.]+)\s*$') autosummary_item_re = re.compile(r'^\s+([_a-zA-Z][a-zA-Z0-9_.]*)\s*.*?') toctree_arg_re = re.compile(r'^\s+:toctree:\s*(.*?)\s*$') documented = {} current_title = [] last_line = None toctree = None current_module = module in_autosummary = False for line in lines: try: if in_autosummary: m = toctree_arg_re.match(line) if m: toctree = m.group(1) continue if line.strip().startswith(':'): continue # skip options m = autosummary_item_re.match(line) if m: name = m.group(1).strip() if current_module and not name.startswith(current_module + '.'): name = "%s.%s" % (current_module, name) documented.setdefault(name, []).append( (filename, current_title, 'autosummary', toctree)) continue if line.strip() == '': continue in_autosummary = False m = autosummary_re.match(line) if m: in_autosummary = True continue m = autodoc_re.search(line) if m: name = m.group(2).strip() if m.group(1) == "module": current_module = name documented.update(get_documented_in_docstring( name, filename=filename)) elif current_module and not name.startswith(current_module+'.'): name = "%s.%s" % (current_module, name) documented.setdefault(name, []).append( (filename, current_title, "auto" + m.group(1), None)) continue m = title_underline_re.match(line) if m and last_line: current_title = last_line.strip() continue m = module_re.match(line) if m: current_module = m.group(2) continue finally: last_line = line return documented if __name__ == "__main__": main() nipy-0.3.0/doc/sphinxext/inheritance_diagram.py000066400000000000000000000327401210344137400216240ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Defines a docutils directive for inserting inheritance diagrams. Provide the directive with one or more classes or modules (separated by whitespace). For modules, all of the classes in that module will be used. Example:: Given the following classes: class A: pass class B(A): pass class C(A): pass class D(B, C): pass class E(B): pass .. inheritance-diagram: D E Produces a graph like the following: A / \ B C / \ / E D The graph is inserted as a PNG+image map into HTML and a PDF in LaTeX. """ import inspect import os import re import subprocess try: from hashlib import md5 except ImportError: from md5 import md5 from docutils.nodes import Body, Element from docutils.parsers.rst import directives from sphinx.roles import xfileref_role def my_import(name): """Module importer - taken from the python documentation. This function allows importing names with dots in them.""" mod = __import__(name) components = name.split('.') for comp in components[1:]: mod = getattr(mod, comp) return mod class DotException(Exception): pass class InheritanceGraph(object): """ Given a list of classes, determines the set of classes that they inherit from all the way to the root "object", and then is able to generate a graphviz dot graph from them. 
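The dot source for the graph is produced by the `generate_dot` method; `run_dot` pipes that source through the external 'dot' program and returns whatever 'dot' writes to stdout.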
""" def __init__(self, class_names, show_builtins=False): """ *class_names* is a list of child classes to show bases from. If *show_builtins* is True, then Python builtins will be shown in the graph. """ self.class_names = class_names self.classes = self._import_classes(class_names) self.all_classes = self._all_classes(self.classes) if len(self.all_classes) == 0: raise ValueError("No classes found for inheritance diagram") self.show_builtins = show_builtins py_sig_re = re.compile(r'''^([\w.]*\.)? # class names (\w+) \s* $ # optionally arguments ''', re.VERBOSE) def _import_class_or_module(self, name): """ Import a class using its fully-qualified *name*. """ try: path, base = self.py_sig_re.match(name).groups() except: raise ValueError( "Invalid class or module '%s' specified for inheritance diagram" % name) fullname = (path or '') + base path = (path and path.rstrip('.')) if not path: path = base try: module = __import__(path, None, None, []) # We must do an import of the fully qualified name. Otherwise if a # subpackage 'a.b' is requested where 'import a' does NOT provide # 'a.b' automatically, then 'a.b' will not be found below. This # second call will force the equivalent of 'import a.b' to happen # after the top-level import above. my_import(fullname) except ImportError: raise ValueError( "Could not import class or module '%s' specified for inheritance diagram" % name) try: todoc = module for comp in fullname.split('.')[1:]: todoc = getattr(todoc, comp) except AttributeError: raise ValueError( "Could not find class or module '%s' specified for inheritance diagram" % name) # If a class, just return it if inspect.isclass(todoc): return [todoc] elif inspect.ismodule(todoc): classes = [] for cls in todoc.__dict__.values(): if inspect.isclass(cls) and cls.__module__ == todoc.__name__: classes.append(cls) return classes print 'todoc?',todoc raise ValueError( "'%s' does not resolve to a class or module" % name) def _import_classes(self, class_names): """ Import a list of classes. """ classes = [] for name in class_names: classes.extend(self._import_class_or_module(name)) return classes def _all_classes(self, classes): """ Return a list of all classes that are ancestors of *classes*. """ all_classes = {} def recurse(cls): all_classes[cls] = None for c in cls.__bases__: if c not in all_classes: recurse(c) for cls in classes: recurse(cls) return all_classes.keys() def class_name(self, cls, parts=0): """ Given a class object, return a fully-qualified name. This works for things I've tested in matplotlib so far, but may not be completely general. """ module = cls.__module__ if module == '__builtin__': fullname = cls.__name__ else: fullname = "%s.%s" % (module, cls.__name__) if parts == 0: return fullname name_parts = fullname.split('.') return '.'.join(name_parts[-parts:]) def get_all_class_names(self): """ Get all of the class names involved in the graph. 
""" return [self.class_name(x) for x in self.all_classes] # These are the default options for graphviz default_graph_options = { "rankdir": "LR", "size": '"8.0, 12.0"' } default_node_options = { "shape": "box", "fontsize": 10, "height": 0.25, "fontname": "Vera Sans, DejaVu Sans, Liberation Sans, Arial, Helvetica, sans", "style": '"setlinewidth(0.5)"' } default_edge_options = { "arrowsize": 0.5, "style": '"setlinewidth(0.5)"' } def _format_node_options(self, options): return ','.join(["%s=%s" % x for x in options.items()]) def _format_graph_options(self, options): return ''.join(["%s=%s;\n" % x for x in options.items()]) def generate_dot(self, fd, name, parts=0, urls={}, graph_options={}, node_options={}, edge_options={}): """ Generate a graphviz dot graph from the classes that were passed in to __init__. *fd* is a Python file-like object to write to. *name* is the name of the graph *urls* is a dictionary mapping class names to http urls *graph_options*, *node_options*, *edge_options* are dictionaries containing key/value pairs to pass on as graphviz properties. """ g_options = self.default_graph_options.copy() g_options.update(graph_options) n_options = self.default_node_options.copy() n_options.update(node_options) e_options = self.default_edge_options.copy() e_options.update(edge_options) fd.write('digraph %s {\n' % name) fd.write(self._format_graph_options(g_options)) for cls in self.all_classes: if not self.show_builtins and cls in __builtins__.values(): continue name = self.class_name(cls, parts) # Write the node this_node_options = n_options.copy() url = urls.get(self.class_name(cls)) if url is not None: this_node_options['URL'] = '"%s"' % url fd.write(' "%s" [%s];\n' % (name, self._format_node_options(this_node_options))) # Write the edges for base in cls.__bases__: if not self.show_builtins and base in __builtins__.values(): continue base_name = self.class_name(base, parts) fd.write(' "%s" -> "%s" [%s];\n' % (base_name, name, self._format_node_options(e_options))) fd.write('}\n') def run_dot(self, args, name, parts=0, urls={}, graph_options={}, node_options={}, edge_options={}): """ Run graphviz 'dot' over this graph, returning whatever 'dot' writes to stdout. *args* will be passed along as commandline arguments. *name* is the name of the graph *urls* is a dictionary mapping class names to http urls Raises DotException for any of the many os and installation-related errors that may occur. """ try: dot = subprocess.Popen(['dot'] + list(args), stdin=subprocess.PIPE, stdout=subprocess.PIPE, close_fds=True) except OSError: raise DotException("Could not execute 'dot'. Are you sure you have 'graphviz' installed?") except ValueError: raise DotException("'dot' called with invalid arguments") except: raise DotException("Unexpected error calling 'dot'") self.generate_dot(dot.stdin, name, parts, urls, graph_options, node_options, edge_options) dot.stdin.close() result = dot.stdout.read() returncode = dot.wait() if returncode != 0: raise DotException("'dot' returned the errorcode %d" % returncode) return result class inheritance_diagram(Body, Element): """ A docutils node to use as a placeholder for the inheritance diagram. """ pass def inheritance_diagram_directive(name, arguments, options, content, lineno, content_offset, block_text, state, state_machine): """ Run when the inheritance_diagram directive is first encountered. 
""" node = inheritance_diagram() class_names = arguments # Create a graph starting with the list of classes graph = InheritanceGraph(class_names) # Create xref nodes for each target of the graph's image map and # add them to the doc tree so that Sphinx can resolve the # references to real URLs later. These nodes will eventually be # removed from the doctree after we're done with them. for name in graph.get_all_class_names(): refnodes, x = xfileref_role( 'class', ':class:`%s`' % name, name, 0, state) node.extend(refnodes) # Store the graph object so we can use it to generate the # dot file later node['graph'] = graph # Store the original content for use as a hash node['parts'] = options.get('parts', 0) node['content'] = " ".join(class_names) return [node] def get_graph_hash(node): return md5(node['content'] + str(node['parts'])).hexdigest()[-10:] def html_output_graph(self, node): """ Output the graph for HTML. This will insert a PNG with clickable image map. """ graph = node['graph'] parts = node['parts'] graph_hash = get_graph_hash(node) name = "inheritance%s" % graph_hash path = '_images' dest_path = os.path.join(setup.app.builder.outdir, path) if not os.path.exists(dest_path): os.makedirs(dest_path) png_path = os.path.join(dest_path, name + ".png") path = setup.app.builder.imgpath # Create a mapping from fully-qualified class names to URLs. urls = {} for child in node: if child.get('refuri') is not None: urls[child['reftitle']] = child.get('refuri') elif child.get('refid') is not None: urls[child['reftitle']] = '#' + child.get('refid') # These arguments to dot will save a PNG file to disk and write # an HTML image map to stdout. image_map = graph.run_dot(['-Tpng', '-o%s' % png_path, '-Tcmapx'], name, parts, urls) return ('%s' % (path, name, name, image_map)) def latex_output_graph(self, node): """ Output the graph for LaTeX. This will insert a PDF. """ graph = node['graph'] parts = node['parts'] graph_hash = get_graph_hash(node) name = "inheritance%s" % graph_hash dest_path = os.path.abspath(os.path.join(setup.app.builder.outdir, '_images')) if not os.path.exists(dest_path): os.makedirs(dest_path) pdf_path = os.path.abspath(os.path.join(dest_path, name + ".pdf")) graph.run_dot(['-Tpdf', '-o%s' % pdf_path], name, parts, graph_options={'size': '"6.0,6.0"'}) return '\n\\includegraphics{%s}\n\n' % pdf_path def visit_inheritance_diagram(inner_func): """ This is just a wrapper around html/latex_output_graph to make it easier to handle errors and insert warnings. 
""" def visitor(self, node): try: content = inner_func(self, node) except DotException, e: # Insert the exception as a warning in the document warning = self.document.reporter.warning(str(e), line=node.line) warning.parent = node node.children = [warning] else: source = self.document.attributes['source'] self.body.append(content) node.children = [] return visitor def do_nothing(self, node): pass def setup(app): setup.app = app setup.confdir = app.confdir app.add_node( inheritance_diagram, latex=(visit_inheritance_diagram(latex_output_graph), do_nothing), html=(visit_inheritance_diagram(html_output_graph), do_nothing)) app.add_directive( 'inheritance-diagram', inheritance_diagram_directive, False, (1, 100, 0), parts = directives.nonnegative_int) nipy-0.3.0/doc/sphinxext/ipython_console_highlighting.py000066400000000000000000000067371210344137400236170ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """reST directive for syntax-highlighting ipython interactive sessions. """ #----------------------------------------------------------------------------- # Needed modules # Standard library import re # Third party from pygments.lexer import Lexer, do_insertions from pygments.lexers.agile import (PythonConsoleLexer, PythonLexer, PythonTracebackLexer) from pygments.token import Comment, Generic from sphinx import highlighting #----------------------------------------------------------------------------- # Global constants line_re = re.compile('.*?\n') #----------------------------------------------------------------------------- # Code begins - classes and functions class IPythonConsoleLexer(Lexer): """ For IPython console output or doctests, such as: .. sourcecode:: ipython In [1]: a = 'foo' In [2]: a Out[2]: 'foo' In [3]: print a foo In [4]: 1 / 0 Notes: - Tracebacks are not currently supported. - It assumes the default IPython prompts, not customized ones. 
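- An instance of this lexer is registered with Sphinx's highlighting table under the name ``ipython`` at the bottom of this module, which is what makes ``.. sourcecode:: ipython`` blocks highlight as above.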
""" name = 'IPython console session' aliases = ['ipython'] mimetypes = ['text/x-ipython-console'] input_prompt = re.compile("(In \[[0-9]+\]: )|( \.\.\.+:)") output_prompt = re.compile("(Out\[[0-9]+\]: )|( \.\.\.+:)") continue_prompt = re.compile(" \.\.\.+:") tb_start = re.compile("\-+") def get_tokens_unprocessed(self, text): pylexer = PythonLexer(**self.options) tblexer = PythonTracebackLexer(**self.options) curcode = '' insertions = [] for match in line_re.finditer(text): line = match.group() input_prompt = self.input_prompt.match(line) continue_prompt = self.continue_prompt.match(line.rstrip()) output_prompt = self.output_prompt.match(line) if line.startswith("#"): insertions.append((len(curcode), [(0, Comment, line)])) elif input_prompt is not None: insertions.append((len(curcode), [(0, Generic.Prompt, input_prompt.group())])) curcode += line[input_prompt.end():] elif continue_prompt is not None: insertions.append((len(curcode), [(0, Generic.Prompt, continue_prompt.group())])) curcode += line[continue_prompt.end():] elif output_prompt is not None: insertions.append((len(curcode), [(0, Generic.Output, output_prompt.group())])) curcode += line[output_prompt.end():] else: if curcode: for item in do_insertions(insertions, pylexer.get_tokens_unprocessed(curcode)): yield item curcode = '' insertions = [] yield match.start(), Generic.Output, line if curcode: for item in do_insertions(insertions, pylexer.get_tokens_unprocessed(curcode)): yield item #----------------------------------------------------------------------------- # Register the extension as a valid pygments lexer highlighting.lexers['ipython'] = IPythonConsoleLexer() nipy-0.3.0/doc/sphinxext/numpy_ext/000077500000000000000000000000001210344137400173175ustar00rootroot00000000000000nipy-0.3.0/doc/sphinxext/numpy_ext/__init__.py000066400000000000000000000000001210344137400214160ustar00rootroot00000000000000nipy-0.3.0/doc/sphinxext/numpy_ext/docscrape.py000066400000000000000000000361011210344137400216350ustar00rootroot00000000000000"""Extract reference documentation from the NumPy source tree. """ import inspect import textwrap import re import pydoc from StringIO import StringIO from warnings import warn class Reader(object): """A line-based string reader. """ def __init__(self, data): """ Parameters ---------- data : str String with lines separated by '\n'. 
""" if isinstance(data,list): self._str = data else: self._str = data.split('\n') # store string as list of lines self.reset() def __getitem__(self, n): return self._str[n] def reset(self): self._l = 0 # current line nr def read(self): if not self.eof(): out = self[self._l] self._l += 1 return out else: return '' def seek_next_non_empty_line(self): for l in self[self._l:]: if l.strip(): break else: self._l += 1 def eof(self): return self._l >= len(self._str) def read_to_condition(self, condition_func): start = self._l for line in self[start:]: if condition_func(line): return self[start:self._l] self._l += 1 if self.eof(): return self[start:self._l+1] return [] def read_to_next_empty_line(self): self.seek_next_non_empty_line() def is_empty(line): return not line.strip() return self.read_to_condition(is_empty) def read_to_next_unindented_line(self): def is_unindented(line): return (line.strip() and (len(line.lstrip()) == len(line))) return self.read_to_condition(is_unindented) def peek(self,n=0): if self._l + n < len(self._str): return self[self._l + n] else: return '' def is_empty(self): return not ''.join(self._str).strip() class NumpyDocString(object): def __init__(self, docstring, config={}): docstring = textwrap.dedent(docstring).split('\n') self._doc = Reader(docstring) self._parsed_data = { 'Signature': '', 'Summary': [''], 'Extended Summary': [], 'Parameters': [], 'Returns': [], 'Raises': [], 'Warns': [], 'Other Parameters': [], 'Attributes': [], 'Methods': [], 'See Also': [], 'Notes': [], 'Warnings': [], 'References': '', 'Examples': '', 'index': {} } self._parse() def __getitem__(self,key): return self._parsed_data[key] def __setitem__(self,key,val): if not self._parsed_data.has_key(key): warn("Unknown section %s" % key) else: self._parsed_data[key] = val def _is_at_section(self): self._doc.seek_next_non_empty_line() if self._doc.eof(): return False l1 = self._doc.peek().strip() # e.g. Parameters if l1.startswith('.. 
index::'): return True l2 = self._doc.peek(1).strip() # ---------- or ========== return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1)) def _strip(self,doc): i = 0 j = 0 for i,line in enumerate(doc): if line.strip(): break for j,line in enumerate(doc[::-1]): if line.strip(): break return doc[i:len(doc)-j] def _read_to_next_section(self): section = self._doc.read_to_next_empty_line() while not self._is_at_section() and not self._doc.eof(): if not self._doc.peek(-1).strip(): # previous line was empty section += [''] section += self._doc.read_to_next_empty_line() return section def _read_sections(self): while not self._doc.eof(): data = self._read_to_next_section() name = data[0].strip() if name.startswith('..'): # index section yield name, data[1:] elif len(data) < 2: yield StopIteration else: yield name, self._strip(data[2:]) def _parse_param_list(self,content): r = Reader(content) params = [] while not r.eof(): header = r.read().strip() if ' : ' in header: arg_name, arg_type = header.split(' : ')[:2] else: arg_name, arg_type = header, '' desc = r.read_to_next_unindented_line() desc = dedent_lines(desc) params.append((arg_name,arg_type,desc)) return params _name_rgx = re.compile(r"^\s*(:(?P\w+):`(?P[a-zA-Z0-9_.-]+)`|" r" (?P[a-zA-Z0-9_.-]+))\s*", re.X) def _parse_see_also(self, content): """ func_name : Descriptive text continued text another_func_name : Descriptive text func_name1, func_name2, :meth:`func_name`, func_name3 """ items = [] def parse_item_name(text): """Match ':role:`name`' or 'name'""" m = self._name_rgx.match(text) if m: g = m.groups() if g[1] is None: return g[3], None else: return g[2], g[1] raise ValueError("%s is not a item name" % text) def push_item(name, rest): if not name: return name, role = parse_item_name(name) items.append((name, list(rest), role)) del rest[:] current_func = None rest = [] for line in content: if not line.strip(): continue m = self._name_rgx.match(line) if m and line[m.end():].strip().startswith(':'): push_item(current_func, rest) current_func, line = line[:m.end()], line[m.end():] rest = [line.split(':', 1)[1].strip()] if not rest[0]: rest = [] elif not line.startswith(' '): push_item(current_func, rest) current_func = None if ',' in line: for func in line.split(','): if func.strip(): push_item(func, []) elif line.strip(): current_func = line elif current_func is not None: rest.append(line.strip()) push_item(current_func, rest) return items def _parse_index(self, section, content): """ .. 
index: default :refguide: something, else, and more """ def strip_each_in(lst): return [s.strip() for s in lst] out = {} section = section.split('::') if len(section) > 1: out['default'] = strip_each_in(section[1].split(','))[0] for line in content: line = line.split(':') if len(line) > 2: out[line[1]] = strip_each_in(line[2].split(',')) return out def _parse_summary(self): """Grab signature (if given) and summary""" if self._is_at_section(): return summary = self._doc.read_to_next_empty_line() summary_str = " ".join([s.strip() for s in summary]).strip() if re.compile('^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str): self['Signature'] = summary_str if not self._is_at_section(): self['Summary'] = self._doc.read_to_next_empty_line() else: self['Summary'] = summary if not self._is_at_section(): self['Extended Summary'] = self._read_to_next_section() def _parse(self): self._doc.reset() self._parse_summary() for (section,content) in self._read_sections(): if not section.startswith('..'): section = ' '.join([s.capitalize() for s in section.split(' ')]) if section in ('Parameters', 'Returns', 'Raises', 'Warns', 'Other Parameters', 'Attributes', 'Methods'): self[section] = self._parse_param_list(content) elif section.startswith('.. index::'): self['index'] = self._parse_index(section, content) elif section == 'See Also': self['See Also'] = self._parse_see_also(content) else: self[section] = content # string conversion routines def _str_header(self, name, symbol='-'): return [name, len(name)*symbol] def _str_indent(self, doc, indent=4): out = [] for line in doc: out += [' '*indent + line] return out def _str_signature(self): if self['Signature']: return [self['Signature'].replace('*','\*')] + [''] else: return [''] def _str_summary(self): if self['Summary']: return self['Summary'] + [''] else: return [] def _str_extended_summary(self): if self['Extended Summary']: return self['Extended Summary'] + [''] else: return [] def _str_param_list(self, name): out = [] if self[name]: out += self._str_header(name) for param,param_type,desc in self[name]: out += ['%s : %s' % (param, param_type)] out += self._str_indent(desc) out += [''] return out def _str_section(self, name): out = [] if self[name]: out += self._str_header(name) out += self[name] out += [''] return out def _str_see_also(self, func_role): if not self['See Also']: return [] out = [] out += self._str_header("See Also") last_had_desc = True for func, desc, role in self['See Also']: if role: link = ':%s:`%s`' % (role, func) elif func_role: link = ':%s:`%s`' % (func_role, func) else: link = "`%s`_" % func if desc or last_had_desc: out += [''] out += [link] else: out[-1] += ", %s" % link if desc: out += self._str_indent([' '.join(desc)]) last_had_desc = True else: last_had_desc = False out += [''] return out def _str_index(self): idx = self['index'] out = [] out += ['.. 
index:: %s' % idx.get('default','')] for section, references in idx.iteritems(): if section == 'default': continue out += [' :%s: %s' % (section, ', '.join(references))] return out def __str__(self, func_role=''): out = [] out += self._str_signature() out += self._str_summary() out += self._str_extended_summary() for param_list in ('Parameters', 'Returns', 'Other Parameters', 'Raises', 'Warns'): out += self._str_param_list(param_list) out += self._str_section('Warnings') out += self._str_see_also(func_role) for s in ('Notes','References','Examples'): out += self._str_section(s) for param_list in ('Attributes', 'Methods'): out += self._str_param_list(param_list) out += self._str_index() return '\n'.join(out) def indent(str,indent=4): indent_str = ' '*indent if str is None: return indent_str lines = str.split('\n') return '\n'.join(indent_str + l for l in lines) def dedent_lines(lines): """Deindent a list of lines maximally""" return textwrap.dedent("\n".join(lines)).split("\n") def header(text, style='-'): return text + '\n' + style*len(text) + '\n' class FunctionDoc(NumpyDocString): def __init__(self, func, role='func', doc=None, config={}): self._f = func self._role = role # e.g. "func" or "meth" if doc is None: if func is None: raise ValueError("No function or docstring given") doc = inspect.getdoc(func) or '' NumpyDocString.__init__(self, doc) if not self['Signature'] and func is not None: func, func_name = self.get_func() try: # try to read signature argspec = inspect.getargspec(func) argspec = inspect.formatargspec(*argspec) argspec = argspec.replace('*','\*') signature = '%s%s' % (func_name, argspec) except TypeError, e: signature = '%s()' % func_name self['Signature'] = signature def get_func(self): func_name = getattr(self._f, '__name__', self.__class__.__name__) if inspect.isclass(self._f): func = getattr(self._f, '__call__', self._f.__init__) else: func = self._f return func, func_name def __str__(self): out = '' func, func_name = self.get_func() signature = self['Signature'].replace('*', '\*') roles = {'func': 'function', 'meth': 'method'} if self._role: if not roles.has_key(self._role): print "Warning: invalid role %s" % self._role out += '.. %s:: %s\n \n\n' % (roles.get(self._role,''), func_name) out += super(FunctionDoc, self).__str__(func_role=self._role) return out class ClassDoc(NumpyDocString): extra_public_methods = ['__call__'] def __init__(self, cls, doc=None, modulename='', func_doc=FunctionDoc, config={}): if not inspect.isclass(cls) and cls is not None: raise ValueError("Expected a class or None, but got %r" % cls) self._cls = cls if modulename and not modulename.endswith('.'): modulename += '.' 
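# keep the (possibly empty) module prefix, normalized above to end with '.'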
self._mod = modulename if doc is None: if cls is None: raise ValueError("No class or documentation string given") doc = pydoc.getdoc(cls) NumpyDocString.__init__(self, doc) if config.get('show_class_members', True): if not self['Methods']: self['Methods'] = [(name, '', '') for name in sorted(self.methods)] if not self['Attributes']: self['Attributes'] = [(name, '', '') for name in sorted(self.properties)] @property def methods(self): if self._cls is None: return [] return [name for name,func in inspect.getmembers(self._cls) if ((not name.startswith('_') or name in self.extra_public_methods) and callable(func))] @property def properties(self): if self._cls is None: return [] return [name for name,func in inspect.getmembers(self._cls) if not name.startswith('_') and func is None] nipy-0.3.0/doc/sphinxext/numpy_ext/docscrape_sphinx.py000066400000000000000000000171171210344137400232340ustar00rootroot00000000000000import re, inspect, textwrap, pydoc import sphinx from docscrape import NumpyDocString, FunctionDoc, ClassDoc class SphinxDocString(NumpyDocString): def __init__(self, docstring, config={}): self.use_plots = config.get('use_plots', False) NumpyDocString.__init__(self, docstring, config=config) # string conversion routines def _str_header(self, name, symbol='`'): return ['.. rubric:: ' + name, ''] def _str_field_list(self, name): return [':' + name + ':'] def _str_indent(self, doc, indent=4): out = [] for line in doc: out += [' '*indent + line] return out def _str_signature(self): return [''] if self['Signature']: return ['``%s``' % self['Signature']] + [''] else: return [''] def _str_summary(self): return self['Summary'] + [''] def _str_extended_summary(self): return self['Extended Summary'] + [''] def _str_param_list(self, name): out = [] if self[name]: out += self._str_field_list(name) out += [''] for param,param_type,desc in self[name]: out += self._str_indent(['**%s** : %s' % (param.strip(), param_type)]) out += [''] out += self._str_indent(desc,8) out += [''] return out @property def _obj(self): if hasattr(self, '_cls'): return self._cls elif hasattr(self, '_f'): return self._f return None def _str_member_list(self, name): """ Generate a member listing, autosummary:: table where possible, and a table where not. """ out = [] if self[name]: out += ['.. rubric:: %s' % name, ''] prefix = getattr(self, '_name', '') if prefix: prefix = '~%s.' % prefix autosum = [] others = [] for param, param_type, desc in self[name]: param = param.strip() if not self._obj or hasattr(self._obj, param): autosum += [" %s%s" % (prefix, param)] else: others.append((param, param_type, desc)) if autosum: out += ['.. autosummary::', ' :toctree:', ''] out += autosum if others: maxlen_0 = max([len(x[0]) for x in others]) maxlen_1 = max([len(x[1]) for x in others]) hdr = "="*maxlen_0 + " " + "="*maxlen_1 + " " + "="*10 fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1) n_indent = maxlen_0 + maxlen_1 + 4 out += [hdr] for param, param_type, desc in others: out += [fmt % (param.strip(), param_type)] out += self._str_indent(desc, n_indent) out += [hdr] out += [''] return out def _str_section(self, name): out = [] if self[name]: out += self._str_header(name) out += [''] content = textwrap.dedent("\n".join(self[name])).split("\n") out += content out += [''] return out def _str_see_also(self, func_role): out = [] if self['See Also']: see_also = super(SphinxDocString, self)._str_see_also(func_role) out = ['.. 
seealso::', ''] out += self._str_indent(see_also[2:]) return out def _str_warnings(self): out = [] if self['Warnings']: out = ['.. warning::', ''] out += self._str_indent(self['Warnings']) return out def _str_index(self): idx = self['index'] out = [] if len(idx) == 0: return out out += ['.. index:: %s' % idx.get('default','')] for section, references in idx.iteritems(): if section == 'default': continue elif section == 'refguide': out += [' single: %s' % (', '.join(references))] else: out += [' %s: %s' % (section, ','.join(references))] return out def _str_references(self): out = [] if self['References']: out += self._str_header('References') if isinstance(self['References'], str): self['References'] = [self['References']] out.extend(self['References']) out += [''] # Latex collects all references to a separate bibliography, # so we need to insert links to it if sphinx.__version__ >= "0.6": out += ['.. only:: latex',''] else: out += ['.. latexonly::',''] items = [] for line in self['References']: m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I) if m: items.append(m.group(1)) out += [' ' + ", ".join(["[%s]_" % item for item in items]), ''] return out def _str_examples(self): examples_str = "\n".join(self['Examples']) if (self.use_plots and 'import matplotlib' in examples_str and 'plot::' not in examples_str): out = [] out += self._str_header('Examples') out += ['.. plot::', ''] out += self._str_indent(self['Examples']) out += [''] return out else: return self._str_section('Examples') def __str__(self, indent=0, func_role="obj"): out = [] out += self._str_signature() out += self._str_index() + [''] out += self._str_summary() out += self._str_extended_summary() for param_list in ('Parameters', 'Returns', 'Other Parameters', 'Raises', 'Warns'): out += self._str_param_list(param_list) out += self._str_warnings() out += self._str_see_also(func_role) out += self._str_section('Notes') out += self._str_references() out += self._str_examples() for param_list in ('Attributes', 'Methods'): out += self._str_member_list(param_list) out = self._str_indent(out,indent) return '\n'.join(out) class SphinxFunctionDoc(SphinxDocString, FunctionDoc): def __init__(self, obj, doc=None, config={}): self.use_plots = config.get('use_plots', False) FunctionDoc.__init__(self, obj, doc=doc, config=config) class SphinxClassDoc(SphinxDocString, ClassDoc): def __init__(self, obj, doc=None, func_doc=None, config={}): self.use_plots = config.get('use_plots', False) ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config) class SphinxObjDoc(SphinxDocString): def __init__(self, obj, doc=None, config={}): self._f = obj SphinxDocString.__init__(self, doc, config=config) def get_doc_object(obj, what=None, doc=None, config={}): if what is None: if inspect.isclass(obj): what = 'class' elif inspect.ismodule(obj): what = 'module' elif callable(obj): what = 'function' else: what = 'object' if what == 'class': return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc, config=config) elif what in ('function', 'method'): return SphinxFunctionDoc(obj, doc=doc, config=config) else: if doc is None: doc = pydoc.getdoc(obj) return SphinxObjDoc(obj, doc, config=config) nipy-0.3.0/doc/sphinxext/numpy_ext/numpydoc.py000066400000000000000000000130611210344137400215300ustar00rootroot00000000000000""" ======== numpydoc ======== Sphinx extension that handles docstrings in the Numpy standard format. [1] It will: - Convert Parameters etc. sections to field lists. - Convert See Also section to a See also entry. - Renumber references. 
- Extract the signature from the docstring, if it can't be determined otherwise. .. [1] https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt """ import sphinx if sphinx.__version__ < '1.0.1': raise RuntimeError("Sphinx 1.0.1 or newer is required") import os, re, pydoc from docscrape_sphinx import get_doc_object, SphinxDocString from sphinx.util.compat import Directive import inspect def mangle_docstrings(app, what, name, obj, options, lines, reference_offset=[0]): cfg = dict(use_plots=app.config.numpydoc_use_plots, show_class_members=app.config.numpydoc_show_class_members) if what == 'module': # Strip top title title_re = re.compile(ur'^\s*[#*=]{4,}\n[a-z0-9 -]+\n[#*=]{4,}\s*', re.I|re.S) lines[:] = title_re.sub(u'', u"\n".join(lines)).split(u"\n") else: doc = get_doc_object(obj, what, u"\n".join(lines), config=cfg) lines[:] = unicode(doc).split(u"\n") if app.config.numpydoc_edit_link and hasattr(obj, '__name__') and \ obj.__name__: if hasattr(obj, '__module__'): v = dict(full_name=u"%s.%s" % (obj.__module__, obj.__name__)) else: v = dict(full_name=obj.__name__) lines += [u'', u'.. only:: html', ''] lines += [u' %s' % x for x in (app.config.numpydoc_edit_link % v).split("\n")] # replace reference numbers so that there are no duplicates references = [] for line in lines: line = line.strip() m = re.match(ur'^.. \[([a-z0-9_.-])\]', line, re.I) if m: references.append(m.group(1)) # start renaming from the longest string, to avoid overwriting parts references.sort(key=lambda x: -len(x)) if references: for i, line in enumerate(lines): for r in references: if re.match(ur'^\d+$', r): new_r = u"R%d" % (reference_offset[0] + int(r)) else: new_r = u"%s%d" % (r, reference_offset[0]) lines[i] = lines[i].replace(u'[%s]_' % r, u'[%s]_' % new_r) lines[i] = lines[i].replace(u'.. [%s]' % r, u'.. 
[%s]' % new_r) reference_offset[0] += len(references) def mangle_signature(app, what, name, obj, options, sig, retann): # Do not try to inspect classes that don't define `__init__` if (inspect.isclass(obj) and (not hasattr(obj, '__init__') or 'initializes x; see ' in pydoc.getdoc(obj.__init__))): return '', '' if not (callable(obj) or hasattr(obj, '__argspec_is_invalid_')): return if not hasattr(obj, '__doc__'): return doc = SphinxDocString(pydoc.getdoc(obj)) if doc['Signature']: sig = re.sub(u"^[^(]*", u"", doc['Signature']) return sig, u'' def setup(app, get_doc_object_=get_doc_object): global get_doc_object get_doc_object = get_doc_object_ app.connect('autodoc-process-docstring', mangle_docstrings) app.connect('autodoc-process-signature', mangle_signature) app.add_config_value('numpydoc_edit_link', None, False) app.add_config_value('numpydoc_use_plots', None, False) app.add_config_value('numpydoc_show_class_members', True, True) # Extra mangling domains app.add_domain(NumpyPythonDomain) app.add_domain(NumpyCDomain) #------------------------------------------------------------------------------ # Docstring-mangling domains #------------------------------------------------------------------------------ from docutils.statemachine import ViewList from sphinx.domains.c import CDomain from sphinx.domains.python import PythonDomain class ManglingDomainBase(object): directive_mangling_map = {} def __init__(self, *a, **kw): super(ManglingDomainBase, self).__init__(*a, **kw) self.wrap_mangling_directives() def wrap_mangling_directives(self): for name, objtype in self.directive_mangling_map.items(): self.directives[name] = wrap_mangling_directive( self.directives[name], objtype) class NumpyPythonDomain(ManglingDomainBase, PythonDomain): name = 'np' directive_mangling_map = { 'function': 'function', 'class': 'class', 'exception': 'class', 'method': 'function', 'classmethod': 'function', 'staticmethod': 'function', 'attribute': 'attribute', } class NumpyCDomain(ManglingDomainBase, CDomain): name = 'np-c' directive_mangling_map = { 'function': 'function', 'member': 'attribute', 'macro': 'function', 'type': 'class', 'var': 'object', } def wrap_mangling_directive(base_directive, objtype): class directive(base_directive): def run(self): env = self.state.document.settings.env name = None if self.arguments: m = re.match(r'^(.*\s+)?(.*?)(\(.*)?', self.arguments[0]) name = m.group(2).strip() if not name: name = self.arguments[0] lines = list(self.content) mangle_docstrings(env.app, objtype, name, None, None, lines) self.content = ViewList(lines, self.content.parent) return base_directive.run(self) return directive nipy-0.3.0/doc/users/000077500000000000000000000000001210344137400143765ustar00rootroot00000000000000nipy-0.3.0/doc/users/basic_io.rst000066400000000000000000000042551210344137400167060ustar00rootroot00000000000000.. basic_data_io: =============== Basic Data IO =============== Accessing images using nipy: While Nifti_ is the primary file format Analyze images (with associated .mat file), and MINC files can also be read. Load Image from File ==================== Get a filename for an example file. 
``anatfile`` gives a filename for a small testing image in the nipy distribution: >>> from nipy.testing import anatfile Load the file from disk: >>> from nipy import load_image >>> myimg = load_image(anatfile) >>> myimg.shape (33, 41, 25) >>> myimg.affine array([[ -2., 0., 0., 32.], [ 0., 2., 0., -40.], [ 0., 0., 2., -16.], [ 0., 0., 0., 1.]]) Access Data into an Array ========================= This allows the user to access data as a numpy array. >>> mydata = myimg.get_data() >>> mydata.shape (33, 41, 25) >>> mydata.ndim 3 Save image to a File ==================== >>> from nipy import save_image >>> newimg = save_image(myimg, 'newmyfile.nii') Create Image from an Array =========================== This will have a generic affine-type CoordinateMap with unit voxel sizes. >>> import numpy as np >>> from nipy.core.api import Image, vox2mni >>> rawarray = np.zeros((43,128,128)) >>> arr_img = Image(rawarray, vox2mni(np.eye(4))) >>> arr_img.shape (43, 128, 128) Coordinate map ============== Images have a Coordinate Map. The Coordinate Map contains information defining the input (domain) and output (range) Coordinate Systems of the image, and the mapping between the two Coordinate systems. The *input* coordinate system is the *voxel* coordinate system, and the *output* coordinate system is the *world* coordinate system. >>> newimg.coordmap AffineTransform( function_domain=CoordinateSystem(coord_names=('i', 'j', 'k'), name='voxels', coord_dtype=float64), function_range=CoordinateSystem(coord_names=('aligned-x=L->R', 'aligned-y=P->A', 'aligned-z=I->S'), name='aligned', coord_dtype=float64), affine=array([[ -2., 0., 0., 32.], [ 0., 2., 0., -40.], [ 0., 0., 2., -16.], [ 0., 0., 0., 1.]]) ) See :ref:`coordinate_map` for more detail. .. include:: ../links_names.txt nipy-0.3.0/doc/users/coordinate_map.rst000066400000000000000000000142061210344137400201170ustar00rootroot00000000000000.. _coordinate_map: ############################# Basics of the Coordinate Map ############################# When you load an image it will have an associated Coordinate Map **Coordinate Map** The Coordinate Map contains information defining the input (domain) and output (range) Coordinate Systems of the image, and the mapping between the two Coordinate systems. The *input* or *domain* in an image are voxel coordinates in the image array. The *output* or *range* are the millimetre coordinates in some space, that correspond to the input (voxel) coordinates. >>> import nipy Get a filename for an example file: >>> from nipy.testing import anatfile Get the coordinate map for the image: >>> anat_img = nipy.load_image(anatfile) >>> coordmap = anat_img.coordmap For more on Coordinate Systems and their properties, see :mod:`nipy.core.reference.coordinate_system`. You can inspect a coordinate map:: >>> coordmap.function_domain.coord_names ('i', 'j', 'k') >>> coordmap.function_range.coord_names ('aligned-x=L->R', 'aligned-y=P->A', 'aligned-z=I->S') >>> coordmap.function_domain.name 'voxels' >>> coordmap.function_range.name 'aligned' A Coordinate Map has a mapping from the *input* Coordinate System to the *output* Coordinate System. Here we can see we have a voxel to millimeter mapping from the voxel space (i,j,k) to the millimeter space (x,y,z). We can also get the names of the respective Coordinate Systems that our Coordinate Map maps between. A Coordinate Map is two Coordinate Systems with a mapping between them. 
Formally the mapping is a function that takes points from the input Coordinate System and returns points from the output Coordinate System. This is the same as saying that the mapping takes points in the mapping function *domain* and transforms them to points in the mapping function *range*. Often this is as simple as applying an Affine transform. In that case the Coordinate System may well have an affine property which returns the affine matrix corresponding to the transform. >>> coordmap.affine array([[ -2., 0., 0., 32.], [ 0., 2., 0., -40.], [ 0., 0., 2., -16.], [ 0., 0., 0., 1.]]) If you call the Coordinate Map you will apply the mapping function between the two Coordinate Systems. In this case from (i,j,k) to (x,y,z): >>> coordmap([1,2,3]) array([ 30., -36., -10.]) It can also be used to get the inverse mapping, or in this example from (x,y,z) back to (i,j,k): >>> coordmap.inverse()([30.,-36.,-10.]) array([ 1., 2., 3.]) We can see how this works if we just apply the affine ourselves using dot product. .. Note:: Notice the affine is using homogeneous coordinates so we need to add a 1 to our input. (And note how a direct call to the coordinate map does this work for you) >>> coordmap.affine array([[ -2., 0., 0., 32.], [ 0., 2., 0., -40.], [ 0., 0., 2., -16.], [ 0., 0., 0., 1.]]) >>> import numpy as np >>> np.dot(coordmap.affine, np.transpose([1,2,3,1])) array([ 30., -36., -10., 1.]) .. Note:: The answer is the same as above (except for the added 1) .. _normalize-coordmap: *************************************************** Use of the Coordinate Map for spatial normalization *************************************************** The Coordinate Map can be used to describe the transformations needed to perform spatial normalization. Suppose we have an anatomical Image from one subject, *subject_img*, and we want to create an Image in a standard space like Talairach space. An affine registration algorithm will produce a 4-by-4 matrix representing the affine transformation, *T*, that takes a point in the subject's coordinates *subject_world* to a point in Talairach space *talairach_world*. The subject's Image has its own Coordinate Map, *subject_cmap*, and there is a Coordinate Map for Talairach space which we will call *talairach_cmap*. Having found the transformation matrix *T*, the next step in spatial normalization is usually to resample the array of *subject_img* so that it has the same shape as some atlas *atlas_img*. Note that because it is an atlas Image, *talairach_cmap=atlas_img.coordmap*. A resampling algorithm uses an interpolator which needs to know which voxel of *subject_img* corresponds to which voxel of *atlas_img*. This is therefore a function from *atlas_voxel* to *subject_voxel*. This function, paired with the information that it is a map from atlas-voxel to subject-voxel, is another example of a Coordinate Map. The code to do this might look something like the following: >>> from nipy.testing import anatfile, funcfile >>> from nipy.algorithms.registration import HistogramRegistration >>> from nipy.algorithms.kernel_smooth import LinearFilter We'll make a smoothed version of the anatomical example image, and pretend it's the template >>> smoother = LinearFilter(anat_img.coordmap, anat_img.shape) >>> atlas_im = smoother.smooth(anat_img) >>> subject_im = anat_img We do an affine registration between the two. >>> reggie = HistogramRegistration(subject_im, atlas_im) >>> aff = reggie.optimize('affine').as_affine() #doctest: +ELLIPSIS Initial guess... ... 
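The optimizer hands back ``aff`` as the 4-by-4 matrix *T* described above, taking subject world coordinates to template world coordinates. As a quick check (an illustrative addition, assuming ``as_affine()`` returns a plain NumPy array):

>>> aff.shape == (4, 4)
True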
Now we make a coordmap with this transformation >>> from nipy.core.api import AffineTransform >>> subject_cmap = subject_im.coordmap >>> talairach_cmap = atlas_im.coordmap >>> subject_world_to_talairach_world = AffineTransform( ... subject_cmap.function_range, ... talairach_cmap.function_range, ... aff) ... We resample the 'subject' image to the 'atlas image >>> from nipy.algorithms.resample import resample >>> normalized_subject_im = resample(subject_im, talairach_cmap, ... subject_world_to_talairach_world, ... atlas_im.shape) >>> normalized_subject_im.shape == atlas_im.shape True >>> normalized_subject_im.coordmap == atlas_im.coordmap True >>> np.all(normalized_subject_im.affine == atlas_im.affine) True *********************** Mathematical definition *********************** For a more formal mathematical description of the coordinate map, see :ref:`math-coordmap`. nipy-0.3.0/doc/users/glm_spec.rst000066400000000000000000000416421210344137400167300ustar00rootroot00000000000000========================== Specifying a GLM in NiPy ========================== In this tutorial we will discuss NiPy's model and specification of a fMRI experiment. This involves: * an experimental model: a description of the experimental protocol (function of experimental time) * a neuronal model: a model of how a particular neuron responds to the experimental protocol (function of the experimental model) * a hemodynamic model: a model of the BOLD signal at a particular voxel, (function of the neuronal model) Experimental model ================== We first begin by describing typically encountered fMRI designs. * Event-related categorical design, i.e. *Face* vs. *Object* * Block categorical design * Continuous stimuli, i.e. a rotating checkerboard * Events with amplitudes, i.e. non-categorical values * Events with random amplitudes Event-related categorical design -------------------------------- .. _face-object: This design is a canonical design in fMRI used, for instance, in an experiment designed to detect regions associated to discrimination between *Face* and *Object*. This design can be graphically represented in terms of delta-function responses that are effectively events of duration 0 and infinite height. .. plot:: users/plots/event.py In this example, there *Face* event types are presented at times [0,4,8,12,16] and *Object* event types at times [2,6,10,14,18]. More generally, given a set of event types *V*, an event type experiment can be modeled as a sum of delta functions (point masses) at pairs of times and event types: .. math:: E = \sum_{j=1}^{10} \delta_{(t_j, a_j)}. Formally, this can be thought of as realization of a :term:`marked point process`, that says we observe 10 points in the space :math:`\mathbb{R} \times V` where *V* is the set of all event types. Alternatively, we can think of the experiment as a measure :math:`E` on :math:`\mathbb{R} \times V` .. math:: E([t_1,t_2] \times A) = \int_{t_1}^{t_2} \int_A dE(v,t) This intensity measure determines, in words, "the amount of stimulus within *A* delivered in the interval :math:`[t_1,t_2]`". In this categorical design, stimuli :math:`a_j` are delivered as point masses at the times :math:`t_j`. Practically speaking, we can read this as saying that our experiment has 10 events, occurring at times :math:`t_1,\dots,t_{10}` with event types :math:`a_1,\dots,a_{10} \in V`. Typically, as in our *Face* vs *Object* example, the events occur in groups, say odd events are labelled *a*, even ones *b*. We might rewrite this as .. 
math:: E = \delta_{(t_1,a)} + \delta_{(t_2,b)} + \delta_{(t_3,a)} + \dots + \delta_{(t_{10},b)} This type of experiment can be represented by two counting processes, i.e. measures on :math:`mathbb{R}`, :math:`(E_a, E_b)` defined as .. math:: \begin{aligned} E_a(t) &= \sum_{t_j, \text{$j$ odd}} 1_{(-\infty,t_j]}(t) \\ &= E((-\infty,t], \{a\}) \\ E_b(t) &= \sum_{t_j, \text{$j$ even}} 1_{(-\infty,t_j]}(t) \\ &= E((-\infty,t], \{b\}) \\ \end{aligned} Counting processes vs. intensities ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Though the experiment above can be represented in terms of the pair :math:`(E_a(t), E_b(t))`, it is more common in neuroimaging applications to work with instantaneous intensities rather then cumulative intensities. .. math:: \begin{aligned} e_a(t) &= \frac{\partial }{\partial t} E_a(t) \\ e_b(t) &= \frac{\partial }{\partial t} E_b(t) \end{aligned} For the time being, we will stick with cumulative intensities because it unifies the designs above. When we turn to the neuronal model below, we will return to the intensity model. .. _block-face: Block categorical design ------------------------ For block designs of the *Face* vs. *Object* type, we might also allow event durations, meaning that we show the subjects a *Face* for a period of, say, 0.5 seconds. We might represent this experiment graphically as follows, .. plot:: users/plots/block.py and the intensity measure for the experiment could be expressed in terms of .. math:: \begin{aligned} E_a(t) &= E((-\infty,t], \{a\}) &= \sum_{t_j, \text{$j$ odd}} \frac{1}{0.5} \int_{t_j}^ {\min(t_j+0.5, t)} \; ds \\ E_b(t) &= E((-\infty,t], \{b\}) &= \sum_{t_j, \text{$j$ even}} \frac{1}{0.5} \int_{t_j}^ {\min(t_j+0.5, t)} \; ds \\ \end{aligned} The normalization chosen above ensures that each event has integral 1, that is a total of 1 "stimulus unit" is presented for each 0.5 second block. This may or may not be desirable, and could easily be changed. Continuous stimuli ------------------ .. _continuous-stimuli: Some experiments do not fit well into this "event-type" paradigm but are, rather, more continuous in nature. For instance, a rotating checkerboard, for which orientation, contrast, are functions of experiment time *t*. This experiment can be represented in terms of a state vector :math:`(O(t), C(t))`. In this example we have set .. testcode:: import numpy as np t = np.linspace(0,10,1000) o = np.sin(2*np.pi*(t+1)) * np.exp(-t/10) c = np.sin(2*np.pi*(t+0.2)/4) * np.exp(-t/12) .. plot:: users/plots/sinusoidal.py The cumulative intensity measure for such an experiment might look like .. math:: E([t_1, t_2], A) = \int_{t_1}^{t_2} \left(\int_A \; dc \; do\right) \; dt. In words, this reads as :math:`E([t_1,t_2],A)` is the amount of time in the interval :math:`[t_1,t_2]` for which the state vector :math:`(O(t), C(t))` was in the region :math:`A`. .. _event-amplitudes: Events with amplitudes ---------------------- Another (event-related) experimental paradigm is one in which the event types have amplitudes, perhaps in a pain experiment with a heat stimulus, we might consider the temperature an amplitude. These amplitudes could be multi-valued. We might represent this parametric design mathematically as .. math:: E = \sum_{j=1}^{10} \delta_{(t_j, a_j)}, which is virtually identical to our description of the *Face* vs. *Object* experiment in :ref:`face-object` though the values :math:`a_j` are floats rather than labels. Graphically, this experiment might be represented as in this figure below. .. 
plot:: users/plots/amplitudes.py Events with random amplitudes ----------------------------- Another possible approach to specifying an experiment might be to deliver a randomly generated stimulus, say, uniformly distributed on some interval, at a set of prespecified event times. We might represent this graphically as in the following figure. .. plot:: users/plots/random_amplitudes.py Of course, the stimuli need not be randomly distributed over some interval, they could have fairly arbitrary distributions. Or, in the *Face* vs *Object* scenario, we could randomly present of one of the two types and the distribution at a particular event time :math:`t_j` would be represented by a probability :math:`P_j`. The cumulative intensity model for such an experiment might be .. math:: E([t_1, t_2], A) = \sum_j 1_{[t_1, t_2]}(t_j) \int_A \; P_j(da) If the times were not prespecified but were themselves random, say uniform over intervals :math:`[u_j,v_j]`, we might modify the cumulative intensity to be .. math:: E([t_1, t_2], A) = \sum_j \int_{\max(u_j,t_1)}^{\min(v_j, t_2)} \int_A \; P_j(da) \; dt .. plot:: users/plots/random_amplitudes_times.py ================ Neuronal model ================ The neuronal model is a model of the activity as a function of *t* at a neuron *x* given the experimental model :math:`E`. It is most commonly expressed as some linear function of the experiment :math:`E`. As with the experimental model, we prefer to start off by working with the cumulative neuronal activity, a measure on :math:`\mathbb{R}`, though, ultimately we will work with the intensities in :ref:`intensity`. Typically, the neuronal model with an experiment model :math:`E` has the form .. math:: N([t_1,t_2]) = \int_{t_1}^{t_2}\int_V f(v,t) \; dE(v,t) Unlike the experimental model, which can look somewhat abstract, the neuronal model can be directly modeled. For example, take the standard *Face* vs. *Object* model :ref:`face-object`, in which case :math:`V=\{a,b\}` and we can set .. math:: f(v,t) = \begin{cases} \beta_a & v = a \\ \beta_b & v = b \end{cases} Thus, the cumulative neuronal model can be expressed as .. testcode:: from sympy import Symbol, Heaviside t = Symbol('t') ta = [0,4,8,12,16] tb = [2,6,10,14,18] ba = Symbol('ba') bb = Symbol('bb') fa = sum([Heaviside(t-_t) for _t in ta]) * ba fb = sum([Heaviside(t-_t) for _t in tb]) * bb N = fa+fb Or, graphically, if we set :math:`\beta_a=1` and :math:`\beta_b=-2`, as .. plot:: users/plots/neuronal_event.py In the block design, we might have the same form for the neuronal model (i.e. the same :math:`f` above), but the different experimental model :math:`E` yields .. testcode:: from sympy import Symbol, Piecewise ta = [0,4,8,12,16]; tb = [2,6,10,14,18] ba = Symbol('ba') bb = Symbol('bb') fa = sum([Piecewise((0, (t<_t)), ((t-_t)/0.5, (t<_t+0.5)), (1, (t >= _t+0.5))) for _t in ta])*ba fb = sum([Piecewise((0, (t<_t)), ((t-_t)/0.5, (t<_t+0.5)), (1, (t >= _t+0.5))) for _t in tb])*bb N = fa+fb Or, graphically, if we set :math:`\beta_a=1` and :math:`\beta_b=-2`, as .. plot:: users/plots/neuronal_block.py The function :math:`f` above can be expressed as .. math:: f(v,t) = \beta_a 1_{\{a\}}(v) + \beta_b 1_{\{b\}}(v) = \beta_a f_a(v,t) + \beta_b f_b(v,t) Hence, our typical neuronal model can be expressed as a sum .. math:: \begin{aligned} N([t_1,t_2]) &= \sum_i \beta_i \int_{t_1}^{t_2} \int_V f_i(v,t) \; dE(v,t) \\ &= \sum_i \beta_i \tilde{N}_{f_i}([t_1,t_2]) \end{aligned} for arbitrary functions :math:`\tilde{N}_{f_i}`. 
In this decomposition, :math:`\tilde{N}_{f_i}` represents the stimulus contributed to :math:`N` from the function :math:`f_i`. In the *Face* vs. *Object* example :ref:`face-object`, these cumulative intensities are related to the more common neuronal model of intensities, expressed in terms of delta functions .. math:: \frac{\partial}{\partial t} \tilde{N}_{f_a}(t) = \beta_a \sum_{t_i: \text{$i$ odd}} \delta_{t_i}(t) .. testcode:: from sympy import Symbol, Heaviside ta = [0,4,8,12,16] t = Symbol('t') ba = Symbol('ba') fa = sum([Heaviside(t-_t) for _t in ta]) * ba print(fa.diff(t)) .. testoutput:: ba*(DiracDelta(t) + DiracDelta(t - 16) + DiracDelta(t - 12) + DiracDelta(t - 8) + DiracDelta(t - 4)) .. plot:: users/plots/hrf_delta.py Convolution =========== In our continuous example above, with a periodic orientation and contrast, we might take .. math:: \begin{aligned} f_O(t,(o,c)) &= o \\ f_C(t,(o,c)) &= c \\ \end{aligned} yielding a neuronal model .. math:: N([t_1,t_2]) = \beta_{O} O(t) + \beta_{C} C(t) We might also want to allow a delay in the neuronal model .. math:: N^{\text{delay}}([t_1,t_2]) = \beta_{O} O(t-\tau_O) + \beta_{C} C(t-\tau_C). This delay can be represented mathematically in terms of convolution (of measures) .. math:: N^{\text{delay}}([t_1,t_2]) = \left(\tilde{N}_{f_O} * \delta_{-\tau_O}\right)([t_1, t_2]) +\left(\tilde{N}_{f_C} * \delta_{-\tau_C}\right)([t_1, t_2]) Another model that uses convolution is the *Face* vs. *Object* one in which the neuronal signal is attenuated with an exponential decay at time scale :math:`\tau` .. math:: D([t_1, t_2]) = \int_{\max(t_1,0)}^{t_2} \tau e^{-\tau t} \; dt yielding .. math:: N^{\text{decay}}([t_1,t_2]) = (N * D)[t_1, t_2] ======================== Events with amplitudes ======================== We described a model above (:ref:`event-amplitudes`) with events that each have a continuous value :math:`a` attached to them. In terms of a neuronal model, it seems reasonable to suppose that the (cumulative) neuronal activity is related to some function of the amplitude, perhaps expressed as a polynomial :math:`h(a)=\sum_j \beta_j a^j`, yielding a neuronal model .. math:: N([t_1, t_2]) = \sum_j \beta_j \tilde{N}_{a^j}([t_1, t_2]) Hemodynamic model ================= The hemodynamic model is a model for the BOLD signal, expressed as some function of the neuronal model. The most common hemodynamic model is just the convolution of the neuronal model with some hemodynamic response function, :math:`HRF` .. math:: \begin{aligned} HRF((-\infty,t]) &= \int_{-\infty}^t h_{can}(s) \; ds \\ H([t_1,t_2]) & = (N * HRF)[t_1,t_2] \end{aligned} The canonical one is a difference of two Gamma densities .. plot:: users/plots/hrf.py Intensities =========== Hemodynamic models are, as mentioned above, most commonly expressed in terms of instantaneous intensities rather than cumulative intensities. Define .. math:: n(t) = \frac{\partial}{\partial t} N((-\infty,t]). The simple model above can then be written as .. math:: h(t) = \frac{\partial}{\partial t}(N * HRF)(t) = \int_{-\infty}^{\infty} n(t-s) h_{can}(s) \; ds. In the *Face* vs. *Object* experiment, the integrals above can be evaluated explicitly because :math:`n(t)` is a sum of delta functions .. math:: n(t) = \beta_a \sum_{t_i: \text{$i$ odd}} \delta_{t_i}(t) + \beta_b \sum_{t_i: \text{$i$ even}} \delta_{t_i}(t) In this experiment we may want to allow different hemodynamic response functions within each group, say :math:`h_a` within group :math:`a` and :math:`h_b` within group :math:`b`. This yields a hemodynamic model ..
math:: h(t) = \beta_a \sum_{t_i: \text{$i$ odd}} h_a(t-t_i) + \beta_b \sum_{t_i: \text{$i$ even}} h_b(t-t_i) .. testcode:: from nipy.modalities.fmri import hrf ta = [0,4,8,12,16]; tb = [2,6,10,14,18] ba = 1; bb = -2 na = ba * sum([hrf.glover(hrf.T - t) for t in ta]) nb = bb * sum([hrf.afni(hrf.T - t) for t in tb]) n = na + nb .. plot:: users/plots/hrf_different.py Applying the simple model to the events with amplitude model and the canonical HRF yields a hemodynamic model .. math:: h(t) = \sum_{i,j} \beta_j a_i^j h_{can}(t-t_i) .. testcode:: import numpy as np from nipy.modalities.fmri.utils import events, Symbol a = Symbol('a') b = np.linspace(0,50,6) amp = b*([-1,1]*3) d = events(b, amplitudes=amp, g=a+0.5*a**2, f=hrf.glover) .. plot:: users/plots/event_amplitude.py Derivative information ====================== In cases where the neuronal model has more than one derivative, such as the continuous stimuli :ref:`continuous-stimuli` example, we might model the hemodynamic response using the higher derivatives as well. For example .. math:: h(t) = \beta_{O,0} \tilde{n}_{f_O}(t) + \beta_{O,1} \frac{\partial}{\partial t}\tilde{n}_{f_O}(t) + \beta_{C,0} \tilde{n}_{f_C}(t) + \beta_{C,1} \frac{\partial} {\partial t}\tilde{n}_{f_C}(t) where .. math:: \begin{aligned} \tilde{n}_f(t) &= \frac{\partial}{\partial t} \tilde{N}_f((-\infty,t]) \\ &= \frac{\partial}{\partial t} \left( \int_{-\infty}^t \int_V f(v,t) \; dE(v,t) \right) \end{aligned} ============= Design matrix ============= In a typical GLM analysis, we will compare the observed BOLD signal :math:`B(t)` at some fixed voxel :math:`x`, observed at time points :math:`(s_1, \dots, s_n)`, to a hemodynamic response model. For instance, in the *Face* vs. *Object* model, using the canonical HRF .. MAYBE SOME DATA PLOTTED HERE .. math:: B(t) = \beta_a \sum_{t_i: \text{$i$ odd}} h_{can}(t-t_i) + \beta_b \sum_{t_i: \text{$i$ even}} h_{can}(t-t_i) + \epsilon(t) where :math:`\epsilon(t)` is the correlated noise in the BOLD data. Because the BOLD is modeled as linear in :math:`(\beta_a,\beta_b)` this fits into a multiple linear regression model setting, typically written as .. math:: Y_{n \times 1} = X_{n \times p} \beta_{p \times 1} + \epsilon_{n \times 1} In order to fit the regression model, we must find the matrix :math:`X`. This is just the derivative of the model of the mean of :math:`B` with respect to the parameters to be estimated. Setting :math:`(\beta_1, \beta_2)=(\beta_a, \beta_b)` .. math:: X_{ij} = \frac{\partial}{\partial \beta_j} \left(\beta_1 \sum_{t_k: \text{$k$ odd}} h_{can}(s_i-t_k) + \beta_b \sum_{t_k: \text{$k$ even}} h_{can}(s_i-t_k) \right) .. PUT IN PLOTS OF COLUMNS OF DESIGN HERE Drift ===== We sometimes include a natural spline model of the drift here. .. PLOT A NATURAL SPLINE .. MAYBE A COSINE BASIS This changes the design matrix by adding more columns, one for each function in our model of the drift. In general, starting from some model of the mean the design matrix is the derivative of the model of the mean, differentiated with respect to all parameters to be estimated (in some fixed order). Nonlinear example ================= The delayed continuous stimuli example above is an example of a nonlinear function of the mean that is nonlinear in some parameters, :math:`(\tau_O, \tau_C)`. .. CODE EXAMPLE OF THIS USING SYMPY =============== Formula objects =============== This experience of building the model can often be simplified, using what is known in :ref:R as *formula* objects. 
NiPy has implemented a formula object that is similar to R's, but differs in some important respects. See :mod:`nipy.algorithms.statistics.formula`. nipy-0.3.0/doc/users/index.rst000066400000000000000000000006621210344137400162430ustar00rootroot00000000000000.. _users-guide-index: .. This is the source doc for the nipy users guide. The users guide includes the FAQ (a directory below), and glossary. ============ User Guide ============ .. only:: html :Release: |version| :Date: |today| .. toctree:: :maxdepth: 2 introduction installation scipy_orientation tutorial.rst ../glossary .. only:: html * :ref:`genindex` * :ref:`modindex` * :ref:`search` nipy-0.3.0/doc/users/install_data.rst000066400000000000000000000111661210344137400175740ustar00rootroot00000000000000.. _data-files: ###################### Optional data packages ###################### The source code has some very small data files to run the tests with, but it doesn't include larger example data files, or the all-important brain templates we all use. You can find packages for the optional data and template files at http://nipy.sourceforge.net/data-packages. If you don't have these packages, then, when you run nipy installation, you will probably see messages pointing you to the packages you need. ********************************************* Data package installation as an administrator ********************************************* The installation procedure, for now, is very basic. For example, let us say that you need the 'nipy-templates' package at http://nipy.sourceforge.net/data-packages/nipy-templates-0.2.tar.gz . You simply download this archive, unpack it, and then run the standard ``python setup.py install`` on it. On a unix system this might look like:: curl -O http://nipy.sourceforge.net/data-packages/nipy-templates-0.2.tar.gz tar zxvf nipy-templates-0.2.tar.gz cd nipy-templates-0.2 sudo python setup.py install On windows, download the file, extract the archive to a folder using the GUI, and then, using the windows shell or similar:: cd c:\path\to\extracted\files python setup.py install ******************************************* Non-administrator data package installation ******************************************* The simple ugly manual way ========================== These are instructions for using the command line in Unix. You can do similar things from Windows powershell. * Locate your nipy user directory from the output of this:: python -c 'import nibabel.data; print(nibabel.data.get_nipy_user_dir())' Call that directory ````. Let's imagine that, for you, this is ``~/.nipy``. * If that directory does not exist already, create it, e.g.:: mkdir ~/.nipy * Make a directory in ```` called ``nipy``, e.g.:: mkdir ~/.nipy/nipy * Go to http://nipy.sourceforge.net/data-packages * Download the latest *nipy-templates* and *nipy-data* packages * Unpack both these into some directory, e.g.:: mkdir data cd data tar zxvf ~/Downloads/nipy-data-0.2.tar.gz tar zxvf ~/Downloads/nipy-templates-0.2.tar.gz * After you have unpacked the templates, you will have a directory called something like ``nipy-templates-0.2``. In that directory you should see a subdirectory called ``templates``. Copy / move / link the ``templates`` subdirectory into ``/nipy``, so you now have a directory ``/nipy/templates``. From unpacking the data, you should also have a directory like ``nipy-data-0.2`` with a subdirectory ``data``. Copy / move / link that ``data`` directory into ``/nipy`` as well. 
For example:: cd data cp -r nipy-data-0.2/data ~/.nipy/nipy cp -r nipy-templates-0.2/templates ~/.nipy/nipy * Check whether that worked. Run the following command from the shell:: python -c 'import nipy.utils; print(nipy.utils.example_data, nipy.utils.templates)' It should show something like:: (, ) If it shows ``Bomber`` objects instead, something is wrong. Go back and check that you have the nipy home directory right, and that you have directories ``/nipy/data`` and ``/nipy/templates>``, and that each of these two directories have a file ``config.ini`` in them. The more general way ==================== The commands for the sytem install above assume you are installing into the default system directories. If you want to install into a custom directory, then (in python, or ipython, or a text editor) look at the help for ``nibabel.data.get_data_path()`` . There are instructions there for pointing your nipy installation to the installed data. On unix ------- For example, say you installed with:: cd nipy-templates-0.2 python setup.py install --prefix=/home/my-user/some-dir Then you may want to do make a file ``~/.nipy/config.ini`` with the following contents:: [DATA] path=/home/my-user/some-dir/share/nipy On windows ---------- Say you installed with (windows shell):: cd nipy-templates-0.2 python setup.py install --prefix=c:\some\path Then first, find out your home directory:: python -c "import os; print os.path.expanduser('~')" Let's say that was ``c:\Documents and Settings\My User``. Then, make a new file called ``c:\Documents and Settings\My User\_nipy\config.ini`` with contents:: [DATA] path=c:\some\path\share\nipy nipy-0.3.0/doc/users/installation.rst000066400000000000000000000132371210344137400176370ustar00rootroot00000000000000.. _installation: #################### Download and Install #################### This page covers the necessary steps to install and run NIPY. Below is a list of required dependencies, along with additional software recommendations. ************************ Dependencies for install ************************ Must Have ========= * Python_ 2.5 or later * NumPy_ 1.2 or later: Numpy is an array library for Python * SciPy_ 0.7 or later: Scipy contains scientific computing libraries based on numpy * Sympy_ 0.6.6 or later: Sympy is a symbolic mathematics library for Python. We use it for statistical formulae. Strong Recommendations ====================== * IPython_: Interactive Python environment. * Matplotlib_: python plotting library. Installing from binary packages =============================== For Debian or Ubuntu -------------------- Please use the NeuroDebian_ repository, and install with:: sudo apt-get install python-nipy This will install the dependencies for you. For Fedora, CentOS ------------------ :: sudo yum install numpy scipy sympy python-setuptools sudo yum install python-devel gcc sudo easy_install nibabel sudo easy_install nipy For OSX ^^^^^^^ Install Python, Numpy, and Scipy via their respective ``dmg`` installers. Install via distribute_ / setuptools_ and ``easy_install``. See the distribute_ page for how to install ``easy_install`` and related tools. Then (from the command prompt):: easy_install nipy For Windows ^^^^^^^^^^^ Option 1 """""""" You can make your life much easier by using `Python (X, Y)`_. This will install Python, Numpy, Scipy, IPython, Matplotlib, Sympy and many other useful things. Then go to `nipy pypi`_ and download the ``.exe`` installer for nipy. Double click to install. 
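As a quick, unofficial sanity check (not a full test run), you can verify that nipy is importable afterwards from a windows ``cmd`` shell::

    python -c "import nipy; print(nipy.__file__)"

If this prints the path of the installed package without an error, the basic install is in place.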
Option 2 """""""" * Download Python_ and install with the ``exe`` or ``msi`` installer * Download and install the "Scipy stack" from Christophe Gohlke's `unofficial windows binaries`_. * If the nipy version on the `unofficial windows binaries`_ page is current, use that, otherwise, go to `nipy pypi`_, download and install the ``exe`` installer for nipy Option 3 """""""" Consider one of the big Python bundles such as `EPD free`_ or `Anaconda CE`_ for the dependencies. Install nipy from the ``exe`` installer at `nipy pypi`_. Option 4 """""""" Do all the installs by hand: * Download Python_ and install with the ``exe`` or ``msi`` installer. Make sure your python and the scripts directory (say, ``c:\Python27\Scripts``) are on your windows path. * Download Numpy and Scipy ``exe`` installers for your Python version from their respective Numpy and Scipy download sites. * Install distribute_ to give you ``easy_install``. * Install pip_ using ``easy_install`` from a windows ``cmd`` shell:: easy_install pip * Install sympy and nibabel using pip from a window ``cmd`` shell:: pip install sympy pip install nibabel * On 32-bit Windows, install nipy using ``easy_install``:: easy_install nipy This will pick up and use the ``exe`` installer. For 64-bits install use the installer at the `unofficial windows binaries`_ site. Otherwise ^^^^^^^^^ I'm afraid you might need to build from source... .. _building_source: ************************* Building from source code ************************* Dependencies for build ====================== * A C compiler: NIPY does contain a few C extensions for optimized routines. Therefore, you must have a compiler to build from source. XCode_ (OSX) and MinGW_ (Windows) both include a C compiler. On Linux, try ``sudo apt-get build-essential`` on Debian / Ubuntu, ``sudo yum install gcc`` on Fedora and related distributions. Recommended for build ===================== * Cython_ 0.12.1 or later: Cython is a language that is a fusion of Python and C. It allows us to write fast code using Python and C syntax, so that it easier to read and maintain. You don't need it to build a release, unless you modify the Cython ``*.pyx`` files in the nipy distribution. Procedure ========= Developers should look through the :ref:`development quickstart ` documentation. There you will find information on building NIPY, the required software packages and our developer guidelines. If you are primarily interested in using NIPY, download the source tarball from `nipy pypi` and follow these instructions for building. The installation process is similar to other Python packages so it will be familiar if you have Python experience. Unpack the source tarball and change into the source directory. Once in the source directory, you can build the neuroimaging package using:: python setup.py build To install, simply do:: sudo python setup.py install .. note:: As with any Python installation, this will install the modules in your system Python *site-packages* directory (which is why you need *sudo*). Many of us prefer to install development packages in a local directory so as to leave the system python alone. This is merely a preference, nothing will go wrong if you install using the *sudo* method. If you have Python 2.6 or later, you might want to do a `user install `_ python setup.py install --user To install nipy in some other local directory, use the **--prefix** option. 
For example, if you created a ``local`` directory in your home directory, you would install nipy like this:: python setup.py install --prefix=$HOME/local Installing useful data files ----------------------------- See :ref:`data-files` for some instructions on installing data packages. .. include:: ../links_names.txt nipy-0.3.0/doc/users/introduction.rst000066400000000000000000000007031210344137400176510ustar00rootroot00000000000000.. _introduction: ============== Introduction ============== As you can see, we do not yet have much of a user guide for NIPY. We are spending all our effort in developing the building blocks of the code, and we have not yet returned to a guide to how to use it. We are starting to write general :ref:`tutorial-index`, that include introductions to how to use NIPY code to run analyses. .. toctree:: :maxdepth: 2 ../mission ../history nipy-0.3.0/doc/users/math_coordmap.rst000066400000000000000000000434631210344137400177570ustar00rootroot00000000000000.. _math-coordmap: ********************************************** Mathematical formulation of the Coordinate Map ********************************************** Using the *CoordinateMap* can be a little hard to get used to. For some users, a mathematical description, free of any python syntax and code design and snippets may be helpful. After following through this description, the code design and usage may be clearer. We return to the normalization example in :ref:`normalize-coordmap` and try to write it out mathematically. Conceptually, to do normalization, we need to be able to answer each of these three questions: 1. *Voxel-to-world (subject)* Given the subjects' anatomical image read off the scanner: which physical location, expressed in :math:`(x_s,y_s,z_s)` coordinates (:math:`s` for subject), corresponds to the voxel of data :math:`(i_s,j_s,k_s)`? This question is answered by *subject_im.coordmap*. The actual function that computes this, i.e that takes 3 floats and returns 3 floats, is *subject_im.coordmap.mapping*. 2. *World-to-world (subject to Tailarach)* Given a location :math:`(x_s,y_s,z_s)` in an anatomical image of the subject, where does it lie in the Tailarach coordinates :math:`(x_a,y_a, z_a)`? This is answered by the matrix *T* and knowing that *T* maps a point in the subject's world to Tailarach world. Hence, this question is answered by *subject_world_to_tailarach_world* above. 3. *Voxel-to-world (Tailarach)* Since we want to produce a resampled Image that has the same shape and coordinate information as *atlas_im*, we need to know what location in Tailarach space, :math:`(x_a,y_a,z_a)` (:math:`a` for atlas) corresponds to the voxel :math:`(i_a,j_a,k_a)`. This question is answered by *tailarach_cmap*. Each of these three questions are answered by, in code, what we called a class called *CoordinateMap*. Mathematically, let's define a *mapping* as a tuple :math:`(D,R,f)` where :math:`D` is the *domain*, :math:`R` is the *range* and :math:`f:D\rightarrow R` is a function. It may seem redundant to pair :math:`(D,R)` with :math:`f` because a function must surely know its domain and hence, implicitly, its range. However, we will see that when it comes time to implement the notion of *mapping*, the tuple we do use to construct *CoordinateMap* is almost, but not quite :math:`(D,R,f)` and, in the tuple we use, :math:`D` and :math:`R` are not reduntant. Since these mappings are going to be used and called with modules like :mod:`numpy`, we should restrict our definition a little bit. We assume the following: 1. 
:math:`D` is isomorphic to one of :math:`\mathbb{Z}^n, \mathbb{R}^n, \mathbb{C}^n` for some :math:`n`. This isomorphism is determined by a basis :math:`[u_1,\dots,u_n]` of :math:`D` which maps :math:`u_i` to :math:`e_i`, the canonical i-th coordinate vector of whichever of :math:`\mathbb{Z}^n, \mathbb{R}^n, \mathbb{C}^n`. This isomorphism is denoted by :math:`I_D`. Strictly speaking, if :math:`D` is isomorphic to :math:`\mathbb{Z}^n` then the term basis is possibly misleading, because :math:`D` is not a vector space; it is, however, a group, so we might call the basis a set of generators instead. In any case, the implication is that whatever properties the appropriate :math:`\mathbb{Z},\mathbb{R},\mathbb{C}` has, :math:`D` (and :math:`R`) has as well. 2. :math:`R` is similarly isomorphic to one of :math:`\mathbb{Z}^m, \mathbb{R}^m, \mathbb{C}^m` for some :math:`m` with isomorphism :math:`I_R` and basis :math:`[v_1,\dots,v_m]`. Above, and throughout, the brackets "[","]" represent things interpretable as python lists, i.e. sequences. These isomorphisms are just fancy ways of saying that the point :math:`x=3,y=4,z=5` is represented by the 3 real numbers (3,4,5). In this case the basis is :math:`[x,y,z]` and for any :math:`a,b,c \in \mathbb{R}` .. math:: I_D(a\cdot x + b \cdot y + c \cdot z) = a \cdot e_1 + b \cdot e_2 + c \cdot e_3 We might call the pairs :math:`([u_1,...,u_n], I_D), ([v_1,...,v_m], I_R)` *coordinate systems*. Actually, the bases in effect determine the maps :math:`I_D,I_R` as long as we know which of :math:`\mathbb{Z},\mathbb{R},\mathbb{C}` we are talking about, so, in effect, :math:`([u_1,...,u_n], \mathbb{R})` could be called a *coordinate system*. This is how it is implemented in the code, with :math:`[u_1, \dots, u_n]` being replaced by a list of strings naming the basis vectors and :math:`\mathbb{R}` replaced by a builtin :func:`numpy.dtype`. In our normalization example, we therefore have 3 mappings: 1. *Voxel-to-world (subject)* In standard notation for functions, we can write .. math:: (i_s,j_s,k_s) \overset{f}{\mapsto} (x_s,y_s,z_s). The domain is :math:`D=[i_s,j_s,k_s]`, the range is :math:`R=[x_s,y_s,z_s]` and the function is :math:`f:D \rightarrow R`. 2. *World-to-world (subject to Tailarach)* Again, we can write .. math:: (x_s,y_s,z_s) \overset{g}{\mapsto} (x_a,y_a,z_a) The domain is :math:`D=[x_s,y_s,z_s]`, the range is :math:`R=[x_a,y_a,z_a]` and the function is :math:`g:D \rightarrow R`. 3. *Voxel-to-world (Tailarach)* Again, we can write .. math:: (i_a,j_a,k_a) \overset{h}{\mapsto} (x_a,y_a, z_a). The domain is :math:`D=[i_a,j_a,k_a]`, the range is :math:`R=[x_a,y_a,z_a]` and the function is :math:`h:D \rightarrow R`. Note that each of the functions :math:`f,g,h` can be, when we know the necessary isomorphisms, thought of as functions from :math:`\mathbb{R}^3` to itself. In fact, that is what we are doing when we write .. math:: (i_a,j_a,k_a) \overset{h}{\mapsto} (x_a,y_a, z_a) as a function that takes 3 numbers and gives 3 numbers. Formally, these functions that take 3 numbers and return 3 numbers can be written as :math:`\tilde{f}=I_R \circ f \circ I_D^{-1}`. When this is implemented in code, it is actually the functions :math:`\tilde{f}, \tilde{g}, \tilde{h}` we specify, rather than :math:`f,g,h`. The functions :math:`\tilde{f}, \tilde{g}, \tilde{h}` have domains and ranges that are just :math:`\mathbb{R}^3`. We therefore call a *coordinate map* a tuple ..
math:: ((u_D, \mathbb{R}), (u_R, \mathbb{R}), I_R \circ f \circ I_D^{-1}) where :math:`u_D, u_R` are bases for :math:`D,R`, respectively. It is this object that is implemented in code. There is a simple relationship between *mappings* and *coordinate maps* .. math:: ((u_D, \mathbb{R}), (u_R, \mathbb{R}), \tilde{f}) \leftrightarrow (D, R, f=I_R^{-1} \circ \tilde{f} \circ I_D) Because :math:`\tilde{f}, \tilde{g}, \tilde{h}` are just functions from :math:`\mathbb{R}^3` to itself, they can all be composed with one another. But, from our description of the functions above, we know that only certain compositions make sense and others do not, such as :math:`g \circ h`. Compositions that do make sense include 1. :math:`h^{-1} \circ g` which :math:`(i_a,j_a, k_a)` voxel corresponds to the point :math:`(x_s,y_s,z_s)`? 2. :math:`g \circ f` which :math:`(x_a,y_a,z_a)` corresponds to the voxel :math:`(i,j,k)`? The composition that is used in the normalization example is :math:`w = f^{-1} \circ g^{-1} \circ h` which is a function .. math:: (i_a, j_a, k_a) \overset{w}{\mapsto} (i_s, j_s, k_s) This function, or more correctly its representation :math:`\tilde{w}` that takes 3 floats to 3 floats, is passed directly to :func:`scipy.ndimage.map_coordinates`. Manipulating mappings, coordinate systems and coordinate maps ============================================================= In order to solve our normalization problem, we will definitely need to compose functions. We may want to carry out other formal operations as well. Before describing operations on mappings, we describe the operations you might want to consider on coordinate systems. Coordinate systems ------------------ 1. *Reorder*: This is just a reordering of the basis, i.e. :math:`([u_1,u_2,u_3], \mathbb{R}) \mapsto ([u_2,u_3,u_1], \mathbb{R})` 2. *Product*: Topological product of the coordinate systems (with a small twist). Given two coordinate systems :math:`([u_1,u_2,u_3], \mathbb{R}), ([v_1, v_2], \mathbb{Z})` the product is represented as .. math:: ([u_1,u_2,u_3], \mathbb{R}) \times ([v_1, v_2], \mathbb{Z}) \mapsto ([u_1,u_2,u_3,v_1,v_2], \mathbb{R})`. Note that the resulting coordinate system is real valued whereas one of the input coordinate systems was integer valued. We can always embed :math:`\mathbb{Z}` into :math:`\mathbb{R}`. If one of them is complex valued, the resulting coordinate system is complex valued. In the code, this is handled by attempting to find a safe builtin numpy.dtype for the two (or more) given coordinate systems. Mappings -------- 1. *Inverse*: Given a mapping :math:`M=(D,R,f)` if the function :math:`f` is invertible, this is just the obvious :math:`M^{-1}=(R, D, f^{-1})`. 2. *Composition*: Given two mappings, :math:`M_f=(D_f, R_f, f)` and :math:`M_g=(D_g, R_g, g)` if :math:`D_f == R_g` then the composition is well defined and the composition of the mappings :math:`[M_f,M_g]` is just :math:`(D_g, R_f, f \circ g)`. 3. *Reorder domain / range*: Given a mapping :math:`M=(D=[i,j,k], R=[x,y,z], f)` you might want to specify that we've changed the domain by changing the ordering of its basis to :math:`[k,i,j]`. Call the new domain :math:`D'`. This is represented by the composition of the mappings :math:`[M, O]` where :math:`O=(D', D, I_D^{-1} \circ f_O \circ I_{D'})` and for :math:`a,b,c \in \mathbb{R}`: .. math:: f_O(a,b,c) = (b,c,a). 4. 
*Linearize*: Possibly less used: since we know that :math:`f` must map one of :math:`\mathbb{Z}^n, \mathbb{R}^n, \mathbb{C}^n` to one of :math:`\mathbb{Z}^m, \mathbb{R}^m, \mathbb{C}^m`, we might be able to differentiate it at a point :math:`p \in D`, yielding its 1st order Taylor approximation .. math:: f_p(d) = f(d) + Df_p(d-p) which is an affine function, thus creating an affine mapping :math:`(D, R, f_p)`. Affine functions are discussed in more detail below. 5. *Product*: Given two mappings :math:`M_1=(D_1,R_1,f_1), M_2=(D_2, R_2, f_2)` we define their product as the mapping :math:`(D_1 + D_2, R_1 + R_2, f_1 \otimes f_2)` where .. math:: (f_1 \otimes f_2)(d_1, d_2) = (f_1(d_1), f_2(d_2)). Above, we have taken the liberty of expressing the product of the coordinate systems, say, :math:`D_1=([u_1, \dots, u_n], \mathbb{R}), D_2=([v_1, \dots, v_m], \mathbb{C})` as a python addition of lists. The name *product* for this operation is not necessarily canonical. If the two coordinate systems are vector spaces and the function is linear, then we might call this map the *direct sum*, because its domain and range are direct sums of vector spaces. The term *product* here refers to the fact that the domain and range are true topological products. Affine mappings --------------- An *affine mapping* is one in which the function :math:`f:D \rightarrow R` is an affine function. That is, it can be written as :math:`f(d) = Ad + b` for :math:`d \in D`, for some :math:`n_R \times n_D` matrix :math:`A` with entries that are in one of :math:`\mathbb{Z}, \mathbb{R}, \mathbb{C}`. Strictly speaking, this is a slight abuse of notation, because :math:`d` is a point in :math:`D`, not a tuple of real (or integer or complex) numbers. The matrix :math:`A` represents a linear transformation from :math:`D` to :math:`R` in a particular choice of bases for :math:`D` and :math:`R`. Let us revisit some of the operations on a mapping as applied to *affine mappings*, which we write as a tuple :math:`M=(D, R, T)` with :math:`T` the representation of the :math:`(A,b)` in homogeneous coordinates. 1. *Inverse*: If :math:`T` is invertible, this is just the tuple :math:`M^{-1}=(R, D, T^{-1})`. 2. *Composition*: The composition of two affine mappings :math:`[(D_2, R_2, T_2), (D_1,R_1,T_1)]` is defined whenever :math:`R_1==D_2` and is the tuple :math:`(D_1, R_2, T_2 T_1)`. 3. *Reorder domain*: A reordering of the domain of an affine mapping :math:`M=(D, R, T)` can be represented by a :math:`(n_D+1) \times (n_D+1)` permutation matrix :math:`P` (in which the last coordinate is unchanged -- remember we are in homogeneous coordinates). Hence a reordering of :math:`D` to :math:`D'` can be represented as :math:`(D', R, TP)`. Alternatively, it is the composition of the affine mappings :math:`[M,(\tilde{D}, D, P)]`. 4. *Reorder range*: A reordering of the range can be represented by a :math:`(n_R+1) \times (n_R+1)` permutation matrix :math:`\tilde{P}`. Hence a reordering of :math:`R` to :math:`R'` can be represented as :math:`(D, \tilde{R}, \tilde{P}T)`. Alternatively, it is the composition of the affine mappings :math:`[(R, \tilde{R}, \tilde{P}), M]`. 5. *Linearize*: Because the mapping :math:`M=(D,R,T)` is already affine, this leaves it unchanged. 6. *Product*: Given two affine mappings :math:`M_1=(D_1,R_1,T_1)` and :math:`M_2=(D_2,R_2,T_2)` the product is the tuple .. math:: \left(D_1+D_2,R_1+R_2, \begin{pmatrix} T_1 & 0 \\ 0 & T_2 \end{pmatrix} \right).
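To make these rules concrete, here is a small sketch of the inverse, composition and domain-reordering operations using plain numpy arrays of homogeneous :math:`4 \times 4` matrices, independent of any particular nipy class; the particular matrices are made-up illustrations::

    import numpy as np

    # An affine mapping T from voxel coordinates [i, j, k] to world [x, y, z],
    # in homogeneous coordinates: 2 mm isotropic voxels plus a translation
    T = np.array([[2., 0., 0., 10.],
                  [0., 2., 0., 20.],
                  [0., 0., 2., 30.],
                  [0., 0., 0., 1.]])

    # Inverse: the mapping (R, D, T^{-1})
    T_inv = np.linalg.inv(T)

    # Composition with a second affine S defined on the range of T:
    # the composed mapping is represented by the matrix product S T
    S = np.eye(4)
    S[:3, 3] = [-5., 0., 5.]      # a pure translation in world space
    ST = np.dot(S, T)

    # Reorder the domain from [i, j, k] to [k, i, j]: right-multiply by the
    # permutation matrix P that sends (k, i, j, 1) back to (i, j, k, 1)
    P = np.array([[0., 1., 0., 0.],
                  [0., 0., 1., 0.],
                  [1., 0., 0., 0.],
                  [0., 0., 0., 1.]])
    TP = np.dot(T, P)

    # Check: applying TP to (k, i, j) gives the same world point as T on (i, j, k)
    ijk1 = np.array([3., 4., 5., 1.])
    kij1 = ijk1[[2, 0, 1, 3]]
    assert np.allclose(np.dot(TP, kij1), np.dot(T, ijk1))

This is essentially the bookkeeping that the coordinate map classes described below take care of, with the coordinate systems carried along explicitly.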
3-dimensional affine mappings ----------------------------- For an Image, by far the most common mappings associated with it are affine, and these are usually maps from a real 3-dimensional domain to a real 3-dimensional range. These can be represented by the ubiquitous :math:`4 \times 4` matrix (the representation of the affine mapping in homogeneous coordinates), along with choices for the axes, i.e. :math:`[i,j,k]` and the spatial coordinates, i.e. :math:`[x,y,z]`. We will revisit some of the operations on mappings as applied specifically to 3-dimensional affine mappings which we write as a tuple :math:`A=(D, R, T)` where :math:`T` is an invertible :math:`4 \times 4` transformation matrix with real entries. 1. *Inverse*: Because we have assumed that :math:`T` is invertible, this is just the tuple :math:`(([x,y,z], \mathbb{R}), ([i,j,k], \mathbb{R}), T^{-1})`. 2. *Composition*: Given two 3-dimensional affine mappings :math:`M_1=(D_1,R_1, T_1), M_2=(D_2,R_2,T_2)`, the composition of :math:`[M_2,M_1]` yields another 3-dimensional affine mapping whenever :math:`R_1 == D_2`. That is, it yields :math:`(D_1, R_2, T_2T_1)`. 3. *Reorder domain*: A reordering of the domain can be represented by a :math:`4 \times 4` permutation matrix :math:`P` (with its last coordinate not changing). Hence the reordering of :math:`D=([i,j,k], \mathbb{R})` to :math:`([k,i,j], \mathbb{R})` can be represented as :math:`(([k,i,j], \mathbb{R}), R, TP)`. 4. *Reorder range*: A reordering of the range can also be represented by a :math:`4 \times 4` permutation matrix :math:`\tilde{P}` (with its last coordinate not changing). Hence the reordering of :math:`R=([x,y,z], \mathbb{R})` to :math:`([z,x,y], \mathbb{R})` can be represented as :math:`(D, ([z,x,y], \mathbb{R}), \tilde{P}T)`. 5. *Linearize*: Just as for a general affine mapping, this does nothing. 6. *Product*: Because we are dealing with only 3-dimensional mappings here, it is impossible to use the product because that would give a mapping between spaces of dimension higher than 3. Coordinate maps --------------- As noted above, *coordinate maps* are equivalent to *mappings* through the bijection .. math:: ((u_D, \mathbb{R}), (u_R, \mathbb{R}), \tilde{f}) \leftrightarrow (D, R, I_R^{-1} \circ \tilde{f} \circ I_D) So, any manipulations on *mappings*, *affine mappings* or *3-dimensional affine mappings* can be carried out on *coordinate maps*, *affine coordinate maps* or *3-dimensional affine coordinate maps*. Implementation ============== Going from this mathematical description to code is fairly straightforward. 1. A *coordinate system* is implemented by the class *CoordinateSystem* in the module :mod:`nipy.core.reference.coordinate_system`. Its constructor takes a list of names, naming the basis vectors of the *coordinate system*, and an optional built-in numpy scalar dtype such as np.float32. It has no interesting methods of any kind. But there is a module level function *product* which implements the notion of the product of *coordinate systems*. 2. A *coordinate map* is implemented by the class *CoordinateMap* in the module :mod:`nipy.core.reference.coordinate_map`. Its constructor has a signature *(mapping, input_coords(=domain), output_coords(=range))*, along with an optional argument *inverse_mapping* specifying the inverse of *mapping*. This is a slightly different order from the :math:`(D, R, f)` order of this document.
As noted above, the tuple :math:`(D, R, f)` has some redundancy because the function :math:`f` must know its domain, and, implicitly its range. In :mod:`numpy`, it is impractical to really pass :math:`f` to the constructor because :math:`f` would expect something of *dtype* :math:`D` and should return someting of *dtype* :math:`R`. Therefore, *mapping* is actually a callable that represents the function :math:`\tilde{f} = I_R \circ f \circ I_D^{-1}`. Of course, the function :math:`f` can be recovered as :math:`f` = I_R^{-1} \circ \tilde{f} I_D`. In code, :math:`f` is roughly equivalent to: >>> from nipy.core.api import CoordinateMap, CoordinateSystem >>> in_cs = CoordinateSystem('ijk', 'voxels') >>> out_cs = CoordinateSystem('xyz', 'mm') >>> map = lambda x : x + 1 >>> coordmap = CoordinateMap(in_cs, out_cs, map) >>> domain = coordmap.function_domain >>> range = coordmap.function_range >>> f_tilde = coordmap.function >>> in_dtype = domain.coord_dtype >>> out_dtype = range.dtype >>> def f(d): ... return f_tilde(d.view(in_dtype)).view(out_dtype) The class *CoordinateMap* has an *inverse* property and there are module level functions called *product, compose, linearize* and it has methods *reordered_input, reordered_output*. For more detail on the ideas behind the coordmap design, see :ref:`coordmp-discussion` nipy-0.3.0/doc/users/plots/000077500000000000000000000000001210344137400155375ustar00rootroot00000000000000nipy-0.3.0/doc/users/plots/amplitudes.py000066400000000000000000000006641210344137400202660ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This figure is meant to represent an event-type design with events at times [0,4,8,12,16] and amplitudes [0,1.1,2.3,0.9,0.3]. """ import pylab import numpy as np pylab.scatter([0,4,8,12,16], [0,1.1,2.3,0.9,0.3], c='r', marker='o') a = pylab.gca() a.set_yticks([0,2]) a.set_xlabel('Time') a.set_ylabel('Amplitude') nipy-0.3.0/doc/users/plots/block.py000066400000000000000000000012521210344137400172030ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This figure is meant to represent an event-type with Faces presented at times [0,4,8,12,16] and Objects presented at [2,6,10,14,18]. There are two values for Y: one for 'Face' and one for 'Object' """ import pylab import numpy as np for t in [0,4,8,12,16]: pylab.plot([t,t+0.5], [1,1], c='r', label='Face', linewidth=3) for t in [2,6,10,14,18]: pylab.plot([t,t+0.5], [0,0], c='b', label='Object', linewidth=3) a = pylab.gca() a.set_ylim([-0.1,1.1]) a.set_yticks([0,1]) a.set_yticklabels(['Object', 'Face']) a.set_xlim([-0.5,10]) a.set_xlabel('Time') nipy-0.3.0/doc/users/plots/event.py000066400000000000000000000012111210344137400172250ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This figure is meant to represent an event-type with Faces presented at times [0,4,8,12,16] and Objects presented at [2,6,10,14,18]. 
There are two values for Y: one for 'Face' and one for 'Object' """ import pylab import numpy as np pylab.scatter([0,4,8,12,16], [1,1,1,1,1], c='r', marker='o', label='Face') pylab.scatter([2,6,10,14,18], [0,0,0,0,0], c='b', marker='o', label='Object') a = pylab.gca() a.set_ylim([-0.1,1.1]) a.set_yticks([0,1]) a.set_yticklabels(['Object', 'Face']) a.set_xlim([-0.5,10]) a.set_xlabel('Time') nipy-0.3.0/doc/users/plots/event_amplitude.py000066400000000000000000000016451210344137400213040ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import numpy as np import pylab from nipy.modalities.fmri.utils import events, Symbol, lambdify_t from nipy.modalities.fmri.hrf import glover # Symbol for amplitude a = Symbol('a') # Some event onsets regularly spaced onsets = np.linspace(0,50,6) # Make amplitudes from onset times (greater as function of time) amplitudes = onsets[:] # Flip even numbered amplitudes amplitudes = amplitudes * ([-1, 1] * 3) # Make event functions evs = events(onsets, amplitudes=amplitudes, g=a + 0.5 * a**2, f=glover) # Real valued function for symbolic events real_evs = lambdify_t(evs) # Time points at which to sample t_samples = np.linspace(0,60,601) pylab.plot(t_samples, real_evs(t_samples), c='r') for onset, amplitude in zip(onsets, amplitudes): pylab.plot([onset, onset],[0, 25 * amplitude], c='b') pylab.show() nipy-0.3.0/doc/users/plots/hrf.py000066400000000000000000000007601210344137400166730ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Plot of the canonical Glover HRF """ import numpy as np from nipy.modalities.fmri import hrf, utils import matplotlib.pyplot as plt # hrf.glover is a symbolic function; get a function of time to work on arrays hrf_func = utils.lambdify_t(hrf.glover(utils.T)) t = np.linspace(0,25,200) plt.plot(t, hrf_func(t)) a=plt.gca() a.set_xlabel(r'$t$') a.set_ylabel(r'$h_{can}(t)$') nipy-0.3.0/doc/users/plots/hrf_delta.py000066400000000000000000000007751210344137400200520ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This plot demonstrates a neuronal model that is a sum of delta functions times coefficient values """ import matplotlib.pyplot as plt # Coefficients for a and b ba = 1 bb = -2 # Times for a and b ta = [0,4,8,12,16] tb = [2,6,10,14,18] for t in ta: plt.plot([t,t],[0,ba],c='r') for t in tb: plt.plot([t,t],[0,bb],c='b') a = plt.gca() a.set_xlabel(r'$t$') a.set_ylabel(r'$n(t)$') nipy-0.3.0/doc/users/plots/hrf_different.py000066400000000000000000000016461210344137400207250ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This example uses a different HRF for different event types """ import numpy as np import matplotlib.pyplot as plt from nipy.modalities.fmri import hrf from nipy.modalities.fmri.utils import T, lambdify_t # HRFs as functions of (symbolic) time glover = hrf.glover(T) afni = hrf.afni(T) ta = [0,4,8,12,16]; tb = [2,6,10,14,18] ba = 1; bb = -2 na = ba * sum([glover.subs(T, T - t) for t in ta]) nb = bb * sum([afni.subs(T, T - t) for t in tb]) nav = lambdify_t(na) nbv = lambdify_t(nb) t = np.linspace(0,30,200) plt.plot(t, nav(t), c='r', label='Face') plt.plot(t, nbv(t), c='b', label='Object') plt.plot(t, nbv(t)+nav(t), c='g', label='Combined') for t in 
ta: plt.plot([t,t],[0,ba*0.5],c='r') for t in tb: plt.plot([t,t],[0,bb*0.5],c='b') plt.plot([0,30], [0,0],c='#000000') plt.legend() plt.show() nipy-0.3.0/doc/users/plots/neuronal_block.py000066400000000000000000000017071210344137400211130ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This figure is meant to represent the neuronal block model with Faces at times [0,4,8,12,16] and Objects presented at [2,6,10,14,18] each presented for 0.5 seconds and a coefficient of +1 for Faces, -2 for Objects. """ import pylab import numpy as np from sympy import Symbol, Piecewise, lambdify ta = [0,4,8,12,16]; tb = [2,6,10,14,18] ba = Symbol('ba'); bb = Symbol('bb'); t = Symbol('t') fa = sum([Piecewise((0, (t<_t)), ((t-_t)/0.5, (t<_t+0.5)), (1, (t >= _t+0.5))) for _t in ta])*ba fb = sum([Piecewise((0, (t<_t)), ((t-_t)/0.5, (t<_t+0.5)), (1, (t >= _t+0.5))) for _t in tb])*bb N = fa+fb Nn = N.subs(ba,1) Nn = Nn.subs(bb,-2) NNl = lambdify(t, Nn) tt = np.linspace(-1,21,121) pylab.plot(tt, [NNl(float(_t)) for _t in tt]) a = pylab.gca() a.set_ylim([-5.5,1.5]) a.set_ylabel('Neuronal (cumulative)') a.set_xlabel('Time') pylab.show() nipy-0.3.0/doc/users/plots/neuronal_event.py000066400000000000000000000013711210344137400211370ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This figure is meant to represent the neuronal event-related model and a coefficient of +1 for Faces, -2 for Objects. """ import pylab import numpy as np from sympy import Symbol, Heaviside, lambdify ta = [0,4,8,12,16]; tb = [2,6,10,14,18] ba = Symbol('ba'); bb = Symbol('bb'); t = Symbol('t') fa = sum([Heaviside(t-_t) for _t in ta]) * ba fb = sum([Heaviside(t-_t) for _t in tb]) * bb N = fa+fb Nn = N.subs(ba,1) Nn = Nn.subs(bb,-2) Nn = lambdify(t, Nn) tt = np.linspace(-1,21,1201) pylab.step(tt, [Nn(_t) for _t in tt]) a = pylab.gca() a.set_ylim([-5.5,1.5]) a.set_ylabel('Neuronal (cumulative)') a.set_xlabel('Time') pylab.show() nipy-0.3.0/doc/users/plots/random_amplitudes.py000066400000000000000000000010041210344137400216130ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This figure is meant to represent an event-type design with events at times [0,4,8,12,16] and random amplitudes centered at [0,1.1,2.3,0.9,0.3]. """ import pylab import numpy as np for t, y in zip([0,4,8,12,16], [0,1.1,2.3,0.9,0.3]): pylab.plot([t,t], [y-0.1,y+0.1], c='r', linewidth=3) a = pylab.gca() a.set_yticks([0,2]) a.set_xlim([-1,18]) a.set_xlabel('Time') a.set_ylabel('Amplitude') nipy-0.3.0/doc/users/plots/random_amplitudes_times.py000066400000000000000000000011201210344137400230130ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This figure is meant to represent an event-type design with events at random times centered [0,2,4,6,8] and random amplitudes centered [0,1.1,2.3,0.9,0.3]. 
""" import pylab, matplotlib import numpy as np for t, y in zip([0,4,8,12,16], [0,1.1,2.3,0.9,0.3]): dt = np.array([-0.5,0.5,0.5,-0.5]) dy = np.array([-0.1,-0.1,0.1,0.1]) pylab.fill(t+dt,y+dy, 'r') a = pylab.gca() a.set_yticks([0,2]) a.set_xlim([-1,18]) a.set_xlabel('Time') a.set_ylabel('Amplitude') nipy-0.3.0/doc/users/plots/sinusoidal.py000066400000000000000000000010121210344137400202550ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This figure is meant to represent a continuous stimulus having two features, Orientation and Contrast """ import pylab import numpy as np t = np.linspace(0,10,1000) o = np.sin(2*np.pi*(t+1)) * np.exp(-t/10) c = np.sin(2*np.pi*(t+0.2)/4) * np.exp(-t/12) pylab.plot(t, o, label='Orientation') pylab.plot(t, c+2.1, label='Contrast') pylab.legend() a = pylab.gca() a.set_yticks([]) a.set_xlabel('Time') nipy-0.3.0/doc/users/scipy_orientation.rst000066400000000000000000000044441210344137400207000ustar00rootroot00000000000000============================== Geography of the Scipy world ============================== in which we briefly describe the various components you are likely to come across when writing scientific python software in general, and NIPY code in particular. Numpy ===== NumPy_ is the basic Python array-manipulation package. It allows you to create, slice and manipulate N-D arrays at near C speed. It also has basic arithmetical and mathematical functions (such as sum, mean, and log, exp, sin, cos), matrix multiplication (``numpy.dot``), Fourier transforms (``numpy.fft``) and basic linear algebra ``numpy.linalg``. SciPy ===== Scipy_ is a large umbrella project that builds on Numpy (and depends on it). It includes a variety of high level science and engineering modules together as a single package. There are extended modules for linear algebra (including wrappers to BLAS and LAPACK), optimization, integration, sparse matrices, special functions, FFTs, signal and image processing, genetic algorithms, ODE solvers, and others. Matplotlib ========== Matplotlib_ is a 2D plotting package that depends on NumPy_. It has a simple matlab-like plotting syntax that makes it relatively easy to create good-looking plots, histograms and images with a small amount of code. As well as this simplified Matlab-like syntax, There is also a more powerful and flexible object-oriented interface. Ipython ======= Ipython_ is an interactive shell for python that has various features of the interactive shell of Matlab, Mathematica and R. It works particularly well with Matplotlib_, but is also an essential tool for interactive code development and code exploration. It contains libraries for creainteracting with parallel jobs on clusters or over several CPU cores in a fairly transparent way. Cython ====== Cython_ is a development language that allows you to write a combination of Python and C-like syntax to generate Python extensions. It is especially good for linking C libraries to Python in a readable way. It is also an excellent choice for optimization of Python code, because it allows you to drop down to C or C-like code at your bottlenecks without losing much of the readability of Python. Mayavi ====== Mayavi_ is a high-level python interface to the VTK_ plotting libraries. .. include:: ../links_names.txt nipy-0.3.0/doc/users/tutorial.rst000066400000000000000000000002051210344137400167700ustar00rootroot00000000000000.. _tutorial-index: =========== Tutorials =========== .. 
toctree:: :maxdepth: 2 basic_io coordinate_map glm_spec nipy-0.3.0/examples/000077500000000000000000000000001210344137400143065ustar00rootroot00000000000000nipy-0.3.0/examples/affine_registration.py000077500000000000000000000065311210344137400207120ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This script requires the nipy-data package to run. It is an example of inter-subject affine registration using two MR-T1 images from the sulcal 2000 database acquired at CEA, SHFJ, Orsay, France. The source is 'ammon' and the target is 'anubis'. Running it will result in a resampled ammon image being created in the current directory. """ from __future__ import print_function # Python 2/3 compatibility from optparse import OptionParser import time import numpy as np from nipy.algorithms.registration import HistogramRegistration, resample from nipy.utils import example_data from nipy import load_image, save_image print('Scanning data directory...') # Input images are provided with the nipy-data package source = 'ammon' target = 'anubis' source_file = example_data.get_filename('neurospin', 'sulcal2000', 'nobias_' + source + '.nii.gz') target_file = example_data.get_filename('neurospin', 'sulcal2000', 'nobias_' + target + '.nii.gz') # Parse arguments parser = OptionParser(description=__doc__) doc_similarity = 'similarity measure: cc (correlation coefficient), \ cr (correlation ratio), crl1 (correlation ratio in L1 norm), \ mi (mutual information), nmi (normalized mutual information), \ pmi (Parzen mutual information), dpmi (discrete Parzen mutual \ information). Default is crl1.' doc_interp = 'interpolation method: tri (trilinear), pv (partial volume), \ rand (random). Default is pv.' doc_optimizer = 'optimization method: simplex, powell, steepest, cg, bfgs. \ Default is powell.' 
parser.add_option('-s', '--similarity', dest='similarity', help=doc_similarity) parser.add_option('-i', '--interp', dest='interp', help=doc_interp) parser.add_option('-o', '--optimizer', dest='optimizer', help=doc_optimizer) opts, args = parser.parse_args() # Optional arguments similarity = 'crl1' interp = 'pv' optimizer = 'powell' if not opts.similarity == None: similarity = opts.similarity if not opts.interp == None: interp = opts.interp if not opts.optimizer == None: optimizer = opts.optimizer # Print messages print('Source brain: %s' % source) print('Target brain: %s' % target) print('Similarity measure: %s' % similarity) print('Optimizer: %s' % optimizer) # Get data print('Fetching image data...') I = load_image(source_file) J = load_image(target_file) # Perform affine registration # The output is an array-like object such that # np.asarray(T) is a customary 4x4 matrix print('Setting up registration...') tic = time.time() R = HistogramRegistration(I, J, similarity=similarity, interp=interp) T = R.optimize('affine', optimizer=optimizer) toc = time.time() print(' Registration time: %f sec' % (toc - tic)) # Resample source image print('Resampling source image...') tic = time.time() #It = resample2(I, J.coordmap, T.inv(), J.shape) It = resample(I, T.inv(), reference=J) toc = time.time() print(' Resampling time: %f sec' % (toc - tic)) # Save resampled source outroot = source + '_TO_' + target outimg = outroot + '.nii.gz' print ('Saving resampled source in: %s' % outimg) save_image(It, outimg) # Save transformation matrix outparams = outroot + '.npy' np.save(outparams, np.asarray(T)) nipy-0.3.0/examples/algorithms/000077500000000000000000000000001210344137400164575ustar00rootroot00000000000000nipy-0.3.0/examples/algorithms/README.txt000066400000000000000000000002151210344137400201530ustar00rootroot00000000000000################### Algorithms examples ################### Demos of mixture model and clustering algorithms. Examples require matplotlib. nipy-0.3.0/examples/algorithms/bayesian_gaussian_mixtures.py000077500000000000000000000044051210344137400244640ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from __future__ import print_function # Python 2/3 compatibility __doc__ = """ Example of a demo that fits a Bayesian Gaussian Mixture Model (GMM) to a dataset. Variational bayes and Gibbs estimation are sucessively run on the same dataset. Requires matplotlib Author : Bertrand Thirion, 2008-2010 """ print(__doc__) import numpy as np import numpy.random as nr try: import matplotlib.pyplot as plt except ImportError: raise RuntimeError("This script needs the matplotlib library") import nipy.algorithms.clustering.bgmm as bgmm from nipy.algorithms.clustering.gmm import plot2D dim = 2 ############################################################################### # 1. generate a 3-components mixture x1 = nr.randn(25, dim) x2 = 3 + 2 * nr.randn(15, dim) x3 = np.repeat(np.array([-2, 2], ndmin=2), 10, 0) + 0.5 * nr.randn(10, dim) x = np.concatenate((x1, x2, x3)) ############################################################################### #2. 
fit the mixture with a bunch of possible models, using Variational Bayes krange = range(1, 10) be = - np.inf for k in krange: b = bgmm.VBGMM(k, dim) b.guess_priors(x) b.initialize(x) b.estimate(x) ek = float(b.evidence(x)) if ek > be: be = ek bestb = b print(k, 'classes, free energy:', ek) ############################################################################### # 3. plot the result z = bestb.map_label(x) plot2D(x, bestb, z, verbose=0) plt.title('Variational Bayes') ############################################################################### # 4. the same, with the Gibbs GMM algo niter = 1000 krange = range(1, 6) bbf = - np.inf for k in krange: b = bgmm.BGMM(k, dim) b.guess_priors(x) b.initialize(x) b.sample(x, 100) w, cent, prec, pz = b.sample(x, niter=niter, mem=1) bplugin = bgmm.BGMM(k, dim, cent, prec, w) bplugin.guess_priors(x) bfk = bplugin.bayes_factor(x, pz.astype(np.int), nperm=120) print(k, 'classes, evidence:', bfk) if bfk > bbf: bestk = k bbf = bfk bbgmm = bplugin z = bbgmm.map_label(x) plot2D(x, bbgmm, z, verbose=0) plt.title('Gibbs sampling') plt.show() nipy-0.3.0/examples/algorithms/clustering_comparisons.py000077500000000000000000000031411210344137400236270ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from __future__ import print_function # Python 2/3 compatibility __doc__ = """ Simple demo that partitions a smooth field into 10 clusters. In most cases, Ward's clustering behaves best. Requires matplotlib Author: Bertrand Thirion, 2009 """ print(__doc__) import numpy as np import numpy.random as nr from scipy.ndimage import gaussian_filter try: import matplotlib.pyplot as plt except ImportError: raise RuntimeError("This script needs the matplotlib library") from nipy.algorithms.graph.field import Field dx = 50 dy = 50 dz = 1 nbseeds = 10 data = gaussian_filter( np.random.randn(dx, dy), 2) F = Field(dx * dy * dz) xyz = np.reshape(np.indices((dx, dy, dz)), (3, dx * dy * dz)).T.astype(np.int) F.from_3d_grid(xyz, 6) F.set_field(data) seeds = np.argsort(nr.rand(F.V))[:nbseeds] seeds, label, J0 = F.geodesic_kmeans(seeds) wlabel, J1 = F.ward(nbseeds) seeds, label, J2 = F.geodesic_kmeans(seeds, label=wlabel.copy(), eps=1.e-7) print('Inertia values for the 3 algorithms: ') print('Geodesic k-means: ', J0, 'Wards: ', J1, 'Wards + gkm: ', J2) plt.figure(figsize=(8, 4)) plt.subplot(1, 3, 1) plt.imshow(np.reshape(data, (dx, dy)), interpolation='nearest') plt.title('Input data') plt.subplot(1, 3, 2) plt.imshow(np.reshape(wlabel, (dx, dy)), interpolation='nearest') plt.title('Ward clustering \n into 10 components') plt.subplot(1, 3, 3) plt.imshow(np.reshape(label, (dx, dy)), interpolation='nearest') plt.title('geodesic kmeans clust. 
\n into 10 components') plt.show() nipy-0.3.0/examples/algorithms/gaussian_mixture_models.py000077500000000000000000000023041210344137400237650ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from __future__ import print_function # Python 2/3 compatibility __doc__ = """ Example of a demo that fits a Gaussian Mixture Model (GMM) to a dataset The possible number of clusters is in the [1,10] range The proposed algorithm correctly selects a solution with 2 or 3 classes Requires matplotlib Author : Bertrand Thirion, 2008-2009 """ print(__doc__) import numpy as np try: import matplotlib.pyplot as plt except ImportError: raise RuntimeError("This script needs the matplotlib library") import nipy.algorithms.clustering.gmm as gmm dim = 2 # 1. generate a 3-components mixture x1 = np.random.randn(100, dim) x2 = 3 + 2 * np.random.randn(50, dim) x3 = np.repeat(np.array([- 2, 2], ndmin=2), 30, 0) \ + 0.5 * np.random.randn(30, dim) x = np.concatenate((x1, x2, x3)) # 2. fit the mixture with a bunch of possible models krange = range(1, 5) lgmm = gmm.best_fitting_GMM(x, krange, prec_type='diag', niter=100, delta=1.e-4, ninit=1, verbose=0) # 3, plot the result z = lgmm.map_label(x) gmm.plot2D(x, lgmm, z, verbose=0) plt.show() nipy-0.3.0/examples/algorithms/mixed_effects.py000066400000000000000000000036231210344137400216420ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from __future__ import print_function # Python 2/3 compatibility __doc__ = """ This example illustrates the impact of using a mixed-effects model for the detection of the effects, when the first-level variance is known: If the first level variance is very variable across observations, then taking it into account gives more relibale detections, as seen in an ROC curve. Requires matplotlib. Author: Bertrand Thirion, 2012 """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from nipy.algorithms.statistics.mixed_effects_stat import ( generate_data, one_sample_ttest, t_stat) # generate the data N, P = 15, 500 V1 = np.random.randn(N, P) ** 2 effects = 0.5 * (np.random.randn(P) > 0) Y = generate_data(np.ones(N), effects, .25, V1) # compute the statistics T1 = one_sample_ttest(Y, V1, n_iter=5) T1 = [T1[effects == x] for x in np.unique(effects)] T2 = [t_stat(Y)[effects == x] for x in np.unique(effects)] # Derive ROC curves ROC1 = np.array([np.sum(T1[1] > - x) for x in np.sort(- T1[0])])\ * 1. / T1[1].size ROC2 = np.array([np.sum(T2[1] > - x) for x in np.sort(- T2[0])])\ * 1. 
/ T1[1].size # make a figure FIG = plt.figure(figsize=(10, 5)) AX = FIG.add_subplot(121) AX.plot(np.linspace(0, 1, len(ROC1)), ROC1, label='mixed effects') AX.plot(np.linspace(0, 1, len(ROC2)), ROC2, label='t test') AX.set_xlabel('false positives') AX.set_ylabel('true positives') AX.set_title('ROC curves for the detection of effects', fontsize=12) AX.legend(loc='lower right') AX = FIG.add_subplot(122) AX.boxplot(T1, positions=[-0.1, .9]) AX.boxplot(T2, positions=[0.1, 1.1]) AX.set_xticks([0, 1]) AX.set_xlabel('simulated effects') AX.set_ylabel('decision statistic') AX.set_title('left: mixed effects model, \n right: standard t test', fontsize=12) plt.show() nipy-0.3.0/examples/algorithms/ward_clustering.py000077500000000000000000000037771210344137400222460ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from __future__ import print_function # Python 2/3 compatibility __doc__ = """ Demo ward clustering on a graph: various ways of forming clusters and dendrogram Requires matplotlib """ print(__doc__) import numpy as np from numpy.random import randn, rand try: import matplotlib.pyplot as plt except ImportError: raise RuntimeError("This script needs the matplotlib library") from nipy.algorithms.graph import knn from nipy.algorithms.clustering.hierarchical_clustering import ward # n = number of points, k = number of nearest neighbours n = 100 k = 5 # Set verbose to True to see more printed output verbose = False X = randn(n, 2) X[:np.ceil(n / 3)] += 3 G = knn(X, 5) tree = ward(G, X, verbose) threshold = .5 * n u = tree.partition(threshold) plt.figure(figsize=(12, 6)) plt.subplot(1, 3, 1) for i in range(u.max()+1): plt.plot(X[u == i, 0], X[u == i, 1], 'o', color=(rand(), rand(), rand())) plt.axis('tight') plt.axis('off') plt.title('clustering into clusters \n of inertia < %g' % threshold) u = tree.split(k) plt.subplot(1, 3, 2) for e in range(G.E): plt.plot([X[G.edges[e, 0], 0], X[G.edges[e, 1], 0]], [X[G.edges[e, 0], 1], X[G.edges[e, 1], 1]], 'k') for i in range(u.max() + 1): plt.plot(X[u == i, 0], X[u == i, 1], 'o', color=(rand(), rand(), rand())) plt.axis('tight') plt.axis('off') plt.title('clustering into 5 clusters') nl = np.sum(tree.isleaf()) validleaves = np.zeros(n) validleaves[:np.ceil(n / 4)] = 1 valid = np.zeros(tree.V, 'bool') valid[tree.isleaf()] = validleaves.astype('bool') nv = np.sum(validleaves) nv0 = 0 while nv > nv0: nv0 = nv for v in range(tree.V): if valid[v]: valid[tree.parents[v]]=1 nv = np.sum(valid) ax = plt.subplot(1, 3, 3) ax = tree.plot(ax) ax.set_title('Dendrogram') ax.set_visible(True) plt.show() if verbose: print('List of sub trees') print(tree.list_of_subtrees()) nipy-0.3.0/examples/compute_fmri_contrast.py000077500000000000000000000051401210344137400212710ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from __future__ import print_function # Python 2/3 compatibility import sys USAGE = """ usage : python %s [1x4-contrast] where [1x4-contrast] is optional and is something like 1,0,0,0 If you don't enter a contrast, 1,0,0,0 is the default. An activation image is displayed. This script requires the nipy-data package to run. It is an example of using a general linear model in single-subject fMRI data analysis context. Two sessions of the same subject are taken from the FIAC'05 dataset. The script also needs matplotlib installed. 
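Note that the contrast vector you pass is padded on the right with zeros to
the full width of each run's design matrix before the contrast is evaluated,
so only combinations of the first four regressors can be tested from the
command line.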
Author: Alexis Roche, Bertrand Thirion, 2009--2012. """ % sys.argv[0] __doc__ = USAGE import numpy as np try: import matplotlib.pyplot as plt except ImportError: raise RuntimeError("This script needs the matplotlib library") from nipy.labs.viz import plot_map, cm from nipy.modalities.fmri.glm import FMRILinearModel from nipy.utils import example_data # Optional argument - default value 1, 0, 0, 0 nargs = len(sys.argv) if nargs not in (1, 2, 5): print(USAGE) exit(1) if nargs == 1: # default no-argument case cvect = [1, 0, 0, 0] else: if nargs == 2: # contrast as one string args = sys.argv[1].split(',') elif nargs == 5: # contrast as sequence of strings args = [arg.replace(',', '') for arg in sys.argv[1:]] if len(args) != 4: print(USAGE) exit(1) try: cvect = [float(arg) for arg in args] except ValueError: print(USAGE) exit(1) # Input files fmri_files = [example_data.get_filename('fiac', 'fiac0', run) for run in ['run1.nii.gz', 'run2.nii.gz']] design_files = [example_data.get_filename('fiac', 'fiac0', run) for run in ['run1_design.npz', 'run2_design.npz']] mask_file = example_data.get_filename('fiac', 'fiac0', 'mask.nii.gz') # Load all the data multi_session_model = FMRILinearModel(fmri_files, design_files, mask_file) # GLM fitting multi_session_model.fit(do_scaling=True, model='ar1') # Compute the required contrast print('Computing test contrast image...') n_regressors = [np.load(f)['X'].shape[1] for f in design_files] con = [np.hstack((cvect, np.zeros(nr - len(cvect)))) for nr in n_regressors] z_map, = multi_session_model.contrast(con) # Show Z-map image mean_map = multi_session_model.means[0] plot_map(z_map.get_data(), z_map.get_affine(), anat=mean_map.get_data(), anat_affine=mean_map.get_affine(), cmap=cm.cold_hot, threshold=2.5, black_bg=True) plt.show() nipy-0.3.0/examples/create_tempimage.py000077500000000000000000000016221210344137400201570ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """This example shows how to create a temporary image to use during processing. The array is filled with zeros. """ import numpy as np from nipy import load_image, save_image from nipy.core.api import Image, vox2mni # create an array of zeros, the shape of your data array zero_array = np.zeros((91,109,91)) # create an image from our array. The image will be in MNI space img = Image(zero_array, vox2mni(np.diag([2, 2, 2, 1]))) # save the image to a file newimg = save_image(img, 'tempimage.nii.gz') # Example of creating a temporary image file from an existing image with a # matching coordinate map. img = load_image('tempimage.nii.gz') zeroarray = np.zeros(img.shape) zeroimg = Image(zeroarray, img.coordmap) newimg = save_image(zeroimg, 'another_tempimage.nii.gz') nipy-0.3.0/examples/data_package/000077500000000000000000000000001210344137400166725ustar00rootroot00000000000000nipy-0.3.0/examples/data_package/README.txt000066400000000000000000000003671210344137400203760ustar00rootroot00000000000000#################### Data package example #################### A directory containing the layout for a data package in the format that nipy uses. If you are thinking of making a new data package to work with nipy, this is a good place to start. 
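The example package provided here is laid out as::

    nipy-examplepkg/
        setup.py
        MANIFEST.in
        MANIFEST
        README.txt
        examplepkg/
            config.ini
            placeholder.txt

``setup.py`` reads the package version from ``examplepkg/config.ini``.  A
minimal sketch of checking that version yourself (Python 2 spelling; use the
``configparser`` module on Python 3)::

    from ConfigParser import SafeConfigParser

    config = SafeConfigParser()
    config.read('nipy-examplepkg/examplepkg/config.ini')
    print(config.get('DEFAULT', 'version'))  # prints: 0.1

See ``nipy-examplepkg/README.txt`` for how to rebuild the package for
distribution.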
nipy-0.3.0/examples/data_package/nipy-examplepkg/000077500000000000000000000000001210344137400220045ustar00rootroot00000000000000nipy-0.3.0/examples/data_package/nipy-examplepkg/MANIFEST000066400000000000000000000001211210344137400231270ustar00rootroot00000000000000MANIFEST.in README.txt setup.py examplepkg/config.ini examplepkg/placeholder.txt nipy-0.3.0/examples/data_package/nipy-examplepkg/MANIFEST.in000066400000000000000000000000451210344137400235410ustar00rootroot00000000000000graft examplepkg include MANIFEST.in nipy-0.3.0/examples/data_package/nipy-examplepkg/README.txt000066400000000000000000000005331210344137400235030ustar00rootroot00000000000000NIPY ``examplepkg`` data package -------------------------------- This is a NIPY data package. To rebuild this package for distribution: #. Change the files in the ``examplepkg`` directory #. Increase the minor or major version number in ``examplepkg/config.ini`` Then:: python setup.py sdist And upload the generated file for release. nipy-0.3.0/examples/data_package/nipy-examplepkg/examplepkg/000077500000000000000000000000001210344137400241415ustar00rootroot00000000000000nipy-0.3.0/examples/data_package/nipy-examplepkg/examplepkg/config.ini000066400000000000000000000000271210344137400261060ustar00rootroot00000000000000[DEFAULT] version = 0.1nipy-0.3.0/examples/data_package/nipy-examplepkg/examplepkg/placeholder.txt000066400000000000000000000000001210344137400271520ustar00rootroot00000000000000nipy-0.3.0/examples/data_package/nipy-examplepkg/setup.py000077500000000000000000000022611210344137400235220ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: ''' Installation script for nipy examplepkg package ''' import os from os.path import join as pjoin from distutils.core import setup try: import ConfigParser as cfg # Python 2 except ImportError: import configparser as cfg # Python 3 # The directory under --prefix, under which to store files OUTPUT_BASE = pjoin('share', 'nipy', 'nipy') # The directory in this directory to be copied into OUTPUT_BASE # such that // will exist PKG_BASE = 'examplepkg' DATA_FILES = [] for dirpath, dirnames, filenames in os.walk(PKG_BASE): files = [pjoin(dirpath, filename) for filename in filenames] DATA_FILES.append((pjoin(OUTPUT_BASE, dirpath), files)) config = cfg.SafeConfigParser() config.read(pjoin(PKG_BASE, 'config.ini')) setup( name = 'nipy-' + PKG_BASE, version = config.get('DEFAULT', 'version'), description='NIPY %s data package' % PKG_BASE, author='The NIPY team', url='http://neuroimaging.scipy.org', author_email='nipy-devel@neuroimaging.scipy.org', data_files = DATA_FILES, ) nipy-0.3.0/examples/fiac/000077500000000000000000000000001210344137400152105ustar00rootroot00000000000000nipy-0.3.0/examples/fiac/README.txt000066400000000000000000000025761210344137400167200ustar00rootroot00000000000000====================================== Analyzing the FIAC dataset with NIPY ====================================== This directory contains a set of scripts to complete an analysis of the Functional Image Analysis Contest (FIAC) dataset. The FIAC was conducted as part of the 11th Annual Meeting of the Organization for Human Brain Mapping (Toronto, 2005). For more information on the dataset, see [1]. In order to run the examples in this directory, you will need a copy of the curated data. We haven't yet succeeded in licensing this data for full release. 
Please see the latest version of this file on github for the current link to the data: https://github.com/nipy/nipy/blob/master/examples/fiac/README.txt ToDo ==== - Provide the raw data repository, with design csv files. - Integrate the scripts for curating the raw data. - Separate input from output directories. - Change ':' in contrast directory names to - or something else, as ':' is not a valid character in directory names under Windows and OSX. .. _here: http://FIXME/MISSING/DATA/ACCESS .. [1] Dehaene-Lambertz G, Dehaene S, Anton JL, Campagne A, Ciuciu P, Dehaene G, Denghien I, Jobert A, LeBihan D, Sigman M, Pallier C, Poline JB. Functional segregation of cortical language areas by sentence repetition. Hum Brain Mapp. 2006;27:360–371. http://www.pubmedcentral.nih.gov/articlerender.fcgi?artid=2653076#R11 nipy-0.3.0/examples/fiac/fiac_example.py000066400000000000000000000454401210344137400202060ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Example analyzing the FIAC dataset with NIPY. * Single run models with per-voxel AR(1). * Cross-run, within-subject models with optimal effect estimates. * Cross-subject models using fixed / random effects variance ratios. * Permutation testing for inference on cross-subject result. See ``parallel_run.py`` for a rig to run these analysis in parallel using the IPython parallel machinery. This script needs the pre-processed FIAC data. See ``README.txt`` and ``fiac_util.py`` for details. See ``examples/labs/need_data/first_level_fiac.py`` for an alternative approach to some of these analyses. """ #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- from __future__ import print_function # Python 2/3 compatibility # Stdlib from tempfile import NamedTemporaryFile from os.path import join as pjoin from copy import copy import warnings # Third party import numpy as np # From NIPY from nipy.algorithms.statistics.api import (OLSModel, ARModel, make_recarray, isestimable) from nipy.modalities.fmri.fmristat import hrf as delay from nipy.modalities.fmri import design, hrf from nipy.io.api import load_image, save_image from nipy.core import api from nipy.core.api import Image from nipy.core.image.image import rollimg from nipy.algorithms.statistics import onesample # Local import fiac_util as futil reload(futil) # while developing interactively #----------------------------------------------------------------------------- # Globals #----------------------------------------------------------------------------- SUBJECTS = tuple(range(5) + range(6, 16)) # No data for subject 5 RUNS = tuple(range(1, 5)) DESIGNS = ('event', 'block') CONTRASTS = ('speaker_0', 'speaker_1', 'sentence_0', 'sentence_1', 'sentence:speaker_0', 'sentence:speaker_1') GROUP_MASK = futil.load_image_fiac('group', 'mask.nii') TINY_MASK = np.zeros(GROUP_MASK.shape, np.bool) TINY_MASK[30:32,40:42,30:32] = 1 #----------------------------------------------------------------------------- # Public functions #----------------------------------------------------------------------------- # For group analysis def run_model(subj, run): """ Single subject fitting of FIAC model """ #---------------------------------------------------------------------- # Set initial parameters of the FIAC dataset #---------------------------------------------------------------------- # Number of volumes in the fMRI 
data nvol = 191 # The TR of the experiment TR = 2.5 # The time of the first volume Tstart = 0.0 # The array of times corresponding to each volume in the fMRI data volume_times = np.arange(nvol) * TR + Tstart # This recarray of times has one column named 't'. It is used in the # function design.event_design to create the design matrices. volume_times_rec = make_recarray(volume_times, 't') # Get a path description dictionary that contains all the path data relevant # to this subject/run path_info = futil.path_info_run(subj,run) #---------------------------------------------------------------------- # Experimental design #---------------------------------------------------------------------- # Load the experimental description from disk. We have utilities in futil # that reformat the original FIAC-supplied format into something where the # factorial structure of the design is more explicit. This has already # been run once, and get_experiment_initial() will simply load the # newly-formatted design description files (.csv) into record arrays. experiment, initial = futil.get_experiment_initial(path_info) # Create design matrices for the "initial" and "experiment" factors, saving # the default contrasts. # The function event_design will create design matrices, which in the case # of "experiment" will have num_columns = (# levels of speaker) * (# levels # of sentence) * len(delay.spectral) = 2 * 2 * 2 = 8. For "initial", there # will be (# levels of initial) * len([hrf.glover]) = 1 * 1 = 1. # Here, delay.spectral is a sequence of 2 symbolic HRFs that are described # in: # # Liao, C.H., Worsley, K.J., Poline, J-B., Aston, J.A.D., Duncan, G.H., # Evans, A.C. (2002). \'Estimating the delay of the response in fMRI # data.\' NeuroImage, 16:593-606. # The contrast definitions in ``cons_exper`` are a dictionary with keys # ['constant_0', 'constant_1', 'speaker_0', 'speaker_1', 'sentence_0', # 'sentence_1', 'sentence:speaker_0', 'sentence:speaker_1'] representing the # four default contrasts: constant, main effects + interactions, each # convolved with 2 HRFs in delay.spectral. For example, sentence:speaker_0 # is the interaction of sentence and speaker convolved with the first (=0) # of the two HRF basis functions, and sentence:speaker_1 is the interaction # convolved with the second (=1) of the basis functions. # XXX use the hrf __repr__ for naming contrasts X_exper, cons_exper = design.event_design(experiment, volume_times_rec, hrfs=delay.spectral) # The contrasts for 'initial' are ignored as they are "uninteresting" and # are included in the model as confounds. X_initial, _ = design.event_design(initial, volume_times_rec, hrfs=[hrf.glover]) # In addition to factors, there is typically a "drift" term. In this case, # the drift is a natural cubic spline with a not at the midpoint # (volume_times.mean()) vt = volume_times # shorthand drift = np.array( [vt**i for i in range(4)] + [(vt-vt.mean())**3 * (np.greater(vt, vt.mean()))] ) for i in range(drift.shape[0]): drift[i] /= drift[i].max() # We transpose the drift so that its shape is (nvol,5) so that it will have # the same number of rows as X_initial and X_exper. drift = drift.T # There are helper functions to create these drifts: design.fourier_basis, # design.natural_spline. 
Therefore, the above is equivalent (except for # the normalization by max for numerical stability) to # # >>> drift = design.natural_spline(t, [volume_times.mean()]) # Stack all the designs, keeping the new contrasts which has the same keys # as cons_exper, but its values are arrays with 15 columns, with the # non-zero entries matching the columns of X corresponding to X_exper X, cons = design.stack_designs((X_exper, cons_exper), (X_initial, {}), (drift, {})) # Sanity check: delete any non-estimable contrasts for k in cons.keys(): if not isestimable(cons[k], X): del(cons[k]) warnings.warn("contrast %s not estimable for this run" % k) # The default contrasts are all t-statistics. We may want to output # F-statistics for 'speaker', 'sentence', 'speaker:sentence' based on the # two coefficients, one for each HRF in delay.spectral cons['speaker'] = np.vstack([cons['speaker_0'], cons['speaker_1']]) cons['sentence'] = np.vstack([cons['sentence_0'], cons['sentence_1']]) cons['sentence:speaker'] = np.vstack([cons['sentence:speaker_0'], cons['sentence:speaker_1']]) #---------------------------------------------------------------------- # Data loading #---------------------------------------------------------------------- # Load in the fMRI data, saving it as an array. It is transposed to have # time as the first dimension, i.e. fmri[t] gives the t-th volume. fmri_im = futil.get_fmri(path_info) # an Image fmri_im = rollimg(fmri_im, 't') fmri = fmri_im.get_data() # now, it's an ndarray nvol, volshape = fmri.shape[0], fmri.shape[1:] nx, sliceshape = volshape[0], volshape[1:] #---------------------------------------------------------------------- # Model fit #---------------------------------------------------------------------- # The model is a two-stage model, the first stage being an OLS (ordinary # least squares) fit, whose residuals are used to estimate an AR(1) # parameter for each voxel. m = OLSModel(X) ar1 = np.zeros(volshape) # Fit the model, storing an estimate of an AR(1) parameter at each voxel for s in range(nx): d = np.array(fmri[:,s]) flatd = d.reshape((d.shape[0], -1)) result = m.fit(flatd) ar1[s] = ((result.resid[1:] * result.resid[:-1]).sum(0) / (result.resid**2).sum(0)).reshape(sliceshape) # We round ar1 to nearest one-hundredth and group voxels by their rounded # ar1 value, fitting an AR(1) model to each batch of voxels. # XXX smooth here? # ar1 = smooth(ar1, 8.0) ar1 *= 100 ar1 = ar1.astype(np.int) / 100. # We split the contrasts into F-tests and t-tests. 
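    # A t contrast is a single 1-D vector c, giving the scalar effect
    # dot(c, beta) together with its standard error and t statistic; an F
    # contrast is a 2-D array whose rows are tested jointly.  After
    # squeezing, the dimensionality of each contrast array is therefore
    # enough to tell the two apart, as the loop just below does.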
# XXX helper function should do this fcons = {}; tcons = {} for n, v in cons.items(): v = np.squeeze(v) if v.ndim == 1: tcons[n] = v else: fcons[n] = v # Setup a dictionary to hold all the output # XXX ideally these would be memmap'ed Image instances output = {} for n in tcons: tempdict = {} for v in ['sd', 't', 'effect']: tempdict[v] = np.memmap(NamedTemporaryFile(prefix='%s%s.nii' % (n,v)), dtype=np.float, shape=volshape, mode='w+') output[n] = tempdict for n in fcons: output[n] = np.memmap(NamedTemporaryFile(prefix='%s%s.nii' % (n,v)), dtype=np.float, shape=volshape, mode='w+') # Loop over the unique values of ar1 for val in np.unique(ar1): armask = np.equal(ar1, val) m = ARModel(X, val) d = fmri[:,armask] results = m.fit(d) # Output the results for each contrast for n in tcons: resT = results.Tcontrast(tcons[n]) output[n]['sd'][armask] = resT.sd output[n]['t'][armask] = resT.t output[n]['effect'][armask] = resT.effect for n in fcons: output[n][armask] = results.Fcontrast(fcons[n]).F # Dump output to disk odir = futil.output_dir(path_info,tcons,fcons) # The coordmap for a single volume in the time series vol0_map = fmri_im[0].coordmap for n in tcons: for v in ['t', 'sd', 'effect']: im = Image(output[n][v], vol0_map) save_image(im, pjoin(odir, n, '%s.nii' % v)) for n in fcons: im = Image(output[n], vol0_map) save_image(im, pjoin(odir, n, "F.nii")) def fixed_effects(subj, design): """ Fixed effects (within subject) for FIAC model Finds run by run estimated model results, creates fixed effects results image per subject. Parameters ---------- subj : int subject number 0..15 inclusive design : {'block', 'event'} design type """ # First, find all the effect and standard deviation images # for the subject and this design type path_dict = futil.path_info_design(subj, design) rootdir = path_dict['rootdir'] # The output directory fixdir = pjoin(rootdir, "fixed") # Fetch results images from run estimations results = futil.results_table(path_dict) # Get our hands on the relevant coordmap to save our results coordmap = futil.load_image_fiac("fiac_%02d" % subj, "wanatomical.nii").coordmap # Compute the "fixed" effects for each type of contrast for con in results: fixed_effect = 0 fixed_var = 0 for effect, sd in results[con]: effect = load_image(effect).get_data() sd = load_image(sd).get_data() var = sd ** 2 # The optimal, in terms of minimum variance, combination of the # effects has weights 1 / var # # XXX regions with 0 variance are set to 0 # XXX do we want this or np.nan? ivar = np.nan_to_num(1. / var) fixed_effect += effect * ivar fixed_var += ivar # Now, compute the fixed effects variance and t statistic fixed_sd = np.sqrt(fixed_var) isd = np.nan_to_num(1. / fixed_sd) fixed_t = fixed_effect * isd # Save the results odir = futil.ensure_dir(fixdir, con) for a, n in zip([fixed_effect, fixed_sd, fixed_t], ['effect', 'sd', 't']): im = api.Image(a, copy(coordmap)) save_image(im, pjoin(odir, '%s.nii' % n)) def group_analysis(design, contrast): """ Compute group analysis effect, t, sd for `design` and `contrast` Saves to disk in 'group' analysis directory Parameters ---------- design : {'block', 'event'} contrast : str contrast name """ array = np.array # shorthand # Directory where output will be written odir = futil.ensure_dir(futil.DATADIR, 'group', design, contrast) # Which subjects have this (contrast, design) pair? 
subj_con_dirs = futil.subj_des_con_dirs(design, contrast) if len(subj_con_dirs) == 0: raise ValueError('No subjects for %s, %s' % (design, contrast)) # Assemble effects and sds into 4D arrays sds = [] Ys = [] for s in subj_con_dirs: sd_img = load_image(pjoin(s, "sd.nii")) effect_img = load_image(pjoin(s, "effect.nii")) sds.append(sd_img.get_data()) Ys.append(effect_img.get_data()) sd = array(sds) Y = array(Ys) # This function estimates the ratio of the fixed effects variance # (sum(1/sd**2, 0)) to the estimated random effects variance # (sum(1/(sd+rvar)**2, 0)) where rvar is the random effects variance. # The EM algorithm used is described in: # # Worsley, K.J., Liao, C., Aston, J., Petre, V., Duncan, G.H., # Morales, F., Evans, A.C. (2002). \'A general statistical # analysis for fMRI data\'. NeuroImage, 15:1-15 varest = onesample.estimate_varatio(Y, sd) random_var = varest['random'] # XXX - if we have a smoother, use # random_var = varest['fixed'] * smooth(varest['ratio']) # Having estimated the random effects variance (and possibly smoothed it), # the corresponding estimate of the effect and its variance is computed and # saved. # This is the coordmap we will use coordmap = futil.load_image_fiac("fiac_00","wanatomical.nii").coordmap adjusted_var = sd**2 + random_var adjusted_sd = np.sqrt(adjusted_var) results = onesample.estimate_mean(Y, adjusted_sd) for n in ['effect', 'sd', 't']: im = api.Image(results[n], copy(coordmap)) save_image(im, pjoin(odir, "%s.nii" % n)) def group_analysis_signs(design, contrast, mask, signs=None): """ Refit the EM model with a vector of signs. Used in the permutation tests. Returns the maximum of the T-statistic within mask Parameters ---------- design: one of 'block', 'event' contrast: str name of contrast to estimate mask : ``Image`` instance or array-like image containing mask, or array-like signs: ndarray, optional Defaults to np.ones. Should have shape (*,nsubj) where nsubj is the number of effects combined in the group analysis. Returns ------- minT: np.ndarray, minima of T statistic within mask, one for each vector of signs maxT: np.ndarray, maxima of T statistic within mask, one for each vector of signs """ if api.is_image(mask): maska = mask.get_data() else: maska = np.asarray(mask) maska = maska.astype(np.bool) # Which subjects have this (contrast, design) pair? subj_con_dirs = futil.subj_des_con_dirs(design, contrast) # Assemble effects and sds into 4D arrays sds = [] Ys = [] for s in subj_con_dirs: sd_img = load_image(pjoin(s, "sd.nii")) effect_img = load_image(pjoin(s, "effect.nii")) sds.append(sd_img.get_data()[maska]) Ys.append(effect_img.get_data()[maska]) sd = np.array(sds) Y = np.array(Ys) if signs is None: signs = np.ones((1, Y.shape[0])) maxT = np.empty(signs.shape[0]) minT = np.empty(signs.shape[0]) for i, sign in enumerate(signs): signY = sign[:,np.newaxis] * Y varest = onesample.estimate_varatio(signY, sd) random_var = varest['random'] adjusted_var = sd**2 + random_var adjusted_sd = np.sqrt(adjusted_var) results = onesample.estimate_mean(Y, adjusted_sd) T = results['t'] minT[i], maxT[i] = np.nanmin(T), np.nanmax(T) return minT, maxT def permutation_test(design, contrast, mask=GROUP_MASK, nsample=1000): """ Perform a permutation (sign) test for a given design type and contrast. It is a Monte Carlo test because we only sample nsample possible sign arrays. 
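    Under the null hypothesis the subject-level effects are symmetric about
    zero, so multiplying each subject's effect by a random +/- 1 sign and
    recomputing the group statistics (via ``group_analysis_signs``) gives
    draws from the null distribution of the minimum and maximum T statistic
    over the mask.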
Parameters ---------- design: str one of ['block', 'event'] contrast : str name of contrast to estimate mask : ``Image`` instance or array-like, optional image containing mask, or array-like nsample: int, optional number of permutations Returns ------- min_vals: np.ndarray max_vals: np.ndarray """ subj_con_dirs = futil.subj_des_con_dirs(design, contrast) nsubj = len(subj_con_dirs) if nsubj == 0: raise ValueError('No subjects have %s, %s' % (design, contrast)) signs = 2*np.greater(np.random.sample(size=(nsample, nsubj)), 0.5) - 1 min_vals, max_vals = group_analysis_signs(design, contrast, mask, signs) return min_vals, max_vals def run_run_models(subject_nos=SUBJECTS, run_nos = RUNS): """ Simple serial run of all the within-run models """ for subj in subject_nos: for run in run_nos: try: run_model(subj, run) except IOError: print('Skipping subject %d, run %d' % (subj, run)) def run_fixed_models(subject_nos=SUBJECTS, designs=DESIGNS): """ Simple serial run of all the within-subject models """ for subj in subject_nos: for design in designs: try: fixed_effects(subj, design) except IOError: print('Skipping subject %d, design %s' % (subj, design)) def run_group_models(designs=DESIGNS, contrasts=CONTRASTS): """ Simple serial run of all the across-subject models """ for design in designs: for contrast in contrasts: group_analysis(design, contrast) if __name__ == '__main__': pass # Sanity check while debugging #permutation_test('block','sentence_0',mask=TINY_MASK,nsample=3) nipy-0.3.0/examples/fiac/fiac_hashes.txt000066400000000000000000000326271210344137400202200ustar00rootroot00000000000000MD5 hashes for FIAC preprocessed data ------------------------------------- This also gives the directory structure that ``fiac_example.py`` needs. fiac_data/fiac_00/block/experiment_01.csv 6a9ebdefe6a72657f7c68533c124df39 fiac_data/fiac_00/block/experiment_02.csv 49731ababbc87465b6481ddf8a2d8664 fiac_data/fiac_00/block/initial_01.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_00/block/initial_02.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_00/block/swafunctional_01.nii cdbed16524732ec22d5888a1be82d1c4 fiac_data/fiac_00/block/swafunctional_02.nii e1235803f692d5111e4d79fa16fd1ed5 fiac_data/fiac_00/event/experiment_03.csv 33e347c5ed13484df9ab9bedf855e8e2 fiac_data/fiac_00/event/experiment_04.csv ca84e9f6394d767aff68346be3265ab7 fiac_data/fiac_00/event/initial_03.csv f4d2d3734a9b2f56d6f35bd485fb830d fiac_data/fiac_00/event/initial_04.csv f4d2d3734a9b2f56d6f35bd485fb830d fiac_data/fiac_00/event/swafunctional_03.nii 4a00580a881f3aa4260f3ceaac188c21 fiac_data/fiac_00/event/swafunctional_04.nii 4b3e32342ca90daffe14017644ba992a fiac_data/fiac_01/block/experiment_01.csv 6a9ebdefe6a72657f7c68533c124df39 fiac_data/fiac_01/block/experiment_02.csv 49731ababbc87465b6481ddf8a2d8664 fiac_data/fiac_01/block/initial_01.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_01/block/initial_02.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_01/block/swafunctional_01.nii 38afd035e6e60689c270fdaa8d456bf9 fiac_data/fiac_01/block/swafunctional_02.nii ce9c068913a89c5fee4bfa26f8417484 fiac_data/fiac_01/event/experiment_03.csv 33e347c5ed13484df9ab9bedf855e8e2 fiac_data/fiac_01/event/experiment_04.csv ca84e9f6394d767aff68346be3265ab7 fiac_data/fiac_01/event/initial_03.csv f4d2d3734a9b2f56d6f35bd485fb830d fiac_data/fiac_01/event/initial_04.csv f4d2d3734a9b2f56d6f35bd485fb830d fiac_data/fiac_01/event/swafunctional_03.nii 65430fd882511cbfdc461c8654f43c08 fiac_data/fiac_01/event/swafunctional_04.nii 
8c419ff788218d8dc8475b4d17fa5614 fiac_data/fiac_02/block/experiment_01.csv 44e14d55f06b5aa6274e9b8e14e7f34d fiac_data/fiac_02/block/experiment_02.csv d9715937067d98627faf4eed79bf4df6 fiac_data/fiac_02/block/initial_01.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_02/block/initial_02.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_02/block/swafunctional_01.nii 00def42c41f3d1b6bf7956f30d3ca78e fiac_data/fiac_02/block/swafunctional_02.nii 347ef8d217f6ef7eeaeb29e92ca3634a fiac_data/fiac_02/event/experiment_03.csv 7b97248a3e3ff3a63fc7b2ea54541ab0 fiac_data/fiac_02/event/experiment_04.csv 8e52d16c9ef91d3607945338f38dbdd8 fiac_data/fiac_02/event/initial_03.csv f4d2d3734a9b2f56d6f35bd485fb830d fiac_data/fiac_02/event/initial_04.csv f4d2d3734a9b2f56d6f35bd485fb830d fiac_data/fiac_02/event/swafunctional_03.nii 1d0739396a855ef90ff89b5033f37fad fiac_data/fiac_02/event/swafunctional_04.nii a419c28db72197945fc632c09bc1868a fiac_data/fiac_03/block/experiment_03.csv b173ed72bcd82067f69964126c086335 fiac_data/fiac_03/block/experiment_04.csv 7637ac98ec67c5185de87d9f082f7bc5 fiac_data/fiac_03/block/initial_03.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_03/block/initial_04.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_03/block/swafunctional_03.nii 635aeebbf5fe60959b680912ea330cbd fiac_data/fiac_03/block/swafunctional_04.nii 904693e5b1d87ee02b612c28c2d0e4e8 fiac_data/fiac_03/event/experiment_01.csv f978b60749ecacb69cc4591123a87be5 fiac_data/fiac_03/event/experiment_02.csv 8eab700098c629378213c396822fc002 fiac_data/fiac_03/event/initial_01.csv f4d2d3734a9b2f56d6f35bd485fb830d fiac_data/fiac_03/event/initial_02.csv f4d2d3734a9b2f56d6f35bd485fb830d fiac_data/fiac_03/event/swafunctional_01.nii 480dba56a8e282897d8476e14e5b1c6b fiac_data/fiac_03/event/swafunctional_02.nii 6b984334dd5ddb246c8edcbece436e2c fiac_data/fiac_04/block/experiment_02.csv 5a25f02cb9b2f50d2a0b4b427faea2f6 fiac_data/fiac_04/block/experiment_03.csv 862dc60967c120915d0126df5a961b2d fiac_data/fiac_04/block/initial_02.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_04/block/initial_03.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_04/block/swafunctional_02.nii e0d62ac3f71f651bfa9e4f3484967273 fiac_data/fiac_04/block/swafunctional_03.nii 5219d9a597b78a69fb3b9d999e028b08 fiac_data/fiac_04/event/experiment_01.csv 8dd9bfa3644c30f42f3a3678e6ec5102 fiac_data/fiac_04/event/experiment_04.csv 05fe3c5bec4ebe5247ca23cc0b153012 fiac_data/fiac_04/event/initial_01.csv f4d2d3734a9b2f56d6f35bd485fb830d fiac_data/fiac_04/event/initial_04.csv f4d2d3734a9b2f56d6f35bd485fb830d fiac_data/fiac_04/event/swafunctional_01.nii 0c46c07f14fbb25fb61014c7b1472c84 fiac_data/fiac_04/event/swafunctional_04.nii d64197691aec027c7b9d920e28aecce1 fiac_data/fiac_05/block/experiment_02.csv b165c1276fe094ade2cf47db3df6c036 fiac_data/fiac_05/block/initial_02.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_05/event/experiment_01.csv 770d517d8022cb5ed39cfb3b38371308 fiac_data/fiac_05/event/initial_01.csv f4d2d3734a9b2f56d6f35bd485fb830d fiac_data/fiac_06/block/experiment_02.csv 907008500bcbf8204790e5138fab8bd7 fiac_data/fiac_06/block/experiment_03.csv 1c496dd1e8892384a701cbfe44492901 fiac_data/fiac_06/block/initial_02.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_06/block/initial_03.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_06/block/swafunctional_02.nii 803f2e754bcda3ff3170f1f39c44ffac fiac_data/fiac_06/block/swafunctional_03.nii f9eebfa39fdac1b16ebcc0dd085c1562 fiac_data/fiac_06/event/experiment_01.csv 129786bb621f0214f56993179f3ed40e 
fiac_data/fiac_06/event/experiment_04.csv 8f19a310d5abb8c876a4ca1f2b20cefd fiac_data/fiac_06/event/initial_01.csv f4d2d3734a9b2f56d6f35bd485fb830d fiac_data/fiac_06/event/initial_04.csv f4d2d3734a9b2f56d6f35bd485fb830d fiac_data/fiac_06/event/swafunctional_01.nii 3eddd593ed9e97cdc6ee94f4337fcf09 fiac_data/fiac_06/event/swafunctional_04.nii 0255761003a9b9a9c0d1d22d9c2b30c8 fiac_data/fiac_07/block/experiment_02.csv b2054da6001d926507b3c630ba7914db fiac_data/fiac_07/block/experiment_03.csv e3dea6bbcbe67f11710837fcfbb4b47e fiac_data/fiac_07/block/initial_02.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_07/block/initial_03.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_07/block/swafunctional_02.nii a711e931b7ba336cd513d2219480eefc fiac_data/fiac_07/block/swafunctional_03.nii 93861d5c563f68c6c80f1aa8f30af994 fiac_data/fiac_07/event/experiment_04.csv daf6114730ec53169f181f680c4820e3 fiac_data/fiac_07/event/initial_04.csv f4d2d3734a9b2f56d6f35bd485fb830d fiac_data/fiac_07/event/swafunctional_04.nii 18b881b94ea8a7970e056d8b1338b840 fiac_data/fiac_08/block/experiment_01.csv 9bd851a905f35ae11af7881659953e34 fiac_data/fiac_08/block/experiment_03.csv d0353b3d229f07e3055893addd4f1c3f fiac_data/fiac_08/block/initial_01.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_08/block/initial_03.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_08/block/swafunctional_01.nii ac8a7f3c49255cdbfff13caa79913917 fiac_data/fiac_08/block/swafunctional_03.nii b4bf244de40dd1a5aabd4acbc38afbb0 fiac_data/fiac_08/event/experiment_02.csv b94c07427f4b265d6ffa073448444aea fiac_data/fiac_08/event/experiment_04.csv b3f1005432a6cb58a78c8694d9232a18 fiac_data/fiac_08/event/initial_02.csv f4d2d3734a9b2f56d6f35bd485fb830d fiac_data/fiac_08/event/initial_04.csv f4d2d3734a9b2f56d6f35bd485fb830d fiac_data/fiac_08/event/swafunctional_02.nii 19b7fb6ada363d5d11e55ebe0c75203c fiac_data/fiac_08/event/swafunctional_04.nii bb1368611872f012a27c0b1ffe72d5e2 fiac_data/fiac_09/block/experiment_01.csv 3d49cd07b5ffa8d1692526572c396114 fiac_data/fiac_09/block/experiment_03.csv d7ef56ef814cb3f10dc57c00a8514600 fiac_data/fiac_09/block/initial_01.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_09/block/initial_03.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_09/block/swafunctional_01.nii 269e31e30913a07262db0577450ae276 fiac_data/fiac_09/block/swafunctional_03.nii 24eb37510ffe2fcc11bac201908d888b fiac_data/fiac_09/event/experiment_02.csv dcf026465df7bf5e02f6e91e430ce3b3 fiac_data/fiac_09/event/experiment_04.csv 6dcb72473920410c93c6e5fb584a3b0c fiac_data/fiac_09/event/initial_02.csv f4d2d3734a9b2f56d6f35bd485fb830d fiac_data/fiac_09/event/initial_04.csv f4d2d3734a9b2f56d6f35bd485fb830d fiac_data/fiac_09/event/swafunctional_02.nii 3de018c0c9aac8d8f4831bde6d14d2d6 fiac_data/fiac_09/event/swafunctional_04.nii f3a54581cd9ece5708b03c18aef0dcda fiac_data/fiac_10/block/experiment_01.csv 79d366f5ad8e2baa17571ba90a2d29c8 fiac_data/fiac_10/block/experiment_03.csv 159ffd9e1afa85e5d7f5818913f11255 fiac_data/fiac_10/block/initial_01.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_10/block/initial_03.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_10/block/swafunctional_01.nii ba76304d62f4458ffefd2bf961866517 fiac_data/fiac_10/block/swafunctional_03.nii 536eb2cd0923ef5f166f708efecd3d22 fiac_data/fiac_10/event/experiment_02.csv 2bd807a649539085005f3441a5d3266f fiac_data/fiac_10/event/experiment_04.csv 4259afa19c1cc1dc9cedaf2bbf6ea39d fiac_data/fiac_10/event/initial_02.csv f4d2d3734a9b2f56d6f35bd485fb830d 
fiac_data/fiac_10/event/initial_04.csv f4d2d3734a9b2f56d6f35bd485fb830d fiac_data/fiac_10/event/swafunctional_02.nii b35550de824147f116df000e5531b64c fiac_data/fiac_10/event/swafunctional_04.nii a4459fdd9540aebf9e4c42fce061d2ed fiac_data/fiac_11/block/experiment_01.csv 3a18ea4be3e6cd8e8c211943a8bc1738 fiac_data/fiac_11/block/experiment_04.csv e6934cf684f72812c916b67aa3b1f806 fiac_data/fiac_11/block/initial_01.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_11/block/initial_04.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_11/block/swafunctional_01.nii 4fe3f14e75486ee6598142e15d5e8d31 fiac_data/fiac_11/block/swafunctional_04.nii 4a6febb5e860f27e4e73e0ae050d729b fiac_data/fiac_11/event/experiment_02.csv b3f1005432a6cb58a78c8694d9232a18 fiac_data/fiac_11/event/experiment_03.csv 269c94a12854a833e380116a51f4a6d8 fiac_data/fiac_11/event/initial_02.csv f4d2d3734a9b2f56d6f35bd485fb830d fiac_data/fiac_11/event/initial_03.csv f4d2d3734a9b2f56d6f35bd485fb830d fiac_data/fiac_11/event/swafunctional_02.nii 6e0ee7061065231996cbdbe5a0ae194c fiac_data/fiac_11/event/swafunctional_03.nii c28e4ac1a2307acba3b96b0764fd219e fiac_data/fiac_12/block/experiment_01.csv 9e408441dc25d7016d5930608e1dd7a4 fiac_data/fiac_12/block/experiment_04.csv 81f7ebad3ddd40521908586f1775273e fiac_data/fiac_12/block/initial_01.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_12/block/initial_04.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_12/block/swafunctional_01.nii 872f6e7d6f827efcb29837a0099a0d5c fiac_data/fiac_12/block/swafunctional_04.nii c6e0397579c22fe8ff9b48dafa48b03f fiac_data/fiac_12/event/experiment_02.csv 7423c9d1f6c6b91c54945a135ae3b427 fiac_data/fiac_12/event/experiment_03.csv 7694aae4d34ecd33032b85e285059ab7 fiac_data/fiac_12/event/initial_02.csv f4d2d3734a9b2f56d6f35bd485fb830d fiac_data/fiac_12/event/initial_03.csv f4d2d3734a9b2f56d6f35bd485fb830d fiac_data/fiac_12/event/swafunctional_02.nii ccd26a8126bfaa545a521a96377097b0 fiac_data/fiac_12/event/swafunctional_03.nii 6d9a287ad26896eb5b6196b1235814bf fiac_data/fiac_13/block/experiment_01.csv ca46543e0ec61bdfef275d4e140763c8 fiac_data/fiac_13/block/experiment_04.csv d15d5c3cc0eacd4c5117b4640675b001 fiac_data/fiac_13/block/initial_01.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_13/block/initial_04.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_13/block/swafunctional_01.nii 0cb8ff5a4dbbf03a26eae084ff99d525 fiac_data/fiac_13/block/swafunctional_04.nii b64727ba5608d064a7c111114ff6f5f6 fiac_data/fiac_13/event/experiment_02.csv 915f57a8e6c6e329c65ed30c92ef0f71 fiac_data/fiac_13/event/experiment_03.csv 8c97635901a6552d51486d3e9a08e02f fiac_data/fiac_13/event/initial_02.csv f4d2d3734a9b2f56d6f35bd485fb830d fiac_data/fiac_13/event/initial_03.csv f4d2d3734a9b2f56d6f35bd485fb830d fiac_data/fiac_13/event/swafunctional_02.nii 1b18eea773a55e1b54dd2debd736e1a2 fiac_data/fiac_13/event/swafunctional_03.nii 8a2e39596b49f8ae57f936e1f91819f6 fiac_data/fiac_14/block/experiment_02.csv c1f9f84111c88cb3ce66885fa8947e7e fiac_data/fiac_14/block/experiment_04.csv 4ce67a5d04078da9ec20aa10a171147b fiac_data/fiac_14/block/initial_02.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_14/block/initial_04.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_14/block/swafunctional_02.nii 6774d4a6f933899a44ca3ea4100257a6 fiac_data/fiac_14/block/swafunctional_04.nii a38525d9ae5763a6beac7fb42659d09b fiac_data/fiac_14/event/experiment_01.csv 737b4a4e8b2f3bbc6d4dcddca2063311 fiac_data/fiac_14/event/experiment_03.csv f46cae55a5c6447ba7cdf025ad31afd4 
fiac_data/fiac_14/event/initial_01.csv f4d2d3734a9b2f56d6f35bd485fb830d fiac_data/fiac_14/event/initial_03.csv f4d2d3734a9b2f56d6f35bd485fb830d fiac_data/fiac_14/event/swafunctional_01.nii 65cc11864f9d51f723f78c529459186f fiac_data/fiac_14/event/swafunctional_03.nii dac38972621b87d2ceeb647ab104a5bc fiac_data/fiac_15/block/experiment_02.csv a1d03527ce83e8f1d91fee407e8866e3 fiac_data/fiac_15/block/experiment_04.csv ee55950cc357518ce39a5f6005251672 fiac_data/fiac_15/block/initial_02.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_15/block/initial_04.csv 05517d5b5393697d303f988cbcb1b370 fiac_data/fiac_15/block/swafunctional_02.nii c078463ffaf91be2b2015ff674364eef fiac_data/fiac_15/block/swafunctional_04.nii 602230469a6b23e0db4881977407faa6 fiac_data/fiac_15/event/experiment_01.csv 4b8dafd3f69b5ad2c791dfbb98f6b622 fiac_data/fiac_15/event/experiment_03.csv a4807223c8cc68e5c39b995cda4f2df1 fiac_data/fiac_15/event/initial_01.csv f4d2d3734a9b2f56d6f35bd485fb830d fiac_data/fiac_15/event/initial_03.csv f4d2d3734a9b2f56d6f35bd485fb830d fiac_data/fiac_15/event/swafunctional_01.nii ebf99885709a3d7da35127640b92a467 fiac_data/fiac_15/event/swafunctional_03.nii 19b22372d55ba9849eee46e7e17ffcd2 nipy-0.3.0/examples/fiac/fiac_util.py000066400000000000000000000321451210344137400175260ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Support utilities for FIAC example, mostly path management. The purpose of separating these is to keep the main example code as readable as possible and focused on the experimental modeling and analysis, rather than on local file management issues. Requires matplotlib """ #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- from __future__ import print_function # Python 2/3 compatibility # Stdlib import os from os import makedirs, listdir from os.path import exists, abspath, isdir, join as pjoin, splitext import csv try: from StringIO import StringIO # Python 2 except ImportError: from io import StringIO # Python 3 # Third party import numpy as np from matplotlib.mlab import csv2rec, rec2csv # From NIPY from nipy.io.api import load_image #----------------------------------------------------------------------------- # Globals #----------------------------------------------------------------------------- # We assume that there is a directory holding the data and it's local to this # code. Users can either keep a copy here or a symlink to the real location on # disk of the data. DATADIR = 'fiac_data' # Sanity check if not os.path.isdir(DATADIR): e="The data directory %s must exist and contain the FIAC data." % DATADIR raise IOError(e) #----------------------------------------------------------------------------- # Classes and functions #----------------------------------------------------------------------------- # Path management utilities def load_image_fiac(*path): """Return a NIPY image from a set of path components. 
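    For example, assuming the FIAC data are unpacked under ``fiac_data``,
    the group mask used by ``fiac_example.py`` can be loaded with::

        mask = load_image_fiac('group', 'mask.nii')  # fiac_data/group/mask.nii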
""" return load_image(pjoin(DATADIR, *path)) def subj_des_con_dirs(design, contrast, nsub=16): """Return a list of subject directories with this `design` and `contrast` Parameters ---------- design : {'event', 'block'} contrast : str nsub : int, optional total number of subjects Returns ------- con_dirs : list list of directories matching `design` and `contrast` """ rootdir = DATADIR con_dirs = [] for s in range(nsub): f = pjoin(rootdir, "fiac_%02d" % s, design, "fixed", contrast) if isdir(f): con_dirs.append(f) return con_dirs def path_info_run(subj, run): """Construct path information dict for current subject/run. Parameters ---------- subj : int subject number (0..15 inclusive) run : int run number (1..4 inclusive). Returns ------- path_dict : dict a dict with all the necessary path-related keys, including 'rootdir', and 'design', where 'design' can have values 'event' or 'block' depending on which type of run this was for subject no `subj` and run no `run` """ path_dict = {'subj': subj, 'run': run} if exists(pjoin(DATADIR, "fiac_%(subj)02d", "block", "initial_%(run)02d.csv") % path_dict): path_dict['design'] = 'block' else: path_dict['design'] = 'event' rootdir = pjoin(DATADIR, "fiac_%(subj)02d", "%(design)s") % path_dict path_dict['rootdir'] = rootdir return path_dict def path_info_design(subj, design): """Construct path information dict for subject and design. Parameters ---------- subj : int subject number (0..15 inclusive) design : {'event', 'block'} type of design Returns ------- path_dict : dict having keys 'rootdir', 'subj', 'design' """ path_dict = {'subj': subj, 'design': design} rootdir = pjoin(DATADIR, "fiac_%(subj)02d", "%(design)s") % path_dict path_dict['rootdir'] = rootdir return path_dict def results_table(path_dict): """ Return precalculated results images for subject info in `path_dict` Parameters ---------- path_dict : dict containing key 'rootdir' Returns ------- rtab : dict dict with keys given by run directories for this subject, values being a list with filenames of effect and sd images. """ # Which runs correspond to this design type? rootdir = path_dict['rootdir'] runs = filter(lambda f: isdir(pjoin(rootdir, f)), ['results_%02d' % i for i in range(1,5)] ) # Find out which contrasts have t-statistics, # storing the filenames for reading below results = {} for rundir in runs: rundir = pjoin(rootdir, rundir) for condir in listdir(rundir): for stat in ['sd', 'effect']: fname_effect = abspath(pjoin(rundir, condir, 'effect.nii')) fname_sd = abspath(pjoin(rundir, condir, 'sd.nii')) if exists(fname_effect) and exists(fname_sd): results.setdefault(condir, []).append([fname_effect, fname_sd]) return results def get_experiment_initial(path_dict): """Get the record arrays for the experimental/initial designs. Parameters ---------- path_dict : dict containing key 'rootdir', 'run', 'subj' Returns ------- experiment, initial : Two record arrays. """ # The following two lines read in the .csv files # and return recarrays, with fields # experiment: ['time', 'sentence', 'speaker'] # initial: ['time', 'initial'] rootdir = path_dict['rootdir'] if not exists(pjoin(rootdir, "experiment_%(run)02d.csv") % path_dict): e = "can't find design for subject=%(subj)d,run=%(subj)d" % path_dict raise IOError(e) experiment = csv2rec(pjoin(rootdir, "experiment_%(run)02d.csv") % path_dict) initial = csv2rec(pjoin(rootdir, "initial_%(run)02d.csv") % path_dict) return experiment, initial def get_fmri(path_dict): """Get the images for a given subject/run. 
Parameters ---------- path_dict : dict containing key 'rootdir', 'run' Returns ------- fmri : ndarray anat : NIPY image """ fmri_im = load_image( pjoin("%(rootdir)s/swafunctional_%(run)02d.nii") % path_dict) return fmri_im def ensure_dir(*path): """Ensure a directory exists, making it if necessary. Returns the full path.""" dirpath = pjoin(*path) if not isdir(dirpath): makedirs(dirpath) return dirpath def output_dir(path_dict, tcons, fcons): """Get (and make if necessary) directory to write output into. Parameters ---------- path_dict : dict containing key 'rootdir', 'run' tcons : sequence of str t contrasts fcons : sequence of str F contrasts """ rootdir = path_dict['rootdir'] odir = pjoin(rootdir, "results_%(run)02d" % path_dict) ensure_dir(odir) for n in tcons: ensure_dir(odir,n) for n in fcons: ensure_dir(odir,n) return odir def test_sanity(): from nipy.modalities.fmri import design, hrf import nipy.modalities.fmri.fmristat.hrf as fshrf from nipy.modalities.fmri.fmristat.tests import FIACdesigns from nipy.modalities.fmri.fmristat.tests.test_FIAC import matchcol from nipy.algorithms.statistics import formula from nose.tools import assert_true """ Single subject fitting of FIAC model """ # Based on file # subj3_evt_fonc1.txt # subj3_bloc_fonc3.txt for subj, run, design_type in [(3, 1, 'event'), (3, 3, 'block')]: nvol = 191 TR = 2.5 Tstart = 1.25 volume_times = np.arange(nvol)*TR + Tstart volume_times_rec = formula.make_recarray(volume_times, 't') path_dict = {'subj':subj, 'run':run} if exists(pjoin(DATADIR, "fiac_%(subj)02d", "block", "initial_%(run)02d.csv") % path_dict): path_dict['design'] = 'block' else: path_dict['design'] = 'event' experiment = csv2rec(pjoin(DATADIR, "fiac_%(subj)02d", "%(design)s", "experiment_%(run)02d.csv") % path_dict) initial = csv2rec(pjoin(DATADIR, "fiac_%(subj)02d", "%(design)s", "initial_%(run)02d.csv") % path_dict) X_exper, cons_exper = design.event_design(experiment, volume_times_rec, hrfs=fshrf.spectral) X_initial, _ = design.event_design(initial, volume_times_rec, hrfs=[hrf.glover]) X, cons = design.stack_designs((X_exper, cons_exper), (X_initial, {})) # Get original fmristat design Xf = FIACdesigns.fmristat[design_type] # Check our new design can be closely matched to the original for i in range(X.shape[1]): # Columns can be very well correlated negatively or positively assert_true(abs(matchcol(X[:,i], Xf)[1]) > 0.999) def rewrite_spec(subj, run, root = "/home/jtaylo/FIAC-HBM2009"): """ Take a FIAC specification file and get two specifications (experiment, begin). This creates two new .csv files, one for the experimental conditions, the other for the "initial" confounding trials that are to be modelled out. For the block design, the "initial" trials are the first trials of each block. For the event designs, the "initial" trials are made up of just the first trial. 
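    The two files are written into this example's data directory, as
    ``fiac_data/fiac_NN/block/experiment_MM.csv`` (or ``.../event/...``) and
    the matching ``initial_MM.csv``, where NN is the subject number and MM
    the run number.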
""" if exists(pjoin("%(root)s", "fiac%(subj)d", "subj%(subj)d_evt_fonc%(run)d.txt") % {'root':root, 'subj':subj, 'run':run}): designtype = 'evt' else: designtype = 'bloc' # Fix the format of the specification so it is # more in the form of a 2-way ANOVA eventdict = {1:'SSt_SSp', 2:'SSt_DSp', 3:'DSt_SSp', 4:'DSt_DSp'} s = StringIO() w = csv.writer(s) w.writerow(['time', 'sentence', 'speaker']) specfile = pjoin("%(root)s", "fiac%(subj)d", "subj%(subj)d_%(design)s_fonc%(run)d.txt") % {'root':root, 'subj':subj, 'run':run, 'design':designtype} d = np.loadtxt(specfile) for row in d: w.writerow([row[0]] + eventdict[row[1]].split('_')) s.seek(0) d = csv2rec(s) # Now, take care of the 'begin' event # This is due to the FIAC design if designtype == 'evt': b = np.array([(d[0]['time'], 1)], np.dtype([('time', np.float), ('initial', np.int)])) d = d[1:] else: k = np.equal(np.arange(d.shape[0]) % 6, 0) b = np.array([(tt, 1) for tt in d[k]['time']], np.dtype([('time', np.float), ('initial', np.int)])) d = d[~k] designtype = {'bloc':'block', 'evt':'event'}[designtype] fname = pjoin(DATADIR, "fiac_%(subj)02d", "%(design)s", "experiment_%(run)02d.csv") % {'root':root, 'subj':subj, 'run':run, 'design':designtype} rec2csv(d, fname) experiment = csv2rec(fname) fname = pjoin(DATADIR, "fiac_%(subj)02d", "%(design)s", "initial_%(run)02d.csv") % {'root':root, 'subj':subj, 'run':run, 'design':designtype} rec2csv(b, fname) initial = csv2rec(fname) return d, b def compare_results(subj, run, other_root, mask_fname): """ Find and compare calculated results images from a previous run This scipt checks that another directory containing results of this same analysis are similar in the sense of numpy ``allclose`` within a brain mask. Parameters ---------- subj : int subject number (0..4, 6..15) run : int run number (1..4) other_root : str path to previous run estimation mask_fname: path to a mask image defining area in which to compare differences """ # Get information for this subject and run path_dict = path_info_run(subj, run) # Get mask msk = load_image(mask_fname).get_data().copy().astype(bool) # Get results directories for this run rootdir = path_dict['rootdir'] res_dir = pjoin(rootdir, 'results_%02d' % run) if not isdir(res_dir): return for dirpath, dirnames, filenames in os.walk(res_dir): for fname in filenames: froot, ext = splitext(fname) if froot in ('effect', 'sd', 'F', 't'): this_fname = pjoin(dirpath, fname) other_fname = this_fname.replace(DATADIR, other_root) if not exists(other_fname): print(this_fname, 'present but ', other_fname, 'missing') continue this_arr = load_image(this_fname).get_data() other_arr = load_image(other_fname).get_data() ok = np.allclose(this_arr[msk], other_arr[msk]) if not ok and froot in ('effect', 'sd', 't'): # Maybe a sign flip ok = np.allclose(this_arr[msk], -other_arr[msk]) if not ok: print('Difference between', this_fname, other_fname) def compare_all(other_root, mask_fname): """ Run results comparison for all subjects and runs """ for subj in range(5) + range(6, 16): for run in range(1, 5): compare_results(subj, run, other_root, mask_fname) nipy-0.3.0/examples/fiac/parallel_run.py000066400000000000000000000070171210344137400202470ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Script to run the main analyses in parallel, using the IPython machinery. See ``fiac_example.py``. 
""" #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- import os import numpy as np from IPython import parallel #----------------------------------------------------------------------------- # Utility functions #----------------------------------------------------------------------------- _client = None def setup_client(): """Get a Client and initialize it. This assumes that all nodes see a shared filesystem. """ global _client if _client is None: _client = parallel.Client() mydir = os.path.split(os.path.abspath(__file__))[0] def cd(path): import os os.chdir(path) _client[:].apply_sync(cd, mydir) return _client def getruns(): for i in range(16): for j in range(1,5): yield i, j def getvals(): for con in ['sentence:speaker_0', 'sentence_1', 'sentence_0', 'sentence:speaker_1', 'speaker_1', 'speaker_0', 'constant_1', 'constant_0']: for design in ['block', 'event']: yield design, con #----------------------------------------------------------------------------- # Main analysis functions #----------------------------------------------------------------------------- def fitruns(): """Run the basic model fit.""" rc = setup_client() view = rc.load_balanced_view() i_s, j_s = zip(*getruns()) def _fit(subj, run): import fiac_example try: return fiac_example.run_model(subj, run) except IOError: pass return view.map(_fit, i_s, j_s) def fitfixed(): """Run the fixed effects analysis for all subjects.""" rc = setup_client() view = rc.load_balanced_view() subjects = range(16) def _fit(subject): import fiac_example try: fiac_example.fixed_effects(subject, "block") except IOError: pass try: fiac_example.fixed_effects(subject, "event") except IOError: pass return view.map(_fit, subjects) def fitgroup(): """Run the group analysis""" rc = setup_client() view = rc.load_balanced_view() d_s, c_s = zip(*getvals()) def _fit(d, c): import fiac_example return fiac_example.group_analysis(d, c) return view.map(_fit, d_s, c_s) def run_permute_test(design, contrast, nsample=1000): rc = setup_client() dview = rc[:] nnod = len(dview) # Samples per node. Round up ns_nod = np.ceil(nsample / float(nnod)) def _run_test(n, des, con): import fiac_example from fiac_example import GROUP_MASK min_vals, max_vals = fiac_example.permutation_test(des, con, GROUP_MASK, n) return min_vals, max_vals ar = dview.apply_async(_run_test, ns_nod, design, contrast) min_vals, max_vals = zip(*[r for r in ar]) return np.concatenate(min_vals), np.concatenate(max_vals) #----------------------------------------------------------------------------- # Script entry point #----------------------------------------------------------------------------- if __name__ == '__main__': pass nipy-0.3.0/examples/fiac/view_contrasts_3d.py000077500000000000000000000050301210344137400212230ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """A quick and dirty example of using Mayavi to overlay anatomy and activation. 
""" #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- from __future__ import print_function # Python 2/3 compatibility import numpy as np try: from mayavi import mlab except ImportError: try: from enthought.mayavi import mlab except ImportError: raise RuntimeError('Need mayavi for this module') from fiac_util import load_image_fiac #----------------------------------------------------------------------------- # Globals #----------------------------------------------------------------------------- MASK = load_image_fiac('group', 'mask.nii') AVGANAT = load_image_fiac('group', 'avganat.nii') #----------------------------------------------------------------------------- # Functions #----------------------------------------------------------------------------- def view_thresholdedT(design, contrast, threshold, inequality=np.greater): """ A mayavi isosurface view of thresholded t-statistics Parameters ---------- design : {'block', 'event'} contrast : str threshold : float inequality : {np.greater, np.less}, optional """ maska = np.asarray(MASK) tmap = np.array(load_image_fiac('group', design, contrast, 't.nii')) test = inequality(tmap, threshold) tval = np.zeros(tmap.shape) tval[test] = tmap[test] # XXX make the array axes agree with mayavi2 avganata = np.array(AVGANAT) avganat_iso = mlab.contour3d(avganata * maska, opacity=0.3, contours=[3600], color=(0.8,0.8,0.8)) avganat_iso.actor.property.backface_culling = True avganat_iso.actor.property.ambient = 0.3 tval_iso = mlab.contour3d(tval * MASK, color=(0.8,0.3,0.3), contours=[threshold]) return avganat_iso, tval_iso #----------------------------------------------------------------------------- # Script entry point #----------------------------------------------------------------------------- if __name__ == '__main__': # A simple example use case design = 'block' contrast = 'sentence_0' threshold = 0.3 print('Starting thresholded view with:') print('Design=', design, 'contrast=', contrast, 'threshold=', threshold) view_thresholdedT(design, contrast, threshold) nipy-0.3.0/examples/formula/000077500000000000000000000000001210344137400157535ustar00rootroot00000000000000nipy-0.3.0/examples/formula/fir.py000077500000000000000000000036341210344137400171160ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Example of FIR model using formula framework Shows how to use B splines as basis functions for the FIR instead of simple boxcars. Requires matplotlib """ import numpy as np try: import matplotlib.pyplot as plt except ImportError: raise RuntimeError("This script needs the matplotlib library") from nipy.fixes.sympy.utilities.lambdify import implemented_function from nipy.algorithms.statistics.api import Formula from nipy.modalities.fmri import utils def linBspline(knots): """ Create linear B spline that is zero outside [knots[0], knots[-1]] (knots is assumed to be sorted). 
""" fns = [] knots = np.array(knots) for i in range(knots.shape[0]-2): name = 'bs_%s' % i k1, k2, k3 = knots[i:i+3] d1 = k2-k1 def anon(x,k1=k1,k2=k2,k3=k3): return ((x-k1) / d1 * np.greater(x, k1) * np.less_equal(x, k2) + (k3-x) / d1 * np.greater(x, k2) * np.less(x, k3)) fns.append(implemented_function(name, anon)) return fns # The splines are functions of t (time) bsp_fns = linBspline(np.arange(0,10,2)) # We're going to evaluate at these specific values of time tt = np.linspace(0,50,101) tvals= tt.view(np.dtype([('t', np.float)])) # Some inter-stimulus intervals isis = np.random.uniform(low=0, high=3, size=(4,)) + 10. # Made into event onset times e = np.cumsum(isis) # Make event onsets into functions of time convolved with the spline functions. event_funcs = [utils.events(e, f=fn) for fn in bsp_fns] # Put into a formula. f = Formula(event_funcs) # The design matrix X = f.design(tvals, return_float=True) # Show the design matrix as line plots plt.plot(X[:,0]) plt.plot(X[:,1]) plt.plot(X[:,2]) plt.xlabel('time (s)') plt.title('B spline used as bases for an FIR response model') plt.show() nipy-0.3.0/examples/formula/multi_session_contrast.py000077500000000000000000000111201210344137400231350ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Example of more than one run in the same model """ from __future__ import print_function # Python 2/3 compatibility import numpy as np from nipy.algorithms.statistics.api import Term, Formula, Factor from nipy.modalities.fmri import utils, hrf # HRF models we will use for each run. Just to show it can be done, use a # different HRF model for each run h1 = hrf.glover h2 = hrf.afni # Symbol for time in general. The 'events' function below will return models in # terms of 't', but we'll want models in terms of 't1' and 't2'. We need 't' # here so we can substitute. t = Term('t') # run 1 t1 = Term('t1') # Time within run 1 c11 = utils.events([3, 7, 10], f=h1) # Condition 1, run 1 # The events utility returns a formula in terms of 't' - general time c11 = c11.subs(t, t1) # Now make it in terms of time in run 1 # Same for conditions 2 and 3 c21 = utils.events([1, 3, 9], f=h1); c21 = c21.subs(t, t1) c31 = utils.events([2, 4, 8], f=h1); c31 = c31.subs(t, t1) # Add also a Fourier basis set for drift with frequencies 0.3, 0.5, 0.7 d1 = utils.fourier_basis([0.3, 0.5, 0.7]); d1 = d1.subs(t, t1) # Here's our formula for run 1 signal terms of time in run 1 (t1) f1 = Formula([c11,c21,c31]) + d1 # run 2 t2 = Term('t2') # Time within run 2 # Conditions 1 through 3 in run 2 c12 = utils.events([3.3, 7, 10], f=h2); c12 = c12.subs(t, t2) c22 = utils.events([1, 3.2, 9], f=h2); c22 = c22.subs(t, t2) c32 = utils.events([2, 4.2, 8], f=h2); c32 = c32.subs(t, t2) d2 = utils.fourier_basis([0.3, 0.5, 0.7]); d2 = d2.subs(t, t2) # Formula for run 2 signal in terms of time in run 2 (t2) f2 = Formula([c12, c22, c32]) + d2 # Factor giving constant for run. The [1, 2] means that there are two levels to # this factor, and that when we get to pass in values for this factor, # instantiating an actual design matrix (see below), a value of 1 means level # 1 and a value of 2 means level 2. 
run_factor = Factor('run', [1, 2]) run_1_coder = run_factor.get_term(1) # Term coding for level 1 run_2_coder = run_factor.get_term(2) # Term coding for level 2 # The multi run formula will combine the indicator (dummy value) terms from the # run factor with the formulae for the runs (which are functions of (run1, run2) # time. The run_factor terms are step functions that are zero when not in the # run, 1 when in the run. f = Formula([run_1_coder]) * f1 + Formula([run_2_coder]) * f2 + run_factor # Now, we evaluate the formula. So far we've been entirely symbolic. Now we # start to think about the values at which we want to evaluate our symbolic # formula. # We'll use these values for time within run 1. The times are in seconds from # the beginning of run 1. In our case run 1 was 20 seconds long. 101 below # gives 101 values from 0 to 20 including the endpoints, giving a dt of 0.2. tval1 = np.linspace(0, 20, 101) # run 2 lasts 10 seconds. These are the times in terms of the start of run 2. tval2 = np.linspace(0, 10, 51) # We pad out the tval1 / tval2 time vectors with zeros corresponding to the # TRs in run 2 / run 1. ttval1 = np.hstack([tval1, np.zeros(tval2.shape)]) ttval2 = np.hstack([np.zeros(tval1.shape), tval2]) # The arrays above now have 152=101+51 rows... # Vector of run numbers for each time point (with values 1 or 2) run_no = np.array([1]*tval1.shape[0] + [2]*tval2.shape[0]) # Create the recarray that will be used to create the design matrix. The # recarray gives the actual values for the symbolic terms in the formulae. In # our case the terms are t1, t2, and the (indicator coding) terms from the run # factor. rec = np.array([(tv1, tv2, s) for tv1, tv2, s in zip(ttval1, ttval2, run_no)], np.dtype([('t1', np.float), ('t2', np.float), ('run', np.int)])) # The contrast we care about contrast = Formula([run_1_coder * c11 - run_2_coder * c12]) # # Create the design matrix X = f.design(rec, return_float=True) # Show ourselves the design space covered by the contrast, and the corresponding # contrast matrix preC = contrast.design(rec, return_float=True) # C is the matrix such that preC = X.dot(C.T) C = np.dot(np.linalg.pinv(X), preC) print(C) # We can also get this by passing the contrast into the design creation. 
X, c = f.design(rec, return_float=True, contrasts=dict(C=contrast)) assert np.allclose(C, c['C']) # Show the names of the non-trivial elements of the contrast nonzero = np.nonzero(np.fabs(C) >= 1e-5)[0] print((f.dtype.names[nonzero[0]], f.dtype.names[nonzero[1]])) print(((run_1_coder * c11), (run_2_coder * c12))) nipy-0.3.0/examples/formula/parametric_design.py000077500000000000000000000036071210344137400220160ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ In this example, we create a regression model for an event-related design in which the response to an event at time T[i] is modeled as depending on the amount of time since the last stimulus T[i-1] """ import numpy as np import sympy from nipy.algorithms.statistics.api import Formula, make_recarray from nipy.modalities.fmri import utils, hrf # Inter-stimulus intervals (time between events) dt = np.random.uniform(low=0, high=2.5, size=(50,)) # Onset times from the ISIs t = np.cumsum(dt) # We're going to model the amplitudes ('a') by dt (the time between events) a = sympy.Symbol('a') linear = utils.define('linear', utils.events(t, dt, f=hrf.glover)) quadratic = utils.define('quad', utils.events(t, dt, f=hrf.glover, g=a**2)) cubic = utils.define('cubic', utils.events(t, dt, f=hrf.glover, g=a**3)) f1 = Formula([linear, quadratic, cubic]) # Evaluate this time-based formula at specific times to make the design matrix tval = make_recarray(np.linspace(0,100, 1001), 't') X1 = f1.design(tval, return_float=True) # Now we make a model where the relationship of time between events and signal # is an exponential with a time constant tau l = sympy.Symbol('l') exponential = utils.events(t, dt, f=hrf.glover, g=sympy.exp(-l*a)) f3 = Formula([exponential]) # Make a design matrix by passing in time and required parameters params = make_recarray([(4.5, 3.5)], ('l', '_b0')) X3 = f3.design(tval, params, return_float=True) # the columns or d/d_b0 and d/dl tt = tval.view(np.float) v1 = np.sum([hrf.glovert(tt - s)*np.exp(-4.5*a) for s,a in zip(t, dt)], 0) v2 = np.sum([-3.5*a*hrf.glovert(tt - s)*np.exp(-4.5*a) for s,a in zip(t, dt)], 0) V = np.array([v1,v2]).T W = V - np.dot(X3, np.dot(np.linalg.pinv(X3), V)) np.testing.assert_almost_equal((W**2).sum() / (V**2).sum(), 0) nipy-0.3.0/examples/formula/simple_contrast.py000077500000000000000000000042751210344137400215460ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ A simple contrast for an FMRI HRF model """ from __future__ import print_function # Python 2/3 compatibility import numpy as np from nipy.algorithms.statistics.api import Formula, make_recarray from nipy.modalities.fmri import utils, hrf from nipy.modalities.fmri.fmristat import hrf as delay # We take event onsets, and a specified HRF model, and make symbolic functions # of time c1 = utils.events([3,7,10], f=hrf.glover) # Symbolic function of time c2 = utils.events([1,3,9], f=hrf.glover) # Symbolic function of time c3 = utils.events([3,4,6], f=delay.spectral[0]) # Symbolic function of time # We can also use a Fourier basis for some other onsets - again making symbolic # functions of time d = utils.fourier_basis([3,5,7]) # Formula # Make a formula for all four sets of onsets f = Formula([c1,c2,c3]) + d # A contrast is a formula expressed on the elements of the design formula contrast = Formula([c1-c2, c1-c3]) # 
Instantiate actual values of time at which to create the design matrix rows t = make_recarray(np.linspace(0,20,50), 't') # Make the design matrix, and get contrast matrices for the design X, c = f.design(t, return_float=True, contrasts={'C':contrast}) # c is a dictionary, containing a 2 by 9 matrix - the F contrast matrix for our # contrast of interest assert X.shape == (50, 9) assert c['C'].shape == (2, 9) # In this case the contrast matrix is rather obvious. np.testing.assert_almost_equal(c['C'], [[1,-1, 0, 0, 0, 0, 0, 0, 0], [1, 0, -1, 0, 0, 0, 0, 0, 0]]) # We can get the design implied by our contrast at our chosen times preC = contrast.design(t, return_float=True) np.testing.assert_almost_equal(preC[:, 0], X[:, 0] - X[:, 1]) np.testing.assert_almost_equal(preC[:, 1], X[:, 0] - X[:, 2]) # So, X . c['C'].T \approx preC np.testing.assert_almost_equal(np.dot(X, c['C'].T), preC) # So what is the matrix C such that preC = X . C? Yes, it's c['C'] C = np.dot(np.linalg.pinv(X), preC).T np.testing.assert_almost_equal(C, c['C']) # The contrast matrix (approx equal to c['C']) print(C) nipy-0.3.0/examples/image_from_array.py000077500000000000000000000024601210344137400201700ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Create a nifti image from a numpy array and an affine transform. """ import numpy as np from nipy import save_image, load_image from nipy.core.api import Image, vox2scanner # This gets the filename for a tiny example file from nipy.testing import anatfile # Load an image to get an array and affine # # Use one of our test files to get an array and affine (as numpy array) from. img = load_image(anatfile) arr = img.get_data() affine_array = img.coordmap.affine.copy() # 1) Create a CoordinateMap from the affine transform which specifies # the mapping from input to output coordinates. The ``vox2scanner`` function # makes a coordinate map from voxels to scanner coordinates. Other options are # ``vox2mni`` or ``vox2talairach`` affine_coordmap = vox2scanner(affine_array) # 2) Create a nipy image from the array and CoordinateMap newimg = Image(arr, affine_coordmap) # Save the nipy image to the specified filename save_image(newimg, 'an_image.nii.gz') # Reload and verify the data and affine were saved correctly. 
img_back = load_image('an_image.nii.gz') assert np.allclose(img_back.get_data(), img.get_data()) assert np.allclose(img_back.coordmap.affine, img.coordmap.affine) nipy-0.3.0/examples/interfaces/000077500000000000000000000000001210344137400164315ustar00rootroot00000000000000nipy-0.3.0/examples/interfaces/process_fiac.py000077500000000000000000000141511210344137400214500ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: ''' Single subject analysis script for SPM / FIAC ''' import sys from os.path import join as pjoin from glob import glob import numpy as np from nipy.interfaces.spm import (spm_info, make_job, scans_for_fnames, run_jobdef, fnames_presuffix, fname_presuffix, fltcols) def get_data(data_path, subj_id): data_def = {} subject_path = pjoin(data_path, 'fiac%s' % subj_id) data_def['functionals'] = sorted( glob(pjoin(subject_path, 'functional_*.nii'))) anatomicals = glob(pjoin(subject_path, 'anatomical.nii')) if len(anatomicals) == 1: data_def['anatomical'] = anatomicals[0] elif len(anatomicals) == 0: data_def['anatomical'] = None else: raise ValueError('Too many anatomicals') return data_def def slicetime(data_def): sess_scans = scans_for_fnames(data_def['functionals']) stinfo = make_job('temporal', 'st', { 'scans': sess_scans, 'so':range(1,31,2) + range(2,31,2), 'tr':2.5, 'ta':2.407, 'nslices':float(30), 'refslice':1 }) run_jobdef(stinfo) def realign(data_def): sess_scans = scans_for_fnames(fnames_presuffix(data_def['functionals'], 'a')) rinfo = make_job('spatial', 'realign', [{ 'estimate':{ 'data':sess_scans, 'eoptions':{ 'quality':0.9, 'sep':4.0, 'fwhm':5.0, 'rtm':True, 'interp':2.0, 'wrap':[0.0,0.0,0.0], 'weight':[] } } }]) run_jobdef(rinfo) def reslice(data_def): sess_scans = scans_for_fnames(fnames_presuffix(data_def['functionals'], 'a')) rsinfo = make_job('spatial', 'realign', [{ 'write':{ 'data': np.vstack(sess_scans.flat), 'roptions':{ 'which':[2, 1], 'interp':4.0, 'wrap':[0.0,0.0,0.0], 'mask':True, } } }]) run_jobdef(rsinfo) def coregister(data_def): func1 = data_def['functionals'][0] mean_fname = fname_presuffix(func1, 'meana') crinfo = make_job('spatial', 'coreg', [{ 'estimate':{ 'ref': [mean_fname], 'source': [data_def['anatomical']], 'other': [[]], 'eoptions':{ 'cost_fun':'nmi', 'sep':[4.0, 2.0], 'tol':np.array( [0.02,0.02,0.02, 0.001,0.001,0.001, 0.01,0.01,0.01, 0.001,0.001,0.001]).reshape(1,12), 'fwhm':[7.0, 7.0] } } }]) run_jobdef(crinfo) def segnorm(data_def): def_tpms = np.zeros((3,1), dtype=np.object) spm_path = spm_info.spm_path def_tpms[0] = pjoin(spm_path, 'tpm', 'grey.nii'), def_tpms[1] = pjoin(spm_path, 'tpm', 'white.nii'), def_tpms[2] = pjoin(spm_path, 'tpm', 'csf.nii') data = np.zeros((1,), dtype=object) data[0] = data_def['anatomical'] sninfo = make_job('spatial', 'preproc', { 'data': data, 'output':{ 'GM':fltcols([0,0,1]), 'WM':fltcols([0,0,1]), 'CSF':fltcols([0,0,0]), 'biascor':1.0, 'cleanup':False, }, 'opts':{ 'tpm':def_tpms, 'ngaus':fltcols([2,2,2,4]), 'regtype':'mni', 'warpreg':1.0, 'warpco':25.0, 'biasreg':0.0001, 'biasfwhm':60.0, 'samp':3.0, 'msk':np.array([], dtype=object), } }) run_jobdef(sninfo) def norm_write(data_def): sess_scans = scans_for_fnames(fnames_presuffix(data_def['functionals'], 'a')) matname = fname_presuffix(data_def['anatomical'], suffix='_seg_sn.mat', use_ext=False) subj = { 'matname': np.zeros((1,), dtype=object), 'resample': np.vstack(sess_scans.flat), } subj['matname'][0] = matname roptions = { 'preserve':False, 
'bb':np.array([[-78,-112, -50],[78,76,85.0]]), 'vox':fltcols([2.0,2.0,2.0]), 'interp':1.0, 'wrap':[0.0,0.0,0.0], } nwinfo = make_job('spatial', 'normalise', [{ 'write':{ 'subj': subj, 'roptions': roptions, } }]) run_jobdef(nwinfo) # knock out the list of images, replacing with only one subj['resample'] = np.zeros((1,), dtype=object) subj['resample'][0] = data_def['anatomical'] roptions['interp'] = 4.0 run_jobdef(nwinfo) def smooth(data_def, fwhm=8.0): try: len(fwhm) except TypeError: fwhm = [fwhm] * 3 fwhm = np.asarray(fwhm, dtype=np.float).reshape(1,3) sess_scans = scans_for_fnames(fnames_presuffix(data_def['functionals'], 'wa')) sinfo = make_job('spatial', 'smooth', {'data':np.vstack(sess_scans.flat), 'fwhm':fwhm, 'dtype':0}) run_jobdef(sinfo) def process_subject(ddef): if not ddef['anatomical']: return slicetime(ddef) realign(ddef) reslice(ddef) coregister(ddef) segnorm(ddef) norm_write(ddef) smooth(ddef) def process_subjects(data_path, subj_ids): for subj_id in subj_ids: ddef = get_data(data_path, subj_id) process_subject(ddef) if __name__ == '__main__': try: data_path = sys.argv[1] except IndexError: raise OSError('Need FIAC data path as input') try: subj_ids = sys.argv[2:] except IndexError: subj_ids = range(16) process_subjects(data_path, subj_ids) nipy-0.3.0/examples/labs/000077500000000000000000000000001210344137400152275ustar00rootroot00000000000000nipy-0.3.0/examples/labs/bayesian_structural_analysis.py000077500000000000000000000127021210344137400235740ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from __future__ import print_function # Python 2/3 compatibility __doc__ = """ This script generates a noisy multi-subject activation image dataset and applies the Bayesian structural analysis on it Requires matplotlib Author : Bertrand Thirion, 2009-2011 """ print(__doc__) import numpy as np import scipy.stats as st try: import matplotlib.pyplot as plt except ImportError: raise RuntimeError("This script needs the matplotlib library") import nipy.labs.utils.simul_multisubject_fmri_dataset as simul import nipy.labs.spatial_models.bayesian_structural_analysis as bsa from nipy.labs.spatial_models.discrete_domain import domain_from_binary_array def make_bsa_2d(betas, theta=3., dmax=5., ths=0, thq=0.5, smin=0, method='simple', verbose=0): """ Function for performing bayesian structural analysis on a set of images. 
Parameters ---------- betas, array of shape (nsubj, dimx, dimy) the data used Note that it is assumed to be a t- or z-variate theta=3., float, first level threshold of betas dmax=5., float, expected between subject variability ths=0, float, null hypothesis for the prevalence statistic thq=0.5, float, p-value of the null rejection smin=0, int, threshold on the nu_mber of contiguous voxels to make regions meaningful structures method= 'simple', string, estimation method used ; to be chosen among 'simple', 'quick', 'loo', 'ipmi' verbose=0, verbosity mode Returns ------- AF the landmark_regions instance describing the result BF: list of hroi instances describing the individual data """ ref_dim = np.shape(betas[0]) nsubj = betas.shape[0] xyz = np.array(np.where(betas[:1])).T.astype(np.int) # Get coordinates in mm xyz = xyz[:, 1:] # switch to dimension 2 coord = xyz.astype(np.float) # get the functional information lbeta = np.array([np.ravel(betas[k]) for k in range(nsubj)]).T lmax = 0 bdensity = 1 dom = domain_from_binary_array(np.ones(ref_dim)) if method == 'simple': group_map, AF, BF, likelihood = \ bsa.compute_BSA_simple(dom, lbeta, dmax, thq, smin, ths, theta) if method == 'quick': likelihood = np.zeros(ref_dim) group_map, AF, BF, coclustering = \ bsa.compute_BSA_quick(dom, lbeta, dmax, thq, smin, ths, theta) if method == 'loo': mll, ll0 = bsa.compute_BSA_loo(dom, lbeta, dmax, thq, smin, ths, theta, bdensity) return mll, ll0 if method not in['loo', 'simple', 'quick']: raise ValueError('method is not correctly defined') if verbose == 0: return AF, BF if AF != None: lmax = AF.k + 2 AF.show() group_map.shape = ref_dim plt.figure(figsize=(8, 3)) ax = plt.subplot(1, 3, 1) plt.imshow(group_map, interpolation='nearest', vmin=-1, vmax=lmax) plt.title('Blob separation map', fontsize=10) plt.axis('off') plt.colorbar(shrink=.8) if AF != None: group_map = AF.map_label(coord, 0.95, dmax) group_map.shape = ref_dim plt.subplot(1, 3, 2) plt.imshow(group_map, interpolation='nearest', vmin=-1, vmax=lmax) plt.title('group-level position 95% \n confidence regions', fontsize=10) plt.axis('off') plt.colorbar(shrink=.8) plt.subplot(1, 3, 3) likelihood.shape = ref_dim plt.imshow(likelihood, interpolation='nearest') plt.title('Spatial density under h1', fontsize=10) plt.axis('off') plt.colorbar(shrink=.8) fig_output = plt.figure(figsize=(8, 3.5)) fig_output.text(.5, .9, "Individual landmark regions", ha="center") for s in range(nsubj): ax = plt.subplot(nsubj / 5, 5, s + 1) #ax.set_position([.02, .02, .96, .96]) lw = - np.ones(ref_dim) if BF[s] is not None: nls = BF[s].get_roi_feature('label') nls[nls == - 1] = np.size(AF) + 2 for k in range(BF[s].k): np.ravel(lw)[BF[s].label == k] = nls[k] plt.imshow(lw, interpolation='nearest', vmin=-1, vmax=lmax) plt.axis('off') fig_input = plt.figure(figsize=(8, 3.5)) fig_input.text(.5,.9, "Input activation maps", ha='center') for s in range(nsubj): plt.subplot(nsubj / 5, 5, s + 1) plt.imshow(betas[s], interpolation='nearest', vmin=betas.min(), vmax=betas.max()) plt.axis('off') return AF, BF ############################################################################### # Main script ############################################################################### # generate the data n_subj = 10 shape = (60, 60) pos = np.array([[12, 14], [20, 20], [30, 20]]) ampli = np.array([5, 7, 6]) sjitter = 1.0 betas = simul.surrogate_2d_dataset(n_subj=n_subj, shape=shape, pos=pos, ampli=ampli, width=5.0) # set various parameters theta = float(st.t.isf(0.01, 100)) dmax = 4. 
/ 1.5 ths = n_subj / 4 thq = 0.9 verbose = 1 smin = 5 method = 'simple' # 'quick' # 'loo' # # run the algo AF, BF = make_bsa_2d(betas, theta, dmax, ths, thq, smin, method, verbose=verbose) plt.show() nipy-0.3.0/examples/labs/blob_extraction.py000077500000000000000000000065321210344137400207700ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from __future__ import print_function # Python 2/3 compatibility __doc__ = """ This scipt makes a noisy activation image and extracts the blobs from it. Requires matplotlib Author : Bertrand Thirion, 2009--2012 """ print(__doc__) import numpy as np try: import matplotlib.pyplot as plt except ImportError: raise RuntimeError("This script needs the matplotlib library") import matplotlib as mpl import nipy.labs.utils.simul_multisubject_fmri_dataset as simul import nipy.labs.spatial_models.hroi as hroi from nipy.labs.spatial_models.discrete_domain import grid_domain_from_shape # --------------------------------------------------------- # simulate an activation image # --------------------------------------------------------- shape = (60, 60) pos = np.array([[12, 14], [20, 20], [30, 20]]) ampli = np.array([3, 4, 4]) dataset = simul.surrogate_2d_dataset(n_subj=1, shape=shape, pos=pos, ampli=ampli, width=10.0).squeeze() values = dataset.ravel() #------------------------------------------------------- # Computations #------------------------------------------------------- # create a domain descriptor associated with this domain = grid_domain_from_shape(shape) nroi = hroi.HROI_as_discrete_domain_blobs(domain, values, threshold=2.0, smin=3) # create an average activaion image activation = [values[nroi.select_id(id, roi=False)] for id in nroi.get_id()] nroi.set_feature('activation', activation) bmap = nroi.feature_to_voxel_map( 'activation', roi=True, method="mean").reshape(shape) #-------------------------------------------------------- # Result display #-------------------------------------------------------- aux1 = (0 - values.min()) / (values.max() - values.min()) aux2 = (bmap.max() - values.min()) / (values.max() - values.min()) cdict = {'red': ((0.0, 0.0, 0.7), (aux1, 0.7, 0.7), (aux2, 1.0, 1.0), (1.0, 1.0, 1.0)), 'green': ((0.0, 0.0, 0.7), (aux1, 0.7, 0.0), (aux2, 1.0, 1.0), (1.0, 1.0, 1.0)), 'blue': ((0.0, 0.0, 0.7), (aux1, 0.7, 0.0), (aux2, 0.5, 0.5), (1.0, 1.0, 1.0))} my_cmap = mpl.colors.LinearSegmentedColormap('my_colormap', cdict, 256) plt.figure(figsize=(12, 3)) plt.subplot(1, 3, 1) plt.imshow(dataset, interpolation='nearest', cmap=my_cmap) cb = plt.colorbar() for t in cb.ax.get_yticklabels(): t.set_fontsize(16) plt.axis('off') plt.title('Thresholded data') # plot the blob label image plt.subplot(1, 3, 2) plt.imshow(nroi.feature_to_voxel_map('id', roi=True).reshape(shape), interpolation='nearest') plt.colorbar() plt.title('Blob labels') # plot the blob-averaged signal image aux = 0.01 cdict = {'red': ((0.0, 0.0, 0.7), (aux, 0.7, 0.7), (1.0, 1.0, 1.0)), 'green': ((0.0, 0.0, 0.7), (aux, 0.7, 0.0), (1.0, 1.0, 1.0)), 'blue': ((0.0, 0.0, 0.7), (aux, 0.7, 0.0), (1.0, 0.5, 1.0))} my_cmap = mpl.colors.LinearSegmentedColormap('my_colormap', cdict, 256) plt.subplot(1, 3, 3) plt.imshow(bmap, interpolation='nearest', cmap=my_cmap) cb = plt.colorbar() for t in cb.ax.get_yticklabels(): t.set_fontsize(16) plt.axis('off') plt.title('Blob average') plt.show() 
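
#--------------------------------------------------------
# Textual summary -- an added sketch, not part of the original demo.
# It assumes `representative_feature` (used the same way in the
# demo_blob_from_image example) returns one mean value per blob,
# in `get_id()` order.
#--------------------------------------------------------
for blob_id, blob_mean in zip(nroi.get_id(),
                              nroi.representative_feature('activation')):
    print('blob %s: mean activation %0.2f' % (blob_id, float(blob_mean)))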
nipy-0.3.0/examples/labs/demo_dmtx.py000077500000000000000000000042171210344137400175700ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from __future__ import print_function # Python 2/3 compatibility __doc__ = """ Examples of design matrices specification and and computation (event-related design, FIR design, etc) Requires matplotlib Author : Bertrand Thirion: 2009-2010 """ print(__doc__) import numpy as np try: import matplotlib.pyplot as plt except ImportError: raise RuntimeError("This script needs the matplotlib library") from nipy.modalities.fmri.design_matrix import make_dmtx from nipy.modalities.fmri.experimental_paradigm import (EventRelatedParadigm, BlockParadigm) # frame times tr = 1.0 nscans = 128 frametimes = np.linspace(0, (nscans - 1) * tr, nscans) # experimental paradigm conditions = ['c0', 'c0', 'c0', 'c1', 'c1', 'c1', 'c3', 'c3', 'c3'] onsets = [30, 70, 100, 10, 30, 90, 30, 40, 60] hrf_model = 'canonical' motion = np.cumsum(np.random.randn(128, 6), 0) add_reg_names = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz'] #event-related design matrix paradigm = EventRelatedParadigm(conditions, onsets) X1 = make_dmtx( frametimes, paradigm, drift_model='polynomial', drift_order=3, add_regs=motion, add_reg_names=add_reg_names) # block design matrix duration = 7 * np.ones(9) paradigm = BlockParadigm(con_id=conditions, onset=onsets, duration=duration) X2 = make_dmtx(frametimes, paradigm, drift_model='polynomial', drift_order=3) # FIR model paradigm = EventRelatedParadigm(conditions, onsets) hrf_model = 'FIR' X3 = make_dmtx(frametimes, paradigm, hrf_model='fir', drift_model='polynomial', drift_order=3, fir_delays=np.arange(1, 6)) # plot the results fig = plt.figure(figsize=(10, 6)) ax = plt.subplot(1, 3, 1) X1.show(ax=ax) ax.set_title('Event-related design matrix', fontsize=12) ax = plt.subplot(1, 3, 2) X2.show(ax=ax) ax.set_title('Block design matrix', fontsize=12) ax = plt.subplot(1, 3, 3) X3.show(ax=ax) ax.set_title('FIR design matrix', fontsize=12) plt.subplots_adjust(top=0.9, bottom=0.25) plt.show() nipy-0.3.0/examples/labs/example_glm.py000077500000000000000000000062761210344137400201110ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from __future__ import print_function # Python 2/3 compatibility __doc__ = """ This is an example where: 1. An sequence of fMRI volumes are simulated 2. A design matrix describing all the effects related to the data is computed 3. A GLM is applied to all voxels 4. 
A contrast image is created Requires matplotlib Author : Bertrand Thirion, 2010 """ print(__doc__) import os import os.path as op import numpy as np try: import matplotlib.pyplot as plt except ImportError: raise RuntimeError("This script needs the matplotlib library") from nibabel import save, Nifti1Image import nipy.modalities.fmri.design_matrix as dm from nipy.labs.utils.simul_multisubject_fmri_dataset import \ surrogate_4d_dataset from nipy.modalities.fmri.glm import GeneralLinearModel from nipy.modalities.fmri.experimental_paradigm import EventRelatedParadigm ####################################### # Simulation parameters ####################################### # volume mask shape = (20, 20, 20) affine = np.eye(4) # Acquisition parameters: number of scans (n_scans) and volume repetition time # value in seconds n_scans = 128 tr = 2.4 # input paradigm information frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans) # conditions are 0 1 0 1 0 1 ... conditions = np.arange(20) % 2 # 20 onsets (in sec), first event 10 sec after the start of the first scan onsets = np.linspace(5, (n_scans - 1) * tr - 10, 20) # model with canonical HRF (could also be : # 'canonical with derivative' or 'fir' hrf_model = 'canonical' # fake motion parameters to be included in the model motion = np.cumsum(np.random.randn(n_scans, 6), 0) add_reg_names = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz'] ######################################## # Design matrix ######################################## paradigm = EventRelatedParadigm(conditions, onsets) X, names = dm.dmtx_light(frametimes, paradigm, drift_model='cosine', hfcut=128, hrf_model=hrf_model, add_regs=motion, add_reg_names=add_reg_names) ####################################### # Get the FMRI data ####################################### fmri_data = surrogate_4d_dataset(shape=shape, n_scans=n_scans)[0] # if you want to save it as an image data_file = 'fmri_data.nii' save(fmri_data, data_file) ######################################## # Perform a GLM analysis ######################################## # GLM fit Y = fmri_data.get_data().reshape(np.prod(shape), n_scans) glm = GeneralLinearModel(X) glm.fit(Y.T) # specify the contrast [1 -1 0 ..] contrast = np.zeros(X.shape[1]) contrast[0] = 1 contrast[1] = - 1 # compute the constrast image related to it zvals = glm.contrast(contrast).z_score() contrast_image = Nifti1Image(np.reshape(zvals, shape), affine) # if you want to save the contrast as an image contrast_path = 'zmap.nii' save(contrast_image, contrast_path) print('Wrote the some of the results as images in directory %s' % op.abspath(os.getcwd())) h, c = np.histogram(zvals, 100) # Show the histogram plt.figure() plt.bar(c[: - 1], h, width=.1) plt.title(' Histogram of the z-values') plt.show() nipy-0.3.0/examples/labs/glm_lowlevel.py000077500000000000000000000022731210344137400203000ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from __future__ import print_function # Python 2/3 compatibility __doc__ = """ This example simulates a number of pure Gaussian white noise signals, then fits each one in terms of two regressors: a constant baseline, and a linear function of time. The voxelwise t statistics associated with the baseline coefficient are then computed. 
""" print(__doc__) import numpy as np from nipy.modalities.fmri.glm import GeneralLinearModel dimt = 100 dimx = 10 dimy = 11 dimz = 12 # axis defines the "time direction" y = np.random.randn(dimt, dimx * dimy * dimz) axis = 0 X = np.array([np.ones(dimt), range(dimt)]) X = X.T ## the design matrix X must have dimt lines mod = GeneralLinearModel(X) mod.fit(y) # Define a t contrast tcon = mod.contrast([1, 0]) # Compute the t-stat t = tcon.stat() ## t = tcon.stat(baseline=1) to test effects > 1 # Compute the p-value p = tcon.p_value() # Compute the z-score z = tcon.z_score() # Perform a F test without keeping the F stat p = mod.contrast([[1, 0], [1, - 1]]).p_value() print(np.shape(y)) print(np.shape(X)) print(np.shape(z)) nipy-0.3.0/examples/labs/group_reproducibility_analysis.py000077500000000000000000000076701210344137400241460ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from __future__ import print_function # Python 2/3 compatibility __doc__ = """ Example of script to analyse the reproducibility in group studies using a bootstrap procedure Needs matplotlib Author: Bertrand Thirion, 2005-2009 """ print(__doc__) import numpy as np # Scipy stats needed for thresholding import scipy.stats as st try: import matplotlib.pyplot as plt except ImportError: raise RuntimeError("This script needs the matplotlib library") import nipy.labs.utils.simul_multisubject_fmri_dataset as simul from nipy.labs.utils.reproducibility_measures import ( voxel_reproducibility, cluster_reproducibility, map_reproducibility, peak_reproducibility) from nipy.labs.spatial_models.discrete_domain import ( grid_domain_from_binary_array) ############################################################################### # Generate the data n_subj = 105 shape = (60, 60) pos = np.array([[12, 14], [20, 20], [30, 20]]) ampli = np.array([2.5, 3.5, 3]) betas = simul.surrogate_2d_dataset(n_subj=n_subj, shape=shape, pos=pos, ampli=ampli, width=5.0) n_vox = np.prod(shape) # set the variance at 1 everywhere func = np.reshape(betas, (n_subj, n_vox)).T var = np.ones((n_vox, n_subj)) domain = grid_domain_from_binary_array(np.ones((shape[0], shape[1], 1))) ############################################################################### # Run reproducibility analysis ngroups = 10 thresholds = np.arange(.5, 6., .5) sigma = 2.0 csize = 10 niter = 10 method = 'crfx' verbose = 0 # do not use permutations swap = False kap = [] clt = [] pk = [] sens = [] for threshold in thresholds: kwargs={'threshold': threshold, 'csize': csize} kappa = [] cls = [] sent = [] peaks = [] for i in range(niter): k = voxel_reproducibility(func, var, domain, ngroups, method, swap, verbose, **kwargs) kappa.append(k) cld = cluster_reproducibility(func, var, domain, ngroups, sigma, method, swap, verbose, **kwargs) cls.append(cld) peak = peak_reproducibility(func, var, domain, ngroups, sigma, method, swap, verbose, **kwargs) peaks.append(peak) seni = map_reproducibility(func, var, domain, ngroups, method, True, verbose, threshold=threshold, csize=csize).mean()/ngroups sent.append(seni) sens.append(np.array(sent)) kap.append(np.array(kappa)) clt.append(np.array(cls)) pk.append(np.array(peaks)) ############################################################################### # Visualize the results aux = st.norm.sf(thresholds) a = plt.figure(figsize=(11, 6)) plt.subplot(1, 3, 1) plt.boxplot(kap) plt.title('voxel-level \n reproducibility', fontsize=12) 
plt.xticks(range(1, 1 + len(thresholds)), thresholds, fontsize=9) plt.xlabel('threshold') plt.subplot(1, 3, 2) plt.boxplot(clt) plt.title('cluster-level \n reproducibility', fontsize=12) plt.xticks(range(1, 1 + len(thresholds)), thresholds, fontsize=9) plt.xlabel('threshold') plt.subplot(1, 3, 3) plt.boxplot(pk, notch=1) plt.title('peak-level \n reproducibility', fontsize=12) plt.xticks(range(1, 1 + len(thresholds)), thresholds, fontsize=9) plt.xlabel('threshold') plt.figure() for q, threshold in enumerate(thresholds): plt.subplot(3, len(thresholds) / 3 + 1, q + 1) rmap = map_reproducibility(func, var, domain, ngroups, method, verbose, threshold=threshold, csize=csize) rmap = np.reshape(rmap, shape) plt.imshow(rmap, interpolation=None, vmin=0, vmax=ngroups) plt.title('threshold: %g' % threshold, fontsize=10) plt.axis('off') plt.suptitle('Map reproducibility for different thresholds') plt.show() nipy-0.3.0/examples/labs/hierarchical_rois.py000077500000000000000000000040261210344137400212600ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from __future__ import print_function # Python 2/3 compatibility __doc__ = \ """ Example of a script that crates a 'hierarchical roi' structure from the blob model of an image Needs matplotlib Author: Bertrand Thirion, 2008-2009 """ print(__doc__) import numpy as np try: import matplotlib.pyplot as plt except ImportError: raise RuntimeError("This script needs the matplotlib library") import nipy.labs.spatial_models.hroi as hroi import nipy.labs.utils.simul_multisubject_fmri_dataset as simul from nipy.labs.spatial_models.discrete_domain import domain_from_binary_array ############################################################################## # simulate the data shape = (60, 60) pos = np.array([[12, 14], [20, 20], [30, 20]]) ampli = np.array([3, 4, 4]) dataset = simul.surrogate_2d_dataset(n_subj=1, shape=shape, pos=pos, ampli=ampli, width=10.0).squeeze() # create a domain descriptor associated with this domain = domain_from_binary_array(dataset ** 2 > 0) nroi = hroi.HROI_as_discrete_domain_blobs(domain, dataset.ravel(), threshold=2., smin=5) n1 = nroi.copy() nroi.reduce_to_leaves() td = n1.make_forest().depth_from_leaves() root = np.argmax(td) lv = n1.make_forest().get_descendants(root) u = nroi.make_graph().cc() flat_data = dataset.ravel() activation = [flat_data[nroi.select_id(id, roi=False)] for id in nroi.get_id()] nroi.set_feature('activation', activation) label = np.reshape(n1.label, shape) label_ = np.reshape(nroi.label, shape) # make a figure plt.figure(figsize=(10, 4)) plt.subplot(1, 3, 1) plt.imshow(np.squeeze(dataset)) plt.title('Input map') plt.axis('off') plt.subplot(1, 3, 2) plt.title('Nested Rois') plt.imshow(label, interpolation='Nearest') plt.axis('off') plt.subplot(1, 3, 3) plt.title('Leave Rois') plt.imshow(label_, interpolation='Nearest') plt.axis('off') plt.show() nipy-0.3.0/examples/labs/histogram_fits.py000077500000000000000000000062441210344137400206340ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from __future__ import print_function # Python 2/3 compatibility __doc__ = """ Example of a script that perfoms histogram analysis of an activation image, to estimate activation Z-score with various heuristics: * Gamma-Gaussian model * Gaussian mixture model * Empirical normal null This example is based on a 
(simplistic) simulated image. Needs matplotlib """ # Author : Bertrand Thirion, Gael Varoquaux 2008-2009 print(__doc__) import numpy as np try: import matplotlib.pyplot as plt except ImportError: raise RuntimeError("This script needs the matplotlib library") import nipy.labs.utils.simul_multisubject_fmri_dataset as simul import nipy.algorithms.statistics.empirical_pvalue as en ############################################################################### # simulate the data shape = (60, 60) pos = 2 * np.array([[6, 7], [10, 10], [15, 10]]) ampli = np.array([3, 4, 4]) dataset = simul.surrogate_2d_dataset(n_subj=1, shape=shape, pos=pos, ampli=ampli, width=10.0).squeeze() fig = plt.figure(figsize=(12, 10)) plt.subplot(3, 3, 1) plt.imshow(dataset, cmap=plt.cm.hot) plt.colorbar() plt.title('Raw data') Beta = dataset.ravel().squeeze() ############################################################################### # fit Beta's histogram with a Gamma-Gaussian mixture gam_gaus_pp = en.gamma_gaussian_fit(Beta, Beta) gam_gaus_pp = np.reshape(gam_gaus_pp, (shape[0], shape[1], 3)) plt.figure(fig.number) plt.subplot(3, 3, 4) plt.imshow(gam_gaus_pp[..., 0], cmap=plt.cm.hot) plt.title('Gamma-Gaussian mixture,\n first component posterior proba.') plt.colorbar() plt.subplot(3, 3, 5) plt.imshow(gam_gaus_pp[..., 1], cmap=plt.cm.hot) plt.title('Gamma-Gaussian mixture,\n second component posterior proba.') plt.colorbar() plt.subplot(3, 3, 6) plt.imshow(gam_gaus_pp[..., 2], cmap=plt.cm.hot) plt.title('Gamma-Gaussian mixture,\n third component posterior proba.') plt.colorbar() ############################################################################### # fit Beta's histogram with a mixture of Gaussians alpha = 0.01 gaus_mix_pp = en.three_classes_GMM_fit(Beta, None, alpha, prior_strength=100) gaus_mix_pp = np.reshape(gaus_mix_pp, (shape[0], shape[1], 3)) plt.figure(fig.number) plt.subplot(3, 3, 7) plt.imshow(gaus_mix_pp[..., 0], cmap=plt.cm.hot) plt.title('Gaussian mixture,\n first component posterior proba.') plt.colorbar() plt.subplot(3, 3, 8) plt.imshow(gaus_mix_pp[..., 1], cmap=plt.cm.hot) plt.title('Gaussian mixture,\n second component posterior proba.') plt.colorbar() plt.subplot(3, 3, 9) plt.imshow(gaus_mix_pp[..., 2], cmap=plt.cm.hot) plt.title('Gamma-Gaussian mixture,\n third component posterior proba.') plt.colorbar() ############################################################################### # Fit the null mode of Beta with an empirical normal null efdr = en.NormalEmpiricalNull(Beta) emp_null_fdr = efdr.fdr(Beta) emp_null_fdr = emp_null_fdr.reshape(shape) plt.subplot(3, 3, 3) plt.imshow(1 - emp_null_fdr, cmap=plt.cm.hot) plt.colorbar() plt.title('Empirical FDR\n ') plt.show() nipy-0.3.0/examples/labs/multi_subject_parcellation.py000077500000000000000000000037061210344137400232200ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from __future__ import print_function # Python 2/3 compatibility __doc__ = """ This script contains a quick demo on a multi-subject parcellation on a toy 2D example. Note how the middle parcels adapt to the individual configuration. 
Needs matplotlib """ print(__doc__) import numpy as np try: import matplotlib.pyplot as plt except ImportError: raise RuntimeError("This script needs the matplotlib library") import nipy.labs.spatial_models.hierarchical_parcellation as hp import nipy.labs.utils.simul_multisubject_fmri_dataset as simul import nipy.labs.spatial_models.discrete_domain as dom # step 1: generate some synthetic data n_subj = 10 shape = (60, 60) pos = 3 * np.array([[6, 7], [10, 10], [15, 10]]) ampli = np.array([5, 7, 6]) sjitter = 6.0 dataset = simul.surrogate_2d_dataset(n_subj=n_subj, shape=shape, pos=pos, ampli=ampli, width=10.0) # dataset represents 2D activation images from n_subj subjects, # step 2 : prepare all the information for the parcellation nbparcel = 10 ldata = np.reshape(dataset, (n_subj, np.prod(shape), 1)) domain = dom.grid_domain_from_shape(shape) # step 3 : run the algorithm Pa = hp.hparcel(domain, ldata, nbparcel, mu=3.0) # note: play with mu to change the 'stiffness of the parcellation' # step 4: look at the results Label = np.array([np.reshape(Pa.individual_labels[:, s], shape) for s in range(n_subj)]) plt.figure(figsize=(8, 4)) plt.title('Input data') for s in range(n_subj): plt.subplot(2, 5, s + 1) plt.imshow(dataset[s], interpolation='nearest') plt.axis('off') plt.figure(figsize=(8, 4)) plt.title('Resulting parcels') for s in range(n_subj): plt.subplot(2, 5, s+1) plt.imshow(Label[s], interpolation='nearest', vmin=-1, vmax=nbparcel) plt.axis('off') plt.show() nipy-0.3.0/examples/labs/need_data/000077500000000000000000000000001210344137400171335ustar00rootroot00000000000000nipy-0.3.0/examples/labs/need_data/bayesian_structural_analysis.py000077500000000000000000000035331210344137400255020ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from __future__ import print_function # Python 2/3 compatibility __doc__ = """ Example of a script that uses the BSA (Bayesian Structural Analysis) i.e. nipy.labs.spatial_models.bayesian_structural_analysis module. Author : Bertrand Thirion, 2008-2010 """ print(__doc__) #autoindent from os import mkdir, getcwd, path import pickle from numpy import array from scipy import stats from nipy.labs.spatial_models.bsa_io import make_bsa_image # Local import from get_data_light import DATA_DIR, get_second_level_dataset # Get the data nbsubj = 12 nbeta = 29 data_dir = path.join(DATA_DIR, 'group_t_images') mask_images = [path.join(data_dir, 'mask_subj%02d.nii' % n) for n in range(nbsubj)] betas = [path.join(data_dir, 'spmT_%04d_subj_%02d.nii' % (nbeta, n)) for n in range(nbsubj)] missing_file = array([not path.exists(m) for m in mask_images + betas]).any() if missing_file: get_second_level_dataset() # set various parameters subj_id = ['%04d' % i for i in range(12)] theta = float(stats.t.isf(0.01, 100)) dmax = 4. ths = 0 thq = 0.95 verbose = 1 smin = 5 write_dir = path.join(getcwd(), 'results') if not path.exists(write_dir): mkdir(write_dir) method = 'quick' print('method used:', method) # call the function AF, BF = make_bsa_image(mask_images, betas, theta, dmax, ths, thq, smin, write_dir, method, subj_id, '%04d' % nbeta, reshuffle=False) # Write the result. 
OK, this is only a temporary solution picname = path.join(write_dir, "AF_%04d.pic" % nbeta) pickle.dump(AF, open(picname, 'wb'), 2) picname = path.join(write_dir, "BF_%04d.pic" % nbeta) pickle.dump(BF, open(picname, 'wb'), 2) print("Wrote all the results in directory %s" % write_dir) nipy-0.3.0/examples/labs/need_data/demo_blob_from_image.py000077500000000000000000000051021210344137400236150ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from __future__ import print_function # Python 2/3 compatibility """ This script generates a noisy activation image and extracts the blob from it. This creates as output - a label image representing the nested blobs, - an image of the average signal per blob and - an image with the terminal blob only Author : Bertrand Thirion, 2009 """ #autoindent from os import mkdir, getcwd, path from nibabel import load, save, Nifti1Image import nipy.labs.spatial_models.hroi as hroi from nipy.labs.spatial_models.discrete_domain import grid_domain_from_image # Local import from get_data_light import DATA_DIR, get_second_level_dataset # data paths input_image = path.join(DATA_DIR, 'spmT_0029.nii.gz') if not path.exists(input_image): get_second_level_dataset() write_dir = path.join(getcwd(), 'results') if not path.exists(write_dir): mkdir(write_dir) # parameters threshold = 3.0 # blob-forming threshold smin = 5 # size threshold on blobs # prepare the data nim = load(input_image) mask_image = Nifti1Image((nim.get_data() ** 2 > 0).astype('u8'), nim.get_affine()) domain = grid_domain_from_image(mask_image) data = nim.get_data() values = data[data != 0] # compute the nested roi object nroi = hroi.HROI_as_discrete_domain_blobs(domain, values, threshold=threshold, smin=smin) # compute region-level activation averages activation = [values[nroi.select_id(id, roi=False)] for id in nroi.get_id()] nroi.set_feature('activation', activation) average_activation = nroi.representative_feature('activation') # saving the blob image,i. e. a label image descrip = "blob image extracted from %s" % input_image wim = nroi.to_image('id', roi=True, descrip=descrip) save(wim, path.join(write_dir, "blob.nii")) # saving the image of the average-signal-per-blob descrip = "blob average signal extracted from %s" % input_image wim = nroi.to_image('activation', roi=True, descrip=descrip) save(wim, path.join(write_dir, "bmap.nii")) # saving the image of the end blobs or leaves lroi = nroi.copy() lroi.reduce_to_leaves() descrip = "blob image extracted from %s" % input_image wim = lroi.to_image('id', roi=True, descrip=descrip) save(wim, path.join(write_dir, "leaves.nii")) print("Wrote the blob image in %s" % path.join(write_dir, "blob.nii")) print("Wrote the blob-average signal image in %s" % path.join(write_dir, "bmap.nii")) print("Wrote the end-blob image in %s" % path.join(write_dir, "leaves.nii")) nipy-0.3.0/examples/labs/need_data/demo_roi.py000077500000000000000000000071561210344137400213160ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from __future__ import print_function # Python 2/3 compatibility __doc__ = """ This is a little demo that simply shows ROI manipulation within the nipy framework. 
Needs matplotlib Author: Bertrand Thirion, 2009-2010 """ print(__doc__) from os import mkdir, getcwd, path import numpy as np try: import matplotlib.pyplot as plt except ImportError: raise RuntimeError("This script needs the matplotlib library") from nibabel import load, save import nipy.labs.spatial_models.mroi as mroi from nipy.labs.spatial_models.discrete_domain import grid_domain_from_image import nipy.labs.spatial_models.hroi as hroi # Local import from get_data_light import DATA_DIR, get_second_level_dataset # paths input_image = path.join(DATA_DIR, 'spmT_0029.nii.gz') mask_image = path.join(DATA_DIR, 'mask.nii.gz') if (not path.exists(input_image)) or (not path.exists(mask_image)): get_second_level_dataset() # write directory write_dir = path.join(getcwd(), 'results') if not path.exists(write_dir): mkdir(write_dir) # ----------------------------------------------------- # example 1: create the ROI from a given position # ----------------------------------------------------- position = np.array([[0, 0, 0]]) domain = grid_domain_from_image(mask_image) roi = mroi.subdomain_from_balls(domain, position, np.array([5.0])) roi_domain = domain.mask(roi.label > -1) dom_img = roi_domain.to_image() save(dom_img, path.join(write_dir, "myroi.nii")) print('Wrote an ROI mask image in %s' % path.join(write_dir, "myroi.nii")) # ---------------------------------------------------- # ---- example 2: create ROIs from a blob image ------ # ---------------------------------------------------- # --- 2.a create the blob image # parameters threshold = 3.0 # blob-forming threshold smin = 10 # size threshold on bblobs # prepare the data nim = load(input_image) affine = nim.get_affine() shape = nim.shape data = nim.get_data() values = data[data != 0] # compute the nested roi object nroi = hroi.HROI_as_discrete_domain_blobs(domain, values, threshold=threshold, smin=smin) # saving the blob image, i.e. 
a label image wim = nroi.to_image('id', roi=True) descrip = "blob image extracted from %s" % input_image blobPath = path.join(write_dir, "blob.nii") save(wim, blobPath) # --- 2.b take blob having id "132" as an ROI roi = nroi.copy() roi.select_roi([132]) wim2 = roi.to_image() roi_path_2 = path.join(write_dir, "roi_blob_1.nii") save(wim2, roi_path_2) # --- 2.c take the blob closest to 'position as an ROI' roi = mroi.subdomain_from_position_and_image(wim, position[0]) wim3 = roi.to_image() roi_path_3 = path.join(write_dir, "blob_closest_to_%d_%d_%d.nii" % (position[0][0], position[0][1], position[0][2])) save(wim3, roi_path_3) # --- 2.d make a set of ROIs from all the blobs roi = mroi.subdomain_from_image(blobPath) data = load(input_image).get_data().ravel() feature_activ = [data[roi.select_id(id, roi=False)] for id in roi.get_id()] roi.set_feature('activ', feature_activ) roi.plot_feature('activ') wim4 = roi.to_image() roi_path_4 = path.join(write_dir, "roi_all_blobs.nii") save(wim4, roi_path_4) # ---- 2.e the same, a bit more complex valid_roi = roi.get_id()[roi.representative_feature('activ') > 4.0] roi.select_roi(valid_roi) wim5 = roi.to_image() roi_path_5 = path.join(write_dir, "roi_some_blobs.nii") save(wim5, roi_path_5) print("Wrote ROI mask images in %s, \n %s \n %s \n and %s" % (roi_path_2, roi_path_3, roi_path_4, roi_path_5)) plt.show() nipy-0.3.0/examples/labs/need_data/demo_ward_clustering.py000077500000000000000000000025311210344137400237110ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from __future__ import print_function # Python 2/3 compatibility __doc__ = """ This shows the effect of ward clustering on a real fMRI dataset Author: Bertrand Thirion, 2010 """ print(__doc__) from os import mkdir, getcwd, path import numpy as np from nibabel import load, save, Nifti1Image from nipy.algorithms.graph.field import Field # Local import from get_data_light import DATA_DIR, get_second_level_dataset # paths input_image = path.join(DATA_DIR, 'spmT_0029.nii.gz') mask_image = path.join(DATA_DIR, 'mask.nii.gz') if (not path.exists(mask_image)) or (not path.exists(input_image)): get_second_level_dataset() # write directory write_dir = path.join(getcwd(), 'results') if not path.exists(write_dir): mkdir(write_dir) # read the data mask = load(mask_image).get_data() > 0 ijk = np.array(np.where(mask)).T nvox = ijk.shape[0] data = load(input_image).get_data()[mask] image_field = Field(nvox) image_field.from_3d_grid(ijk, k=6) image_field.set_field(data) u, _ = image_field.ward(100) # write the results label_image = path.join(write_dir, 'label.nii') wdata = mask - 1 wdata[mask] = u save(Nifti1Image(wdata, load(mask_image).get_affine()), label_image) print("Label image written in %s" % label_image) nipy-0.3.0/examples/labs/need_data/example_roi_and_glm.py000077500000000000000000000152611210344137400235020ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from __future__ import print_function # Python 2/3 compatibility __doc__ = """ This is an example where: 1. A sequence of fMRI volumes are loaded 2. An ROI mask is loaded 3. A design matrix describing all the effects related to the data is computed 4. A GLM is applied to all voxels in the ROI 5. A summary of the results is provided for certain contrasts 6. A plot of the HRF is provided for the mean response in the HRF 7. 
Fitted/adjusted response plots are provided Needs matplotlib Author : Bertrand Thirion, 2010 """ print(__doc__) from os import mkdir, getcwd, path import numpy as np try: import matplotlib.pyplot as plt except ImportError: raise RuntimeError("This script needs the matplotlib library") from nibabel import save, load from nipy.modalities.fmri.design_matrix import dmtx_light from nipy.modalities.fmri.experimental_paradigm import EventRelatedParadigm from nipy.labs.utils.simul_multisubject_fmri_dataset import \ surrogate_4d_dataset from nipy.modalities.fmri.glm import GeneralLinearModel import nipy.labs.spatial_models.mroi as mroi from nipy.labs.spatial_models.discrete_domain import grid_domain_from_image # Local import from get_data_light import DATA_DIR, get_second_level_dataset ####################################### # Simulation parameters ####################################### # volume mask mask_path = path.join(DATA_DIR, 'mask.nii.gz') if not path.exists(mask_path): get_second_level_dataset() mask = load(mask_path) mask_array, affine = mask.get_data() > 0, mask.get_affine() # timing n_scans = 128 tr = 2.4 # paradigm frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans) conditions = np.arange(20) % 2 onsets = np.linspace(5, (n_scans - 1) * tr - 10, 20) # in seconds hrf_model = 'canonical' motion = np.cumsum(np.random.randn(n_scans, 6), 0) add_reg_names = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz'] # write directory write_dir = path.join(getcwd(), 'results') if not path.exists(write_dir): mkdir(write_dir) ######################################## # Design matrix ######################################## paradigm = np.vstack(([conditions, onsets])).T paradigm = EventRelatedParadigm(conditions, onsets) X, names = dmtx_light(frametimes, paradigm, drift_model='cosine', hfcut=128, hrf_model=hrf_model, add_regs=motion, add_reg_names=add_reg_names) ######################################## # Create ROIs ######################################## positions = np.array([[60, -30, 5], [50, 27, 5]]) # in mm (here in the MNI space) radii = np.array([8, 6]) domain = grid_domain_from_image(mask) my_roi = mroi.subdomain_from_balls(domain, positions, radii) # to save an image of the ROIs save(my_roi.to_image(), path.join(write_dir, "roi.nii")) ####################################### # Get the FMRI data ####################################### fmri_data = surrogate_4d_dataset(mask=mask, dmtx=X)[0] Y = fmri_data.get_data()[mask_array] # artificially added signal in ROIs to make the example more meaningful activation = 30 * (X.T[1] + .5 * X.T[0]) for (position, radius) in zip(positions, radii): Y[((domain.coord - position) ** 2).sum(1) < radius ** 2 + 1] += activation ######################################## # Perform a GLM analysis ######################################## # GLM fit glm = GeneralLinearModel(X) glm.fit(Y.T) # specifiy the contrast [1 -1 0 ..] 
contrast = np.hstack((1, -1, np.zeros(X.shape[1] - 2))) # compute the constrast image related to it zvals = glm.contrast(contrast).z_score() ######################################## # ROI-based analysis ######################################## # exact the time courses with ROIs signal_feature = [Y[my_roi.select_id(id, roi=False)] for id in my_roi.get_id()] my_roi.set_feature('signal', signal_feature) # ROI average time courses my_roi.set_roi_feature('signal_avg', my_roi.representative_feature('signal')) # roi-level contrast average contrast_feature = [zvals[my_roi.select_id(id, roi=False)] for id in my_roi.get_id()] my_roi.set_feature('contrast', contrast_feature) my_roi.set_roi_feature('contrast_avg', my_roi.representative_feature('contrast')) ######################################## # GLM analysis on the ROI average time courses ######################################## n_reg = len(names) roi_tc = my_roi.get_roi_feature('signal_avg') glm.fit(roi_tc.T) plt.figure() plt.subplot(1, 2, 1) betas = glm.get_beta() b1 = plt.bar(np.arange(n_reg - 1), betas[:-1, 0], width=.4, color='blue', label='region 1') b2 = plt.bar(np.arange(n_reg - 1) + 0.3, betas[:- 1, 1], width=.4, color='red', label='region 2') plt.xticks(np.arange(n_reg - 1), names[:-1], fontsize=10) plt.legend() plt.title('Parameter estimates \n for the roi time courses') bx = plt.subplot(1, 2, 2) my_roi.plot_feature('contrast', bx) ######################################## # fitted and adjusted response ######################################## res = np.hstack([x.resid for x in glm.results_.values()]).T betas = np.hstack([x.theta for x in glm.results_.values()]) proj = np.eye(n_reg) proj[2:] = 0 fit = np.dot(np.dot(betas.T, proj), X.T) # plot it plt.figure() for k in range(my_roi.k): plt.subplot(my_roi.k, 1, k + 1) plt.plot(fit[k]) plt.plot(fit[k] + res[k], 'r') plt.xlabel('time (scans)') plt.legend(('effects', 'adjusted')) ########################################### # hrf for condition 1 ############################################ fir_order = 6 X_fir, _ = dmtx_light( frametimes, paradigm, hrf_model='fir', drift_model='cosine', drift_order=3, fir_delays=np.arange(fir_order), add_regs=motion, add_reg_names=add_reg_names) glm_fir = GeneralLinearModel(X_fir) plt.figure() for k in range(my_roi.k): # fit a glm on the ROI's time course glm_fir.fit(roi_tc[k]) # access to the corresponding result structure res = list(glm_fir.results_.values())[0] # only one value in this case plt.subplot(1, my_roi.k, k + 1) # get the confidence intervals for the effects and plot them -condition 0 conf_int = res.conf_int(cols=np.arange(fir_order)).squeeze() yerr = (conf_int[:, 1] - conf_int[:, 0]) / 2 plt.errorbar(np.arange(fir_order), conf_int.mean(1), yerr=yerr) # get the confidence intervals for the effects and plot them -condition 1 conf_int = res.conf_int(cols=np.arange(fir_order, 2 * fir_order)).squeeze() yerr = (conf_int[:, 1] - conf_int[:, 0]) / 2 plt.errorbar(np.arange(fir_order), conf_int.mean(1), yerr=yerr) plt.legend(('condition c0', 'condition c1')) plt.title('estimated hrf shape') plt.xlabel('time(scans)') plt.show() nipy-0.3.0/examples/labs/need_data/first_level_fiac.py000077500000000000000000000075751210344137400230260ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from __future__ import print_function # Python 2/3 compatibility """ Script that performs a first-level analysis of the FIAC dataset. 
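Two preprocessed runs of one subject are fitted with a common GLM, and a
z-score map is written for each contrast in a local 'results' directory.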
See ``examples/fiac/fiac_example.py`` for another approach to this analysis. Needs the *example data* package. Also needs matplotlib Author: Alexis Roche, Bertrand Thirion, 2009--2012 """ from os import mkdir, getcwd, path import numpy as np try: import matplotlib.pyplot as plt except ImportError: raise RuntimeError("This script needs the matplotlib library") from nibabel import save from nipy.modalities.fmri.glm import FMRILinearModel from nipy.utils import example_data from nipy.labs.viz import plot_map, cm # ----------------------------------------------------------- # --------- Get the data ----------------------------------- #----------------------------------------------------------- fmri_files = [example_data.get_filename('fiac', 'fiac0', run) for run in ['run1.nii.gz', 'run2.nii.gz']] design_files = [example_data.get_filename('fiac', 'fiac0', run) for run in ['run1_design.npz', 'run2_design.npz']] mask_file = example_data.get_filename('fiac', 'fiac0', 'mask.nii.gz') # Load all the data multi_session_model = FMRILinearModel(fmri_files, design_files, mask_file) # GLM fitting multi_session_model.fit(do_scaling=True, model='ar1') def make_fiac_contrasts(p): """Specify some contrasts for the FIAC experiment Parameters ========== p: int, the number of columns of the design matrix (for all sessions) """ con = {} # the design matrices of both runs comprise 13 columns # the first 5 columns of the design matrices correspond to the following # conditions: ["SSt-SSp", "SSt-DSp", "DSt-SSp", "DSt-DSp", "FirstSt"] def length_p_vector(con, p): return np.hstack((con, np.zeros(p - len(con)))) con["SStSSp_minus_DStDSp"] = length_p_vector([1, 0, 0, - 1], p) con["DStDSp_minus_SStSSp"] = length_p_vector([- 1, 0, 0, 1], p) con["DSt_minus_SSt"] = length_p_vector([- 1, - 1, 1, 1], p) con["DSp_minus_SSp"] = length_p_vector([- 1, 1, - 1, 1], p) con["DSt_minus_SSt_for_DSp"] = length_p_vector([0, - 1, 0, 1], p) con["DSp_minus_SSp_for_DSt"] = length_p_vector([0, 0, - 1, 1], p) con["Deactivation"] = length_p_vector([- 1, - 1, - 1, - 1, 4], p) con["Effects_of_interest"] = np.eye(p)[:5] return con # compute fixed effects of the two runs and compute related images n_regressors = np.load(design_files[0])['X'].shape[1] # note: implictly assume the same shape for all sessions ! 
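# For instance, with p = 13 columns, "SStSSp_minus_DStDSp" expands to
# [1, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0]: the weights address the first
# condition columns and the remaining drift/confound columns are zero-padded
# by length_p_vector.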
contrasts = make_fiac_contrasts(n_regressors) # write directory write_dir = path.join(getcwd(), 'results') if not path.exists(write_dir): mkdir(write_dir) print('Computing contrasts...') mean_map = multi_session_model.means[0] # for display for index, (contrast_id, contrast_val) in enumerate(contrasts.items()): print(' Contrast % 2i out of %i: %s' % ( index + 1, len(contrasts), contrast_id)) z_image_path = path.join(write_dir, '%s_z_map.nii' % contrast_id) z_map, = multi_session_model.contrast( [contrast_val] * 2, con_id=contrast_id, output_z=True) save(z_map, z_image_path) # make a snapshot of the contrast activation if contrast_id == 'Effects_of_interest': vmax = max(- z_map.get_data().min(), z_map.get_data().max()) vmin = - vmax plot_map(z_map.get_data(), z_map.get_affine(), anat=mean_map.get_data(), anat_affine=mean_map.get_affine(), cmap=cm.cold_hot, vmin=vmin, vmax=vmax, figure=10, threshold=2.5, black_bg=True) plt.savefig(path.join(write_dir, '%s_z_map.png' % contrast_id)) print("All the results were witten in %s" % write_dir) plt.show() nipy-0.3.0/examples/labs/need_data/get_data_light.py000077500000000000000000000070011210344137400224450ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from __future__ import print_function # Python 2/3 compatibility """ Get two images from the web (one mask image and one spmT image) and put them in the nipy user dir - usually therefore at ``~/.nipy/tests/data``. Author : Bertrand Thirion, 2009 """ import os try: from urllib2 import urlopen # Python 2 except ImportError: from urllib.request import urlopen # Python 3 import tarfile from nibabel.data import get_nipy_user_dir NIPY_DIR = get_nipy_user_dir() DATA_DIR = os.path.join(NIPY_DIR, 'tests', 'data') def get_second_level_dataset(): """ Lightweight dataset for multi-subject analysis """ # define several paths url = 'ftp://ftp.cea.fr/pub/dsv/madic/download/nipy' mask_image = os.path.join(DATA_DIR, 'mask.nii.gz') input_image = os.path.join(DATA_DIR, 'spmT_0029.nii.gz') group_data = os.path.join(DATA_DIR, 'group_t_images.tar.gz') # if needed create DATA_DIR if not os.path.exists(DATA_DIR): os.makedirs(DATA_DIR) assert os.path.exists(DATA_DIR) # download mask_image if necessary if not os.path.exists(mask_image): filename = 'mask.nii.gz' datafile = os.path.join(url, filename) fp = urlopen(datafile) local_file = open(mask_image, 'wb') local_file.write(fp.read()) local_file.flush() local_file.close() # download input_image if necessary if not os.path.exists(input_image): filename = 'spmT_0029.nii.gz' datafile = os.path.join(url, filename) fp = urlopen(datafile) local_file = open(input_image, 'wb') local_file.write(fp.read()) local_file.flush() local_file.close() # download group_data if necessary if not os.path.exists(group_data): filename = 'group_t_images.tar.gz' datafile = os.path.join(url, filename) fp = urlopen(datafile) local_file = open(group_data, 'wb') local_file.write(fp.read()) local_file.flush() local_file.close() # untargzip group_data tar = tarfile.open(group_data) tar.extractall(DATA_DIR) tar.close() os.remove(group_data) return DATA_DIR def get_first_level_dataset(): """ Heavier dataset (30 MO) for first-level analysis """ # define several paths url = 'ftp://ftp.cea.fr/pub/dsv/madic/download/nipy' raw_fmri = os.path.join(DATA_DIR, 's12069_swaloc1_corr.nii.gz') paradigm = os.path.join(DATA_DIR, 'localizer_paradigm.csv') # create DATA_DIR if not os.path.exists(DATA_DIR): 
os.makedirs(DATA_DIR) assert os.path.exists(DATA_DIR) # download mask_image if necessary if not os.path.exists(paradigm): print('Downloading mask image, this may take time') datafile = os.path.join(url, 'localizer_paradigm.csv') fp = urlopen(datafile) local_file = open(paradigm, 'wb') local_file.write(fp.read()) local_file.flush() local_file.close() # download raw_fmri if necessary if not os.path.exists(raw_fmri): print('Downloading fmri image, this may take time') filename = 's12069_swaloc1_corr.nii.gz' datafile = os.path.join(url, filename) fp = urlopen(datafile) local_file = open(raw_fmri, 'wb') local_file.write(fp.read()) local_file.flush() local_file.close() return DATA_DIR if __name__ == '__main__': get_second_level_dataset() nipy-0.3.0/examples/labs/need_data/glm_beta_and_variance.py000066400000000000000000000072011210344137400237510ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from __future__ import print_function __doc__ = \ """ This example shows how to get variance and beta estimated from a nipy GLM. More specifically: 1. A sequence of fMRI volumes are loaded. 2. A design matrix describing all the effects related to the data is computed. 3. A GLM is applied to the dataset, effect and variance images are produced. Note that this corresponds to a single run. Needs matplotlib Author : Bertrand Thirion, 2010--2012 """ print(__doc__) from os import mkdir, getcwd, path import numpy as np try: import matplotlib.pyplot as plt except ImportError: raise RuntimeError("This script needs the matplotlib library") from nibabel import Nifti1Image, save from nipy.modalities.fmri.glm import FMRILinearModel from nipy.modalities.fmri.design_matrix import make_dmtx from nipy.modalities.fmri.experimental_paradigm import \ load_paradigm_from_csv_file from nipy.labs.viz import plot_map, cm # Local import from get_data_light import DATA_DIR, get_first_level_dataset ####################################### # Data and analysis parameters ####################################### # volume mask # This dataset is large get_first_level_dataset() data_path = path.join(DATA_DIR, 's12069_swaloc1_corr.nii.gz') paradigm_file = path.join(DATA_DIR, 'localizer_paradigm.csv') # timing n_scans = 128 tr = 2.4 # paradigm frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans) # confounds hrf_model = 'canonical' drift_model = "cosine" hfcut = 128 # write directory write_dir = path.join(getcwd(), 'results') if not path.exists(write_dir): mkdir(write_dir) print('Computation will be performed in directory: %s' % write_dir) ######################################## # Design matrix ######################################## print('Loading design matrix...') # the example example.labs.write_paradigm_file shows how to create this file paradigm = load_paradigm_from_csv_file(paradigm_file)['0'] design_matrix = make_dmtx(frametimes, paradigm, hrf_model=hrf_model, drift_model=drift_model, hfcut=hfcut) ax = design_matrix.show() ax.set_position([.05, .25, .9, .65]) ax.set_title('Design matrix') plt.savefig(path.join(write_dir, 'design_matrix.png')) dim = design_matrix.matrix.shape[1] ######################################## # Perform a GLM analysis ######################################## print('Fitting a GLM (this takes time)...') fmri_glm = FMRILinearModel(data_path, design_matrix.matrix, mask='compute') fmri_glm.fit(do_scaling=True, model='ar1') ######################################## # Output beta and variance images 
########################################
beta_hat = fmri_glm.glms[0].get_beta()  # Least-squares estimates of the beta
variance_hat = fmri_glm.glms[0].get_mse()  # Estimates of the variance
mask = fmri_glm.mask.get_data() > 0

# output beta images
beta_map = np.tile(mask.astype(np.float)[..., np.newaxis], dim)
beta_map[mask] = beta_hat.T
beta_image = Nifti1Image(beta_map, fmri_glm.affine)
beta_image.get_header()['descrip'] = (
    'Parameter estimates of the localizer dataset')
save(beta_image, path.join(write_dir, 'beta.nii'))
print("Beta image written in %s" % write_dir)

variance_map = mask.astype(np.float)
variance_map[mask] = variance_hat

# Create a snapshot of the variance image
vmax = np.log(variance_hat.max())
plot_map(np.log(variance_map + .1),
         fmri_glm.affine,
         cmap=cm.hot_black_bone,
         vmin=np.log(0.1),
         vmax=vmax,
         anat=None,
         threshold=.1, alpha=.9)
plt.show()
nipy-0.3.0/examples/labs/need_data/group_reproducibility_analysis.py000077500000000000000000000077721210344137400260510ustar00rootroot00000000000000#!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from __future__ import print_function  # Python 2/3 compatibility
"""
Example of script to analyse the reproducibility in group studies using a
bootstrap procedure.

This reproduces approximately the work described in 'Analysis of a large fMRI
cohort: Statistical and methodological issues for group analyses' Thirion B,
Pinel P, Meriaux S, Roche A, Dehaene S, Poline JB. Neuroimage. 2007
Mar;35(1):105-20.

Needs matplotlib

Author: Bertrand Thirion, 2005-2009
"""
from os import getcwd, mkdir, path

from numpy import array

try:
    import matplotlib.pyplot as plt
except ImportError:
    raise RuntimeError("This script needs the matplotlib library")

from nipy.labs.utils.reproducibility_measures import (
    group_reproducibility_metrics)

# Local import
from get_data_light import DATA_DIR, get_second_level_dataset

print('This analysis takes a long while, please be patient')

##############################################################################
# Set the paths, data, etc.
############################################################################## nsubj = 12 nbeta = 29 data_dir = path.join(DATA_DIR, 'group_t_images') mask_images = [path.join(data_dir, 'mask_subj%02d.nii' % n) for n in range(nsubj)] stat_images = [path.join(data_dir, 'spmT_%04d_subj_%02d.nii' % (nbeta, n)) for n in range(nsubj)] contrast_images = [path.join(data_dir, 'con_%04d_subj_%02d.nii' % (nbeta, n)) for n in range(nsubj)] all_images = mask_images + stat_images + contrast_images missing_file = array([not path.exists(m) for m in all_images]).any() if missing_file: get_second_level_dataset() # write directory write_dir = path.join(getcwd(), 'results') if not path.exists(write_dir): mkdir(write_dir) ############################################################################## # main script ############################################################################## ngroups = [4] thresholds = [3.0, 4.0, 5.0] sigma = 6.0 csize = 10 niter = 10 method = 'crfx' verbose = 0 swap = False voxel_results, cluster_results, peak_results = group_reproducibility_metrics( mask_images, contrast_images, [], thresholds, ngroups, method, cluster_threshold=csize, number_of_samples=niter, sigma=sigma, do_clusters=True, do_voxels=True, do_peaks=True, swap=swap) kap = [k for k in voxel_results[ngroups[0]].values()] clt = [k for k in cluster_results[ngroups[0]].values()] pk = [k for k in peak_results[ngroups[0]].values()] ############################################################################## # plot ############################################################################## plt.figure() plt.subplot(1, 3, 1) plt.boxplot(kap) plt.title('voxel-level reproducibility') plt.xticks(range(1, 1 + len(thresholds)), thresholds) plt.xlabel('threshold') plt.subplot(1, 3, 2) plt.boxplot(clt) plt.title('cluster-level reproducibility') plt.xticks(range(1, 1 + len(thresholds)), thresholds) plt.xlabel('threshold') plt.subplot(1, 3, 3) plt.boxplot(clt) plt.title('cluster-level reproducibility') plt.xticks(range(1, 1 + len(thresholds)), thresholds) plt.xlabel('threshold') ############################################################################## # create an image ############################################################################## """ # this is commented until a new version of the code allows it # with the adequate level of abstraction th = 4.0 swap = False kwargs = {'threshold':th,'csize':csize} rmap = map_reproducibility(Functional, VarFunctional, grp_mask, ngroups, method, swap, verbose, **kwargs) wmap = mask.astype(np.int) wmap[mask] = rmap wim = Nifti1Image(wmap, affine) wim.get_header()['descrip']= 'reproducibility map at threshold %f, \ cluster size %d'%(th,csize) wname = path.join(write_dir,'repro.nii') save(wim, wname) print('Wrote a reproducibility image in %s'%wname) """ nipy-0.3.0/examples/labs/need_data/histogram_fits.py000077500000000000000000000040071210344137400225330ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from __future__ import print_function # Python 2/3 compatibility """ Example of a script that perfoms histogram analysis of an activation image. This is based on a real fMRI image. Simply modify the input image path to make it work on your preferred image. 
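Three fits of the intensity histogram are compared: a Gamma-Gaussian
mixture, a three-class Gaussian mixture, and a robust fit of the null mode
with a single Gaussian (empirical null).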
Needs matplotlib Author : Bertrand Thirion, 2008-2009 """ import os import numpy as np import scipy.stats as st try: import matplotlib.pyplot as plt except ImportError: raise RuntimeError("This script needs the matplotlib library") from nibabel import load import nipy.algorithms.statistics.empirical_pvalue as en # Local import from get_data_light import DATA_DIR, get_second_level_dataset # parameters verbose = 1 theta = float(st.t.isf(0.01, 100)) # paths mask_image = os.path.join(DATA_DIR, 'mask.nii.gz') input_image = os.path.join(DATA_DIR, 'spmT_0029.nii.gz') if (not os.path.exists(mask_image)) or (not os.path.exists(input_image)): get_second_level_dataset() # Read the mask nim = load(mask_image) mask = nim.get_data() # read the functional image rbeta = load(input_image) beta = rbeta.get_data() beta = beta[mask > 0] mf = plt.figure(figsize=(13, 5)) a1 = plt.subplot(1, 3, 1) a2 = plt.subplot(1, 3, 2) a3 = plt.subplot(1, 3, 3) # fit beta's histogram with a Gamma-Gaussian mixture bfm = np.array([2.5, 3.0, 3.5, 4.0, 4.5]) bfp = en.gamma_gaussian_fit(beta, bfm, verbose=1, mpaxes=a1) # fit beta's histogram with a mixture of Gaussians alpha = 0.01 pstrength = 100 bfq = en.three_classes_GMM_fit(beta, bfm, alpha, pstrength, verbose=1, mpaxes=a2) # fit the null mode of beta with the robust method efdr = en.NormalEmpiricalNull(beta) efdr.learn() efdr.plot(bar=0, mpaxes=a3) a1.set_title('Fit of the density with \n a Gamma-Gaussian mixture') a2.set_title('Fit of the density with \n a mixture of Gaussians') a3.set_title('Robust fit of the density \n with a single Gaussian') plt.show() nipy-0.3.0/examples/labs/need_data/localizer_glm_ar.py000077500000000000000000000122771210344137400230260ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from __future__ import print_function # Python 2/3 compatibility __doc__ = """ Full step-by-step example of fitting a GLM to experimental data and visualizing the results. More specifically: 1. A sequence of fMRI volumes are loaded 2. A design matrix describing all the effects related to the data is computed 3. a mask of the useful brain volume is computed 4. A GLM is applied to the dataset (effect/covariance, then contrast estimation) Note that this corresponds to a single run. 
Needs matplotlib Author : Bertrand Thirion, 2010--2012 """ print(__doc__) from os import mkdir, getcwd, path import numpy as np try: import matplotlib.pyplot as plt except ImportError: raise RuntimeError("This script needs the matplotlib library") from nibabel import save from nipy.modalities.fmri.glm import FMRILinearModel from nipy.modalities.fmri.design_matrix import make_dmtx from nipy.modalities.fmri.experimental_paradigm import \ load_paradigm_from_csv_file from nipy.labs.viz import plot_map, cm # Local import from get_data_light import DATA_DIR, get_first_level_dataset ####################################### # Data and analysis parameters ####################################### # volume mask # This dataset is large get_first_level_dataset() data_path = path.join(DATA_DIR, 's12069_swaloc1_corr.nii.gz') paradigm_file = path.join(DATA_DIR, 'localizer_paradigm.csv') # timing n_scans = 128 tr = 2.4 # paradigm frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans) # confounds hrf_model = 'canonical with derivative' drift_model = "cosine" hfcut = 128 # write directory write_dir = path.join(getcwd(), 'results') if not path.exists(write_dir): mkdir(write_dir) print('Computation will be performed in directory: %s' % write_dir) ######################################## # Design matrix ######################################## print('Loading design matrix...') paradigm = load_paradigm_from_csv_file(paradigm_file)['0'] design_matrix = make_dmtx(frametimes, paradigm, hrf_model=hrf_model, drift_model=drift_model, hfcut=hfcut) ax = design_matrix.show() ax.set_position([.05, .25, .9, .65]) ax.set_title('Design matrix') plt.savefig(path.join(write_dir, 'design_matrix.png')) ######################################### # Specify the contrasts ######################################### # simplest ones contrasts = {} n_columns = len(design_matrix.names) for i in range(paradigm.n_conditions): contrasts['%s' % design_matrix.names[2 * i]] = np.eye(n_columns)[2 * i] # and more complex/ interesting ones contrasts["audio"] = contrasts["clicDaudio"] + contrasts["clicGaudio"] +\ contrasts["calculaudio"] + contrasts["phraseaudio"] contrasts["video"] = contrasts["clicDvideo"] + contrasts["clicGvideo"] + \ contrasts["calculvideo"] + contrasts["phrasevideo"] contrasts["left"] = contrasts["clicGaudio"] + contrasts["clicGvideo"] contrasts["right"] = contrasts["clicDaudio"] + contrasts["clicDvideo"] contrasts["computation"] = contrasts["calculaudio"] + contrasts["calculvideo"] contrasts["sentences"] = contrasts["phraseaudio"] + contrasts["phrasevideo"] contrasts["H-V"] = contrasts["damier_H"] - contrasts["damier_V"] contrasts["V-H"] = contrasts["damier_V"] - contrasts["damier_H"] contrasts["left-right"] = contrasts["left"] - contrasts["right"] contrasts["right-left"] = contrasts["right"] - contrasts["left"] contrasts["audio-video"] = contrasts["audio"] - contrasts["video"] contrasts["video-audio"] = contrasts["video"] - contrasts["audio"] contrasts["computation-sentences"] = contrasts["computation"] - \ contrasts["sentences"] contrasts["reading-visual"] = contrasts["sentences"] * 2 - \ contrasts["damier_H"] - contrasts["damier_V"] contrasts['effects_of_interest'] = np.eye(25)[:20:2] ######################################## # Perform a GLM analysis ######################################## print('Fitting a GLM (this takes time)...') fmri_glm = FMRILinearModel(data_path, design_matrix.matrix, mask='compute') fmri_glm.fit(do_scaling=True, model='ar1') ######################################### # Estimate the contrasts 
#########################################
print('Computing contrasts...')
for index, (contrast_id, contrast_val) in enumerate(contrasts.items()):
    print(' Contrast % 2i out of %i: %s' %
          (index + 1, len(contrasts), contrast_id))
    # save the z_image
    image_path = path.join(write_dir, '%s_z_map.nii' % contrast_id)
    z_map, = fmri_glm.contrast(contrast_val, con_id=contrast_id,
                               output_z=True)
    save(z_map, image_path)

    # Create snapshots of the contrasts
    vmax = max(- z_map.get_data().min(), z_map.get_data().max())
    if index > 0:
        plt.clf()
    plot_map(z_map.get_data(), z_map.get_affine(),
             cmap=cm.cold_hot,
             vmin=- vmax, vmax=vmax,
             anat=None, figure=10,
             threshold=2.5)
    plt.savefig(path.join(write_dir, '%s_z_map.png' % contrast_id))

print("All the results were written in %s" % write_dir)

plt.show()
nipy-0.3.0/examples/labs/need_data/one_sample_t_test.py000066400000000000000000000053511210344137400232150ustar00rootroot00000000000000#!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from __future__ import print_function

__doc__ = """
Example of a one-sample t-test using the GLM formalism.

This script takes individual contrast images and masks and runs a simple GLM.
This can be readily generalized to any design matrix.

This particular example shows the statistical map of a contrast related to a
computation task (subtraction of computation task minus sentence
reading/listening).

Needs matplotlib.

Author : Bertrand Thirion, 2012
"""
print(__doc__)

#autoindent
from os import mkdir, getcwd, path

import numpy as np

try:
    import matplotlib.pyplot as plt
except ImportError:
    raise RuntimeError("This script needs the matplotlib library")

from nibabel import load, concat_images, save, Nifti1Image

from nipy.labs.mask import intersect_masks
from nipy.modalities.fmri.glm import FMRILinearModel
from nipy.labs.viz import plot_map, cm

# Local import
from get_data_light import DATA_DIR, get_second_level_dataset

# Get the data
n_subjects = 12
n_beta = 29
data_dir = path.join(DATA_DIR, 'group_t_images')
mask_images = [path.join(data_dir, 'mask_subj%02d.nii' % n)
               for n in range(n_subjects)]
betas = [path.join(data_dir, 'spmT_%04d_subj_%02d.nii' % (n_beta, n))
         for n in range(n_subjects)]

missing_files = np.array([not path.exists(m) for m in mask_images + betas])
if missing_files.any():
    get_second_level_dataset()

write_dir = path.join(getcwd(), 'results')
if not path.exists(write_dir):
    mkdir(write_dir)

# Compute a population-level mask as the intersection of individual masks
grp_mask = Nifti1Image(intersect_masks(mask_images).astype(np.int8),
                       load(mask_images[0]).get_affine())

# concatenate the individual images
first_level_image = concat_images(betas)

# set the model
design_matrix = np.ones(len(betas))[:, np.newaxis]  # only the intercept
grp_model = FMRILinearModel(first_level_image, design_matrix, grp_mask)

# GLM fitting using ordinary least_squares
grp_model.fit(do_scaling=False, model='ols')

# specify and estimate the contrast
contrast_val = np.array(([[1]]))  # the only possible contrast !
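# With a single intercept regressor, this contrast simply tests whether the
# mean effect across subjects differs from zero, i.e. the same null
# hypothesis as a voxel-wise one-sample t-test.  As a rough, purely
# illustrative cross-check (hypothetical, not part of the original analysis),
# the GLM statistics could be compared with scipy on the masked voxels:
#     from scipy import stats
#     group_data = np.array([load(b).get_data()[grp_mask.get_data() > 0]
#                            for b in betas])
#     t_vals, p_vals = stats.ttest_1samp(group_data, 0, axis=0)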
z_map, = grp_model.contrast(contrast_val, con_id='one_sample', output_z=True) # write the results save(z_map, path.join(write_dir, 'one_sample_z_map.nii')) # look at the result vmax = max(- z_map.get_data().min(), z_map.get_data().max()) vmin = - vmax plot_map(z_map.get_data(), z_map.get_affine(), cmap=cm.cold_hot, vmin=vmin, vmax=vmax, threshold=3., black_bg=True) plt.savefig(path.join(write_dir, '%s_z_map.png' % 'one_sample')) plt.show() print("Wrote all the results in directory %s" % write_dir) nipy-0.3.0/examples/labs/need_data/parcel_intra.py000077500000000000000000000030121210344137400221470ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from __future__ import print_function # Python 2/3 compatibility __doc__ = """ Example of script to parcellate the data from one subject, using various algorithms. Note that it can take some time. author: Bertrand Thirion, 2005-2009 """ print(__doc__) from os import mkdir, getcwd, path from numpy import array from nipy.labs.spatial_models.parcel_io import fixed_parcellation # Local import from get_data_light import DATA_DIR, get_second_level_dataset # ------------------------------------ # Get the data (mask+functional image) # take several experimental conditions # time courses could be used instead n_beta = [29] mask_image = path.join(DATA_DIR, 'mask.nii.gz') betas = [path.join(DATA_DIR, 'spmT_%04d.nii.gz' % n) for n in n_beta] missing_file = array([not path.exists(m) for m in [mask_image] + betas]).any() if missing_file: get_second_level_dataset() # set the parameters n_parcels = 500 mu = 10 nn = 6 verbose = 1 # write directory write_dir = path.join(getcwd(), 'results') if not path.exists(write_dir): mkdir(write_dir) lpa = fixed_parcellation(mask_image, betas, n_parcels, nn, 'gkm', write_dir, mu, verbose) lpa = fixed_parcellation(mask_image, betas, n_parcels, nn, 'ward', write_dir, mu, verbose) lpa = fixed_parcellation(mask_image, betas, n_parcels, nn, 'ward_and_gkm', write_dir, mu, verbose) nipy-0.3.0/examples/labs/need_data/parcel_multisubj.py000077500000000000000000000041131210344137400230530ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from __future__ import print_function # Python 2/3 compatibility """ Example of script to parcellate mutli-subject data. May take some time to complete. 
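Individual masks and contrast images are combined into a common domain, a
hierarchical parcellation into 200 parcels is estimated, and parcel-based
one-sample statistics are then computed and written to disk.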
Author: Bertrand Thirion, 2005-2009 """ from os import mkdir, getcwd, path from numpy import array from nipy.labs.spatial_models.parcel_io import parcel_input, \ write_parcellation_images, parcellation_based_analysis from nipy.labs.spatial_models.hierarchical_parcellation import hparcel # Local import from get_data_light import DATA_DIR, get_second_level_dataset # Get the data nb_subj = 12 subj_id = ['subj_%02d' % s for s in range(nb_subj)] nbeta = '0029' data_dir = path.join(DATA_DIR, 'group_t_images') mask_images = [path.join(data_dir, 'mask_subj%02d.nii' % n) for n in range(nb_subj)] learn_images = [path.join(data_dir, 'spmT_%s_subj_%02d.nii' % (nbeta, n)) for n in range(nb_subj)] missing_file = array( [not path.exists(m) for m in mask_images + learn_images]).any() learn_images = [[m] for m in learn_images] if missing_file: get_second_level_dataset() # parameter for the intersection of the mask ths = .5 # number of parcels nbparcel = 200 # write directory write_dir = path.join(getcwd(), 'results') if not path.exists(write_dir): mkdir(write_dir) # prepare the parcel structure domain, ldata = parcel_input(mask_images, learn_images, ths) # run the algorithm fpa = hparcel(domain, ldata, nbparcel, verbose=1) # produce some output images write_parcellation_images(fpa, subject_id=subj_id, swd=write_dir) # do some parcellation-based analysis: # take some test images whose parcel-based signal needs to be assessed test_images = [path.join(data_dir, 'spmT_%s_subj_%02d.nii' % (nbeta, n)) for n in range(nb_subj)] # compute and write the parcel-based statistics rfx_path = path.join(write_dir, 'prfx_%s.nii' % nbeta) parcellation_based_analysis(fpa, test_images, 'one_sample', rfx_path=rfx_path) print("Wrote everything in %s" % write_dir) nipy-0.3.0/examples/labs/need_data/permutation_test.py000077500000000000000000000030641210344137400231210ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from __future__ import print_function # Python 2/3 compatibility """ Example of onesample permutation test Needs *example data* package """ import numpy as np from nipy.labs.group.permutation_test import permutation_test_onesample from nipy.utils import example_data # Get group data group_data = example_data.get_filename('neurospin', 'language_babies', 'offset_002.npz') f = np.load(group_data) data, vardata, xyz = f['mat'], f['var'], f['xyz'] # Create one-sample permutation test instance ptest = permutation_test_onesample(data, xyz, stat_id='wilcoxon') # Cluster definition: (threshold, diameter) # Note that a list of definitions can be passed to ptest.calibrate cluster_def = (ptest.height_threshold(0.01), None) print(cluster_def) # Multiple calibration # To get accurate pvalues, don't pass nperms (default is 1e4) # Yet it will take longer to run voxel_res, cluster_res, region_res = ptest.calibrate(nperms=100, clusters=[cluster_def]) # Simulated Zmax values for FWER correction simu_zmax = ptest.zscore(voxel_res['perm_maxT_values']) # Output regions ## This is a list because several cluster definitions can be accepted clusters = cluster_res[0] sizes = clusters['size_values'] clusters_Pcorr = clusters['size_Corr_p_values'] # Simulated cluster sizes simu_s = clusters['perm_size_values'] simu_smax = clusters['perm_maxsize_values'] nipy-0.3.0/examples/labs/need_data/plot_registration.py000077500000000000000000000020541210344137400232610ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: 
python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from __future__ import print_function # Python 2/3 compatibility __doc__ = """ Example of plotting a registration checker with nipy.labs visualization tools The idea is to represent the anatomical image to be checked with an overlay of the edges of the reference image. This idea is borrowed from FSL. Needs the *templates* data package. Needs matplotlib. """ print(__doc__) try: import matplotlib.pyplot as plt except ImportError: raise RuntimeError("This script needs the matplotlib library") from nipy.labs import viz from nipy.labs.viz_tools import anat_cache # Get the data. Here we are using the reference T1 image anat, affine, _ = anat_cache._AnatCache.get_anat() # Here we use the same image as a reference. As a result it is perfectly # aligned. reference = anat reference_affine = affine slicer = viz.plot_anat(anat, affine, dim=.2, black_bg=True) slicer.edge_map(reference, reference_affine) plt.show() nipy-0.3.0/examples/labs/need_data/tmin_statistic.py000066400000000000000000000077111210344137400225510ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from __future__ import print_function __doc__ = """ Example where the result of the min of two contrasts is computed and displayed. This is based on the Localizer dataset, in which we want to find the regions activated both in left and right finger tapping. Notes ----- This is the valid conjunction test discussed in: Nichols T, Brett M, Andersson J, Wager T, Poline JB. Valid conjunction inference with the minimum statistic. Neuroimage. 2005 Apr 15;25(3):653-60. Needs matplotlib Author : Bertrand Thirion, 2012 """ print(__doc__) from os import mkdir, getcwd, path import numpy as np try: import matplotlib.pyplot as plt except ImportError: raise RuntimeError("This script needs the matplotlib library") from nibabel import save from nipy.modalities.fmri.glm import FMRILinearModel from nipy.modalities.fmri.design_matrix import make_dmtx from nipy.modalities.fmri.experimental_paradigm import \ load_paradigm_from_csv_file from nipy.labs.viz import plot_map, cm # Local import from get_data_light import DATA_DIR, get_first_level_dataset ####################################### # Data and analysis parameters ####################################### # volume mask # This dataset is large get_first_level_dataset() data_path = path.join(DATA_DIR, 's12069_swaloc1_corr.nii.gz') paradigm_file = path.join(DATA_DIR, 'localizer_paradigm.csv') # timing n_scans = 128 tr = 2.4 # paradigm frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans) # confounds hrf_model = 'canonical' drift_model = 'cosine' hfcut = 128 # write directory write_dir = path.join(getcwd(), 'results') if not path.exists(write_dir): mkdir(write_dir) print('Computation will be performed in directory: %s' % write_dir) ######################################## # Design matrix ######################################## print('Loading design matrix...') paradigm = load_paradigm_from_csv_file(paradigm_file)['0'] design_matrix = make_dmtx(frametimes, paradigm, hrf_model=hrf_model, drift_model=drift_model, hfcut=hfcut) ######################################### # Specify the contrasts ######################################### # simplest ones contrasts = {} n_columns = len(design_matrix.names) for i in range(paradigm.n_conditions): contrasts['%s' % design_matrix.names[i]] = np.eye(n_columns)[i] # and 
more complex/ interesting ones contrasts['left'] = contrasts['clicGaudio'] + contrasts['clicGvideo'] contrasts['right'] = contrasts['clicDaudio'] + contrasts['clicDvideo'] ######################################## # Perform a GLM analysis ######################################## print('Fitting a General Linear Model') fmri_glm = FMRILinearModel(data_path, design_matrix.matrix, mask='compute') fmri_glm.fit(do_scaling=True, model='ar1') ######################################### # Estimate the contrasts ######################################### contrast_id = 'left_right_motor_min' z_map, effects_map = fmri_glm.contrast( np.vstack((contrasts['left'], contrasts['right'])), contrast_type='tmin-conjunction', output_z=True, output_effects=True) z_image_path = path.join(write_dir, '%s_z_map.nii' % contrast_id) save(z_map, z_image_path) contrast_path = path.join(write_dir, '%s_con.nii' % contrast_id) save(effects_map, contrast_path) # note that the effects_map is two-dimensional: # these dimensions correspond to 'left' and 'right' # Create snapshots of the contrasts vmax = max(- z_map.get_data().min(), z_map.get_data().max()) plot_map(z_map.get_data(), fmri_glm.affine, cmap=cm.cold_hot, vmin=- vmax, vmax=vmax, anat=None, figure=10, threshold=2.5) plt.savefig(path.join(write_dir, '%s_z_map.png' % contrast_id)) plt.show() print('All the results were witten in %s' % write_dir) # Note: fancier visualization of the results are shown # in the viz3d example nipy-0.3.0/examples/labs/need_data/viz.py000077500000000000000000000036321210344137400203240ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from __future__ import print_function # Python 2/3 compatibility __doc__ = """ Example of activation image visualization with nipy.labs visualization tools Needs *example data* package. 
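Four snapshots (an orthogonal view plus x, y and z slicers) are saved as
.png files in the current working directory.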
Needs matplotlib
"""
print(__doc__)

import os.path

try:
    import matplotlib.pyplot as plt
except ImportError:
    raise RuntimeError("This script needs the matplotlib library")

from nibabel import load

from nipy.labs import viz
from nipy.utils import example_data

# Local import
from get_data_light import get_second_level_dataset

# get the data
data_dir = get_second_level_dataset()

# First example, with an anatomical template
img = load(os.path.join(data_dir, 'spmT_0029.nii.gz'))
data = img.get_data()
affine = img.get_affine()
viz.plot_map(data, affine, cut_coords=(-52, 10, 22), threshold=2.0,
             cmap=viz.cm.cold_hot)
plt.savefig('ortho_view.png')

# Second example, with a given anatomical image slicing in the Z direction
try:
    anat_img = load(example_data.get_filename('neurospin', 'sulcal2000',
                                              'nobias_anubis.nii.gz'))
    anat = anat_img.get_data()
    anat_affine = anat_img.get_affine()
except OSError as e:
    # File does not exist: the data package is not installed
    print(e)
    anat = None
    anat_affine = None

viz.plot_map(data, affine, anat=anat, anat_affine=anat_affine, slicer='z',
             threshold=2, cmap=viz.cm.cold_hot, black_bg=True)
plt.savefig('z_view.png')
viz.plot_map(data, affine, anat=anat, anat_affine=anat_affine, slicer='x',
             threshold=2, cmap=viz.cm.cold_hot, black_bg=True)
plt.savefig('x_view.png')
viz.plot_map(data, affine, anat=anat, anat_affine=anat_affine, slicer='y',
             threshold=2, cmap=viz.cm.cold_hot, black_bg=True)
plt.savefig('y_view.png')
plt.show()
nipy-0.3.0/examples/labs/need_data/viz3d.py000066400000000000000000000034751210344137400205530ustar00rootroot00000000000000#!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from __future__ import print_function

__doc__ = """
This example performs different kinds of (2D and 3D) plots of a given
activation map.

Needs matplotlib.
Author : Bertrand Thirion, 2012 """ print(__doc__) from os import path try: import matplotlib.pyplot as plt except ImportError: raise RuntimeError("This script needs the matplotlib library") from nibabel import load from nipy.labs.viz import plot_map, cm # Local import from get_data_light import DATA_DIR, get_second_level_dataset ####################################### # Data and analysis parameters ####################################### input_image = path.join(DATA_DIR, 'spmT_0029.nii.gz') if not path.exists(input_image): get_second_level_dataset() brain_map = load(input_image) vmin, vmax = brain_map.get_data().min(), brain_map.get_data().max() # make a simple 2D plot plot_map(brain_map.get_data(), brain_map.get_affine(), cmap=cm.cold_hot, vmin=vmin, vmax=vmax, anat=None, figure=10, threshold=3) # More plots using 3D if True: # replace with False to skip this plot_map(brain_map.get_data(), brain_map.get_affine(), cmap=cm.cold_hot, vmin=vmin, vmax=vmax, anat=None, figure=11, threshold=3, do3d=True) from nipy.labs import viz3d try: viz3d.plot_map_3d(brain_map.get_data(), brain_map.get_affine(), cmap=cm.cold_hot, vmin=vmin, vmax=vmax, anat=None, threshold=4) except ImportError: print("Need mayavi for 3D visualization") plt.show() nipy-0.3.0/examples/labs/permutation_test_fakedata.py000077500000000000000000000071331210344137400230360ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Example script for group permutation testing """ from __future__ import print_function # Python 2/3 compatibility import numpy as np from nipy.labs.group import permutation_test as PT def make_data(n=10, mask_shape=(10, 10, 10), axis=0, r=3, signal=5): """ Generate Gaussian noise in a cubic volume + cubic activations """ mask = np.zeros(mask_shape, int) XYZ = np.array(np.where(mask==0)) p = XYZ.shape[1] data = np.random.randn(n, p) I = np.where(np.square(XYZ - XYZ.max(axis=1).reshape(-1, 1) / 2).sum( axis=0) <= r ** 2)[0] data[:, I] += signal vardata = np.random.randn(n, p) ** 2 if axis == 1: data = data.T vardata = vardata.T return data, vardata, XYZ ############################################################################### # Example for using permutation_test_onesample class data, vardata, XYZ = make_data() # rfx calibration P = PT.permutation_test_onesample(data, XYZ) # clusters definition (height threshold, max diameter) c = [(P.random_Tvalues[P.ndraws * (0.95)], None)] # regions definition (label vector) r = np.ones(data.shape[1], int) r[data.shape[1]/2.:] *= 10 voxel_results, cluster_results, region_results = \ P.calibrate(nperms=100, clusters=c, regions=[r]) # mfx calibration P = PT.permutation_test_onesample(data, XYZ, vardata=vardata, stat_id="student_mfx") voxel_results, cluster_results, region_results = \ P.calibrate(nperms=100, clusters=c, regions=[r]) ############################################################################### # Example for using permutation_test_twosample class data, vardata, XYZ = make_data(n=20) data1, vardata1, data2, vardata2 = (data[:10], vardata[:10], data[10:], vardata[10:]) # rfx calibration P = PT.permutation_test_twosample(data1, data2, XYZ) c = [(P.random_Tvalues[P.ndraws * (0.95)], None)] voxel_results, cluster_results, region_results = P.calibrate(nperms=100, clusters=c) # mfx calibration P = PT.permutation_test_twosample(data1, data2, XYZ, vardata1=vardata1, vardata2=vardata2, stat_id="student_mfx") voxel_results, cluster_results, region_results 
= P.calibrate(nperms=100, clusters=c) ############################################################################### # Print cluster statistics level = 0.05 for results in cluster_results: nclust = results["labels"].max() + 1 Tmax = np.zeros(nclust, float) Tmax_P = np.zeros(nclust, float) Diam = np.zeros(nclust, int) for j in range(nclust): I = np.where(results["labels"]==j)[0] Tmax[j] = P.Tvalues[I].max() Tmax_P[j] = voxel_results["Corr_p_values"][I].min() Diam[j]= PT.max_dist(XYZ, I, I) J = np.where(1 - (results["size_Corr_p_values"] > level) * (results["Fisher_Corr_p_values"] > level) * (Tmax_P > level))[0] print("\nDETECTED CLUSTERS STATISTICS:\n") print("Cluster detection threshold:", round(results["thresh"], 2)) if results["diam"] != None: print("minimum cluster diameter", results["diam"]) print("Cluster level FWER controled at", level) for j in J: X, Y, Z = results["peak_XYZ"][:, j] strXYZ = str(X).zfill(2) + " " + str(Y).zfill(2) + " " + \ str(Z).zfill(2) nipy-0.3.0/examples/labs/two_sample_mixed_effects.py000077500000000000000000000014101210344137400226370ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from __future__ import print_function # Python 2/3 compatibility __doc__ = """ Demo two sample mixed effect models Needs matplotlib """ print(__doc__) import numpy as np try: import matplotlib.pyplot as plt except ImportError: raise RuntimeError("This script needs the matplotlib library") from nipy.labs.group import twosample n1 = 8 n2 = 8 y1 = np.random.rand(n1) v1 = .1 * np.random.rand(n1) y2 = np.random.rand(n2) v2 = .1 * np.random.rand(n2) nperms = twosample.count_permutations(n1, n2) magics = np.arange(nperms) t = twosample.stat_mfx(y1, v1, y2, v2, id='student_mfx', Magics=magics) plt.hist(t, 101) plt.show() nipy-0.3.0/examples/labs/watershed_labeling.py000077500000000000000000000064631210344137400214400ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from __future__ import print_function # Python 2/3 compatibility __doc__ = """ This scipt generates a noisy activation image image and performs a watershed segmentation in it. 
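The map is thresholded (at 2.36 here) before the watershed is computed, and
the resulting labels and label-averaged signal are displayed next to the
input image.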
Needs matplotlib Author : Bertrand Thirion, 2009--2012 """ #autoindent print(__doc__) import numpy as np try: import matplotlib.pyplot as plt except ImportError: raise RuntimeError("This script needs the matplotlib library") import matplotlib as mpl from nipy.labs.spatial_models.hroi import HROI_from_watershed from nipy.labs.spatial_models.discrete_domain import grid_domain_from_shape import nipy.labs.utils.simul_multisubject_fmri_dataset as simul ############################################################################### # data simulation shape = (60, 60) pos = np.array([[12, 14], [20, 20], [30, 20]]) ampli = np.array([3, 4, 4]) x = simul.surrogate_2d_dataset(n_subj=1, shape=shape, pos=pos, ampli=ampli, width=10.0).squeeze() th = 2.36 # compute the field structure and perform the watershed domain = grid_domain_from_shape(shape) nroi = HROI_from_watershed(domain, np.ravel(x), threshold=th) label = nroi.label #compute the region-based signal average bfm = np.array([np.mean(x.ravel()[label == k]) for k in range(label.max() + 1)]) bmap = np.zeros(x.size) if label.max() > - 1: bmap[label > - 1] = bfm[label[label > - 1]] label = np.reshape(label, shape) bmap = np.reshape(bmap, shape) ############################################################################### # plot the input image aux1 = (0 - x.min()) / (x.max() - x.min()) aux2 = (bmap.max() - x.min()) / (x.max() - x.min()) cdict = {'red': ((0.0, 0.0, 0.7), (aux1, 0.7, 0.7), (aux2, 1.0, 1.0), (1.0, 1.0, 1.0)), 'green': ((0.0, 0.0, 0.7), (aux1, 0.7, 0.0), (aux2, 1.0, 1.0), (1.0, 1.0, 1.0)), 'blue': ((0.0, 0.0, 0.7), (aux1, 0.7, 0.0), (aux2, 0.5, 0.5), (1.0, 1.0, 1.0))} my_cmap = mpl.colors.LinearSegmentedColormap('my_colormap', cdict, 256) plt.figure(figsize=(12, 3)) plt.subplot(1, 3, 1) plt.imshow(np.squeeze(x), interpolation='nearest', cmap=my_cmap) plt.axis('off') plt.title('Thresholded image') cb = plt.colorbar() for t in cb.ax.get_yticklabels(): t.set_fontsize(16) ############################################################################### # plot the watershed label image plt.subplot(1, 3, 2) plt.imshow(label, interpolation='nearest') plt.axis('off') plt.colorbar() plt.title('Labels') ############################################################################### # plot the watershed-average image plt.subplot(1, 3, 3) aux = 0.01 cdict = {'red': ((0.0, 0.0, 0.7), (aux, 0.7, 0.7), (1.0, 1.0, 1.0)), 'green': ((0.0, 0.0, 0.7), (aux, 0.7, 0.0), (1.0, 1.0, 1.0)), 'blue': ((0.0, 0.0, 0.7), (aux, 0.7, 0.0), (1.0, 0.5, 1.0))} my_cmap = mpl.colors.LinearSegmentedColormap('my_colormap', cdict, 256) plt.imshow(bmap, interpolation='nearest', cmap=my_cmap) plt.axis('off') plt.title('Label-average') cb = plt.colorbar() for t in cb.ax.get_yticklabels(): t.set_fontsize(16) plt.show() nipy-0.3.0/examples/labs/write_paradigm_file.py000077500000000000000000000047101210344137400216030ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from __future__ import print_function # Python 2/3 compatibility """ Examples of a paradigm .csv file generation: the neurospin/localizer paradigm. 
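The script writes a space-separated .csv file with one row per trial, giving
the session index, the condition name and the onset time in seconds.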
See Pinel et al., BMC neuroscience 2007 for reference """ import sys import csv import numpy as np # onset times in milliseconds time = np.array([ 0, 2400, 5700, 8700, 11400, 15000, 18000, 20700, 23700, 26700, 29700, 33000, 35400, 39000, 41700, 44700, 48000, 50700, 53700, 56400, 59700, 62400, 66000, 69000, 71400, 75000, 78000, 80400, 83400, 87000, 89700, 93000, 96000, 99000, 102000, 105000, 108000, 110400, 113700, 116700, 119400, 122700, 125400, 129000, 131400, 135000, 137700, 140400, 143400, 146700, 149400, 153000, 156000, 159000, 162000, 164400, 167700, 170400, 173700, 176700, 179700, 182700, 186000, 188400, 191700, 195000, 198000, 201000, 203700, 207000, 210000, 212700, 215700, 218700, 221400, 224700, 227700, 230700, 234000, 236700, 240000, 243000, 246000, 248400, 251700, 254700, 257400, 260400, 264000, 266700, 269700, 272700, 275400, 278400, 281700, 284400, 288000, 291000, 293400, 296700]).astype('f')/1000 # corresponding onset types # Because it's cutpasted from Matlab(tm), i substract 1 at the end ;-) # onset types trial_type = np.array([ 8, 8, 11, 1, 3, 10, 5, 10, 4, 6, 10, 2, 7, 9, 9, 7, 7, 11, 11, 9, 1, 4, 11, 5, 6, 9, 11, 11, 7, 3, 10, 11, 2, 11, 11, 11, 7, 11, 11, 6, 10, 2, 8, 11, 9, 7, 7, 2, 3, 10, 1, 8, 2, 9, 3, 8, 9, 4, 7, 1, 11, 11, 11, 1, 7, 9, 8, 8, 2, 2, 2, 6, 6, 1, 8, 1, 5, 3, 8, 10, 11, 11, 9, 1, 7, 4, 4, 8, 2, 1, 1, 11, 5, 2, 11, 10, 9, 5, 10, 10]) - 1 condition_ids = ['damier_H', 'damier_V', 'clicDaudio', 'clicGaudio', 'clicDvideo', 'clicGvideo', 'calculaudio', 'calculvideo', 'phrasevideo', 'phraseaudio'] time = time[trial_type < 10] cid = np.array([condition_ids[i] for i in trial_type[trial_type < 10]]) sess = np.zeros(np.size(time)).astype('int8') pdata = np.vstack((sess, cid, time)).T csvfile = 'localizer_paradigm.csv' # Opening files for CSV writing differs between Python 2 and 3 if sys.version_info[0] >= 3: # Python 3 fid = open(csvfile, "w", newline = '') else: # Python 2 fid = open(csvfile, "wb") writer = csv.writer(fid, delimiter=' ') for row in pdata: writer.writerow(row) fid.close() print("Created the paradigm file in %s " % csvfile) nipy-0.3.0/examples/onesample_group.py000077500000000000000000000031731210344137400200660ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This modules launches a one-sample test on a dataset Statistical significance is obtained using cluster-level inference and permutation testing. 
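The group data are first rebuilt as Nifti images from the packaged .npz
arrays, then a one-sample test with the Wilcoxon statistic and 1024
permutations is run, and cluster statistics are extracted from the resulting
z-map.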
Author: Alexis Roche, Bertrand Thirion 2009-2012 """ import numpy as np from nibabel import Nifti1Image as Image import nipy.labs.statistical_mapping as sm from nipy.utils import example_data def remake_images(): # Get group data group_data = example_data.get_filename( 'neurospin', 'language_babies', 'offset_002.npz') f = np.load(group_data) data, vardata, xyz = f['mat'], f['var'], f['xyz'] dX = xyz[0].max() + 1 dY = xyz[1].max() + 1 dZ = xyz[2].max() + 1 aux = np.zeros([dX, dY, dZ]) data_images = [] vardata_images = [] mask_images = [] for i in range(data.shape[0]): aux[list(xyz)] = data[i] data_images.append(Image(aux.copy(), np.eye(4))) aux[list(xyz)] = vardata[i] vardata_images.append(Image(aux.copy(), np.eye(4))) aux[list(xyz)] = 1 mask_images.append(aux) return data_images, vardata_images, mask_images data_images, vardata_images, mask_images = remake_images() zimg, mask, nulls = sm.onesample_test(data_images, None, mask_images, 'wilcoxon', permutations=1024, cluster_forming_th=0.01) clusters, info = sm.cluster_stats(zimg, mask, 0.01, nulls=nulls) nipy-0.3.0/examples/space_time_realign.py000077500000000000000000000031751210344137400205030ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This script requires the nipy-data package to run. It is an example of simultaneous motion correction and slice timing correction in multi-session fMRI data from the FIAC 2005 dataset. Specifically, it uses the first two sessions of subject 'fiac0'. Usage: python space_time_realign.py Two images will be created in the working directory for the realigned series:: rarun1.nii rarun2.nii Author: Alexis Roche, 2009. """ from __future__ import print_function # Python 2/3 compatibility import os from os.path import split as psplit, abspath from nipy.algorithms.registration import FmriRealign4d from nipy import load_image, save_image from nipy.utils import example_data # Input images are provided with the nipy-data package runnames = [example_data.get_filename('fiac', 'fiac0', run + '.nii.gz') for run in ('run1', 'run2')] runs = [load_image(run) for run in runnames] # Declare interleaved ascending slice order nslices = runs[0].shape[2] slice_order = list(range(0, nslices, 2)) + list(range(1, nslices, 2)) print('Slice order: %s' % slice_order) # Spatio-temporal realigner R = FmriRealign4d(runs, tr=2.5, slice_order=slice_order) # Estimate motion within- and between-sessions R.estimate(refscan=None) # Resample data on a regular space+time lattice using 4d interpolation # Save images cwd = abspath(os.getcwd()) print('Saving results in: %s' % cwd) for i in range(len(runs)): corr_run = R.resample(i) fname = 'ra' + psplit(runnames[i])[1] save_image(corr_run, fname) nipy-0.3.0/examples/tissue_classification.py000077500000000000000000000063541210344137400212620ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Script example of tissue classification """ from __future__ import print_function # Python 2/3 compatibility import numpy as np from nipy import load_image, save_image from nipy.core.image.image_spaces import (make_xyz_image, xyz_affine) from nipy.externals.argparse import ArgumentParser from nipy.algorithms.segmentation import BrainT1Segmentation def fuzzy_dice(gold_ppm, ppm, mask): """ Fuzzy dice index. 
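    For each tissue class k, the index is computed over the voxels in `mask`
    as 2 * sum(sqrt(p_k * q_k)) / (sum(p_k) + sum(q_k)), where p and q are
    the gold-standard and estimated probability maps.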
""" dices = np.zeros(3) if gold_ppm == None: return dices for k in range(3): pk = gold_ppm[mask][:, k] qk = ppm[mask][:, k] PQ = np.sum(np.sqrt(np.maximum(pk * qk, 0))) P = np.sum(pk) Q = np.sum(qk) dices[k] = 2 * PQ / float(P + Q) return dices # Parse command line description = 'Perform brain tissue classification from skull stripped T1 \ image in CSF, GM and WM. If no mask image is provided, the mask is defined by \ thresholding the input image above zero (strictly).' parser = ArgumentParser(description=description) parser.add_argument('img', metavar='img', nargs='+', help='input image') parser.add_argument('--mask', dest='mask', help='mask image') parser.add_argument('--niters', dest='niters', help='number of iterations (default=%d)' % 25) parser.add_argument('--beta', dest='beta', help='Markov random field beta parameter (default=%f)' % 0.5) parser.add_argument('--ngb_size', dest='ngb_size', help='Markov random field neighborhood system (default=%d)' % 6) parser.add_argument('--probc', dest='probc', help='csf probability map') parser.add_argument('--probg', dest='probg', help='gray matter probability map') parser.add_argument('--probw', dest='probw', help='white matter probability map') args = parser.parse_args() def get_argument(dest, default): val = args.__getattribute__(dest) if val == None: return default else: return val # Input image img = load_image(args.img[0]) # Input mask image mask_img = get_argument('mask', None) if mask_img == None: mask_img = img else: mask_img = load_image(mask_img) # Other optional arguments niters = int(get_argument('niters', 25)) beta = float(get_argument('beta', 0.5)) ngb_size = int(get_argument('ngb_size', 6)) # Perform tissue classification mask = mask_img.get_data() > 0 S = BrainT1Segmentation(img.get_data(), mask=mask, model='5k', niters=niters, beta=beta, ngb_size=ngb_size) # Save label image outfile = 'hard_classif.nii' save_image(make_xyz_image(S.label, xyz_affine(img), 'scanner'), outfile) print('Label image saved in: %s' % outfile) # Compute fuzzy Dice indices if a 3-class fuzzy model is provided if not args.probc == None and \ not args.probg == None and \ not args.probw == None: print('Computing Dice index') gold_ppm = np.zeros(S.ppm.shape) gold_ppm_img = (args.probc, args.probg, args.probw) for k in range(3): img = load_image(gold_ppm_img[k]) gold_ppm[..., k] = img.get_data() d = fuzzy_dice(gold_ppm, S.ppm, np.where(mask_img.get_data() > 0)) print('Fuzzy Dice indices: %s' % d) nipy-0.3.0/libcstat/000077500000000000000000000000001210344137400142755ustar00rootroot00000000000000nipy-0.3.0/libcstat/fff/000077500000000000000000000000001210344137400150365ustar00rootroot00000000000000nipy-0.3.0/libcstat/fff/fff_array.c000066400000000000000000000503521210344137400171460ustar00rootroot00000000000000#include "fff_array.h" #include #include /* Static functions */ static double _get_uchar(const char* data, size_t pos); static double _get_schar(const char* data, size_t pos); static double _get_ushort(const char* data, size_t pos); static double _get_sshort(const char* data, size_t pos); static double _get_uint(const char* data, size_t pos); static double _get_int(const char* data, size_t pos); static double _get_ulong(const char* data, size_t pos); static double _get_long(const char* data, size_t pos); static double _get_float(const char* data, size_t pos); static double _get_double(const char* data, size_t pos); static void _set_uchar(char* data, size_t pos, double value); static void _set_schar(char* data, size_t pos, double value); static void 
_set_ushort(char* data, size_t pos, double value); static void _set_sshort(char* data, size_t pos, double value); static void _set_uint(char* data, size_t pos, double value); static void _set_int(char* data, size_t pos, double value); static void _set_ulong(char* data, size_t pos, double value); static void _set_long(char* data, size_t pos, double value); static void _set_float(char* data, size_t pos, double value); static void _set_double(char* data, size_t pos, double value); static void _fff_array_iterator_update1d(void* it); static void _fff_array_iterator_update2d(void* it); static void _fff_array_iterator_update3d(void* it); static void _fff_array_iterator_update4d(void* it); /* Creates a C-contiguous array. */ fff_array* fff_array_new(fff_datatype datatype, size_t dimX, size_t dimY, size_t dimZ, size_t dimT) { fff_array* thisone; size_t nvoxels = dimX*dimY*dimZ*dimT; size_t aux, offX, offY, offZ, offT; /* Offset computation */ offT = 1; aux = dimT; offZ = aux; aux *= dimZ; offY = aux; aux *= dimY; offX = aux; /* Instantiate the structure member */ thisone = (fff_array*)malloc(sizeof(fff_array)); if (thisone==NULL) { FFF_ERROR("Out of memory", ENOMEM); return NULL; } /* Set dimensions, offsets and accessors */ *thisone = fff_array_view(datatype, NULL, dimX, dimY, dimZ, dimT, offX, offY, offZ, offT); /* Gives ownership */ thisone->owner = 1; /* Allocate the image buffer */ switch(datatype) { case FFF_UCHAR: { unsigned char* buf = (unsigned char*)calloc(nvoxels, sizeof(unsigned char)); thisone->data = (void*)buf; } break; case FFF_SCHAR: { signed char* buf = (signed char*)calloc(nvoxels, sizeof(signed char)); thisone->data = (void*)buf; } break; case FFF_USHORT: { unsigned short* buf = (unsigned short*)calloc(nvoxels, sizeof(unsigned short)); thisone->data = (void*)buf; } break; case FFF_SSHORT: { signed short* buf = (signed short*)calloc(nvoxels, sizeof(signed short)); thisone->data = (void*)buf; } break; case FFF_UINT: { unsigned int* buf = (unsigned int*)calloc(nvoxels, sizeof(unsigned int)); thisone->data = (void*)buf; } break; case FFF_INT: { int* buf = (int*)calloc(nvoxels, sizeof(int)); thisone->data = (void*)buf; } break; case FFF_ULONG: { unsigned long int* buf = (unsigned long int*)calloc(nvoxels, sizeof(unsigned long int)); thisone->data = (void*)buf; } break; case FFF_LONG: { long int* buf = (long int*)calloc(nvoxels, sizeof(long int)); thisone->data = (void*)buf; } break; case FFF_FLOAT: { float* buf = (float*)calloc(nvoxels, sizeof(float)); thisone->data = (void*)buf; } break; case FFF_DOUBLE: { double* buf = (double*)calloc(nvoxels, sizeof(double)); thisone->data = (void*)buf; } break; default: FFF_ERROR("Unrecognized data type", EINVAL); break; } /* Report error if array has not been allocated */ if (thisone->data==NULL) FFF_ERROR("Out of memory", ENOMEM); return thisone; } void fff_array_delete(fff_array* thisone) { if ((thisone->owner) && (thisone->data != NULL)) free(thisone->data); free(thisone); return; } fff_array fff_array_view(fff_datatype datatype, void* buf, size_t dimX, size_t dimY, size_t dimZ, size_t dimT, size_t offX, size_t offY, size_t offZ, size_t offT) { fff_array thisone; fff_array_ndims ndims = FFF_ARRAY_4D; unsigned int nbytes = fff_nbytes(datatype); /* Decrease the number of dimensions if applicable */ if (dimT == 1) { ndims = FFF_ARRAY_3D; if (dimZ == 1) { ndims = FFF_ARRAY_2D; if (dimY == 1) ndims = FFF_ARRAY_1D; } } thisone.ndims = ndims; /* Set dimensions / offsets / voxel size */ thisone.dimX = dimX; thisone.dimY = dimY; thisone.dimZ = dimZ; 
thisone.dimT = dimT; thisone.offsetX = offX; thisone.offsetY = offY; thisone.offsetZ = offZ; thisone.offsetT = offT; thisone.byte_offsetX = nbytes*offX; thisone.byte_offsetY = nbytes*offY; thisone.byte_offsetZ = nbytes*offZ; thisone.byte_offsetT = nbytes*offT; /* Set data type and point towards buffer */ thisone.datatype = datatype; thisone.data = buf; thisone.owner = 0; /* Set accessors */ switch(datatype) { case FFF_UCHAR: { thisone.get = &_get_uchar; thisone.set = &_set_uchar; } break; case FFF_SCHAR: { thisone.get = &_get_schar; thisone.set = &_set_schar; } break; case FFF_USHORT: { thisone.get = &_get_ushort; thisone.set = &_set_ushort; } break; case FFF_SSHORT: { thisone.get = &_get_sshort; thisone.set = &_set_sshort; } break; case FFF_UINT: { thisone.get = &_get_uint; thisone.set = &_set_uint; } break; case FFF_INT: { thisone.get = &_get_int; thisone.set = &_set_int; } break; case FFF_ULONG: { thisone.get = &_get_ulong; thisone.set = &_set_ulong; } break; case FFF_LONG: { thisone.get = &_get_long; thisone.set = &_set_long; } break; case FFF_FLOAT: { thisone.get = &_get_float; thisone.set = &_set_float; } break; case FFF_DOUBLE: { thisone.get = &_get_double; thisone.set = &_set_double; } break; default: { thisone.get = NULL; thisone.set = NULL; FFF_ERROR("Unrecognized data type", EINVAL); } break; } return thisone; } /* Check coordinate range and return FFF_NAN if position is out of bounds */ double fff_array_get(const fff_array* thisone, size_t x, size_t y, size_t z, size_t t) { size_t idx; if ((x >= thisone->dimX) || (y >= thisone->dimY) || (z >= thisone->dimZ) || (t >= thisone->dimT)) return FFF_NAN; idx = x*thisone->offsetX + y*thisone->offsetY + z*thisone->offsetZ + t*thisone->offsetT; return thisone->get((const char*)thisone->data, idx); } /* Check coordinate range and do noting position is out of bounds */ void fff_array_set(fff_array* thisone, size_t x, size_t y, size_t z, size_t t, double value) { size_t idx; if ((x >= thisone->dimX) || (y >= thisone->dimY) || (z >= thisone->dimZ) || (t >= thisone->dimT)) return; idx = x*thisone->offsetX + y*thisone->offsetY + z*thisone->offsetZ + t*thisone->offsetT; thisone->set((char*)thisone->data, idx, value); return; } void fff_array_set_all(fff_array* thisone, double val) { fff_array_iterator iter = fff_array_iterator_init(thisone); while (iter.idx < iter.size) { fff_array_set_from_iterator(thisone, iter, val); fff_array_iterator_update(&iter); } return; } fff_array fff_array_get_block(const fff_array* thisone, size_t x0, size_t x1, size_t fX, size_t y0, size_t y1, size_t fY, size_t z0, size_t z1, size_t fZ, size_t t0, size_t t1, size_t fT) { char* data = (char*)thisone->data; data += x0*thisone->byte_offsetX + y0*thisone->byte_offsetY + z0*thisone->byte_offsetZ + t0*thisone->byte_offsetT; return fff_array_view(thisone->datatype, (void*)data, (x1-x0)/fX+1, (y1-y0)/fY+1, (z1-z0)/fZ+1, (t1-t0)/fZ+1, fX*thisone->offsetX, fY*thisone->offsetY, fZ*thisone->offsetZ, fT*thisone->offsetT); } void fff_array_extrema (double* min, double* max, const fff_array* thisone) { double val; fff_array_iterator iter = fff_array_iterator_init(thisone); /* Initialization */ *min = FFF_POSINF; /* 0.0;*/ *max = FFF_NEGINF; /*0.0;*/ while (iter.idx < iter.size) { val = fff_array_get_from_iterator(thisone, iter); if (val < *min) *min = val; else if (val > *max) *max = val; fff_array_iterator_update(&iter); } return; } #define CHECK_DIMS(a1,a2) \ if ((a1->dimX != a2->dimX) || \ (a1->dimY != a2->dimY) || \ (a1->dimZ != a2->dimZ) || \ (a1->dimT != a2->dimT)) \ 
{FFF_ERROR("Arrays have different sizes", EINVAL); return;} \ void fff_array_copy(fff_array* aRes, const fff_array* aSrc) { fff_array_iterator itSrc = fff_array_iterator_init(aSrc); fff_array_iterator itRes = fff_array_iterator_init(aRes); double valSrc; CHECK_DIMS(aRes, aSrc); while (itSrc.idx < itSrc.size) { valSrc = fff_array_get_from_iterator(aSrc, itSrc); fff_array_set_from_iterator(aRes, itRes, valSrc); fff_array_iterator_update(&itSrc); fff_array_iterator_update(&itRes); } return; } /* Applies an affine correction to the input array so that: s0 --> r0 s1 --> r1 */ void fff_array_compress(fff_array* aRes, const fff_array* aSrc, double r0, double s0, double r1, double s1) { fff_array_iterator itSrc = fff_array_iterator_init(aSrc); fff_array_iterator itRes = fff_array_iterator_init(aRes); double a, b, valSrc; CHECK_DIMS(aRes, aSrc); a = (r1-r0) / (s1-s0); b = r0 - a*s0; while (itSrc.idx < itSrc.size) { valSrc = fff_array_get_from_iterator(aSrc, itSrc); fff_array_set_from_iterator(aRes, itRes, a*valSrc+b); fff_array_iterator_update(&itSrc); fff_array_iterator_update(&itRes); } return; } void fff_array_add(fff_array* aRes, const fff_array* aSrc) { fff_array_iterator itSrc = fff_array_iterator_init(aSrc); fff_array_iterator itRes = fff_array_iterator_init(aRes); double v; CHECK_DIMS(aRes, aSrc); while (itSrc.idx < itSrc.size) { v = fff_array_get_from_iterator(aRes, itRes); v += fff_array_get_from_iterator(aSrc, itSrc); fff_array_set_from_iterator(aRes, itRes, v); fff_array_iterator_update(&itSrc); fff_array_iterator_update(&itRes); } return; } void fff_array_sub(fff_array* aRes, const fff_array* aSrc) { fff_array_iterator itSrc = fff_array_iterator_init(aSrc); fff_array_iterator itRes = fff_array_iterator_init(aRes); double v; CHECK_DIMS(aRes, aSrc); while (itSrc.idx < itSrc.size) { v = fff_array_get_from_iterator(aRes, itRes); v -= fff_array_get_from_iterator(aSrc, itSrc); fff_array_set_from_iterator(aRes, itRes, v); fff_array_iterator_update(&itSrc); fff_array_iterator_update(&itRes); } return; } void fff_array_mul(fff_array* aRes, const fff_array* aSrc) { fff_array_iterator itSrc = fff_array_iterator_init(aSrc); fff_array_iterator itRes = fff_array_iterator_init(aRes); double v; CHECK_DIMS(aRes, aSrc); while (itSrc.idx < itSrc.size) { v = fff_array_get_from_iterator(aRes, itRes); v *= fff_array_get_from_iterator(aSrc, itSrc); fff_array_set_from_iterator(aRes, itRes, v); fff_array_iterator_update(&itSrc); fff_array_iterator_update(&itRes); } return; } /* Force denominator's aboslute value greater than FFF_TINY. 
*/ void fff_array_div(fff_array* aRes, const fff_array* aSrc) { fff_array_iterator itSrc = fff_array_iterator_init(aSrc); fff_array_iterator itRes = fff_array_iterator_init(aRes); double v; CHECK_DIMS(aRes, aSrc); while (itSrc.idx < itSrc.size) { v = fff_array_get_from_iterator(aSrc, itSrc); if (FFF_ABS(v)dimX*im->dimY*im->dimZ*im->dimT; /* Initialize pointer and coordinates */ iter.data = (char*)im->data; iter.x = 0; iter.y = 0; iter.z = 0; iter.t = 0; /* Boundary check parameters */ iter.ddimY = im->dimY - 1; iter.ddimZ = im->dimZ - 1; iter.ddimT = im->dimT - 1; if (axis == 3) { iter.ddimT = 0; iter.size /= im->dimT; } else if (axis == 2) { iter.ddimZ = 0; iter.size /= im->dimZ; } else if (axis == 1) { iter.ddimY = 0; iter.size /= im->dimY; } else if (axis == 0) iter.size /= im->dimX; /* Increments */ pY = iter.ddimY * im->byte_offsetY; pZ = iter.ddimZ * im->byte_offsetZ; pT = iter.ddimT * im->byte_offsetT; iter.incT = im->byte_offsetT; iter.incZ = im->byte_offsetZ - pT; iter.incY = im->byte_offsetY - pZ - pT; iter.incX = im->byte_offsetX - pY - pZ - pT; /* Update function */ switch(im->ndims) { case FFF_ARRAY_1D: iter.update = &_fff_array_iterator_update1d; break; case FFF_ARRAY_2D: iter.update = &_fff_array_iterator_update2d; break; case FFF_ARRAY_3D: iter.update = &_fff_array_iterator_update3d; break; case FFF_ARRAY_4D: default: iter.update = &_fff_array_iterator_update4d; break; } return iter; } fff_array_iterator fff_array_iterator_init(const fff_array* im) { return fff_array_iterator_init_skip_axis(im, -1); } static void _fff_array_iterator_update1d(void* it) { fff_array_iterator* iter = (fff_array_iterator*)it; iter->idx ++; iter->data += iter->incX; iter->x = iter->idx; return; } static void _fff_array_iterator_update2d(void* it) { fff_array_iterator* iter = (fff_array_iterator*)it; iter->idx ++; if (iter->y < iter->ddimY) { iter->y ++; iter->data += iter->incY; return; } iter->y = 0; iter->x ++; iter->data += iter->incX; return; } static void _fff_array_iterator_update3d(void* it) { fff_array_iterator* iter = (fff_array_iterator*)it; iter->idx ++; if (iter->z < iter->ddimZ) { iter->z ++; iter->data += iter->incZ; return; } if (iter->y < iter->ddimY) { iter->z = 0; iter->y ++; iter->data += iter->incY; return; } iter->z = 0; iter->y = 0; iter->x ++; iter->data += iter->incX; return; } static void _fff_array_iterator_update4d(void* it) { fff_array_iterator* iter = (fff_array_iterator*)it; iter->idx ++; if (iter->t < iter->ddimT) { iter->t ++; iter->data += iter->incT; return; } if (iter->z < iter->ddimZ) { iter->t = 0; iter->z ++; iter->data += iter->incZ; return; } if (iter->y < iter->ddimY) { iter->t = 0; iter->z = 0; iter->y ++; iter->data += iter->incY; return; } iter->t = 0; iter->z = 0; iter->y = 0; iter->x ++; iter->data += iter->incX; return; } /* Image must be in DOUBLE format */ void fff_array_iterate_vector_function(fff_array* im, int axis, void(*func)(fff_vector*, void*), void* par) { fff_array_iterator iter; fff_vector x; if (im->datatype != FFF_DOUBLE) { FFF_WARNING("Image type must be double."); return; } if ((axis>3) || (axis<0)) { FFF_WARNING("Invalid axis."); return; } x.size = fff_array_dim(im, axis); x.stride = fff_array_offset(im, axis); x.owner = 0; iter = fff_array_iterator_init_skip_axis(im, axis); while (iter.idx < iter.size) { x.data = (double*)iter.data; (*func)(&x, par); fff_array_iterator_update(&iter); } return; } /* Convert image values to [0,clamp-1]; typically clamp = 256. Possibly modify the dynamic range if the input value is overestimated. 
For instance, the reconstructed MRI signal is generally encoded in 12 bits (values ranging from 0 to 4095). Therefore, this operation may result in a loss of information. */ void fff_array_clamp(fff_array* aRes, const fff_array* aSrc, double th, int* clamp) { double imin, imax, tth; int dmax = *clamp - 1; /* Compute input image min and max */ fff_array_extrema(&imin, &imax, aSrc); /* Make sure the threshold is not below the min intensity */ tth = FFF_MAX(th, imin); /* Test */ if (tth>imax) { FFF_WARNING("Inconsistent threshold, ignored."); tth = imin; } /* If the image dynamic is small, no need for compression: just downshift image values and re-estimate the dynamic range (hence imax is translated to imax-tth casted to SSHORT) */ if ((fff_is_integer(aSrc->datatype)) && ((imax-tth)<=dmax)) { fff_array_compress(aRes, aSrc, 0, tth, 1, tth+1); *clamp = (int)(imax-tth) + 1; } /* Otherwise, compress after downshifting image values (values equal to the threshold are reset to zero) */ else fff_array_compress(aRes, aSrc, 0, tth, dmax, imax); return; } /************************************************************************* Manually templated array acessors *************************************************************************/ static double _get_uchar(const char* data, size_t pos) { unsigned char* buf = (unsigned char*)data; return((double)buf[pos]); } static double _get_schar(const char* data, size_t pos) { signed char* buf = (signed char*)data; return((double)buf[pos]); } static double _get_ushort(const char* data, size_t pos) { unsigned short* buf = (unsigned short*)data; return((double)buf[pos]); } static double _get_sshort(const char* data, size_t pos) { signed short* buf = (signed short*)data; return((double)buf[pos]); } static double _get_uint(const char* data, size_t pos) { unsigned int* buf = (unsigned int*)data; return((double)buf[pos]); } static double _get_int(const char* data, size_t pos) { int* buf = (int*)data; return((double)buf[pos]); } static double _get_ulong(const char* data, size_t pos) { unsigned long int* buf = (unsigned long int*)data; return((double)buf[pos]); } static double _get_long(const char* data, size_t pos) { long int* buf = (long int*)data; return((double)buf[pos]); } static double _get_float(const char* data, size_t pos) { float* buf = (float*)data; return((double)buf[pos]); } static double _get_double(const char* data, size_t pos) { double* buf = (double*)data; return(buf[pos]); } static void _set_uchar(char* data, size_t pos, double value) { unsigned char* buf = (unsigned char*)data; buf[pos] = (unsigned char)(FFF_ROUND(value)); return; } static void _set_schar(char* data, size_t pos, double value) { signed char* buf = (signed char*)data; buf[pos] = (signed char)(FFF_ROUND(value)); return; } static void _set_ushort(char* data, size_t pos, double value) { unsigned short* buf = (unsigned short*)data; buf[pos] = (unsigned short)(FFF_ROUND(value)); return; } static void _set_sshort(char* data, size_t pos, double value) { signed short* buf = (signed short*)data; buf[pos] = (signed short)(FFF_ROUND(value)); return; } static void _set_uint(char* data, size_t pos, double value) { unsigned int* buf = (unsigned int*)data; buf[pos] = (unsigned int)(FFF_ROUND(value)); return; } static void _set_int(char* data, size_t pos, double value) { int* buf = (int*)data; buf[pos] = (int)(FFF_ROUND(value)); return; } static void _set_ulong(char* data, size_t pos, double value) { unsigned long int* buf = (unsigned long int*)data; buf[pos] = (unsigned long int)(FFF_ROUND(value)); 
return; } static void _set_long(char* data, size_t pos, double value) { long int* buf = (long int*)data; buf[pos] = (long int)(FFF_ROUND(value)); return; } static void _set_float(char* data, size_t pos, double value) { float* buf = (float*)data; buf[pos] = (float)value; return; } static void _set_double(char* data, size_t pos, double value) { double* buf = (double*)data; buf[pos] = value; return; } nipy-0.3.0/libcstat/fff/fff_array.h000066400000000000000000000237221210344137400171540ustar00rootroot00000000000000/*! \file fff_array.h \brief Basic image object \author Alexis Roche \date 2005-2006 This library implements a generic 4-dimensional array object that can be used to represent images. */ #ifndef FFF_ARRAY #define FFF_ARRAY #ifdef __cplusplus extern "C" { #endif #include "fff_base.h" #include "fff_vector.h" #include #define fff_array_dim(array, axis) \ ((axis)==0 ? (array->dimX) : ((axis)==1 ? (array->dimY) : ((axis)==2 ? (array->dimZ) : (array->dimT)) ) ) #define fff_array_offset(array, axis) \ ((axis)==0 ? (array->offsetX) : ((axis)==1 ? (array->offsetY) : ((axis)==2 ? (array->offsetZ) : (array->offsetT)) ) ) /* #define fff_array_copy(ares, asrc) \ fff_array_compress(ares, asrc, 0, 0, 1, 1) */ #define fff_array_new1d(dtype, dx) \ fff_array_new(dtype, dx, 1, 1, 1) #define fff_array_new2d(dtype, dx, dy) \ fff_array_new(dtype, dx, dy, 1, 1) #define fff_array_new3d(dtype, dx, dy, dz) \ fff_array_new(dtype, dx, dy, dz, 1) #define fff_array_view1d(dtype, data, dx, ox) \ fff_array_view(dtype, data, dx, 1, 1, 1, ox, 1, 1, 1) #define fff_array_view2d(dtype, data, dx, dy, ox, oy) \ fff_array_view(dtype, data, dx, dy, 1, 1, ox, oy, 1, 1) #define fff_array_view3d(dtype, data, dx, dy, dz, ox, oy, oz) \ fff_array_view(dtype, data, dx, dy, dz, 1, ox, oy, oz, 1) #define fff_array_get1d(array, x) \ fff_array_get(array, x, 0, 0, 0) #define fff_array_get2d(array, x, y) \ fff_array_get(array, x, y, 0, 0) #define fff_array_get3d(array, x, y) \ fff_array_get(array, x, y, z, 0) #define fff_array_set1d(array, x, a) \ fff_array_set(array, x, 0, 0, 0, a) #define fff_array_set2d(array, x, y, a) \ fff_array_set(array, x, y, 0, 0, a) #define fff_array_set3d(array, x, y, z, a) \ fff_array_set(array, x, y, z, 0, a) #define fff_array_get_block1d(array, x0, x1, fx) \ fff_array_get_block(array, x0, x1, fx, 0, 0, 1, 0, 0, 1, 0, 0, 1) #define fff_array_get_block2d(array, x0, x1, fx, y0, y1, fy) \ fff_array_get_block(array, x0, x1, fx, y0, y1, fy, 0, 0, 1, 0, 0, 1) #define fff_array_get_block3d(array, x0, x1, fx, y0, y1, fy, z0, z1, fz) \ fff_array_get_block(array, x0, x1, fx, y0, y1, fy, z0, z1, fz, 0, 0, 1) #define fff_array_get_from_iterator(array, iter) \ array->get(iter.data, 0) #define fff_array_set_from_iterator(array, iter, val) \ array->set(iter.data, 0, val) #define fff_array_iterator_update(iter) \ (iter)->update(iter) /*! \typedef fff_array_ndims \brief Image flag type */ typedef enum { FFF_ARRAY_1D = 1, /*!< 1d image */ FFF_ARRAY_2D = 2, /*!< 2d image */ FFF_ARRAY_3D = 3, /*!< 3d image */ FFF_ARRAY_4D = 4 /*!< 4d image */ } fff_array_ndims; /*! \struct fff_array \brief The fff image structure Image values are stored in a \c void linear array, the actual encoding type being specified by the field \c datatype. The image dimension along each axis are encoded by fields starting with \c dim, while the \c ndims flag specifies the biggest axis index corresponding to a non-unitary dimension; it essentially defines whether the image is 1d, 2d, 3d, or 4d. 
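As a minimal usage sketch, relying only on the constructor and accessors declared further down in this header, a 3d double image can be created, filled and released as follows:

\code
fff_array* im = fff_array_new(FFF_DOUBLE, 64, 64, 32, 1);
fff_array_set(im, 10, 20, 5, 0, 3.5);
double val = fff_array_get(im, 10, 20, 5, 0);
fff_array_delete(im);
\endcode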
The use of offsets (or strides) makes the object independent from any storage convention. A pixel with coordinates (\a x, \a y, \a z, \a t) may be accessed using a command like: \code value = im->data[ x*im->offsetX + y*im->offsetY + z*im->offsetZ + t*im->offsetT ]; \endcode Note that this approach makes it possible to extract a sub-image from an original image without the need to reallocate memory. */ typedef struct { fff_array_ndims ndims; /*!< Image flag */ fff_datatype datatype; /*!< Image encoding type */ size_t dimX; /*!< Dimension (number of pixels) along first axis */ size_t dimY; /*!< Dimension (number of pixels) along second axis */ size_t dimZ; /*!< Dimension (number of pixels) along third axis */ size_t dimT; /*!< Dimension (number of pixels) along fourth axis */ size_t offsetX; /*!< Offset (relative to type) along first axis */ size_t offsetY; /*!< Offset (relative to type) along second axis */ size_t offsetZ; /*!< Offset (relative to type) along third axis */ size_t offsetT; /*!< Offset (relative to type) along fourth axis */ size_t byte_offsetX; /*!< Offset (in bytes) along first axis */ size_t byte_offsetY; /*!< Offset (in bytes) along second axis */ size_t byte_offsetZ; /*!< Offset (in bytes) along third axis */ size_t byte_offsetT; /*!< Offset (in bytes) along fourth axis */ void* data; /*!< Image buffer */ int owner; /*!< Non-zero if the object owns its data */ double (*get)(const char*, size_t); /*!< Get accessor */ void (*set)(char*, size_t, double); /*!< Set accessor */ } fff_array; /*! \struct fff_array_iterator \brief Image iterator structure */ typedef struct { size_t idx; size_t size; char* data; size_t x; size_t y; size_t z; size_t t; size_t ddimY; size_t ddimZ; size_t ddimT; size_t incX; size_t incY; size_t incZ; size_t incT; void (*update)(void*); /*!< Updater */ } fff_array_iterator; /*! \brief Constructor for the fff_array structure \param datatype image encoding type \param dimX number of pixels along the first axis \param dimY number of pixels along the second axis \param dimZ number of pixels along the third axis \param dimT number of pixels along the fourth axis This function allocates a new image buffer. */ extern fff_array* fff_array_new(fff_datatype datatype, size_t dimX, size_t dimY, size_t dimZ, size_t dimT); /*! \brief Destructor for the \c fff_array structure \param thisone fff_array member to be deleted */ extern void fff_array_delete(fff_array* thisone); /*! \brief Array view \param datatype image encoding type \param buf already allocated image buffer \param dimX number of pixels along the first axis \param dimY number of pixels along the second axis \param dimZ number of pixels along the third axis \param dimT number of pixels along the fourth axis \param offX offset along the first axis \param offY offset along the second axis \param offZ offset along the third axis \param offT offset along the fourth axis This function assumes that the image buffer is already allocated. */ extern fff_array fff_array_view(fff_datatype datatype, void* buf, size_t dimX, size_t dimY, size_t dimZ, size_t dimT, size_t offX, size_t offY, size_t offZ, size_t offT); /*! \brief Generic function to access a voxel's value \param thisone input image \param x first coordinate \param y second coordinate \param z third coordinate \param t fourth coordinate Get image value at a specific location defined by voxel coordinates. Return \c fff_NAN if the position is out of bounds. */ extern double fff_array_get(const fff_array* thisone, size_t x, size_t y, size_t z, size_t t); /*! 
\brief Generic function to set one voxel's value \param value value to set \param thisone input image \param x first coordinate \param y second coordinate \param z third coordinate \param t fourth coordinate */ extern void fff_array_set(fff_array* thisone, size_t x, size_t y, size_t z, size_t t, double value); /*! \brief Set all pixel values to a given constant \param thisone image \param c constant */ extern void fff_array_set_all(fff_array* thisone, double c); /*! \brief Extract an image block \param thisone input image \param x0 first coordinate of the starting point \param x1 first coordinate of the finishing point \param y0 second coordinate of the starting point \param y1 second coordinate of the finishing point \param z0 third coordinate of the starting point \param z1 third coordinate of the finishing point \param t0 fourth coordinate of the starting point \param t1 fourth coordinate of the finishing point \param fX subsampling factor in the first direction \param fY subsampling factor in the second direction \param fZ subsampling factor in the third direction \param fT subsampling factor in the fourth direction */ extern fff_array fff_array_get_block(const fff_array* thisone, size_t x0, size_t x1, size_t fX, size_t y0, size_t y1, size_t fY, size_t z0, size_t z1, size_t fZ, size_t t0, size_t t1, size_t fT); extern void fff_array_extrema (double* min, double* max, const fff_array* thisone); extern void fff_array_copy(fff_array* ares, const fff_array* asrc); extern void fff_array_compress(fff_array* ares, const fff_array* asrc, double r0, double s0, double r1, double s1); extern void fff_array_add (fff_array * x, const fff_array * y); extern void fff_array_sub (fff_array * x, const fff_array * y); extern void fff_array_div (fff_array * x, const fff_array * y); extern void fff_array_mul (fff_array * x, const fff_array * y); /* Convert image values to [0,clamp-1]; typically clamp = 256. Possibly modify the dynamic range if the input value is overestimated. For instance, the reconstructed MRI signal is generally encoded in 12 bits (values ranging from 0 to 4095). Therefore, this operation may result in a loss of information. 
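A minimal call sketch (ares and asrc assumed pre-allocated with identical dimensions; on return, clamp may have been lowered to the actual dynamic range of the data):

   int clamp = 256;
   fff_array_clamp(ares, asrc, 0.0, &clamp);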
*/ extern void fff_array_clamp(fff_array* ares, const fff_array* asrc, double th, int* clamp); extern fff_array_iterator fff_array_iterator_init(const fff_array* array); extern fff_array_iterator fff_array_iterator_init_skip_axis(const fff_array* array, int axis); /* extern void fff_array_iterator_update(fff_array_iterator* thisone); */ extern void fff_array_iterate_vector_function(fff_array* array, int axis, void(*func)(fff_vector*, void*), void* par); #ifdef __cplusplus } #endif #endif nipy-0.3.0/libcstat/fff/fff_base.c000066400000000000000000000043341210344137400167410ustar00rootroot00000000000000#include "fff_base.h" unsigned int fff_nbytes(fff_datatype type) { unsigned int nbytes; switch(type) { case FFF_UCHAR: nbytes = (unsigned int)sizeof(unsigned char); break; case FFF_SCHAR: nbytes = (unsigned int)sizeof(signed char); break; case FFF_USHORT: nbytes = (unsigned int)sizeof(unsigned short); break; case FFF_SSHORT: nbytes = (unsigned int)sizeof(signed short); break; case FFF_UINT: nbytes = (unsigned int)sizeof(unsigned int); break; case FFF_INT: nbytes = (unsigned int)sizeof(int); break; case FFF_ULONG: nbytes = (unsigned int)sizeof(unsigned long); break; case FFF_LONG: nbytes = (unsigned int)sizeof(long); break; case FFF_FLOAT: nbytes = (unsigned int)sizeof(float); break; case FFF_DOUBLE: nbytes = (unsigned int)sizeof(double); break; default: nbytes = 0; break; } return nbytes; } int fff_is_integer(fff_datatype type) { int ok = 0; switch (type) { default: break; case FFF_UCHAR: case FFF_SCHAR: case FFF_USHORT: case FFF_SSHORT: case FFF_UINT: case FFF_INT: case FFF_ULONG: case FFF_LONG: ok = 1; break; } return ok; } fff_datatype fff_get_datatype( unsigned int sizeType, unsigned int integerType, unsigned int signedType ) { fff_datatype type = FFF_UNKNOWN_TYPE; /* Case: integer type */ if ( integerType ) { if ( signedType ) { if ( sizeType == sizeof(signed char) ) type = FFF_SCHAR; else if ( sizeType == sizeof(signed short) ) type = FFF_SSHORT; else if ( sizeType == sizeof(int) ) type = FFF_INT; else if ( sizeType == sizeof(signed long int) ) type = FFF_LONG; } else { if ( sizeType == sizeof(unsigned char) ) type = FFF_UCHAR; else if ( sizeType == sizeof(unsigned short) ) type = FFF_USHORT; else if ( sizeType == sizeof(unsigned int) ) type = FFF_UINT; else if ( sizeType == sizeof(unsigned long int) ) type = FFF_ULONG; } } /* Case: floating type */ else { if ( sizeType == sizeof(float) ) type = FFF_FLOAT; else if ( sizeType == sizeof(double) ) type = FFF_DOUBLE; } return type; } nipy-0.3.0/libcstat/fff/fff_base.h000066400000000000000000000110321210344137400167370ustar00rootroot00000000000000/*! \file fff_base.h \brief Basic fff macros and error handling functions \author Alexis Roche \date 2003-2008 */ #ifndef FFF_BASE #define FFF_BASE #ifdef __cplusplus extern "C" { #endif #include #include #ifdef INFINITY #define FFF_POSINF INFINITY #define FFF_NEGINF (-INFINITY) #else #define FFF_POSINF HUGE_VAL #define FFF_NEGINF (-HUGE_VAL) #endif #ifdef NAN #define FFF_NAN NAN #else #define FFF_NAN (FFF_POSINF/FFF_POSINF) #endif #ifdef NO_APPEND_FORTRAN # define FFF_FNAME(x) x #else # define FFF_FNAME(x) x##_ #endif /*! Displays an error message with associated error code. */ #define FFF_ERROR(message, errcode) \ { \ fprintf(stderr, "Unhandled error: %s (errcode %i)\n", message, errcode); \ fprintf(stderr, " in file %s, line %d, function %s\n", __FILE__, __LINE__, __FUNCTION__); \ } \ /*! Displays a warning message. 
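The message is printed to stderr together with the file, line and function of the call site, e.g. FFF_WARNING("Image type must be double.") as used in fff_array.c.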
*/ #define FFF_WARNING(message) \ { \ fprintf(stderr, "Warning: %s\n", message); \ fprintf(stderr, " in file %s, line %d, function %s\n", __FILE__, __LINE__, __FUNCTION__); \ } \ /*! Displays a debug message. */ #define FFF_DEBUG(message) \ { \ fprintf(stderr, "DEBUG: %s\n", message); \ fprintf(stderr, " in file %s, line %d, function %s\n", __FILE__, __LINE__, __FUNCTION__); \ } \ /*! Rounds \a a to the nearest smaller integer \bug Compilator-dependent? */ #define FFF_FLOOR(a)((a)>0.0 ? (int)(a):(((int)(a)-a)!= 0.0 ? (int)(a)-1 : (int)(a))) /*! Rounds \a a to the nearest integer (either smaller or bigger) */ #define FFF_ROUND(a)(FFF_FLOOR(a+0.5)) /*! Rounds \a a to the nearest bigger integer */ #define FFF_CEIL(a)(-(FFF_FLOOR(-(a)))) /*! Rounds \a a to the nearest smaller integer, assuming \a a is non-negative \bug Compilator-dependent? */ #define FFF_UNSIGNED_FLOOR(a) ( (int)(a) ) /*! Rounds \a a to the nearest integer, assuming \a a is non-negative */ #define FFF_UNSIGNED_ROUND(a) ( (int)(a+0.5) ) /*! Rounds \a a to the nearest bigger integer, assuming \a a is non-negative */ #define FFF_UNSIGNED_CEIL(a) ( ( (int)(a)-a )!=0.0 ? (int)(a+1) : (int)(a) ) /*! Returns 1 if \a a is positive, -1 if \a a is negative, 0 if \a a equals zero Note that this macro differs from \a GSL_SIGN which returns +1 if \a a==0 */ #define FFF_SIGN(a)( (a)>0.0 ? 1 : ( (a)<0.0 ? -1 : 0 ) ) /*! Computes the absolute value of \a a */ #define FFF_ABS(a) ( (a) > 0.0 ? (a) : (-(a)) ) /*! Computes \f$ a^2 \f$ */ #define FFF_SQR(a) ( (a)*(a) ) /*! Computes \f$ a^3 \f$ */ #define FFF_CUBE(a) ( (a)*(a)*(a) ) /*! Computes \f$ a modulo, b ie the remainder after division of a by b \f$ */ #define FFF_REM(a, b) ( (int)(a)%(int)(b) ) /*! Computes the minimum of \a a and \a b */ #define FFF_MIN(a,b) ( (a) < (b) ? (a) : (b) ) /*! Computes the maximum of \a a and \a b */ #define FFF_MAX(a,b) ( (a) > (b) ? (a) : (b) ) /*! Low threshold a value to avoid vanishing */ #define FFF_TINY 1e-50 #define FFF_ENSURE_POSITIVE(a) ( (a) > FFF_TINY ? (a) : FFF_TINY ) #define FFF_IS_ODD(n) ((n) & 1) /*! \typedef fff_datatype \brief Data encoding types */ typedef enum { FFF_UNKNOWN_TYPE = -1, /*!< unknown type */ FFF_UCHAR = 0, /*!< unsigned char */ FFF_SCHAR = 1, /*!< signed char */ FFF_USHORT = 2, /*!< unsigned short */ FFF_SSHORT = 3, /*!< signed short */ FFF_UINT = 4, /*!< unsigned int */ FFF_INT = 5, /*!< (signed) int */ FFF_ULONG = 6, /*!< unsigned long int */ FFF_LONG = 7, /*!< (signed) long int */ FFF_FLOAT = 8, /*!< float */ FFF_DOUBLE = 9 /*!< double */ } fff_datatype; /*! \brief Return the byte length of a given data type \param type input data type */ extern unsigned int fff_nbytes(fff_datatype type); /*! \brief Return 1 if data type is integer, 0 otherwise \param type input data type */ extern int fff_is_integer(fff_datatype type); /*! 
\brief Return the data type that matches given features \param sizeType size in bytes \param integerType if zero, a floating-point type (\c float or \c double) is assumed \param signedType for integer types, tells whether the type is signed or not */ extern fff_datatype fff_get_datatype( unsigned int sizeType, unsigned int integerType, unsigned int signedType ); #ifdef __cplusplus } #endif #endif nipy-0.3.0/libcstat/fff/fff_blas.c000066400000000000000000000435141210344137400167530ustar00rootroot00000000000000#include "fff_base.h" #include "fff_blas.h" #include #define FNAME FFF_FNAME /* TODO : add tests for dimension compatibility */ /* We have to account for the fact that BLAS assumes column-major ordered matrices by transposing */ #define DIAG(Diag) ( (Diag)==(CblasUnit) ? "U" : "N" ) #define TRANS(Trans) ( (Trans)==(CblasNoTrans) ? "N" : "T" ) #define SWAP_TRANS(Trans) ( (Trans)==(CblasNoTrans) ? "T" : "N" ) #define SWAP_UPLO(Uplo) ( (Uplo)==(CblasUpper) ? "L" : "U" ) #define SWAP_SIDE(Side) ( (Side)==(CblasRight) ? "L" : "R" ) /* BLAS 1 */ extern double FNAME(ddot)(int* n, double* dx, int* incx, double* dy, int* incy); extern double FNAME(dnrm2)(int* n, double* x, int* incx); extern double FNAME(dasum)(int* n, double* dx, int* incx); extern int FNAME(idamax)(int* n, double* dx, int* incx); extern int FNAME(dswap)(int* n, double* dx, int* incx, double* dy, int* incy); extern int FNAME(dcopy)(int* n, double* dx, int* incx, double* dy, int* incy); extern int FNAME(daxpy)(int* n, double* da, double* dx, int* incx, double* dy, int* incy); extern int FNAME(dscal)(int* n, double* da, double* dx, int* incx); extern int FNAME(drotg)(double* da, double* db, double* c__, double* s); extern int FNAME(drot)(int* n, double* dx, int* incx, double* dy, int* incy, double* c__, double* s); extern int FNAME(drotmg)(double* dd1, double* dd2, double* dx1, double* dy1, double* dparam); extern int FNAME(drotm)(int* n, double* dx, int* incx, double* dy, int* incy, double* dparam); /* BLAS 2 */ extern int FNAME(dgemv)(char *trans, int* m, int* n, double* alpha, double* a, int* lda, double* x, int* incx, double* beta, double* y, int* incy); extern int FNAME(dtrmv)(char *uplo, char *trans, char *diag, int* n, double* a, int* lda, double* x, int* incx); extern int FNAME(dtrsv)(char *uplo, char *trans, char *diag, int* n, double* a, int* lda, double* x, int* incx); extern int FNAME(dsymv)(char *uplo, int* n, double* alpha, double* a, int* lda, double* x, int* incx, double *beta, double* y, int* incy); extern int FNAME(dger)(int* m, int* n, double* alpha, double* x, int* incx, double* y, int* incy, double* a, int* lda); extern int FNAME(dsyr)(char *uplo, int* n, double* alpha, double* x, int* incx, double* a, int* lda); extern int FNAME(dsyr2)(char *uplo, int* n, double* alpha, double* x, int* incx, double* y, int* incy, double* a, int* lda); /* BLAS 3 */ extern int FNAME(dgemm)(char *transa, char *transb, int* m, int* n, int* k, double* alpha, double* a, int* lda, double* b, int* ldb, double* beta, double* c__, int* ldc); extern int FNAME(dsymm)(char *side, char *uplo, int* m, int* n, double* alpha, double* a, int* lda, double* b, int* ldb, double* beta, double* c__, int* ldc); extern int FNAME(dtrmm)(char *side, char *uplo, char *transa, char *diag, int* m, int* n, double* alpha, double* a, int* lda, double* b, int* ldb); extern int FNAME(dtrsm)(char *side, char *uplo, char *transa, char *diag, int* m, int* n, double* alpha, double* a, int* lda, double* b, int* ldb); extern int FNAME(dsyrk)(char *uplo, char 
*trans, int* n, int* k, double* alpha, double* a, int* lda, double* beta, double* c__, int* ldc); extern int FNAME(dsyr2k)(char *uplo, char *trans, int* n, int* k, double* alpha, double* a, int* lda, double* b, int* ldb, double* beta, double* c__, int* ldc); /****** BLAS 1 ******/ /* Compute the scalar product x^T y for the vectors x and y, returning the result in result.*/ double fff_blas_ddot (const fff_vector * x, const fff_vector * y) { int n = (int) x->size; int incx = (int) x->stride; int incy = (int) y->stride; if ( n != y->size ) return 1; return( FNAME(ddot)(&n, x->data, &incx, y->data, &incy) ); } /* Compute the Euclidean norm ||x||_2 = \sqrt {\sum x_i^2} of the vector x. */ double fff_blas_dnrm2 (const fff_vector * x) { int n = (int) x->size; int incx = (int) x->stride; return( FNAME(dnrm2)(&n, x->data, &incx) ); } /* Compute the absolute sum \sum |x_i| of the elements of the vector x.*/ double fff_blas_dasum (const fff_vector * x) { int n = (int) x->size; int incx = (int) x->stride; return( FNAME(dasum)(&n, x->data, &incx) ); } /* Return the index of the largest element of the vector x. The largest element is determined by its absolute magnitude. We substract one to the original Fortran routine an actual C index. */ CBLAS_INDEX_t fff_blas_idamax (const fff_vector * x) { int n = (int) x->size; int incx = (int) x->stride; return( (CBLAS_INDEX_t)(FNAME(idamax)(&n, x->data, &incx) - 1) ); } /* Exchange the elements of the vectors x and y.*/ int fff_blas_dswap (fff_vector * x, fff_vector * y) { int n = (int) x->size; int incx = (int) x->stride; int incy = (int) y->stride; if ( n != y->size ) return 1; return( FNAME(dswap)(&n, x->data, &incx, y->data, &incy) ); } /* Copy the elements of the vector x into the vector y */ int fff_blas_dcopy (const fff_vector * x, fff_vector * y) { int n = (int) x->size; int incx = (int) x->stride; int incy = (int) y->stride; if ( n != y->size ) return 1; return( FNAME(dcopy)(&n, x->data, &incx, y->data, &incy) ); } /* Compute the sum y = \alpha x + y for the vectors x and y */ int fff_blas_daxpy (double alpha, const fff_vector * x, fff_vector * y) { int n = (int) x->size; int incx = (int) x->stride; int incy = (int) y->stride; if ( n != y->size ) return 1; return( FNAME(daxpy)(&n, &alpha, x->data, &incx, y->data, &incy) ); } /* Rescale the vector x by the multiplicative factor alpha. */ int fff_blas_dscal (double alpha, fff_vector * x) { int n = (int) x->size; int incx = (int) x->stride; return( FNAME(dscal)(&n, &alpha, x->data, &incx) ); } /* Compute a Givens rotation (c,s) which zeroes the vector (a,b), [ c s ] [ a ] = [ r ] [ -s c ] [ b ] [ 0 ] The variables a and b are overwritten by the routine. */ int fff_blas_drotg (double a[], double b[], double c[], double s[]) { return( FNAME(drotg)(a, b, c, s) ); } /* Apply a Givens rotation (x', y') = (c x + s y, -s x + c y) to the vectors x, y.*/ int fff_blas_drot (fff_vector * x, fff_vector * y, double c, double s) { int n = (int) x->size; int incx = (int) x->stride; int incy = (int) y->stride; if ( n != y->size ) return 1; return( FNAME(drot)(&n, x->data, &incx, y->data, &incy, &c, &s) ); } /* Compute a modified Givens transformation. The modified Givens transformation is defined in the original Level-1 blas specification. 
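As in the reference BLAS, the 5-element array P holds a flag in its first entry followed by the entries of the 2x2 transformation matrix H; fff_blas_drotmg fills it and fff_blas_drotm applies it to a pair of vectors.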
*/ int fff_blas_drotmg (double d1[], double d2[], double b1[], double b2, double P[]) { return( FNAME(drotmg)(d1, d2, b1, &b2, P) ); } /* Apply a modified Givens transformation.*/ int fff_blas_drotm (fff_vector * x, fff_vector * y, const double P[]) { int n = (int) x->size; int incx = (int) x->stride; int incy = (int) y->stride; if ( n != y->size ) return 1; return( FNAME(drotm)(&n, x->data, &incx, y->data, &incy, (double*)P) ); } /****** BLAS 2 ******/ /* Compute the matrix-vector product and sum y = \alpha op(A) x + \beta y, where op(A) = A, A^T, A^H for TransA = CblasNoTrans, CblasTrans, CblasConjTrans. */ int fff_blas_dgemv (CBLAS_TRANSPOSE_t TransA, double alpha, const fff_matrix * A, const fff_vector * x, double beta, fff_vector * y) { char* trans = SWAP_TRANS(TransA); int incx = (int) x->stride; int incy = (int) y->stride; int m = (int) A->size2; int n = (int) A->size1; int lda = (int) A->tda; return( FNAME(dgemv)(trans, &m, &n, &alpha, A->data, &lda, x->data, &incx, &beta, y->data, &incy) ); } /* Compute the matrix-vector product x = op(A) x for the triangular matrix A, where op(A) = A, A^T, A^H for TransA = CblasNoTrans, CblasTrans, CblasConjTrans. When Uplo is CblasUpper then the upper triangle of A is used, and when Uplo is CblasLower then the lower triangle of A is used. If Diag is CblasNonUnit then the diagonal of the matrix is used, but if Diag is CblasUnit then the diagonal elements of the matrix A are taken as unity and are not referenced.*/ int fff_blas_dtrmv (CBLAS_UPLO_t Uplo, CBLAS_TRANSPOSE_t TransA, CBLAS_DIAG_t Diag, const fff_matrix * A, fff_vector * x) { char* uplo = SWAP_UPLO(Uplo); char* trans = SWAP_TRANS(TransA); char* diag = DIAG(Diag); int incx = (int) x->stride; int n = (int) A->size1; int lda = (int) A->tda; return( FNAME(dtrmv)(uplo, trans, diag, &n, A->data, &lda, x->data, &incx) ); } /* Compute inv(op(A)) x for x, where op(A) = A, A^T, A^H for TransA = CblasNoTrans, CblasTrans, CblasConjTrans. When Uplo is CblasUpper then the upper triangle of A is used, and when Uplo is CblasLower then the lower triangle of A is used. If Diag is CblasNonUnit then the diagonal of the matrix is used, but if Diag is CblasUnit then the diagonal elements of the matrix A are taken as unity and are not referenced. */ int fff_blas_dtrsv (CBLAS_UPLO_t Uplo, CBLAS_TRANSPOSE_t TransA, CBLAS_DIAG_t Diag, const fff_matrix * A, fff_vector * x) { char* uplo = SWAP_UPLO(Uplo); char* trans = SWAP_TRANS(TransA); char* diag = DIAG(Diag); int incx = (int) x->stride; int n = (int) A->size1; int lda = (int) A->tda; return( FNAME(dtrsv)(uplo, trans, diag, &n, A->data, &lda, x->data, &incx) ); } /* Compute the matrix-vector product and sum y = \alpha A x + \beta y for the symmetric matrix A. Since the matrix A is symmetric only its upper half or lower half need to be stored. When Uplo is CblasUpper then the upper triangle and diagonal of A are used, and when Uplo is CblasLower then the lower triangle and diagonal of A are used. 
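A hedged usage sketch (with A an fff_matrix* holding a symmetric matrix in its upper triangle, and x, y fff_vector* of matching sizes): fff_blas_dsymv(CblasUpper, 1.0, A, x, 0.0, y) overwrites y with the product A x.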
*/ int fff_blas_dsymv (CBLAS_UPLO_t Uplo, double alpha, const fff_matrix * A, const fff_vector * x, double beta, fff_vector * y) { char* uplo = SWAP_UPLO(Uplo); int incx = (int) x->stride; int incy = (int) y->stride; int n = (int) A->size1; int lda = (int) A->tda; return( FNAME(dsymv)(uplo, &n, &alpha, A->data, &lda, x->data, &incx, &beta, y->data, &incy) ); } /* Compute the rank-1 update A = \alpha x y^T + A of the matrix A.*/ int fff_blas_dger (double alpha, const fff_vector * x, const fff_vector * y, fff_matrix * A) { int incx = (int) x->stride; int incy = (int) y->stride; int m = (int) A->size2; int n = (int) A->size1; int lda = (int) A->tda; return( FNAME(dger)(&m, &n, &alpha, y->data, &incy, x->data, &incx, A->data, &lda) ); } /* Compute the symmetric rank-1 update A = \alpha x x^T + A of the symmetric matrix A. Since the matrix A is symmetric only its upper half or lower half need to be stored. When Uplo is CblasUpper then the upper triangle and diagonal of A are used, and when Uplo is CblasLower then the lower triangle and diagonal of A are used. */ int fff_blas_dsyr (CBLAS_UPLO_t Uplo, double alpha, const fff_vector * x, fff_matrix * A) { char* uplo = SWAP_UPLO(Uplo); int incx = (int) x->stride; int n = (int) A->size1; int lda = (int) A->tda; return( FNAME(dsyr)(uplo, &n, &alpha, x->data, &incx, A->data, &lda ) ); } /* These functions compute the symmetric rank-2 update A = \alpha x y^T + \alpha y x^T + A of the symmetric matrix A. Since the matrix A is symmetric only its upper half or lower half need to be stored. When Uplo is CblasUpper then the upper triangle and diagonal of A are used, and when Uplo is CblasLower then the lower triangle and diagonal of A are used. */ int fff_blas_dsyr2 (CBLAS_UPLO_t Uplo, double alpha, const fff_vector * x, const fff_vector * y, fff_matrix * A) { char* uplo = SWAP_UPLO(Uplo); int incx = (int) x->stride; int incy = (int) y->stride; int n = (int) A->size1; int lda = (int) A->tda; return( FNAME(dsyr2)(uplo, &n, &alpha, y->data, &incy, x->data, &incx, A->data, &lda) ); } /****** BLAS 3 ******/ /* Compute the matrix-matrix product and sum C = \alpha op(A) op(B) + \beta C where op(A) = A, A^T, A^H for TransA = CblasNoTrans, CblasTrans, CblasConjTrans and similarly for the parameter TransB. */ int fff_blas_dgemm (CBLAS_TRANSPOSE_t TransA, CBLAS_TRANSPOSE_t TransB, double alpha, const fff_matrix * A, const fff_matrix * B, double beta, fff_matrix * C) { /* We have A and B in C convention, hence At and Bt in F convention. By computing Bt*At in F convention, we get A*B in C convention. Hence, m is the number of rows of Bt and Ct (number of cols of B and C) n is the number of cols of At and Ct (number of rows of A and C) k is the number of cols of Bt and rows of At (number of rows of B and cols of A) */ char* transa = TRANS(TransA); char* transb = TRANS(TransB); int m = C->size2; int n = C->size1; int lda = (int) A->tda; int ldb = (int) B->tda; int ldc = (int) C->tda; int k = (TransB == CblasNoTrans) ? (int)B->size1 : (int)B->size2; return( FNAME(dgemm)(transb, transa, &m, &n, &k, &alpha, B->data, &ldb, A->data, &lda, &beta, C->data, &ldc) ); } /* Compute the matrix-matrix product and sum C = \alpha A B + \beta C for Side is CblasLeft and C = \alpha B A + \beta C for Side is CblasRight, where the matrix A is symmetric. When Uplo is CblasUpper then the upper triangle and diagonal of A are used, and when Uplo is CblasLower then the lower triangle and diagonal of A are used. 
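For instance, with A an fff_matrix* holding a symmetric matrix in its upper triangle and B, C fff_matrix* of matching sizes, fff_blas_dsymm(CblasLeft, CblasUpper, 1.0, A, B, 0.0, C) stores the product A B in C.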
*/ int fff_blas_dsymm (CBLAS_SIDE_t Side, CBLAS_UPLO_t Uplo, double alpha, const fff_matrix * A, const fff_matrix * B, double beta, fff_matrix * C) { char* side = SWAP_SIDE(Side); char* uplo = SWAP_UPLO(Uplo); int m = C->size2; int n = C->size1; int lda = (int) A->tda; int ldb = (int) B->tda; int ldc = (int) C->tda; return ( FNAME(dsymm)(side, uplo, &m, &n, &alpha, A->data, &lda, B->data, &ldb, &beta, C->data, &ldc) ); } /* Compute the matrix-matrix product B = \alpha op(A) B for Side is CblasLeft and B = \alpha B op(A) for Side is CblasRight. The matrix A is triangular and op(A) = A, A^T, A^H for TransA = CblasNoTrans, CblasTrans, CblasConjTrans. When Uplo is CblasUpper then the upper triangle of A is used, and when Uplo is CblasLower then the lower triangle of A is used. If Diag is CblasNonUnit then the diagonal of A is used, but if Diag is CblasUnit then the diagonal elements of the matrix A are taken as unity and are not referenced. */ int fff_blas_dtrmm (CBLAS_SIDE_t Side, CBLAS_UPLO_t Uplo, CBLAS_TRANSPOSE_t TransA, CBLAS_DIAG_t Diag, double alpha, const fff_matrix * A, fff_matrix * B) { char* side = SWAP_SIDE(Side); char* uplo = SWAP_UPLO(Uplo); char* transa = TRANS(TransA); char* diag = DIAG(Diag); int m = B->size2; int n = B->size1; int lda = (int) A->tda; int ldb = (int) B->tda; return( FNAME(dtrmm)(side, uplo, transa, diag, &m, &n, &alpha, A->data, &lda, B->data, &ldb) ); } /* Compute the inverse-matrix matrix product B = \alpha op(inv(A))B for Side is CblasLeft and B = \alpha B op(inv(A)) for Side is CblasRight. The matrix A is triangular and op(A) = A, A^T, A^H for TransA = CblasNoTrans, CblasTrans, CblasConjTrans. When Uplo is CblasUpper then the upper triangle of A is used, and when Uplo is CblasLower then the lower triangle of A is used. If Diag is CblasNonUnit then the diagonal of A is used, but if Diag is CblasUnit then the diagonal elements of the matrix A are taken as unity and are not referenced. */ int fff_blas_dtrsm (CBLAS_SIDE_t Side, CBLAS_UPLO_t Uplo, CBLAS_TRANSPOSE_t TransA, CBLAS_DIAG_t Diag, double alpha, const fff_matrix * A, fff_matrix * B) { char* side = SWAP_SIDE(Side); char* uplo = SWAP_UPLO(Uplo); char* transa = TRANS(TransA); char* diag = DIAG(Diag); int m = B->size2; int n = B->size1; int lda = (int) A->tda; int ldb = (int) B->tda; return( FNAME(dtrsm)(side, uplo, transa, diag, &m, &n, &alpha, A->data, &lda, B->data, &ldb) ); } /* Compute a rank-k update of the symmetric matrix C, C = \alpha A A^T + \beta C when Trans is CblasNoTrans and C = \alpha A^T A + \beta C when Trans is CblasTrans. Since the matrix C is symmetric only its upper half or lower half need to be stored. When Uplo is CblasUpper then the upper triangle and diagonal of C are used, and when Uplo is CblasLower then the lower triangle and diagonal of C are used. */ int fff_blas_dsyrk (CBLAS_UPLO_t Uplo, CBLAS_TRANSPOSE_t Trans, double alpha, const fff_matrix * A, double beta, fff_matrix * C) { char* uplo = SWAP_UPLO(Uplo); char* trans = SWAP_TRANS(Trans); int n = C->size1; int k = (Trans == CblasNoTrans) ? (int)A->size1 : (int)A->size2; int lda = (int) A->tda; int ldc = (int) C->tda; return( FNAME(dsyrk)(uplo, trans, &n, &k, &alpha, A->data, &lda, &beta, C->data, &ldc) ); } /* Compute a rank-2k update of the symmetric matrix C, C = \alpha A B^T + \alpha B A^T + \beta C when Trans is CblasNoTrans and C = \alpha A^T B + \alpha B^T A + \beta C when Trans is CblasTrans. Since the matrix C is symmetric only its upper half or lower half need to be stored. 
When Uplo is CblasUpper then the upper triangle and diagonal of C are used, and when Uplo is CblasLower then the lower triangle and diagonal of C are used. */ int fff_blas_dsyr2k (CBLAS_UPLO_t Uplo, CBLAS_TRANSPOSE_t Trans, double alpha, const fff_matrix * A, const fff_matrix * B, double beta, fff_matrix * C) { char* uplo = SWAP_UPLO(Uplo); char* trans = SWAP_TRANS(Trans); int n = C->size1; int k = (Trans == CblasNoTrans) ? (int)B->size1 : (int)B->size2; int lda = (int) A->tda; int ldb = (int) B->tda; int ldc = (int) C->tda; return( FNAME(dsyr2k)(uplo, trans, &n, &k, &alpha, B->data, &ldb, A->data, &lda, &beta, C->data, &ldc) ); } nipy-0.3.0/libcstat/fff/fff_blas.h000066400000000000000000000072431210344137400167570ustar00rootroot00000000000000/*! \file fff_blas.h \brief lite wrapper around the Fortran Basic Linear Algeabra Library (BLAS) \author Alexis Roche \date 2008 This library can be linked against the standard (Fortran) blas library, but not against cblas. */ #ifndef FFF_BLAS #define FFF_BLAS #ifdef __cplusplus extern "C" { #endif #include "fff_vector.h" #include "fff_matrix.h" #define CBLAS_INDEX_t size_t /* this may vary between platforms */ typedef enum {CblasRowMajor=101, CblasColMajor=102} CBLAS_ORDER_t; typedef enum {CblasNoTrans=111, CblasTrans=112, CblasConjTrans=113} CBLAS_TRANSPOSE_t; typedef enum {CblasUpper=121, CblasLower=122} CBLAS_UPLO_t; typedef enum {CblasNonUnit=131, CblasUnit=132} CBLAS_DIAG_t; typedef enum {CblasLeft=141, CblasRight=142} CBLAS_SIDE_t; /* BLAS 1 */ extern double fff_blas_ddot (const fff_vector * x, const fff_vector * y); extern double fff_blas_dnrm2 (const fff_vector * x); extern double fff_blas_dasum (const fff_vector * x); extern CBLAS_INDEX_t fff_blas_idamax (const fff_vector * x); extern int fff_blas_dswap (fff_vector * x, fff_vector * y); extern int fff_blas_dcopy (const fff_vector * x, fff_vector * y); extern int fff_blas_daxpy (double alpha, const fff_vector * x, fff_vector * y); extern int fff_blas_dscal (double alpha, fff_vector * x); extern int fff_blas_drot (fff_vector * x, fff_vector * y, double c, double s); extern int fff_blas_drotg (double a[], double b[], double c[], double s[]); extern int fff_blas_drotmg (double d1[], double d2[], double b1[], double b2, double P[]); extern int fff_blas_drotm (fff_vector * x, fff_vector * y, const double P[]); /* BLAS 2 */ extern int fff_blas_dgemv (CBLAS_TRANSPOSE_t TransA, double alpha, const fff_matrix * A, const fff_vector * x, double beta, fff_vector * y); extern int fff_blas_dtrmv (CBLAS_UPLO_t Uplo, CBLAS_TRANSPOSE_t TransA, CBLAS_DIAG_t Diag, const fff_matrix * A, fff_vector * x); extern int fff_blas_dtrsv (CBLAS_UPLO_t Uplo, CBLAS_TRANSPOSE_t TransA, CBLAS_DIAG_t Diag, const fff_matrix * A, fff_vector * x); extern int fff_blas_dsymv (CBLAS_UPLO_t Uplo, double alpha, const fff_matrix * A, const fff_vector * x, double beta, fff_vector * y); extern int fff_blas_dger (double alpha, const fff_vector * x, const fff_vector * y, fff_matrix * A); extern int fff_blas_dsyr (CBLAS_UPLO_t Uplo, double alpha, const fff_vector * x, fff_matrix * A); extern int fff_blas_dsyr2 (CBLAS_UPLO_t Uplo, double alpha, const fff_vector * x, const fff_vector * y, fff_matrix * A); /* BLAS 3 */ extern int fff_blas_dgemm (CBLAS_TRANSPOSE_t TransA, CBLAS_TRANSPOSE_t TransB, double alpha, const fff_matrix * A, const fff_matrix * B, double beta, fff_matrix * C); extern int fff_blas_dsymm (CBLAS_SIDE_t Side, CBLAS_UPLO_t Uplo, double alpha, const fff_matrix * A, const fff_matrix * B, double beta, fff_matrix * C); extern 
int fff_blas_dtrmm (CBLAS_SIDE_t Side, CBLAS_UPLO_t Uplo, CBLAS_TRANSPOSE_t TransA, CBLAS_DIAG_t Diag, double alpha, const fff_matrix * A, fff_matrix * B); extern int fff_blas_dtrsm (CBLAS_SIDE_t Side, CBLAS_UPLO_t Uplo, CBLAS_TRANSPOSE_t TransA, CBLAS_DIAG_t Diag, double alpha, const fff_matrix * A, fff_matrix * B); extern int fff_blas_dsyrk (CBLAS_UPLO_t Uplo, CBLAS_TRANSPOSE_t Trans, double alpha, const fff_matrix * A, double beta, fff_matrix * C); extern int fff_blas_dsyr2k (CBLAS_UPLO_t Uplo, CBLAS_TRANSPOSE_t Trans, double alpha, const fff_matrix * A, const fff_matrix * B, double beta, fff_matrix * C); #ifdef __cplusplus } #endif #endif nipy-0.3.0/libcstat/fff/fff_gen_stats.c000066400000000000000000000046121210344137400200150ustar00rootroot00000000000000#include "fff_gen_stats.h" #include "fff_lapack.h" #include #include #include #include #include /* Generate a random permutation from [0..n-1]. */ extern void fff_permutation(unsigned int* x, unsigned int n, unsigned long magic) { unsigned int* xi, i, ir, j, tmp, nc; unsigned long int m = magic; /* Initialize x as the identity permutation */ for(i=0, xi=x; i 0 ) { nn --; c = _combinations(kk-1, nn); /* If i is accepted, then store it and do: kk-- */ if ( m < c ) { *bx = i; bx ++; kk --; } else m = m - c; /* Next candidate */ i ++; } return; } /* Squared mahalanobis distance: d2 = x' S^-1 x Beware: x is not const */ extern double fff_mahalanobis(fff_vector* x, fff_matrix* S, fff_matrix* Saux) { double d2; double m = 0.0; /* Cholesky decomposition: S = L L^t, L lower triangular */ fff_lapack_dpotrf(CblasLower, S, Saux); /* Compute S^-1 x */ fff_blas_dtrsv(CblasLower, CblasNoTrans, CblasNonUnit, S, x); /* L^-1 x */ /* Compute x' S^-1 x */ d2 = (double) fff_vector_ssd(x, &m, 1); return d2; } nipy-0.3.0/libcstat/fff/fff_gen_stats.h000066400000000000000000000026771210344137400200330ustar00rootroot00000000000000/*! \file fff_gen_stats.h \brief General interest statistical routines \author Alexis Roche \date 2004-2008 */ #ifndef FFF_GEN_STATS #define FFF_GEN_STATS #ifdef __cplusplus extern "C" { #endif #include "fff_vector.h" #include "fff_matrix.h" /*! \brief Squared Mahalanobis distance \param x input data vector (beware: gets modified) \param S associated variance matrix \param Saux auxiliary matrix, same size as \a S Compute the squared Mahalanobis distance \f$ d^2 = x^t S^{-1} x \f$. The routine uses the Cholesky decomposition: \f$ S = L L^t \f$ where \a L is lower triangular, and then exploits the fact that \f$ d^2 = \| L^{-1}x \|^2 \f$. */ extern double fff_mahalanobis( fff_vector* x, fff_matrix* S, fff_matrix* Saux ); /* \brief Generate a permutation from \a [0..n-1] \param x output list of integers \param n interval range \param seed initial state of the random number generator \a x needs is assumed contiguous, pre-allocated with size \a n. */ extern void fff_permutation(unsigned int* x, unsigned int n, unsigned long magic); /* \brief Generate a random combination of \a k elements in \a [0..n-1]. \a x must be contiguous, pre-allocated with size \a k. By convention, elements are output in ascending order. 
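A short usage sketch (the output buffer size is the caller's responsibility):

   unsigned int idx[3];
   fff_combination(idx, 3, 10, 1234);

draws 3 distinct indices from [0..9], returned in ascending order; the magic argument selects which combination is produced.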
*/ extern void fff_combination(unsigned int* x, unsigned int k, unsigned int n, unsigned long magic); #ifdef __cplusplus } #endif #endif nipy-0.3.0/libcstat/fff/fff_glm_kalman.c000066400000000000000000000242631210344137400201340ustar00rootroot00000000000000#include "fff_glm_kalman.h" #include "fff_base.h" #include "fff_blas.h" #include #include /* Declaration of static functions */ static void _fff_glm_RKF_iterate_Vb( fff_matrix* Vb, const fff_matrix* Vb0, const fff_matrix* Hspp, double aux1, double aux2, fff_matrix* Maux ); static double _fff_glm_hermit_norm( const fff_matrix* A, const fff_vector* x, fff_vector* vaux ); fff_glm_KF* fff_glm_KF_new( size_t dim ) { fff_glm_KF * thisone; /* Start with allocating the object */ thisone = (fff_glm_KF*) calloc( 1, sizeof(fff_glm_KF) ); /* Checks that the pointer has been allocated */ if ( thisone == NULL) return NULL; /* Allocate KF objects */ thisone->b = fff_vector_new( dim ); thisone->Cby = fff_vector_new( dim ); thisone->Vb = fff_matrix_new( dim, dim ); /* Initialization */ thisone->dim = dim; thisone->t = 0; thisone->ssd = 0.0; thisone->s2 = 0.0; thisone->dof = 0.0; thisone->s2_cor = 0.0; /* Initialize covariance using a scalar matrix */ fff_matrix_set_scalar( thisone->Vb, FFF_GLM_KALMAN_INIT_VAR); return thisone; } void fff_glm_KF_delete( fff_glm_KF* thisone ) { if ( thisone != NULL ) { if ( thisone->b != NULL ) fff_vector_delete(thisone->b); if ( thisone->Cby != NULL ) fff_vector_delete(thisone->Cby); if ( thisone->Vb != NULL ) fff_matrix_delete(thisone->Vb); free( thisone ); } return; } void fff_glm_KF_reset( fff_glm_KF* thisone ) { thisone->t = 0; thisone->ssd = 0.0; thisone->s2 = 0.0; thisone->dof = 0.0; thisone->s2_cor = 0.0; fff_vector_set_all( thisone->b, 0.0 ); fff_matrix_set_scalar( thisone->Vb, FFF_GLM_KALMAN_INIT_VAR ); return; } void fff_glm_KF_iterate( fff_glm_KF* thisone, double y, const fff_vector* x ) { double Ey, Vy, invVy, ino; /* Update time */ thisone->t ++; /* Measurement moments conditional to the effect */ Ey = fff_blas_ddot( x, thisone->b ); fff_blas_dsymv( CblasUpper, 1.0, thisone->Vb, x, 0.0, thisone->Cby ); Vy = fff_blas_ddot( x, thisone->Cby ) + 1.0; invVy = 1/Vy; /* Inovation */ ino = y - Ey; /* Update effect estimate */ fff_blas_daxpy( invVy*ino, thisone->Cby, thisone->b ); /* Update effect variance matrix: Vb = Vb - invVy*Cby*Cby' */ fff_blas_dger( -invVy, thisone->Cby, thisone->Cby, thisone->Vb ); /* Update sum of squares and scale */ thisone->ssd = thisone->ssd + FFF_SQR(ino)*invVy; thisone->s2 = thisone->ssd / (double)thisone->t; return; } fff_glm_RKF* fff_glm_RKF_new( size_t dim ) { fff_glm_RKF* thisone; /* Start with allocating the object */ thisone = (fff_glm_RKF*) calloc( 1, sizeof(fff_glm_RKF) ); /* Checks that the pointer has been allocated */ if ( thisone == NULL) return NULL; /* Allocate RKF objects */ thisone->Kfilt = fff_glm_KF_new( dim ); thisone->db = fff_vector_new( dim ); thisone->Hssd = fff_matrix_new( dim, dim ); thisone->Gspp = fff_vector_new( dim ); thisone->Hspp = fff_matrix_new( dim, dim ); thisone->b = fff_vector_new( dim ); thisone->Vb = fff_matrix_new( dim, dim ); thisone->vaux = fff_vector_new( dim ); thisone->Maux = fff_matrix_new( dim, dim ); /* Initialization */ thisone->dim = dim; thisone->t = 0; thisone->spp = 0.0; thisone->s2 = 0.0; thisone->a = 0.0; thisone->dof = 0.0; thisone->s2_cor = 0.0; return thisone; } void fff_glm_RKF_delete( fff_glm_RKF* thisone ) { if ( thisone != NULL ) { if ( thisone->Kfilt != NULL ) fff_glm_KF_delete( thisone->Kfilt ); if ( thisone->db != NULL ) 
fff_vector_delete(thisone->db); if ( thisone->Hssd != NULL ) fff_matrix_delete(thisone->Hssd); if ( thisone->Gspp != NULL ) fff_vector_delete(thisone->Gspp); if ( thisone->Hspp != NULL ) fff_matrix_delete(thisone->Hspp); if ( thisone->b != NULL ) fff_vector_delete(thisone->b); if ( thisone->Vb != NULL ) fff_matrix_delete(thisone->Vb); if ( thisone->vaux != NULL ) fff_vector_delete(thisone->vaux); if ( thisone->Maux != NULL ) fff_matrix_delete(thisone->Maux); free(thisone); } return; } void fff_glm_RKF_reset( fff_glm_RKF* thisone ) { thisone->t = 0; thisone->spp = 0; thisone->s2 = 0; thisone->a = 0; thisone->dof = 0; thisone->s2_cor = 0; fff_glm_KF_reset( thisone->Kfilt ); fff_vector_set_all( thisone->Gspp, 0.0 ); fff_matrix_set_all( thisone->Hssd, 0.0 ); fff_matrix_set_all( thisone->Hspp, 0.0 ); return; } void fff_glm_RKF_iterate( fff_glm_RKF* thisone, unsigned int nloop, double y, const fff_vector* x, double yy, const fff_vector* xx ) { unsigned int iter; double cor, r, rr, ssd_ref, spp_ref, aux1, aux2; /* Update time */ thisone->t ++; /* Store the current OLS estimate */ fff_vector_memcpy( thisone->vaux, thisone->Kfilt->b ); /* Iterate the standard Kalman filter */ fff_glm_KF_iterate( thisone->Kfilt, y, x ); /* OLS estimate variation */ fff_vector_memcpy( thisone->db, thisone->Kfilt->b ); fff_vector_sub( thisone->db, thisone->vaux ); /* db = b - db */ /* Update SSD hessian: Hssd = Hssd + x*x' */ fff_blas_dger( 1.0, x, x, thisone->Hssd ); /* Dont process any further if we are dealing with the first scan */ if ( thisone->t==1 ) { thisone->s2 = thisone->Kfilt->s2; fff_vector_memcpy( thisone->b, thisone->Kfilt->b ); fff_matrix_memcpy( thisone->Vb, thisone->Kfilt->Vb ); return; } /* Update bias correction factor otherwise */ else cor = (double)thisone->t / (double)(thisone->t - 1); /* Update SPP value */ aux1 = fff_blas_ddot( x, thisone->Kfilt->b ); r = y - aux1; aux1 = fff_blas_ddot( xx, thisone->Kfilt->b ); rr = yy - aux1; aux1 = fff_blas_ddot( thisone->Gspp, thisone->db ); thisone->spp += 2.0*aux1 + _fff_glm_hermit_norm( thisone->Hspp, thisone->db, thisone->vaux ) + r*rr; /* Update SPP gradient. 
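The three calls below implement Gspp += Hspp*db - (rr/2)*x - (r/2)*xx.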
Notice, we currently have: vaux == Hspp*db */ fff_vector_add ( thisone->Gspp, thisone->vaux ); fff_blas_daxpy( -.5*rr, x, thisone->Gspp ); fff_blas_daxpy( -.5*r, xx, thisone->Gspp ); /* Update SPP hessian: Hspp = Hspp + .5*(x*xx'+xx*x') */ fff_blas_dsyr2( CblasUpper, .5, x, xx, thisone->Hspp ); /* Update autocorrelation */ thisone->a = cor*thisone->spp / FFF_ENSURE_POSITIVE( thisone->Kfilt->ssd ); /* Update scale */ thisone->s2 = thisone->Kfilt->s2; /* Refinement loop */ fff_vector_memcpy( thisone->b, thisone->Kfilt->b ); fff_matrix_memcpy( thisone->Vb, thisone->Kfilt->Vb ); iter = 1; while ( iter < nloop ) { aux1 = 1/(1 + FFF_SQR(thisone->a)); aux2 = 2*cor*thisone->a; /* Update covariance */ _fff_glm_RKF_iterate_Vb( thisone->Vb, thisone->Kfilt->Vb, thisone->Hspp, aux1, aux2, thisone->Maux ); /* Update effect estimate */ fff_blas_dsymv( CblasUpper, aux2, thisone->Vb, thisone->Gspp, 0.0, thisone->db ); fff_vector_memcpy( thisone->b, thisone->Kfilt->b ); fff_vector_add( thisone->b, thisone->db ); /* Calculate SSD and SPP at current estimate */ aux1 = fff_blas_ddot( thisone->Gspp, thisone->db ); spp_ref = thisone->spp + 2*aux1 + _fff_glm_hermit_norm( thisone->Hspp, thisone->db, thisone->vaux ); ssd_ref = thisone->Kfilt->ssd + _fff_glm_hermit_norm( thisone->Hssd, thisone->db, thisone->vaux ); /* Update autocorrelation */ thisone->a = cor*spp_ref / FFF_ENSURE_POSITIVE(ssd_ref); /* Update scale */ thisone->s2 = (1-FFF_SQR(thisone->a))*ssd_ref / (double)thisone->t; /* Counter */ iter ++; } return; } void fff_glm_KF_fit( fff_glm_KF* thisone, const fff_vector* y, const fff_matrix* X ) { size_t i, offset_xi = 0; double* yi = y->data; fff_vector xi; /* Init */ fff_glm_KF_reset( thisone ); xi.size = X->size2; xi.stride = 1; /* Tests */ if ( X->size1 != y->size ) return; /* Loop */ for( i=0; i<y->size; i++, yi+=y->stride, offset_xi+=X->tda ) { /* Get the i-th row of the design matrix */ xi.data = X->data + offset_xi; /* Iterate the Kalman filter */ fff_glm_KF_iterate( thisone, *yi, &xi ); } /* DOF */ thisone->dof = (double)(y->size - X->size2); thisone->s2_cor = ((double)y->size/thisone->dof)*thisone->s2; return; } void fff_glm_RKF_fit( fff_glm_RKF* thisone, unsigned int nloop, const fff_vector* y, const fff_matrix* X ) { size_t i, offset_xi = 0; double* yi = y->data; fff_vector xi, xxi; double yyi = 0.0; unsigned int nloop_actual = 1; /* Init */ fff_glm_RKF_reset( thisone ); xi.size = X->size2; xi.stride = 1; xxi.size = X->size2; xxi.stride = 1; xxi.data = NULL; /* Tests */ if ( X->size1 != y->size ) return; /* Loop */ for( i=0; i<y->size; i++, yi+=y->stride, offset_xi+=X->tda ) { /* Get the i-th row of the design matrix */ xi.data = X->data + offset_xi; /* Refinement loop only needed at the last time frame */ if ( i == (y->size-1) ) nloop_actual = nloop; /* Iterate the refined Kalman filter */ fff_glm_RKF_iterate( thisone, nloop_actual, *yi, &xi, yyi, &xxi ); /* Copy current time values */ yyi = *yi; xxi.data = xi.data; } /* DOF */ thisone->dof = (double)(y->size - X->size2); thisone->s2_cor = ((double)y->size/thisone->dof)*thisone->s2; return; } /* Compute: Vb = aux1 * ( Id + aux1*aux2*Vb0*Hspp ) * Vb0 This corresponds to a simplification as the exact update formula would be: Vb = aux1 * pinv( eye(p) - aux1*aux2*Vbd*He ) * Vbd */ static void _fff_glm_RKF_iterate_Vb( fff_matrix* Vb, const fff_matrix* Vb0, const fff_matrix* Hspp, double aux1, double aux2, fff_matrix* Maux ) { fff_blas_dsymm ( CblasLeft, CblasUpper, 1.0, Hspp, Vb0, 0.0, Maux ); /** Maux == Hspp*Vb0 **/ fff_matrix_memcpy( Vb, Vb0 );
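/* At this point Maux == Hspp*Vb0 (from the dsymm above) and Vb == Vb0 (just copied), so the dgemm below computes Vb = aux1^2*aux2 * Vb0*Maux + aux1*Vb, i.e. Vb = aux1 * ( Id + aux1*aux2*Vb0*Hspp ) * Vb0, the simplified update stated in the comment above this function. */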
fff_blas_dgemm( CblasNoTrans, CblasNoTrans, FFF_SQR(aux1)*aux2, Vb0, Maux, aux1, Vb ); return; } /* Static function to compute the Hermitian norm: x'*A*x for a positive symmetric matrix A. The matrix-vector product A*x is output in the auxiliary vector, vaux. */ static double _fff_glm_hermit_norm( const fff_matrix* A, const fff_vector* x, fff_vector* vaux ) { double norm = 0.0; fff_blas_dsymv( CblasUpper, 1.0, A, x, 0.0, vaux ); norm = fff_blas_ddot( x, vaux ); return FFF_MAX( norm, 0.0 ); } nipy-0.3.0/libcstat/fff/fff_glm_kalman.h000066400000000000000000000133301210344137400201320ustar00rootroot00000000000000/*! \file fff_glm_kalman.h \brief General linear model fitting using Kalman filters \author Alexis Roche \date 2004-2006 This library implements several Kalman filter variants to fit a signal (represented as a gsl_vector structure) in terms of a general linear model. Kalman filtering works incrementally as opposed to more classical GLM fitting procedures, hence making it possible to produce parameter estimates on each time frame. Two methods are currently available: - the standard Kalman filter: performs an ordinary least-square regression, hence ignoring the temporal autocorrelation of the errors. - the refined Kalman filter: original Kalman extension to estimate both the GLM parameters and the noise autocorrelation based on an autoregressive AR(1) model. Significantly more memory demanding than the standard KF. */ #ifndef FFF_GLM_KALMAN #define FFF_GLM_KALMAN #ifdef __cplusplus extern "C" { #endif #include "fff_vector.h" #include "fff_matrix.h" #define FFF_GLM_KALMAN_INIT_VAR 1e7 /*! \struct fff_glm_KF \brief Standard Kalman filter structure. */ typedef struct{ size_t t; /*!< time counter */ size_t dim; /*!< model dimension (i.e. number of linear regressors) */ fff_vector* b; /*!< effect vector */ fff_matrix* Vb; /*!< effect variance matrix before multiplication by scale */ fff_vector* Cby; /*!< covariance between the effect and the data before multiplication by scale */ double ssd; /*!< sum of squared residuals */ double s2; /*!< scale parameter (squared) */ double dof; /*!< degrees of freedom */ double s2_cor; /*!< s2 corrected for degrees of freedom, s2_cor=n*s2/dof */ } fff_glm_KF; /*! \struct fff_glm_RKF \brief Refined Kalman filter structure. */ typedef struct{ size_t t; /*!< time counter */ size_t dim; /*!< model dimension (i.e. number of linear regressors) */ fff_glm_KF* Kfilt; /*!< standard kalman filter */ fff_vector* db; /*!< auxiliary vector for estimate variation */ fff_matrix* Hssd; /*!< SSD hessian (SSD = sum of squared differences) */ double spp; /*!< SSP value (SPP = sum of paired products) */ fff_vector* Gspp; /*!< SSP gradient */ fff_matrix* Hspp; /*!< SSP hessian */ fff_vector* b; /*!< effect vector */ fff_matrix* Vb; /*!< effect variance matrix before multiplication by scale */ double s2; /*!< scale parameter (squared) */ double a; /*!< autocorrelation parameter */ double dof; /*!< degrees of freedom */ double s2_cor; /*!< s2 corrected for degrees of freedom, s2_cor=n*s2/dof */ fff_vector* vaux; /*!< auxiliary vector */ fff_matrix* Maux; /*!< auxiliary matrix */ } fff_glm_RKF; /*! \brief Constructor for the fff_glm_KF structure \param dim model dimension (number of linear regressors) */ extern fff_glm_KF* fff_glm_KF_new( size_t dim ); /*! \brief Destructor for the fff_glm_KF structure \param thisone the fff_glm_KF structure to be deleted */ extern void fff_glm_KF_delete( fff_glm_KF* thisone ); /*! 
\brief Reset function (without destruction) for the fff_glm_KF structure \param thisone the fff_glm_KF structure to be reset */ extern void fff_glm_KF_reset( fff_glm_KF* thisone ); /*! \brief Performs a standard Kalman iteration from a fff_glm_KF structure \param thisone the fff_glm_KF structure to be iterated \param y current signal sample \param x current regressor values */ extern void fff_glm_KF_iterate( fff_glm_KF* thisone, double y, const fff_vector* x ); /*! \brief Constructor for the fff_glm_RKF structure \param dim model dimension (number of linear regressors) */ extern fff_glm_RKF* fff_glm_RKF_new( size_t dim ); /*! \brief Destructor for the fff_glm_RKF structure \param thisone the fff_glm_KF structure to be deleted */ extern void fff_glm_RKF_delete( fff_glm_RKF* thisone ); /*! \brief Reset function (without destruction) for the fff_glm_RKF structure \param thisone the fff_glm_KF structure to be reset */ extern void fff_glm_RKF_reset( fff_glm_RKF* thisone ); /*! \brief Performs a refined Kalman iteration from a fff_glm_RKF structure \param thisone the fff_glm_KF structure to be iterated \param nloop number of refinement iterations \param y current signal sample \param x current regressor values \param yy previous signal sample \param xx previous regressor values */ extern void fff_glm_RKF_iterate( fff_glm_RKF* thisone, unsigned int nloop, double y, const fff_vector* x, double yy, const fff_vector* xx ); /*! \brief Perform an ordinary least square regression using the standard Kalman filter and return the degrees of freedom \param thisone the fff_glm_KF structure to be filled in \param y input data \param X design matrix (column-wise stored covariates) */ extern void fff_glm_KF_fit( fff_glm_KF* thisone, const fff_vector* y, const fff_matrix* X ); /*! \brief Perform a linear regression using the refined Kalman filter, corresponding to a GLM with AR(1) errors. \param thisone the fff_glm_RKF structure to be filled in \param nloop number of refinement iterations \param y input data \param X design matrix (column-wise stored covariates) */ extern void fff_glm_RKF_fit( fff_glm_RKF* thisone, unsigned int nloop, const fff_vector* y, const fff_matrix* X ); #ifdef __cplusplus } #endif #endif nipy-0.3.0/libcstat/fff/fff_glm_twolevel.c000066400000000000000000000072501210344137400205270ustar00rootroot00000000000000#include "fff_glm_twolevel.h" #include "fff_base.h" #include "fff_blas.h" #include #include #include /* b, s2 are initialized using the values passed to the function. The function requires the projected pseudo-inverse matrix PpiX to be pre-calculated externally. It is defined by: PpiX = P * (X'X)^-1 X' where: P = Ip - A C' (C A C')^-1 C with A = (X'X)^-1 is the appropriate projector onto the constaint space, Cb=0. P is, in fact, orthogonal for the dot product defined by X'X. PpiX is p x n. The equality PpiX*X=P is not checked. 
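As a pure illustration (not used anywhere in this file): in the unconstrained case the projector P reduces to the identity and PpiX is just the pseudo-inverse (X'X)^-1 X'. A rough sketch of how a caller might build it with routines declared elsewhere in this library, assuming X is n x p with full column rank; the use of CblasTrans with fff_blas_dgemm and of fff_lapack_inv_sym (which overwrites its second argument) is assumed here, not checked:

   fff_matrix* XtX  = fff_matrix_new(p, p);
   fff_matrix* iXtX = fff_matrix_new(p, p);
   fff_matrix* PpiX = fff_matrix_new(p, n);
   fff_blas_dgemm(CblasTrans, CblasNoTrans, 1.0, X, X, 0.0, XtX);       XtX  = X'X
   fff_lapack_inv_sym(iXtX, XtX);                                       iXtX = (X'X)^-1
   fff_blas_dgemm(CblasNoTrans, CblasTrans, 1.0, iXtX, X, 0.0, PpiX);   PpiX = (X'X)^-1 X'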
*/ fff_glm_twolevel_EM* fff_glm_twolevel_EM_new(size_t n, size_t p) { fff_glm_twolevel_EM* thisone; thisone = (fff_glm_twolevel_EM*)malloc(sizeof(fff_glm_twolevel_EM)); if (thisone==NULL) return NULL; thisone->n = n; thisone->p = p; thisone->s2 = FFF_POSINF; thisone->b = fff_vector_new(p); thisone->z = fff_vector_new(n); thisone->vz = fff_vector_new(n); thisone->Qz = fff_vector_new(n); return thisone; } void fff_glm_twolevel_EM_delete(fff_glm_twolevel_EM* thisone) { if (thisone==NULL) return; fff_vector_delete(thisone->b); fff_vector_delete(thisone->z); fff_vector_delete(thisone->vz); fff_vector_delete(thisone->Qz); free(thisone); } void fff_glm_twolevel_EM_init(fff_glm_twolevel_EM* em) { fff_vector_set_all(em->b, 0.0); em->s2 = FFF_POSINF; return; } void fff_glm_twolevel_EM_run(fff_glm_twolevel_EM* em, const fff_vector* y, const fff_vector* vy, const fff_matrix* X, const fff_matrix* PpiX, unsigned int niter) { unsigned int iter = 0; size_t n=X->size1, i; double *yi, *zi, *vyi, *vzi; double w1, w2; double m = 0.0; while (iter < niter) { /*** E step ***/ /* Compute current prediction estimate: z = X*b */ fff_blas_dgemv(CblasNoTrans, 1.0, X, em->b, 0.0, em->z); /* Posterior mean and variance of each "true" effect: vz = 1/(1/vy + 1/s2) z = vz * (y/vy + X*b/s2) */ w2 = FFF_ENSURE_POSITIVE(em->s2); w2 = 1/w2; for(i=0, yi=y->data, zi=em->z->data, vyi=vy->data, vzi=em->vz->data; istride, zi+=em->z->stride, vyi+=vy->stride, vzi+=em->vz->stride) { w1 = FFF_ENSURE_POSITIVE(*vyi); w1 = 1/w1; *vzi = 1/(w1+w2); *zi = *vzi * (w1*(*yi) + w2*(*zi)); } /*** M step ***/ /* Update effect: b = PpiX * z */ fff_blas_dgemv(CblasNoTrans, 1.0, PpiX, em->z, 0.0, em->b); /* Update variance: s2 = (1/n) [ sum((z-Xb).^2) + sum(vz) ] */ fff_vector_memcpy(em->Qz, em->z); fff_blas_dgemv(CblasNoTrans, 1.0, X, em->b, -1.0, em->Qz); /* Qz= Xb-z = Proj_X(z) - z */ em->s2 = (fff_vector_ssd(em->Qz, &m, 1) + fff_vector_sum(em->vz)) / (long double)n; /*** Increment iteration number ***/ iter ++; } return; } /* Log-likelihood computation. ri = y - Xb -2 LL = n log(2pi) + \sum_i log (s^2 + si^2) + \sum_i ri^2/(s^2 + si^2) We omit the nlog(2pi) term as it is constant. */ double fff_glm_twolevel_log_likelihood(const fff_vector* y, const fff_vector* vy, const fff_matrix* X, const fff_vector* b, double s2, fff_vector* tmp) { double LL = 0.0, w; size_t n=X->size1, i; double *ri, *vyi; /* Compute residuals: tmp = y - X b */ fff_vector_memcpy(tmp, y); fff_blas_dgemv(CblasNoTrans, -1.0, X, b, 1.0, tmp); /* Incremental computation */ for(i=0, ri=tmp->data, vyi=vy->data; istride, vyi+=vy->stride) { w = *vyi + s2; w = FFF_ENSURE_POSITIVE(w); LL += log(w); LL += FFF_SQR(*ri)/w; } /* Finalize computation */ LL *= -0.5; return LL; } nipy-0.3.0/libcstat/fff/fff_glm_twolevel.h000066400000000000000000000037011210344137400205310ustar00rootroot00000000000000/*! \file fff_glm_twolevel.h \brief General linear model under observation errors (mixed effects) \author Alexis Roche \date 2008 Bla bla bla */ #ifndef FFF_GLM_TWOLEVEL #define FFF_GLM_TWOLEVEL #ifdef __cplusplus extern "C" { #endif #include "fff_vector.h" #include "fff_matrix.h" /*! \struct fff_glm_twolevel_EM \brief Structure for the mixed-effect general linear model This structure is intended for multiple regression under mixed effects using the EM algorithm. */ typedef struct{ size_t n; /*! Number of observations */ size_t p; /*! Number of regresssors */ fff_vector* b; /*! Effect estimate */ double s2; /*! Variance estimate */ fff_vector* z; /*! Expected true effects */ fff_vector* vz; /*! 
Expected variance of the true effects (diagonal matrix) */ fff_vector* Qz; /* Expected prediction error */ unsigned int niter; /* Number of iterations */ } fff_glm_twolevel_EM; extern fff_glm_twolevel_EM* fff_glm_twolevel_EM_new(size_t n, size_t p); extern void fff_glm_twolevel_EM_delete(fff_glm_twolevel_EM* thisone); extern void fff_glm_twolevel_EM_init(fff_glm_twolevel_EM* em); /* \a PpiX is defined by: \f$ PpiX = P (X'X)^{-1} X' \f$, where: \f$ P = I_p - A C (C' A C)^{-1} C' \f$ with \f$ A = (X'X)^-1 \f$ is the appropriate projector onto the constaint space, \f$ C'b=0 \f$. \a P is, in fact, orthogonal for the dot product defined by \a X'X. Please note that the equality \a PpiX*X=P should hold but is not checked. */ extern void fff_glm_twolevel_EM_run(fff_glm_twolevel_EM* em, const fff_vector* y, const fff_vector* vy, const fff_matrix* X, const fff_matrix* PpiX, unsigned int niter); extern double fff_glm_twolevel_log_likelihood( const fff_vector* y, const fff_vector* vy, const fff_matrix* X, const fff_vector* b, double s2, fff_vector* tmp ); #ifdef __cplusplus } #endif #endif nipy-0.3.0/libcstat/fff/fff_lapack.c000066400000000000000000000161041210344137400172600ustar00rootroot00000000000000#include "fff_base.h" #include "fff_lapack.h" #include #define FNAME FFF_FNAME /* dgetrf : LU decomp dpotrf: Cholesky decomp dgesdd: SVD decomp dgeqrf: QR decomp */ #define CHECK_SQUARE(A) \ if ( (A->size1) != (A->size2) ) \ FFF_ERROR("Not a square matrix", EDOM) #define LAPACK_UPLO(Uplo) ( (Uplo)==(CblasUpper) ? "U" : "L" ) extern int FNAME(dgetrf)(int* m, int* n, double* a, int* lda, int* ipiv, int* info); extern int FNAME(dpotrf)(char *uplo, int* n, double* a, int* lda, int* info); extern int FNAME(dgesdd)(char *jobz, int* m, int* n, double* a, int* lda, double* s, double* u, int* ldu, double* vt, int* ldvt, double* work, int* lwork, int* iwork, int* info); extern int FNAME(dgeqrf)(int* m, int* n, double* a, int* lda, double* tau, double* work, int* lwork, int* info); /* Cholesky decomposition */ /*** Aux needs be square with the same size as A ***/ int fff_lapack_dpotrf( CBLAS_UPLO_t Uplo, fff_matrix* A, fff_matrix* Aux ) { char* uplo = LAPACK_UPLO(Uplo); int info; int n = (int)A->size1; /* Assumed squared */ int lda = (int)Aux->tda; CHECK_SQUARE(A); fff_matrix_transpose( Aux, A ); FNAME(dpotrf)(uplo, &n, Aux->data, &lda, &info); fff_matrix_transpose( A, Aux ); return info; } /* LU decomposition */ /*** Aux needs be m x n with m=A->size2 and n=A->size1 ***/ /*** ipiv needs be 1d contiguous in int with size min(m,n) ***/ int fff_lapack_dgetrf( fff_matrix* A, fff_array* ipiv, fff_matrix* Aux ) { int info; int m = (int)A->size1; int n = (int)A->size2; int lda = (int)Aux->tda; if ( (ipiv->ndims != 1) || (ipiv->datatype != FFF_INT) || (ipiv->dimX != FFF_MIN(m,n)) || (ipiv->offsetX != 1) ) FFF_ERROR("Invalid array: Ipiv", EDOM); fff_matrix_transpose( Aux, A ); FNAME(dgetrf)(&m, &n, Aux->data, &lda, (int*)ipiv->data, &info); fff_matrix_transpose( A, Aux ); return info; } /* QR decomposition */ /*** Aux needs be m x n with m=A->size2 and n=A->size1 ***/ /*** tau needs be contiguous with size min(m,n) ***/ /*** work needs be contiguous with size >= n ***/ int fff_lapack_dgeqrf( fff_matrix* A, fff_vector* tau, fff_vector* work, fff_matrix* Aux ) { int info; int m = (int)A->size1; int n = (int)A->size2; int lda = (int)Aux->tda; int lwork = (int)work->size; if ( (tau->size != FFF_MIN(m,n)) || (tau->stride != 1) ) FFF_ERROR("Invalid vector: tau", EDOM); /* Resets lwork to -1 if the input work vector is too 
small (in which case work only needs be of size >= 1) */ if ( lwork < n ) lwork = -1; else if ( work->stride != 1 ) FFF_ERROR("Invalid vector: work", EDOM); fff_matrix_transpose( Aux, A ); FNAME(dgeqrf)(&m, &n, Aux->data, &lda, tau->data, work->data, &lwork, &info); fff_matrix_transpose( A, Aux ); return info; } /* SVD decomposition */ /*** Aux needs be square with size max(m=A->size2, n=A->size1) ***/ /*** s needs be contiguous with size min(m,n) ***/ /*** U needs be m x m ***/ /*** Vt needs be n x n ***/ /*** work needs be contiguous, with size lwork such that dmin = min(M,N) dmax = max(M,N) lwork >= 3*dmin**2 + max(dmax,4*dmin**2+4*dmin) ***/ /*** iwork needs be 1d contiguous in int with size 8*min(m,n) ***/ int fff_lapack_dgesdd( fff_matrix* A, fff_vector* s, fff_matrix* U, fff_matrix* Vt, fff_vector* work, fff_array* iwork, fff_matrix* Aux ) { int info; int m = (int)A->size1; int n = (int)A->size2; int dmin = FFF_MIN(m,n); int dmax = FFF_MAX(m,n); int a1 = FFF_SQR(dmin); int a2 = 4*(a1+dmin); int lwork_min = 3*a1 + FFF_MAX(dmax, a2); int lda = (int)Aux->tda; int ldu = (int)U->tda; int ldvt = (int)Vt->tda; int lwork = work->size; fff_matrix Aux_mm, Aux_nn; CHECK_SQUARE(U); CHECK_SQUARE(Vt); CHECK_SQUARE(Aux); if ( U->size1 != m) FFF_ERROR("Invalid size for U", EDOM); if ( Vt->size1 != n) FFF_ERROR("Invalid size for Vt", EDOM); if ( Aux->size1 != dmax) FFF_ERROR("Invalid size for Aux", EDOM); if ( (s->size != dmin) || (s->stride != 1) ) FFF_ERROR("Invalid vector: s", EDOM); if ( (iwork->ndims != 1) || (iwork->datatype != FFF_INT) || (iwork->dimX != 8*dmin) || (iwork->offsetX != 1 ) ) FFF_ERROR("Invalid array: Iwork", EDOM); /* Resets lwork to -1 if the input work vector is too small (in which case work only needs be of size >= 1) */ if ( lwork < lwork_min ) lwork = -1; else if ( work->stride != 1 ) FFF_ERROR("Invalid vector: work", EDOM); /* Perform the svd on A**t: A**t = U* S* Vt* => A = V* S* Ut* => U = V*, V = U*, s = s* so we just need to swap m <-> n, and U <-> Vt in the input line */ FNAME(dgesdd)("A", &n, &m, A->data, &lda, s->data, Vt->data, &ldvt, U->data, &ldu, work->data, &lwork, (int*)iwork->data, &info); /* At this point, both U and V are in Fortran order, so we need to transpose */ Aux_mm = fff_matrix_block( Aux, 0, m, 0, m ); fff_matrix_transpose(&Aux_mm, U); fff_matrix_memcpy(U, &Aux_mm); Aux_nn = fff_matrix_block( Aux, 0, n, 0, n ); fff_matrix_transpose(&Aux_nn, Vt); fff_matrix_memcpy(Vt, &Aux_nn); return info; } /* simply do the pre-allocations to simplify the use of SVD*/ static int _fff_lapack_SVD(fff_matrix* A, fff_vector* s, fff_matrix* U, fff_matrix* Vt) { int n = A->size1; int m = A->size2; int dmin = FFF_MIN(m,n); int dmax = FFF_MAX(m,n); int lwork = 2* (3*dmin*dmin + FFF_MAX(dmax,4*dmin*dmin + 4*dmin)); int liwork = 8* dmin; fff_vector *work = fff_vector_new(lwork); fff_array *iwork = fff_array_new1d(FFF_INT,liwork); fff_matrix *Aux = fff_matrix_new(dmax,dmax); int info = fff_lapack_dgesdd(A,s,U,Vt,work,iwork,Aux ); fff_vector_delete(work); fff_array_delete(iwork); fff_matrix_delete(Aux); return info; } /* Compute the determinant of a symmetric matrix */ /* caveat : A is modified */ extern double fff_lapack_det_sym(fff_matrix* A) { int i,n = A->size1; fff_matrix* U = fff_matrix_new(n,n); fff_matrix* Vt = fff_matrix_new(n,n); fff_vector* s = fff_vector_new(n); double det; _fff_lapack_SVD(A,s,U,Vt); for (i=0, det=1; isize1; fff_matrix* U = fff_matrix_new(n,n); fff_matrix* Vt = fff_matrix_new(n,n); fff_vector* s = fff_vector_new(n); fff_matrix* iS = 
fff_matrix_new(n,n); fff_matrix* aux = fff_matrix_new(n,n); int info = _fff_lapack_SVD(A,s,U,Vt); fff_matrix_set_all(iS,0); for (i=0 ; isize1=A->size2 and \a A->size2=B->size1, then do \a fff_matrix_transpose(B,A). Then, we may call LAPACK with \a B->data as array input, \a m=B->size2=A->size1 rows, \a n=B->size1=A->size2 columns and \a lda=B->tda leading dimension. The same procedure works to perform convertion in the other way: the "C sizes" are just the swapped "Fortan sizes". */ #ifndef FFF_LAPACK #define FFF_LAPACK #ifdef __cplusplus extern "C" { #endif #include "fff_blas.h" #include "fff_array.h" /*! \brief Cholesky decomposition \param Uplo flag \param A N-by-N matrix \param Aux N-by-N auxiliary matrix The factorization has the form \f$ A = U^t U \f$, if \c Uplo==CblasUpper, or \f$ A = L L^t\f$, if \c Uplo==CblasLower, where \a U is an upper triangular matrix and \a L is lower triangular. On entry, if \c Uplo==CblasUpper, the leading N-by-N upper triangular part of \c A contains the upper triangular part of the matrix \a A, and the strictly lower triangular part of A is not referenced. If \c Uplo==CblasLower, the leading N-by-N lower triangular part of \a A contains the lower triangular part of the matrix \a A, and the strictly upper triangular part of \a A is not referenced. On exit, \a A contains the factor \a U or \a L from the Cholesky factorization. */ extern int fff_lapack_dpotrf( CBLAS_UPLO_t Uplo, fff_matrix* A, fff_matrix* Aux ); /*! \brief LU decomposition \param A M-by-N matrix \param ipiv pivot indices with size min(M,N) \param Aux N-by-M auxiliary matrix On entry, \a A is the M-by-N matrix to be factored. On exit, it contains the factors \a L and \a U from the factorization \a A=PLU, where \a P is a permutation matrix, \a L is a lower triangular matrix with unit diagonal elements (not stored) and \a U is upper triangular. \a ipiv needs be one-dimensional contiguous in \c FFF_INT with size min(M,N) */ extern int fff_lapack_dgetrf( fff_matrix* A, fff_array* ipiv, fff_matrix* Aux ); /*! \brief QR decomposition \param A M-by-N matrix \param tau scalar factors of the elementary reflectors with size min(M,N) \param work auxiliary vector with size >= N \param Aux N-by-M auxiliary matrix Computes matrices \a Q and \a R such that \a A=QR where \a Q is orthonormal and \a R is triangular. On entry, \a A is an M-by-N matrix. On exit, the elements on and above the diagonal of \a A contain the min(M,N)-by-N upper trapezoidal matrix \a R (\a R is upper triangular if \f$ M \geq N\f$); the elements below the diagonal, with the array \a tau, represent the orthogonal matrix \a Q as a product of min(M,N) reflectors. Each \a H(i) has the form \f$ H(i) = I - \tau v v^t \f$ where \f$ \tau \f$ is a real scalar, and \a v is a real vector with v(1:i-1) = 0 and \a v(i)=1; \a v(i+1:M) is stored on exit in \a A(i+1:M,i), and \f$ \tau \f$ in \a tau(i). If \a work is of size 1, then the routine only computes the optimal size for \a work and stores the result in \c work->data[0]. For the actual computation, \a work should be contiguous with size at least N. \a tau needs be contiguous as well. TODO: actually compute \a R using \c dorgqr. */ extern int fff_lapack_dgeqrf( fff_matrix* A, fff_vector* tau, fff_vector* work, fff_matrix* Aux ); /*! 
\brief Singular Value Decomposition \param A M-by-N matrix to decompose (to be overwritten) \param s singular values in descending order, with size min(M,N) \param U M-by-M matrix \param Vt N-by-N matrix \param work auxiliary vector \param iwork auxiliary array of integers \param Aux auxiliary square matrix with size max(M,N) Computes a diagonal matrix \a S and orthonormal matrices \a U and \a Vt such that \f$ A = U S V^t \f$. If \a work is of size 1, then the routine only computes the optimal size for \a work and stores the result in \c work->data[0]. For the actual computation, \a work should be contiguous with size at least: \f$ L_{work} \geq 3 d_{\min}^2 + \max(d_{\max}, 4 (d_{\min}^2 + d_{\min})) \f$ where \f$ d_{\min}=\min(M,N) \f$ and \f$ d_{\max}=\max(M,N) \f$. For good performance, \f$ L_{work} \f$ should generally be larger. \a iwork needs be one-dimensional contiguous in \c FFF_INT with size 8*min(M,N) */ extern int fff_lapack_dgesdd( fff_matrix* A, fff_vector* s, fff_matrix* U, fff_matrix* Vt, fff_vector* work, fff_array* iwork, fff_matrix* Aux ); /* \brief Computation of the determinant of symmetric matrices \param A M-by-M matrix (to be overwritten) The determinant is returned as output of the function. The procedure uses the SVD hence it is valid only for symmetric matrices. It is not meant to be optimal at the moment. Caveat : no check is performed -- untested version */ extern double fff_lapack_det_sym(fff_matrix* A); /* \brief Computation of the inverse of symmetric matrices \param iA The resulting output matrix \param A M-by-M matrix to be inverted (to be overwritten) The inverse is returned in \a iA. The procedure uses the SVD hence it is valid only for symmetric matrices. It is not meant to be optimal at the moment.
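A minimal usage sketch, for illustration only (assumes \a A already holds an n-by-n symmetric matrix and may be overwritten):

   fff_matrix* iA = fff_matrix_new(n, n);
   fff_lapack_inv_sym(iA, A);
   ... use iA ...
   fff_matrix_delete(iA);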
Caveat : no check is performed -- untested version */ extern int fff_lapack_inv_sym(fff_matrix* iA, fff_matrix *A); #ifdef __cplusplus } #endif #endif nipy-0.3.0/libcstat/fff/fff_matrix.c000066400000000000000000000164621210344137400173400ustar00rootroot00000000000000#include "fff_base.h" #include "fff_matrix.h" #include #include #include fff_matrix* fff_matrix_new(size_t size1, size_t size2) { fff_matrix* thisone; thisone = (fff_matrix*)calloc(1, sizeof(fff_matrix)); if (thisone == NULL) { FFF_ERROR("Allocation failed", ENOMEM); return NULL; } thisone->data = (double*)calloc(size1*size2, sizeof(double)); if (thisone->data == NULL) FFF_ERROR("Allocation failed", ENOMEM); thisone->size1 = size1; thisone->size2 = size2; thisone->tda = size2; thisone->owner = 1; return thisone; } void fff_matrix_delete(fff_matrix* thisone) { if (thisone->owner) if (thisone->data != NULL) free(thisone->data); free(thisone); return; } /* View */ fff_matrix fff_matrix_view(const double* data, size_t size1, size_t size2, size_t tda) { fff_matrix A; A.size1 = size1; A.size2 = size2; A.tda = tda; A.owner = 0; A.data = (double*)data; return A; } /* Get element */ double fff_matrix_get (const fff_matrix * A, size_t i, size_t j) { return(A->data[i*A->tda + j]); } /* Set element */ void fff_matrix_set (fff_matrix * A, size_t i, size_t j, double a) { A->data[i*A->tda + j] = a; return; } /* Set all elements */ void fff_matrix_set_all (fff_matrix * A, double a) { size_t i, j, rA; double *bA; for(i=0, rA=0; isize1; i++, rA+=A->tda) { bA = A->data + rA; for(j=0; jsize2; j++, bA++) *bA = a; } return; } /* Set all diagonal elements to a, others to zero */ void fff_matrix_set_scalar (fff_matrix * A, double a) { size_t i, j, rA; double *bA; for(i=0, rA=0; isize1; i++, rA+=A->tda) { bA = A->data + rA; for(j=0; jsize2; j++, bA++) { if (j == i) *bA = a; else *bA = 0.0; } } return; } /* Global scaling */ void fff_matrix_scale (fff_matrix * A, double a) { size_t i, j, rA; double *bA; for(i=0, rA=0; isize1; i++, rA+=A->tda) { bA = A->data + rA; for(j=0; jsize2; j++, bA++) *bA *= a; } return; } /* Add constant */ void fff_matrix_add_constant (fff_matrix * A, double a) { size_t i, j, rA; double *bA; for(i=0, rA=0; isize1; i++, rA+=A->tda) { bA = A->data + rA; for(j=0; jsize2; j++, bA++) *bA += a; } return; } /* Row view */ fff_vector fff_matrix_row(const fff_matrix* A, size_t i) { fff_vector x; x.size = A->size2; x.stride = 1; x.owner = 0; x.data = A->data + i*A->tda; return x; } /* Column view */ fff_vector fff_matrix_col(const fff_matrix* A, size_t j) { fff_vector x; x.size = A->size1; x.stride = A->tda; x.owner = 0; x.data = A->data + j; return x; } /* Diagonal view */ fff_vector fff_matrix_diag(const fff_matrix* A) { fff_vector x; x.size = FFF_MIN(A->size1, A->size2); x.stride = A->tda + 1; x.owner = 0; x.data = A->data; return x; } /* Block view */ fff_matrix fff_matrix_block(const fff_matrix* A, size_t imin, size_t nrows, size_t jmin, size_t ncols) { fff_matrix Asub; Asub.size1 = nrows; Asub.size2 = ncols; Asub.tda = A->tda; Asub.owner = 0; Asub.data = A->data + jmin + imin*A->tda; return Asub; } /* Row copy */ void fff_matrix_get_row (fff_vector * x, const fff_matrix * A, size_t i) { fff_vector xc = fff_matrix_row(A, i); fff_vector_memcpy(x, &xc); return; } /* Column copy */ void fff_matrix_get_col (fff_vector * x, const fff_matrix * A, size_t j) { fff_vector xc = fff_matrix_col(A, j); fff_vector_memcpy(x, &xc); return; } /* Diag copy */ void fff_matrix_get_diag (fff_vector * x, const fff_matrix * A) { fff_vector xc = 
fff_matrix_diag(A); fff_vector_memcpy(x, &xc); return; } /* Set row */ void fff_matrix_set_row (fff_matrix * A, size_t i, const fff_vector * x) { fff_vector xc = fff_matrix_row(A, i); fff_vector_memcpy(&xc, x); return; } /* Set column */ void fff_matrix_set_col (fff_matrix * A, size_t j, const fff_vector * x) { fff_vector xc = fff_matrix_col(A, j); fff_vector_memcpy(&xc, x); return; } /* Set diag */ void fff_matrix_set_diag (fff_matrix * A, const fff_vector * x) { fff_vector xc = fff_matrix_diag(A); fff_vector_memcpy(&xc, x); return; } /** Methods involving two matrices **/ #define CHECK_SIZE(A,B) \ if ((A->size1) != (B->size1) || (A->size2 != B->size2)) \ FFF_ERROR("Matrices have different sizes", EDOM) #define CHECK_TRANSPOSED_SIZE(A,B) \ if ((A->size1) != (B->size2) || (A->size2 != B->size1)) \ FFF_ERROR("Incompatible matrix sizes", EDOM) /* Copy B in A */ void fff_matrix_memcpy (fff_matrix * A, const fff_matrix * B) { CHECK_SIZE(A, B); /* If both matrices are contiguous in memory, use memcpy, otherwise perform a loop */ if ((A->tda == A->size2) && (B->tda == B->size2)) memcpy((void*)A->data, (void*)B->data, A->size1*A->size2*sizeof(double)); else { size_t i, j, rA, rB; double *bA, *bB; for(i=0, rA=0, rB=0; isize1; i++, rA+=A->tda, rB+=B->tda) { bA = A->data + rA; bB = B->data + rB; for(j=0; jsize2; j++, bA++, bB++) *bA = *bB; } } return; } /* Transpose a matrix: A = B**t. A needs be preallocated This is equivalent to turning the matrix in Fortran convention (column-major order) if initially in C convention (row-major order), and the other way round. */ void fff_matrix_transpose(fff_matrix* A, const fff_matrix* B) { size_t i, j, rA, rB; double *bA, *bB; CHECK_TRANSPOSED_SIZE(A, B); for(i=0, rA=0, rB=0; isize1; i++, rA+=A->tda) { bA = A->data + rA; bB = B->data + i; for(j=0; jsize2; j++, bA++, bB+=B->tda) *bA = *bB; } return; } /* Add two matrices */ void fff_matrix_add (fff_matrix * A, const fff_matrix * B) { size_t i, j, rA, rB; double *bA, *bB; CHECK_SIZE(A, B); for(i=0, rA=0, rB=0; isize1; i++, rA+=A->tda, rB+=B->tda) { bA = A->data + rA; bB = B->data + rB; for(j=0; jsize2; j++, bA++, bB++) *bA += *bB; } return; } /* Compute: A = A - B */ void fff_matrix_sub (fff_matrix * A, const fff_matrix * B) { size_t i, j, rA, rB; double *bA, *bB; CHECK_SIZE(A, B); for(i=0, rA=0, rB=0; isize1; i++, rA+=A->tda, rB+=B->tda) { bA = A->data + rA; bB = B->data + rB; for(j=0; jsize2; j++, bA++, bB++) *bA -= *bB; } return; } /* Element-wise multiplication */ void fff_matrix_mul_elements (fff_matrix * A, const fff_matrix * B) { size_t i, j, rA, rB; double *bA, *bB; CHECK_SIZE(A, B); for(i=0, rA=0, rB=0; isize1; i++, rA+=A->tda, rB+=B->tda) { bA = A->data + rA; bB = B->data + rB; for(j=0; jsize2; j++, bA++, bB++) *bA *= *bB; } return; } /* Element-wise division */ void fff_matrix_div_elements (fff_matrix * A, const fff_matrix * B) { size_t i, j, rA, rB; double *bA, *bB; CHECK_SIZE(A, B); for(i=0, rA=0, rB=0; isize1; i++, rA+=A->tda, rB+=B->tda) { bA = A->data + rA; bB = B->data + rB; for(j=0; jsize2; j++, bA++, bB++) *bA /= *bB; } return; } long double fff_matrix_sum(const fff_matrix* A) { long double sum = 0.0; fff_vector a; double *buf; size_t i; for(i=0, buf=A->data; isize1; i++, buf+=A->tda) { a = fff_vector_view(buf, A->size2, 1); sum += fff_vector_sum(&a); } return sum; } nipy-0.3.0/libcstat/fff/fff_matrix.h000066400000000000000000000056441210344137400173450ustar00rootroot00000000000000/*! 
\file fff_matrix.h \brief fff matrix object \author Alexis Roche \date 2003-2008 */ #ifndef FFF_MATRIX #define FFF_MATRIX #ifdef __cplusplus extern "C" { #endif #include "fff_vector.h" #include /*! \struct fff_matrix \brief The fff matrix structure */ typedef struct { size_t size1; size_t size2; size_t tda; double* data; int owner; } fff_matrix; /*! \brief fff matrix constructor \param size1 number of rows \param size2 number of columns */ extern fff_matrix* fff_matrix_new( size_t size1, size_t size2 ); /*! \brief fff matrix destructor \param thisone instance to delete */ extern void fff_matrix_delete( fff_matrix* thisone ); extern double fff_matrix_get (const fff_matrix * A, size_t i, size_t j); extern void fff_matrix_set (fff_matrix * A, size_t i, size_t j, double a); extern void fff_matrix_set_all (fff_matrix * A, double a); /*! \brief Set all diagonal elements to \a a, others to zero */ extern void fff_matrix_set_scalar (fff_matrix * A, double a); extern void fff_matrix_scale (fff_matrix * A, double a); extern void fff_matrix_add_constant (fff_matrix * A, double a); /** NOT TESTED! **/ extern long double fff_matrix_sum(const fff_matrix* A); /*** Views ***/ extern fff_matrix fff_matrix_view(const double* data, size_t size1, size_t size2, size_t tda); extern fff_vector fff_matrix_row(const fff_matrix* A, size_t i); extern fff_vector fff_matrix_col(const fff_matrix* A, size_t j); extern fff_vector fff_matrix_diag(const fff_matrix* A); extern fff_matrix fff_matrix_block(const fff_matrix* A, size_t imin, size_t nrows, size_t jmin, size_t ncols ); extern void fff_matrix_get_row (fff_vector * x, const fff_matrix * A, size_t i); extern void fff_matrix_get_col (fff_vector * x, const fff_matrix * A, size_t j) ; extern void fff_matrix_get_diag (fff_vector * x, const fff_matrix * A); extern void fff_matrix_set_row (fff_matrix * A, size_t i, const fff_vector * x); extern void fff_matrix_set_col (fff_matrix * A, size_t j, const fff_vector * x); extern void fff_matrix_set_diag (fff_matrix * A, const fff_vector * x); extern void fff_matrix_memcpy (fff_matrix * A, const fff_matrix * B); /*! \brief transpose a matrix \param B input matrix \param A transposed matrix on exit The matrix \c A needs be pre-allocated consistently with \c B, so that \c A->size1==B->size2 and \c A->size2==B->size1. 
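For illustration only, a minimal call sequence (all names below are declared in this header; error handling omitted):

   fff_matrix* B = fff_matrix_new(2, 3);
   fff_matrix* A = fff_matrix_new(3, 2);
   fff_matrix_set(B, 0, 2, 5.0);
   fff_matrix_transpose(A, B);
   (now fff_matrix_get(A, 2, 0) == 5.0)
   fff_matrix_delete(A);
   fff_matrix_delete(B);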
*/ extern void fff_matrix_transpose( fff_matrix* A, const fff_matrix* B ); extern void fff_matrix_add (fff_matrix * A, const fff_matrix * B); extern void fff_matrix_sub (fff_matrix * A, const fff_matrix * B); extern void fff_matrix_mul_elements (fff_matrix * A, const fff_matrix * B); extern void fff_matrix_div_elements (fff_matrix * A, const fff_matrix * B); #ifdef __cplusplus } #endif #endif nipy-0.3.0/libcstat/fff/fff_onesample_stat.c000066400000000000000000001053351210344137400210500ustar00rootroot00000000000000#include "fff_onesample_stat.h" #include "fff_base.h" #include "fff_blas.h" #include #include #include #include #define EL_LDA_TOL 1e-5 #define EL_LDA_ITERMAX 100 #define MIN_RELATIVE_VAR_FFX 1e-4 /* Dummy structure for sorting */ typedef struct{ double x; size_t i; } fff_indexed_data; /* Static structure for empirical MFX stats */ typedef struct{ fff_vector* w; /* weights */ fff_vector* z; /* centers */ fff_matrix* Q; fff_vector* tvar; /* low thresholded variances */ fff_vector* tmp1; fff_vector* tmp2; fff_indexed_data* idx; unsigned int* niter; } fff_onesample_mfx; /* Declaration of static functions */ /** Pure RFX analysis **/ static double _fff_onesample_mean(void* params, const fff_vector* x, double base); static double _fff_onesample_median(void* params, const fff_vector* x, double base); static double _fff_onesample_student(void* params, const fff_vector* x, double base); static double _fff_onesample_laplace(void* params, const fff_vector* x, double base); static double _fff_onesample_tukey(void* params, const fff_vector* x, double base); static double _fff_onesample_sign_stat(void* params, const fff_vector* x, double base); static double _fff_onesample_wilcoxon(void* params, const fff_vector* x, double base); static double _fff_onesample_elr(void* params, const fff_vector* x, double base); static double _fff_onesample_grubb(void* params, const fff_vector* x, double base); static void _fff_absolute_residuals(fff_vector* r, const fff_vector* x, double base); static double _fff_el_solve_lda(fff_vector* c, const fff_vector* w); /** Normal MFX analysis **/ static double _fff_onesample_LR_gmfx(void* params, const fff_vector* x, const fff_vector* var, double base); static double _fff_onesample_mean_gmfx(void* params, const fff_vector* x, const fff_vector* var, double base); static void _fff_onesample_gmfx_EM(double* m, double* v, const fff_vector* x, const fff_vector* var, unsigned int niter, int constraint); static double _fff_onesample_gmfx_nll(const fff_vector* x, const fff_vector* var, double m, double v); /** Empirical MFX analysis **/ static fff_onesample_mfx* _fff_onesample_mfx_new(unsigned int n, unsigned int* niter, int flagIdx); static void _fff_onesample_mfx_delete(fff_onesample_mfx* thisone); static double _fff_onesample_mean_mfx(void* params, const fff_vector* x, const fff_vector* var, double base); static double _fff_onesample_median_mfx(void* params, const fff_vector* x, const fff_vector* var, double base); static double _fff_onesample_sign_stat_mfx(void* params, const fff_vector* x, const fff_vector* var, double base); static double _fff_onesample_wilcoxon_mfx(void* params, const fff_vector* x, const fff_vector* var, double base); static double _fff_onesample_LR_mfx(void* params, const fff_vector* x, const fff_vector* var, double base); static void _fff_onesample_mfx_EM(fff_onesample_mfx* Params, const fff_vector* x, const fff_vector* var, int constraint); static void _fff_onesample_mfx_EM_init(fff_onesample_mfx* Params, const fff_vector* x, int flag); static 
double _fff_onesample_mfx_nll(fff_onesample_mfx* Params, const fff_vector* x); /** Low level for qsort **/ static int _fff_abs_comp(const void * x, const void * y); static int _fff_indexed_data_comp(const void * x, const void * y); static void _fff_sort_z(fff_indexed_data* idx, fff_vector* tmp1, fff_vector* tmp2, const fff_vector* z, const fff_vector* w); fff_onesample_stat* fff_onesample_stat_new(unsigned int n, fff_onesample_stat_flag flag, double base) { fff_onesample_stat* thisone = (fff_onesample_stat*)malloc(sizeof(fff_onesample_stat)); if (thisone == NULL) return NULL; /* Fields */ thisone->flag = flag; thisone->base = base; thisone->params = NULL; /* Switch (possibly overwrite the 'par' field)*/ switch (flag) { case FFF_ONESAMPLE_EMPIRICAL_MEAN: thisone->compute_stat = &_fff_onesample_mean; break; case FFF_ONESAMPLE_EMPIRICAL_MEDIAN: thisone->params = (void*) fff_vector_new(n); thisone->compute_stat = &_fff_onesample_median; break; case FFF_ONESAMPLE_STUDENT: thisone->compute_stat = &_fff_onesample_student; break; case FFF_ONESAMPLE_LAPLACE: thisone->params = (void*) fff_vector_new(n); thisone->compute_stat = &_fff_onesample_laplace; break; case FFF_ONESAMPLE_TUKEY: thisone->params = (void*) fff_vector_new(n); thisone->compute_stat = &_fff_onesample_tukey; break; case FFF_ONESAMPLE_SIGN_STAT: thisone->compute_stat = &_fff_onesample_sign_stat; break; case FFF_ONESAMPLE_WILCOXON: thisone->params = (void*) fff_vector_new(n); thisone->compute_stat = &_fff_onesample_wilcoxon; break; case FFF_ONESAMPLE_ELR: thisone->params = (void*) fff_vector_new(n); thisone->compute_stat = &_fff_onesample_elr; break; case FFF_ONESAMPLE_GRUBB: thisone->compute_stat = &_fff_onesample_grubb; break; default: FFF_ERROR("Unrecognized statistic", EINVAL); break; } /* End switch */ return thisone; } void fff_onesample_stat_delete(fff_onesample_stat* thisone) { if (thisone == NULL) return; /* Switch */ switch (thisone->flag) { default: break; case FFF_ONESAMPLE_LAPLACE: case FFF_ONESAMPLE_TUKEY: case FFF_ONESAMPLE_WILCOXON: case FFF_ONESAMPLE_ELR: fff_vector_delete((fff_vector*)thisone->params); break; } /* End switch */ free(thisone); } double fff_onesample_stat_eval(fff_onesample_stat* thisone, const fff_vector* x) { double t; t = thisone->compute_stat(thisone->params, x, thisone->base); return t; } /********************************** SAMPLE MEAN *******************************/ static double _fff_onesample_mean(void* params, const fff_vector* x, double base) { double aux; if (params != NULL) return FFF_NAN; aux = fff_vector_sum(x)/(long double)x->size - base; return aux; } /********************************** SAMPLE MEDIAN ****************************/ static double _fff_onesample_median(void* params, const fff_vector* x, double base) { double aux; fff_vector* tmp = (fff_vector*)params; fff_vector_memcpy(tmp, x); aux = fff_vector_median(tmp) - base; return aux; } /********************************** STUDENT STATISTIC ****************************/ static double _fff_onesample_student(void* params, const fff_vector* x, double base) { double m, std, aux; int sign; size_t n = x->size; if (params != NULL) return FFF_NAN; std = sqrt(fff_vector_ssd(x, &m, 0)/(long double)x->size); aux = sqrt((double)(n-1))*(m-base); sign = (int) FFF_SIGN(aux); if (sign == 0) /* Sample mean equals baseline, return zero */ return 0.0; aux = aux / std; if (sign > 0) if (aux < FFF_POSINF) return aux; else return FFF_POSINF; else if (aux > FFF_NEGINF) return aux; else return FFF_NEGINF; } /********************************** LAPLACE 
STATISTIC ****************************/ static double _fff_onesample_laplace(void* params, const fff_vector* x, double base) { double s, s0, aux; int sign; size_t n = x->size; fff_vector* tmp = (fff_vector*)params; fff_vector_memcpy(tmp, x); aux = fff_vector_median(tmp); s = fff_vector_sad(x, aux)/(long double)x->size; s0 = fff_vector_sad(x, base)/(long double)x->size; s0 = FFF_MAX(s0, s); /* Ensure s0 >= s */ aux -= base; sign = FFF_SIGN(aux); if (sign == 0) /* Sample median equals baseline, return zero */ return 0.0; aux = sqrt(2*n*log(s0/s)); if (aux < FFF_POSINF) return (sign * aux); else if (sign > 0) return FFF_POSINF; else return FFF_NEGINF; } /********************************** TUKEY STATISTIC ******************************/ static void _fff_absolute_residuals(fff_vector* r, const fff_vector* x, double base) { size_t i, n = x->size; double aux; double *bufX = x->data, *bufR = r->data; for(i=0; i<n; i++, bufX+=x->stride, bufR+=r->stride) { aux = *bufX - base; *bufR = FFF_ABS(aux); } return; } static double _fff_onesample_tukey(void* params, const fff_vector* x, double base) { double s, s0, aux; int sign; size_t n = x->size; fff_vector* tmp = (fff_vector*)params; fff_vector_memcpy(tmp, x); aux = fff_vector_median(tmp); /* Take the median of absolute residuals |x_i-median| */ _fff_absolute_residuals(tmp, x, aux); s = fff_vector_median(tmp); /* Take the median of absolute residuals |x_i-base| */ _fff_absolute_residuals(tmp, x, base); s0 = fff_vector_median(tmp); s0 = FFF_MAX(s0, s); /* Ensure s0 >= s */ aux -= base; /* aux == median(x) - base */ sign = FFF_SIGN(aux); if (sign == 0) /* Sample median equals baseline, return zero */ return 0.0; aux = sqrt(2*n*log(s0/s)); if (aux < FFF_POSINF) return (sign * aux); else if (sign > 0) return FFF_POSINF; else return FFF_NEGINF; } /********************************** SIGN STATISTIC ****************************/ static double _fff_onesample_sign_stat(void* params, const fff_vector* x, double base) { size_t i, n = x->size; double rp = 0.0, rm = 0.0, aux; double* buf = x->data; if (params != NULL) return FFF_NAN; for (i=0; i<n; i++, buf+=x->stride) { aux = *buf - base; if (aux > 0.0) rp ++; else if (aux < 0.0) rm ++; else { /* in case the sample value is exactly zero */ rp += .5; rm += .5; } } return (rp-rm)/(double)n; } /********************* WILCOXON (SIGNED RANK) STATISTIC *********************/ static int _fff_abs_comp(const void * x, const void * y) { int ans = 1; double xx = *((double*)x); double yy = *((double*)y); xx = FFF_ABS(xx); yy = FFF_ABS(yy); if (yy > xx) { ans = -1; return ans; } if (yy == xx) ans = 0; return ans; } static double _fff_onesample_wilcoxon(void* params, const fff_vector* x, double base) { size_t i, n = x->size; double t = 0.0; double* buf; fff_vector* tmp = (fff_vector*)params; /* Compute the residuals wrt baseline */ fff_vector_memcpy(tmp, x); fff_vector_add_constant(tmp, -base); /* Sort the residuals in terms of their ABSOLUTE values NOTE: tmp needs be contiguous -- and it is, if allocated using fff_onesample_stat_new */ qsort (tmp->data, n, sizeof(double), &_fff_abs_comp); /* Compute the sum of ranks multiplied by corresponding elements' signs */ buf = tmp->data; for(i=1; i<=n; i++, buf++) /* Again buf++ works IFF tmp is contiguous */ t += (double)i * FFF_SIGN(*buf); /* Normalization to have the stat range in [-1,1] */ /* t /= (double)((n*(n+1))/2);*/ /* Normalization */ t /= ((double)(n*n)); return t; } /************************ EMPIRICAL LIKELIHOOD STATISTIC **********************/ static double _fff_onesample_elr(void* params, const fff_vector*
x, double base) { size_t i, n = x->size; double lda, aux, nwi; int sign; fff_vector* tmp = (fff_vector*)params; double* buf; /* Compute: tmp = x-base */ fff_vector_memcpy(tmp, x); fff_vector_add_constant(tmp, -base); aux = fff_vector_sum(tmp)/(long double)tmp->size; sign = FFF_SIGN(aux); /* If sample mean equals baseline, return zero */ if (sign == 0) return 0.0; /* Find the Lagrange multiplier corresponding to the constrained empirical likelihood maximization problem */ lda = _fff_el_solve_lda(tmp, NULL); if (lda >= FFF_POSINF) { if (sign > 0) return FFF_POSINF; else return FFF_NEGINF; } /* Compute the log empirical likelihood ratio, log lda = \sum_i \log(nw_i) */ buf = x->data; aux = 0.0; for(i=0; istride) { nwi = 1/(1 + lda*(*buf-base)); nwi = FFF_MAX(nwi, 0.0); aux += log(nwi); } /* We output \sqrt{-2\log\lambda} multiplied by the effect's sign */ aux = -2.0 * aux; aux = sqrt(FFF_MAX(aux, 0.0)); if (aux < FFF_POSINF) return (sign*aux); else if (sign > 0) return FFF_POSINF; else return FFF_NEGINF; } /* Solve the equation: sum(wi*ci/(lda*ci+1)) = 0 where the unknown is lda and ci is the constraint, e.g. ci = xi-m. In standard RFX context, wi is uniformly constant, while in MFX context it may vary from one datapoint to another. By transforming ci into -1./ci, the equation becomes: sum(wi/ (lda-ci)) = 0 */ static double _fff_el_solve_lda(fff_vector* c, const fff_vector* w) { size_t i, n = c->size; unsigned int iter = 0; double aux, g, dg, lda, lda0 = FFF_NEGINF, lda1 = FFF_POSINF, ldac, err; double *buf, *bufW; /* Transform the constraint vector: c = -1./c and find the max and min elements of c such that c(i)<0 and c(i)>0, respectively */ buf = c->data; for (i=0; istride) { aux = *buf; aux = -1.0/aux; *buf = aux; /* Vector values are overwritten */ if ((aux<0.0) && (aux>lda0)) lda0 = aux; else if ((aux>0.0) && (auxFFF_NEGINF) || !(lda1 EL_LDA_TOL) { iter ++; if (iter > EL_LDA_ITERMAX) break; /* Compute: g(lda) = \sum_i w_i / (lda - c_i) dg(lda) = -\sum_i w_i / (lda - c_i)^2 */ g = 0.0; dg = 0.0; buf = c->data; if (w == NULL) { for (i=0; istride) { aux = 1/(lda-*buf); g += aux; dg += FFF_SQR(aux); } } else { bufW = w->data; for (i=0; istride, bufW+=w->stride) { aux = 1/(lda-*buf); g += *bufW * aux; dg += *bufW * FFF_SQR(aux); } } /* Update brakets */ if (g > 0.0) lda0 = lda; else if (g < 0.0) lda1 = lda; /* Accept the Newton update if it falls within the brakets */ ldac = lda + (g/dg); if ((lda0 < lda) && (lda < lda1)) lda = ldac; else lda = .5*(lda0+lda1); /* Error update */ err = lda1 - lda0; } return lda; } /******************************* GRUBB STATISTIC *******************************/ static double _fff_onesample_grubb(void* params, const fff_vector* x, double base) { size_t i; double t=0.0, mean, std, inv_std, ti; double *buf = x->data; if (params != NULL) return FFF_NAN; base = 0; /* Compute the mean and std deviation */ std = sqrt(fff_vector_ssd(x, &mean, 0)/(long double)x->size); inv_std = 1/std; if (t >= FFF_POSINF) return 0.0; /* Compute the max of Studentized datapoints */ for (i=0; isize; i++, buf+=x->stride) { ti = (*buf-mean) * inv_std; ti = FFF_ABS(ti); if (ti > t) t = ti; } return t; } /*****************************************************************************************/ /* Mixed-effect statistic structure */ /*****************************************************************************************/ fff_onesample_stat_mfx* fff_onesample_stat_mfx_new(unsigned int n, fff_onesample_stat_flag flag, double base) { fff_onesample_stat_mfx* thisone = 
(fff_onesample_stat_mfx*)malloc(sizeof(fff_onesample_stat_mfx)); if (thisone == NULL) return NULL; /* Fields */ thisone->flag = flag; thisone->base = base; thisone->empirical = 1; thisone->niter = 0; thisone->constraint = 0; thisone->params = NULL; /* Switch (possibly overwrite the 'par' field)*/ switch (flag) { case FFF_ONESAMPLE_STUDENT_MFX: thisone->empirical = 0; thisone->compute_stat = &_fff_onesample_LR_gmfx; thisone->params = (void*)(&(thisone->niter)); break; case FFF_ONESAMPLE_GAUSSIAN_MEAN_MFX: thisone->empirical = 0; thisone->compute_stat = &_fff_onesample_mean_gmfx; thisone->params = (void*)(&(thisone->niter)); break; case FFF_ONESAMPLE_EMPIRICAL_MEAN_MFX: thisone->compute_stat = &_fff_onesample_mean_mfx; thisone->params = (void*)_fff_onesample_mfx_new(n, &(thisone->niter), 0); break; case FFF_ONESAMPLE_EMPIRICAL_MEDIAN_MFX: thisone->compute_stat = &_fff_onesample_median_mfx; thisone->params = (void*)_fff_onesample_mfx_new(n, &(thisone->niter), 1); break; case FFF_ONESAMPLE_SIGN_STAT_MFX: thisone->compute_stat = &_fff_onesample_sign_stat_mfx; thisone->params = (void*)_fff_onesample_mfx_new(n, &(thisone->niter), 0); break; case FFF_ONESAMPLE_WILCOXON_MFX: thisone->compute_stat = &_fff_onesample_wilcoxon_mfx; thisone->params = (void*)_fff_onesample_mfx_new(n, &(thisone->niter), 1); break; case FFF_ONESAMPLE_ELR_MFX: thisone->compute_stat = &_fff_onesample_LR_mfx; thisone->params = (void*)_fff_onesample_mfx_new(n, &(thisone->niter), 0); break; default: FFF_ERROR("Unrecognized statistic", EINVAL); break; } /* End switch */ return thisone; } void fff_onesample_stat_mfx_delete(fff_onesample_stat_mfx* thisone) { if (thisone == NULL) return; if (thisone->empirical) _fff_onesample_mfx_delete((fff_onesample_mfx*)thisone->params); free(thisone); return; } static fff_onesample_mfx* _fff_onesample_mfx_new(unsigned int n, unsigned int* niter, int flagIdx) { fff_onesample_mfx* thisone; thisone = (fff_onesample_mfx*)malloc(sizeof(fff_onesample_mfx)); thisone->w = fff_vector_new(n); thisone->z = fff_vector_new(n); thisone->Q = fff_matrix_new(n, n); thisone->tvar = fff_vector_new(n); thisone->tmp1 = fff_vector_new(n); thisone->tmp2 = fff_vector_new(n); thisone->idx = NULL; thisone->niter = niter; if (flagIdx == 1) thisone->idx = (fff_indexed_data*)calloc(n, sizeof(fff_indexed_data)); return thisone; } static void _fff_onesample_mfx_delete(fff_onesample_mfx* thisone) { fff_vector_delete(thisone->w); fff_vector_delete(thisone->z); fff_matrix_delete(thisone->Q); fff_vector_delete(thisone->tvar); fff_vector_delete(thisone->tmp1); fff_vector_delete(thisone->tmp2); if (thisone->idx != NULL) free(thisone->idx); free(thisone); return; } double fff_onesample_stat_mfx_eval(fff_onesample_stat_mfx* thisone, const fff_vector* x, const fff_vector* vx) { double t; t = thisone->compute_stat(thisone->params, x, vx, thisone->base); return t; } /*****************************************************************************************/ /* Standard MFX (normal population model) */ /*****************************************************************************************/ static double _fff_onesample_mean_gmfx(void* params, const fff_vector* x, const fff_vector* var, double base) { unsigned int niter = *((unsigned int*)params); double mu = 0.0, v = 0.0; _fff_onesample_gmfx_EM(&mu, &v, x, var, niter, 0); return (mu-base); } static double _fff_onesample_LR_gmfx(void* params, const fff_vector* x, const fff_vector* var, double base) { int sign; double t, mu = 0.0, v = 0.0, v0 = 0.0, nll, nll0; unsigned int niter = 
*((unsigned int*)params); /* Estimate maximum likelihood group mean and group variance */ _fff_onesample_gmfx_EM(&mu, &v, x, var, niter, 0); /* MFX mean estimate equals baseline, return zero */ t = mu - base; sign = FFF_SIGN(t); if (sign == 0) return 0.0; /* Estimate maximum likelihood group variance under zero group mean assumption */ _fff_onesample_gmfx_EM(&base, &v0, x, var, niter, 1); /* Negated log-likelihoods */ nll = _fff_onesample_gmfx_nll(x, var, mu, v); nll0 = _fff_onesample_gmfx_nll(x, var, base, v0); /* If both nll and nll0 are globally minimized, we always have: nll0 >= nll; however, EM convergence issues may cause nll>nll0, in which case we return 0.0 */ t = -2.0 * (nll - nll0); t = FFF_MAX(t, 0.0); if (t < FFF_POSINF) return sign * sqrt(t); /* To get perhaps a more "Student-like" statistic: t = sign * sqrt((n-1)*(exp(t/nn) - 1.0)); */ else if (sign > 0) return FFF_POSINF; else return FFF_NEGINF; } /* EM algorithm to estimate the mean and variance parameters. */ static void _fff_onesample_gmfx_EM(double* m, double* v, const fff_vector* x, const fff_vector* var, unsigned int niter, int constraint) { size_t n = x->size, i; unsigned int iter = 0; double nn=(double)n, m1, v1, m0, v0, mi_ap, vi_ap, aux; double *bufx, *bufvar; /* Initialization: pure RFX solution (FFX variances set to zero) */ if ( ! constraint ) /** m1 = gsl_stats_mean(x->data, x->stride, n); v1 = gsl_stats_variance_with_fixed_mean(x->data, x->stride, n, m1); **/ v1 = fff_vector_ssd(x, &m1, 0)/(long double)x->size; else { m1 = 0.0; v1 = fff_vector_ssd(x, &m1, 1)/(long double)x->size; } /* Refine result using an EM loop */ while (iter < niter) { /* Previous estimates */ m0 = m1; v0 = v1; /* Loop: aggregated E- and M-steps */ bufx = x->data; bufvar = var->data; if ( ! constraint ) m1 = 0.0; v1 = 0.0; for (i=0; istride, bufvar+=var->stride) { /* Posterior mean and variance of the true effect value */ aux = 1.0 / (*bufvar + v0); mi_ap = v0 * (*bufx) + (*bufvar) * m0; mi_ap *= aux; vi_ap = aux * (*bufvar) * v0; /* Update */ if ( ! constraint ) m1 += mi_ap; v1 += vi_ap + FFF_SQR(mi_ap); } /* Normalization */ if ( ! 
constraint ) m1 /= nn; v1 /= nn; v1 -= FFF_SQR(m1); /* Iteration number */ iter ++; } /* Save estimates */ *m = m1; *v = v1; return; } /* Negated log-likelihood for the MFX model */ static double _fff_onesample_gmfx_nll(const fff_vector* x, const fff_vector* var, double m, double v) { size_t n = x->size, i; double s, aux, ll = 0.0; double *bufx = x->data, *bufvar = var->data; for (i=0; istride, bufvar+=var->stride) { s = *bufvar + v; aux = *bufx - m; ll += log(s); ll += FFF_SQR(aux) / s; } ll *= .5; return ll; } /*****************************************************************************************/ /* Empirical MFX */ /*****************************************************************************************/ static double _fff_onesample_mean_mfx(void* params, const fff_vector* x, const fff_vector* var, double base) { double m; fff_onesample_mfx* Params = (fff_onesample_mfx*)params; long double aux, sumw; /* Estimate the population distribution using EM */ _fff_onesample_mfx_EM(Params, x, var, 0); /* Compute the mean of the estimated distribution */ /** m = gsl_stats_wmean (Params->w->data, Params->w->stride, Params->z->data, Params->z->stride, Params->z->size) - base; **/ aux = fff_vector_wsum(Params->z, Params->w, &sumw); m = aux/sumw - base; return m; } static double _fff_onesample_median_mfx(void* params, const fff_vector* x, const fff_vector* var, double base) { double m; fff_onesample_mfx* Params = (fff_onesample_mfx*)params; /* Estimate the population distribution using EM */ _fff_onesample_mfx_EM(Params, x, var, 0); /* Compute the median of the estimated distribution */ /** m = fff_weighted_median(Params->idx, Params->w, Params->z) - base; **/ _fff_sort_z(Params->idx, Params->tmp1, Params->tmp2, Params->z, Params->w); m = fff_vector_wmedian_from_sorted_data (Params->tmp1, Params->tmp2); return m; } static double _fff_onesample_sign_stat_mfx(void* params, const fff_vector* x, const fff_vector* var, double base) { fff_onesample_mfx* Params = (fff_onesample_mfx*)params; double *buf, *bufw; double aux, rp = 0.0, rm = 0.0; size_t i, n = x->size; /* Estimate the population distribution using EM */ _fff_onesample_mfx_EM(Params, x, var, 0); /* Compute the sign statistic of the fitted distribution */ buf = Params->z->data; bufw = Params->w->data; for (i=0; iz->stride, bufw+=Params->w->stride) { aux = *buf - base; if (aux > 0.0) rp += *bufw; else if (aux < 0.0) rm += *bufw; else { /* in case the center is exactly zero */ aux = .5 * *bufw; rp += aux; rm += aux; } } return (rp-rm); } static double _fff_onesample_wilcoxon_mfx(void* params, const fff_vector* x, const fff_vector* var, double base) { double t = 0.0; fff_onesample_mfx* Params = (fff_onesample_mfx*)params; size_t i, n = x->size; double *buf1, *buf2; double zi, wi, Ri; /* Estimate the population distribution using EM */ _fff_onesample_mfx_EM(Params, x, var, 0); /* Compute the vector of absolute residuals wrt the baseline */ buf1 = Params->tmp1->data; buf2 = Params->z->data; for(i=0; itmp1->stride, buf2+=Params->z->stride) { zi = *buf2 - base; *buf1 = FFF_ABS(zi); } /* Sort the absolute residuals and get the permutation of indices */ /** gsl_sort_vector_index(Params->idx, Params->tmp1); **/ _fff_sort_z(Params->idx, Params->tmp1, Params->tmp2, Params->z, Params->w); /* Compute the sum of ranks */ /** Ri = 0.0; for(i=0; iidx->data[i]; zi = Params->z->data[j*Params->z->stride]; wi = Params->w->data[j*Params->w->stride]; Ri += wi; if (zi > base) t += wi * Ri; else if (zi < base) t -= wi * Ri; }**/ Ri = 0.0; for(i=1, 
buf1=Params->tmp1->data, buf2=Params->tmp2->data; i<=n; i++) { zi = *buf1; wi = *buf2; Ri += wi; if (zi > base) t += wi * Ri; else if (zi < base) t -= wi * Ri; } return t; } static double _fff_onesample_LR_mfx(void* params, const fff_vector* x, const fff_vector* var, double base) { double t, mu, nll, nll0; int sign; fff_onesample_mfx* Params = (fff_onesample_mfx*)params; long double aux, sumw; /* Estimate the population distribution using EM */ _fff_onesample_mfx_EM(Params, x, var, 0); nll = _fff_onesample_mfx_nll(Params, x); /* Estimate the population mean */ /** mu = gsl_stats_wmean (Params->w->data, Params->w->stride, Params->z->data, Params->z->stride, Params->z->size); **/ aux = fff_vector_wsum(Params->z, Params->w, &sumw); mu = aux/sumw - base; /* MFX mean estimate equals baseline, return zero */ t = mu - base; sign = FFF_SIGN(t); if (sign == 0) return 0.0; /* Estimate the population distribution under zero mean constraint */ _fff_onesample_mfx_EM(Params, x, var, 1); nll0 = _fff_onesample_mfx_nll(Params, x); /* Compute the one-sided likelihood ratio statistic */ t = -2.0 * (nll - nll0); t = FFF_MAX(t, 0.0); if (t < FFF_POSINF) return sign * sqrt(t); else if (sign > 0) return FFF_POSINF; else return FFF_NEGINF; } /* EM algorithm to estimate the population distribution as a linear combination of Diracs centered at the datapoints. */ static void _fff_onesample_mfx_EM(fff_onesample_mfx* Params, const fff_vector* x, const fff_vector* var, int constraint) { fff_vector *w = Params->w, *z = Params->z; fff_vector *tvar = Params->tvar, *tmp1 = Params->tmp1, *tmp2 = Params->tmp2; fff_matrix *Q = Params->Q; unsigned int niter = *(Params->niter); size_t n = x->size, i, k; unsigned int iter = 0; double m, lda, aux; double *buf, *buf2; fff_vector Qk; /* Pre-process: low threshold the variances to avoid numerical instabilities */ aux = fff_vector_ssd(x, &m, 0)/(long double)(FFF_MAX(n,2)-1); aux *= MIN_RELATIVE_VAR_FFX; fff_vector_memcpy(tvar, var); buf = tvar->data; for(i=0; i<n; i++, buf+=tvar->stride) { if (*buf < aux) *buf = aux; } /* Initial estimate: uniform weights, class centers at datapoints */ fff_vector_set_all(w, 1/(double)n); fff_vector_memcpy(z, x); /* Refine result using an EM loop */ while (iter < niter) { /* Compute the posterior probability matrix Qik : probability that subject i belongs to class k */ _fff_onesample_mfx_EM_init(Params, x, 0); /* Update weights: wk = sum_i Qik / n */ buf = w->data; for(k=0; k<n; k++, buf+=w->stride) { Qk = fff_matrix_col(Q, k); *buf = fff_vector_sum(&Qk)/(long double)n; } /* Reweight if restricted maximum likelihood: use the same Newton algorithm as in standard empirical likelihood */ if ( constraint ) { fff_vector_memcpy(tmp1, z); lda = _fff_el_solve_lda(tmp1, w); if(lda < FFF_POSINF) { buf = z->data; buf2 = w->data; for(i=0; i<n; i++, buf+=z->stride, buf2+=w->stride) *buf2 *= 1/(1 + lda*(*buf)); } } /* Update centers: zk = sum_i Rik xi with Rik = Qik/si^2 */ buf = z->data; buf2 = tmp2->data; for(k=0; k<n; k++, buf+=z->stride, buf2+=tmp2->stride) { /* Store the unconstrained ML update in z */ Qk = fff_matrix_col(Q, k); fff_vector_memcpy(tmp1, &Qk); fff_vector_div(tmp1, tvar); /* Store Rik in tmp1 */ aux = (double)fff_vector_sum(tmp1); /* aux == Rk = sum_i Rik */ aux = FFF_ENSURE_POSITIVE(aux); *buf = fff_blas_ddot(tmp1, x); /* z[k] = sum_i Rik xi */ *buf /= aux; /* Store Rk = sum_i Rik in tmp2 */ *buf2 = aux; } /* Shift to zero if restricted maximum likelihood */ if ( constraint ) { fff_vector_memcpy(tmp1, w); fff_vector_div(tmp1, tmp2); /* tmp1_k == wk/Rk */ aux = fff_blas_ddot(w, tmp1); /* aux == sum_k [ wk^2 / Rk ] 
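(note: with lda = sum_k wk zk / aux, the daxpy update below gives sum_k wk (zk - lda*wk/Rk) = 0, so this shift is the multiplier step that restores the zero weighted-mean constraint) 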
*/ lda = fff_blas_ddot(w, z); /* lda = sum_k wk zk */ aux = FFF_ENSURE_POSITIVE(aux); lda /= aux; /* lda = sum_k wk zk / sum_k [ wk^2 / Rk ] */ fff_blas_daxpy(-lda, tmp1, z); /* zk = zk - lda * wk/Rk */ } /* Iteration number */ iter ++; } return; } /* If flag == 0, assemble the posterior probability matrix Q Qik : posterior probability that subject i belongs to class k. Qik = ci wk g(xi-zk,si) ci determined by sum_k Qik = 1 Otherwise, assemble the likelihood matrix G Gik = g(xi-zk,si) */ static void _fff_onesample_mfx_EM_init(fff_onesample_mfx* Params, const fff_vector* x, int flag) { fff_matrix* Q = Params->Q; const fff_vector *w = Params->w, *z = Params->z, *var = Params->tvar; size_t i, k, n = x->size, ii; double xi, si; double *bufQ, *bufxi, *bufvi, *bufwk, *bufzk; double sum = 0.0, aux; /* Loop over subjects */ bufxi = x->data; bufvi = var->data; for(i=0; istride, bufvi+=var->stride) { xi = *bufxi; si = sqrt(*bufvi); ii = i*Q->tda; /* First element of the i-th line of Q */ /* Loop over classes: compute Qik = wk * g(xi-zk,si), for each k */ bufwk = w->data; bufzk = z->data; bufQ = Q->data + ii; sum = 0.0; for(k=0; kstride, bufzk+=z->stride) { /** aux = gsl_ran_gaussian_pdf(xi-*bufzk, si); **/ aux = (xi-*bufzk)/si; aux = exp(-.5 * FFF_SQR(aux)); /* No need to divide by sqrt(2pi)si as it is constant */ *bufQ = FFF_ENSURE_POSITIVE(aux); /* Refrain posterior probabilities from vanishing */ if (flag == 0) { *bufQ *= *bufwk; sum += *bufQ; } } /* Loop over classes: normalize Qik */ if (flag == 0) { bufQ = Q->data + ii; for(k=0; kw; fff_vector *Gw = Params->tmp1; fff_matrix* G = Params->Q; size_t i, n = w->size; double aux, nll = 0.0; double *buf; /* Compute G */ _fff_onesample_mfx_EM_init(Params, x, 1); /* Compute Gw */ fff_blas_dgemv(CblasNoTrans, 1.0, G, w, 0.0, Gw); /* Compute the sum of logarithms of Gw */ buf = Gw->data; for (i=0; istride) { aux = *buf; aux = FFF_ENSURE_POSITIVE(aux); nll -= log(aux); } return nll; } extern void fff_onesample_stat_mfx_pdf_fit(fff_vector* w, fff_vector* z, fff_onesample_stat_mfx* thisone, const fff_vector* x, const fff_vector* var) { fff_onesample_mfx* Params = (fff_onesample_mfx*)thisone->params; unsigned int constraint = thisone->constraint; /* Check appropriate flag */ if (!thisone->empirical) return; /* Estimate the population distribution using EM */ _fff_onesample_mfx_EM(Params, x, var, constraint); /* Copy result in output vectors */ fff_vector_memcpy(w, Params->w); fff_vector_memcpy(z, Params->z); return; } extern void fff_onesample_stat_gmfx_pdf_fit(double *mu, double *v, fff_onesample_stat_mfx* thisone, const fff_vector* x, const fff_vector* var) { unsigned int niter = thisone->niter; unsigned int constraint = thisone->constraint; /* Estimate the population gaussian parameters using EM */ _fff_onesample_gmfx_EM(mu, v, x, var, niter, constraint); } /** Comparison function for qsort **/ static int _fff_indexed_data_comp(const void * x, const void * y) { int ans = 1; fff_indexed_data xx = *((fff_indexed_data*)x); fff_indexed_data yy = *((fff_indexed_data*)y); if (yy.x > xx.x) { ans = -1; return ans; } if (yy.x == xx.x) ans = 0; return ans; } /** Sort z array and re-order w accordingly **/ static void _fff_sort_z(fff_indexed_data* idx, fff_vector* tmp1, fff_vector* tmp2, const fff_vector* z, const fff_vector* w) { size_t n = z->size, i, is; double *buf1, *buf2; fff_indexed_data* buf_idx; /* Copy z into the auxiliary qsort structure idx */ for(i=0, buf1=z->data, buf_idx=idx; istride) { (*buf_idx).x = *buf1; (*buf_idx).i = i; } /* Effectively sort */ 
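/* (each fff_indexed_data element keeps its original position in the .i field, so after the qsort below the weights in w can be gathered back in the same order as the sorted centers) */ 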
qsort (idx, n, sizeof(fff_indexed_data), &_fff_indexed_data_comp); /* Copy the sorted z into tmp1, and the accordingly sorted w into tmp2 */ for(i=0, buf1=tmp1->data, buf2=tmp2->data, buf_idx=idx; istride, buf2+=tmp2->stride) { is = (*buf_idx).i; *buf1 = (*buf_idx).x; *buf2 = w->data[ is*w->stride ]; } return; } /* Sign permutations */ void fff_onesample_permute_signs(fff_vector* xx, const fff_vector* x, double magic) { size_t n = x->size, i; double *bufx=x->data, *bufxx=xx->data; double m = magic, aux; for (i=0; istride, bufxx+=xx->stride) { aux = m/2; m = FFF_FLOOR(aux); aux -= m; if (aux > 0) *bufxx = -*bufx; else *bufxx = *bufx; } return; } nipy-0.3.0/libcstat/fff/fff_onesample_stat.h000066400000000000000000000152221210344137400210500ustar00rootroot00000000000000/*! \file fff_onesample_stat.h \brief One-sample test statistics \author Alexis Roche \date 2004-2008 */ #ifndef FFF_ONESAMPLE_STAT #define FFF_ONESAMPLE_STAT #ifdef __cplusplus extern "C" { #endif #include "fff_vector.h" /*! \typedef fff_onesample_stat_flag \brief Decision statistic for one-sample tests \c FFF_ONESAMPLE_MEAN is the sample mean. In permutation testing context, it is equivalent to \c FFF_ONESAMPLE_STUDENT (see below). \c FFF_ONESAMPLE_MEDIAN is the sample median. \c FFF_ONESAMPLE_STUDENT is the one-sample Student statistic defined as \f$ t = \frac{\hat{m}-m}{\hat{\sigma}/\sqrt{n}} \f$, where \a n is the sample size, \f$\hat{m}\f$ is the sample mean, and \f$\hat{\sigma}\f$ is the sample standard deviation normalized by \a n-1. \c FFF_ONESAMPLE_LAPLACE is a robust version of Student's \a t based on the Laplace likelihood ratio. The statistic is defined by: \f$ t = {\rm sign}(med-m) \sqrt{2n\log(\frac{s_0}{s})}\f$, where \a n is the sample size, \f$med\f$ is the sample median, and \f$s, s_0\f$ are the mean absolute deviations wrt the median and the baseline, respectively. Owing to Wilks's theorem, \a t is an approximate Z-statistic under the null assumption \a m=base. \c FFF_ONESAMPLE_TUKEY is similar to Laplace's \a t except the scale estimates are computed using the median of absolute deviations (MAD) rather than the average absolute deviation. This provides an even more robust statistic, which we term Tukey's \a t as Tukey appears to be the first author who proposed MAD as a scale estimator. \c FFF_ONESAMPLE_SIGN_STAT is the simple sign statistic, \f$ t = (n_+ - n_-)/n \f$ where \f$ n_+ \f$ (resp. \f$ n_- \f$) is the number of sample values greater than (resp. lower than) the baseline, and \a n is the total sample size. \c FFF_ONESAMPLE_SIGNED_RANK is Wilcoxon's signed rank statistic, \f$ t = \frac{2}{n(n+1)} \sum_i {\rm rank}(|x_i-m|) {\rm sign}(x_i-m) \f$, where rank values range from 1 to \a n, the sample size. Using this definition, \a t ranges from -1 to 1. \c FFF_ONESAMPLE_ELR implements the empirical likelihood ratio for a univariate mean (see Owen, 2001). The one-tailed statistic is defined as: \f$ t = {\rm sign}(\hat{\mu}-m) \sqrt{-2\log\lambda} \f$, where \a n is the sample size, \f$\hat{\mu}\f$ is the empirical mean, and \f$\lambda\f$ is the empirical likelihood ratio. The latter is given by \f$ \lambda = \prod_{i=1}^n nw_i\f$ where \f$ w_i \f$ are nonnegative weights assessing the "probability" of each datapoint under the null assumption that the population mean equals \a m. \c FFF_ONESAMPLE_GRUBB is the Grubb's statistic for normality testing. 
It is defined as \f$ t = \max_i \frac{|x_i-\hat{m}|}{\hat{\sigma}} \f$ where \f$\hat{m}\f$ is the sample mean, and \f$\hat{\sigma}\f$ is the sample standard deviation. */ typedef enum { FFF_ONESAMPLE_EMPIRICAL_MEAN = 0, FFF_ONESAMPLE_EMPIRICAL_MEDIAN = 1, FFF_ONESAMPLE_STUDENT = 2, FFF_ONESAMPLE_LAPLACE = 3, FFF_ONESAMPLE_TUKEY = 4, FFF_ONESAMPLE_SIGN_STAT = 5, FFF_ONESAMPLE_WILCOXON = 6, FFF_ONESAMPLE_ELR = 7, FFF_ONESAMPLE_GRUBB = 8, FFF_ONESAMPLE_EMPIRICAL_MEAN_MFX = 10, FFF_ONESAMPLE_EMPIRICAL_MEDIAN_MFX = 11, FFF_ONESAMPLE_STUDENT_MFX = 12, FFF_ONESAMPLE_SIGN_STAT_MFX = 15, FFF_ONESAMPLE_WILCOXON_MFX = 16, FFF_ONESAMPLE_ELR_MFX = 17, FFF_ONESAMPLE_GAUSSIAN_MEAN_MFX = 19 } fff_onesample_stat_flag; /*! \struct fff_onesample_stat \brief General structure for one-sample test statistics */ typedef struct{ fff_onesample_stat_flag flag; /*!< statistic's identifier */ double base; /*!< baseline for mean-value testing */ unsigned int constraint; /* non-zero for statistics computed from maximum likelihood under the null hypothesis */ void* params; /*!< other auxiliary parameters */ double (*compute_stat)(void*, const fff_vector*, double); /*!< actual statistic implementation */ } fff_onesample_stat; /*! \struct fff_onesample_stat_mfx \brief General structure for one-sample test statistics with mixed-effects Tests statistics corrected for mixed effects, i.e. eliminates the influence of heteroscedastic measurement errors. The classical Student statistic is generalized from the likelihood ratio of the model including heteroscedastic first-level errors. More comments to come. */ typedef struct{ fff_onesample_stat_flag flag; /*!< MFX statistic's identifier */ double base; /*!< baseline for mean-value testing */ int empirical; /*!< boolean, tells whether MFX statistic is nonparametric or not */ unsigned int niter; /* non-zero for statistics based on iterative algorithms */ unsigned int constraint; /* non-zero for statistics computed from maximum likelihood under the null hypothesis */ void* params; /*!< auxiliary parameters */ double (*compute_stat)(void*, const fff_vector*, const fff_vector*, double); /*!< actual statistic implementation */ } fff_onesample_stat_mfx; /*! \brief Constructor for the \c fff_onesample_stat structure \param n sample size \param flag statistic identifier \param base baseline value for mean-value testing */ extern fff_onesample_stat* fff_onesample_stat_new(unsigned int n, fff_onesample_stat_flag flag, double base); /*! \brief Destructor for the \c fff_onesample_stat structure \param thisone instance to be deleted */ extern void fff_onesample_stat_delete(fff_onesample_stat* thisone); /*! 
\brief Compute a one-sample test statistic \param thisone already created one-sample stat structure \param x input vector */ extern double fff_onesample_stat_eval(fff_onesample_stat* thisone, const fff_vector* x); /** MFX **/ extern fff_onesample_stat_mfx* fff_onesample_stat_mfx_new(unsigned int n, fff_onesample_stat_flag flag, double base); extern void fff_onesample_stat_mfx_delete(fff_onesample_stat_mfx* thisone); extern double fff_onesample_stat_mfx_eval(fff_onesample_stat_mfx* thisone, const fff_vector* x, const fff_vector* vx); extern void fff_onesample_stat_mfx_pdf_fit(fff_vector* w, fff_vector* z, fff_onesample_stat_mfx* thisone, const fff_vector* x, const fff_vector* vx); extern void fff_onesample_stat_gmfx_pdf_fit(double* mu, double* v, fff_onesample_stat_mfx* thisone, const fff_vector* x, const fff_vector* vx); /** Sign permutations **/ extern void fff_onesample_permute_signs(fff_vector* xx, const fff_vector* x, double magic); #ifdef __cplusplus } #endif #endif nipy-0.3.0/libcstat/fff/fff_routines.c000066400000000000000000000052601210344137400176760ustar00rootroot00000000000000#include "fff_routines.h" #include "fff_base.h" #include #include typedef struct{ double x; long i; } dummy_struct; static int _dummy_struct_geq(const void * x, const void * y) { int ans = -1; dummy_struct xx = *((dummy_struct*)x); dummy_struct yy = *((dummy_struct*)y); if ( xx.x > yy.x ) { ans = 1; return ans; } if ( xx.x == yy.x ) ans = 0; return ans; } extern void sort_ascending_and_get_permutation( double* x, long* idx, long n ) { long i; double *bufx; dummy_struct* xx = (dummy_struct*)calloc( n, sizeof(dummy_struct) ); dummy_struct* buf_xx; long* buf_idx; bufx = x; buf_idx = idx; buf_xx = xx; for ( i=0; idimX; long idx = 0; double val,max = (double) fff_array_get1d(farray,idx); for (i=0 ; imax){ max = val; idx = i; } } return idx; } extern long fff_array_argmin1d(const fff_array *farray) { /* returns the index of the max value on a supposedly 1D array quick and dirty implementation */ long i,n = farray->dimX; long idx = 0; double val,min = (double) fff_array_get1d(farray,idx); for (i=0 ; idimX; double val,min = (double) fff_array_get1d(farray,0); for (i=0 ; idimX; double val,max = (double) fff_array_get1d(farray,0); for (i=0 ; imax) max = val; } return max; } nipy-0.3.0/libcstat/fff/fff_routines.h000066400000000000000000000015051210344137400177010ustar00rootroot00000000000000/*! \file fff_routines.h \brief A few standard functions that are always necessary \author bertrand Thirion and Alexis Roche \date 2008 Things could also be put somewhere else. The implementation has often a quick-and-dirty flavour. */ #ifndef FFF_ROUTINES #define FFF_ROUTINES #ifdef __cplusplus extern "C" { #endif #include #include #include "fff_array.h" #include "fff_matrix.h" extern void sort_ascending_and_get_permutation( double* x, long* idx, long n ); extern void sort_ascending(double *x, int n); extern long fff_array_argmax1d(const fff_array *farray); extern long fff_array_argmin1d(const fff_array *farray); extern double fff_array_min1d(const fff_array *farray); extern double fff_array_max1d(const fff_array *farray); #ifdef __cplusplus } #endif #endif nipy-0.3.0/libcstat/fff/fff_specfun.c000066400000000000000000000031011210344137400174610ustar00rootroot00000000000000/* Special functions for FFF. * Author: Gael Varoquaux (implemented from canonical sources: * log gammma: algorithm as described in numerical recipes * psi : algorithm as described in Applied Statistics, * Volume 25, Number 3, 1976, pages 315-317. 
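 * Note: psi is the digamma function, d/dx log(Gamma(x)); the argument reduction mentioned below relies on the recurrence psi(x+1) = psi(x) + 1/x. 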
* * License: BSD */ #include "fff_specfun.h" #include double fff_gamln(double x) { /* Log Gamma. * * INPUT: x > 0 */ double coeff[] = { 76.18009172947146, -86.50532032941677, 24.01409824083091, -1.231739572450155, .1208650973866179e-2, -.5395239384953e-5 }; const double stp = 2.5066282746310005; double y = x; double sum = 1.000000000190015; double out ; int i; for(i=0; i<6; i++) { y += 1; sum += coeff[i]/y; } out = x + 5.5; out = (x+0.5) * log(out) - out; return out + log(stp*sum/x); } double fff_psi(double x) { /* psi: d gamln(x)/dx * * INPUT: x > 0 */ double c = 8.5; double d1 = -0.5772156649; double r; double s = 0.00001; double s3 = 0.08333333333; double s4 = 0.0083333333333; double s5 = 0.003968253968; double out; double y; /* XXX: What if x < 0 ? */ y = x; out = 0.0; /* Use approximation if argument <= s */ if (y<= s) { out = d1 - 1.0 / y; return out; } /* Reduce to psi(x + n) where (x + n) >= c */ while (y c */ r = 1.0 / y; out += log (y) - 0.5*r; r = r*r; out += -r*(s3 - r * ( s4 - r*s5)); return out; } nipy-0.3.0/libcstat/fff/fff_specfun.h000066400000000000000000000005421210344137400174740ustar00rootroot00000000000000/*! \file fff_specfun.h \brief special functions needed by fff's C routines. \author Alexis Roche, Gael Varoquaux \date 2008, 2009 \licence BSD */ #ifndef FFF_SPECFUN #define FFF_SPECFUN #ifdef __cplusplus extern "C" { #endif extern double fff_psi(double x); extern double fff_gamln(double x); #ifdef __cplusplus } #endif #endif nipy-0.3.0/libcstat/fff/fff_twosample_stat.c000066400000000000000000000233571210344137400211030ustar00rootroot00000000000000#include "fff_twosample_stat.h" #include "fff_onesample_stat.h" #include "fff_gen_stats.h" #include "fff_glm_twolevel.h" #include "fff_base.h" #include #include #include #include static double _fff_twosample_student(void* params, const fff_vector* x, unsigned int n1); static double _fff_twosample_wilcoxon(void* params, const fff_vector* x, unsigned int n1); static double _fff_twosample_student_mfx(void* params, const fff_vector* x, const fff_vector* vx, unsigned int n1); static void _fff_twosample_mfx_assembly(fff_matrix* X, fff_matrix* PX, fff_matrix* PPX, unsigned int n1, unsigned int n2); typedef struct{ fff_glm_twolevel_EM *em; unsigned int* niter; fff_vector* work; fff_matrix* X; fff_matrix* PX; fff_matrix* PPX; } fff_twosample_mfx; fff_twosample_stat* fff_twosample_stat_new(unsigned int n1, unsigned int n2, fff_twosample_stat_flag flag) { fff_twosample_stat* thisone = (fff_twosample_stat*)malloc(sizeof(fff_twosample_stat)); if (thisone == NULL) { FFF_ERROR("Cannot allocate memory", ENOMEM); return NULL; } thisone->n1 = n1; thisone->n2 = n2; thisone->flag = flag; thisone->params = NULL; switch (flag) { case FFF_TWOSAMPLE_STUDENT: thisone->compute_stat = &_fff_twosample_student; break; case FFF_TWOSAMPLE_WILCOXON: thisone->compute_stat = &_fff_twosample_wilcoxon; break; default: FFF_ERROR("Unrecognized statistic", EINVAL); break; } return thisone; } void fff_twosample_stat_delete(fff_twosample_stat* thisone) { if (thisone == NULL) return; free(thisone); return; } double fff_twosample_stat_eval(fff_twosample_stat* thisone, const fff_vector* x) { double t; t = thisone->compute_stat(thisone->params, x, thisone->n1); return t; } fff_twosample_stat_mfx* fff_twosample_stat_mfx_new(unsigned int n1, unsigned int n2, fff_twosample_stat_flag flag) { fff_twosample_stat_mfx* thisone = (fff_twosample_stat_mfx*)malloc(sizeof(fff_twosample_stat_mfx)); fff_twosample_mfx* aux; unsigned int n = n1+n2; if (thisone == NULL) { 
FFF_ERROR("Cannot allocate memory", ENOMEM); return NULL; } thisone->n1 = n1; thisone->n2 = n2; thisone->flag = flag; thisone->niter = 0; switch (flag) { case FFF_TWOSAMPLE_STUDENT_MFX: thisone->compute_stat = &_fff_twosample_student_mfx; aux = (fff_twosample_mfx*)malloc(sizeof(fff_twosample_mfx)); thisone->params = (void*)aux; aux->em = fff_glm_twolevel_EM_new(n, 2); aux->niter = &(thisone->niter); aux->work = fff_vector_new(n); aux->X = fff_matrix_new(n, 2); aux->PX = fff_matrix_new(2, n); aux->PPX = fff_matrix_new(2, n); _fff_twosample_mfx_assembly(aux->X, aux->PX, aux->PPX, n1, n2); break; default: FFF_ERROR("Unrecognized statistic", EINVAL); break; } return thisone; } void fff_twosample_stat_mfx_delete(fff_twosample_stat_mfx* thisone) { fff_twosample_mfx* aux; if (thisone == NULL) return; switch (thisone->flag) { case FFF_TWOSAMPLE_STUDENT_MFX: aux = (fff_twosample_mfx*) thisone->params; fff_vector_delete(aux->work); fff_matrix_delete(aux->X); fff_matrix_delete(aux->PX); fff_matrix_delete(aux->PPX); fff_glm_twolevel_EM_delete(aux->em); free(aux); break; default: FFF_ERROR("Unrecognized statistic", EINVAL); break; } free(thisone); return; } double fff_twosample_stat_mfx_eval(fff_twosample_stat_mfx* thisone, const fff_vector* x, const fff_vector* vx) { double t; t = thisone->compute_stat(thisone->params, x, vx, thisone->n1); return t; } /********************************************************************* Actual test statistic implementation **********************************************************************/ static double _fff_twosample_student(void* params, const fff_vector* x, unsigned int n1) { fff_vector x1, x2; unsigned int naux = x->size-n1; double t, m1, m2; long double v1, aux; /* Compute within-group means and variances */ x1 = fff_vector_view(x->data, n1, x->stride); x2 = fff_vector_view(x->data+n1, naux, x->stride); v1 = fff_vector_ssd(&x1, &m1, 0); aux = fff_vector_ssd(&x2, &m2, 0); /* Compute max( n1+n2-2, 1 ) */ naux += n1-2; if (naux<=0) naux = 1; /* Compute the inverse std estimate */ aux += v1; aux /= naux; aux = sqrt(aux); if (aux<=0.0) aux = FFF_POSINF; else aux = 1/aux; /* t value */ t = (m1-m2)*aux; return t; } /* Wilcoxon. */ static double _fff_twosample_wilcoxon(void* params, const fff_vector* x, unsigned int n1) { fff_vector x1, x2; unsigned int i, j, n2=x->size-n1; double w=0.0, aux; double *b1, *b2; x1 = fff_vector_view(x->data, n1, x->stride); x2 = fff_vector_view(x->data+n1, n2, x->stride); for(i=0, b1=x1.data; i *b2) aux += 1.0; else if (*b2 > *b1) aux -= 1.0; } aux /= (double)n2; w += aux; } return w; } /* Pre-compute matrices for two-sample mixed-effect linear analysis. X has two columns: c0 = [1 1 ... 1]' and c1 = [1 ... 1 | 0 ... 
0]' */ static void _fff_twosample_mfx_assembly(fff_matrix* X, fff_matrix* PX, fff_matrix* PPX, unsigned int n1, unsigned int n2) { unsigned int n = n1+n2; double g1=1/(double)n1, g2=1/(double)n2; fff_matrix B; /* X */ fff_matrix_set_all(X, 1.0); B = fff_matrix_block(X, n1, n2, 1, 1); fff_matrix_set_all(&B, 0.0); /* PX */ B = fff_matrix_block(PX, 0, 1, 0, n1); fff_matrix_set_all(&B, 0.0); B = fff_matrix_block(PX, 0, 1, n1, n2); fff_matrix_set_all(&B, g2); B = fff_matrix_block(PX, 1, 1, 0, n1); fff_matrix_set_all(&B, g1); B = fff_matrix_block(PX, 1, 1, n1, n2); fff_matrix_set_all(&B, -g2); /* PPX */ B = fff_matrix_block(PPX, 0, 1, 0, n); fff_matrix_set_all(&B, 1.0/(double)n); B = fff_matrix_block(PPX, 1, 1, 0, n); fff_matrix_set_all(&B, 0.0); return; } static double _fff_twosample_student_mfx(void* params, const fff_vector* x, const fff_vector* vx, unsigned int n1) { fff_twosample_mfx* Params = (fff_twosample_mfx*)params; double F, sign, ll, ll0; unsigned int niter = *(Params->niter); /* Constrained EM */ fff_glm_twolevel_EM_init(Params->em); fff_glm_twolevel_EM_run(Params->em, x, vx, Params->X, Params->PPX, niter); ll0 = fff_glm_twolevel_log_likelihood(x, vx, Params->X, Params->em->b, Params->em->s2, Params->work); /* Unconstrained EM initialized with constrained maximization results */ fff_glm_twolevel_EM_run(Params->em, x, vx, Params->X, Params->PX, niter); ll = fff_glm_twolevel_log_likelihood(x, vx, Params->X, Params->em->b, Params->em->s2, Params->work); /* Form the generalized F statistic */ F = 2.0*(ll-ll0); F = FFF_MAX(F, 0.0); /* Just to make sure */ sign = Params->em->b->data[1]; /* Contiguity ensured */ sign = FFF_SIGN(sign); return sign*sqrt(F); } /********************************************************************* Permutations **********************************************************************/ unsigned int fff_twosample_permutation(unsigned int* idx1, unsigned int* idx2, unsigned int n1, unsigned int n2, double* magic) { unsigned int n=FFF_MIN(n1, n2), i; double aux, magic1, magic2, cuml=0, cumr=1,c1=1, c2=1; /* Pre-computation mode */ if ( (idx1==NULL) || (idx2==NULL) ) *magic = FFF_POSINF; /* Find i such that Cn1,i*Cn2,i <= magic < Cn1,i*Cn2,i + Cn1,i+1*Cn2,i+1 */ for(i=0; i<=n; i++) { /* Downshift the magic number on exit */ if (*magic= cumr) { /* AR,27/2/09 modified without certainty from *magic > cumr */ *magic = cumr; return 0; } /* Compute magic numbers for within-group combinations. 
We use: magic = magic2*c1 + magic1 */ magic2 = floor(*magic/c1); magic1 = *magic - magic2*c1; /* Find the underlying combinations */ fff_combination(idx1, i, n1, magic1); fff_combination(idx2, i, n2, magic2); return i; } /* px assumed allocated n1 + n2 */ #define SWAP(a, b) \ aux = a; \ a = b; \ b = aux void fff_twosample_apply_permutation(fff_vector* px, fff_vector* pv, const fff_vector* x1, const fff_vector* v1, const fff_vector* x2, const fff_vector* v2, unsigned int i, const unsigned int* idx1, const unsigned int* idx2) { unsigned int j; size_t i1, i2, n1=x1->size, n2=x2->size; double aux; double *bpx1, *bpx2; fff_vector px1, px2, pv1, pv2; int flag_mfx = (pv!=NULL); /* Copy input vectors into single output vector */ px1 = fff_vector_view(px->data, n1, px->stride); fff_vector_memcpy(&px1, x1); px2 = fff_vector_view(px->data + n1, n2, px->stride); fff_vector_memcpy(&px2, x2); if (flag_mfx) { pv1 = fff_vector_view(pv->data, n1, pv->stride); fff_vector_memcpy(&pv1, v1); pv2 = fff_vector_view(pv->data + n1, n2, pv->stride); fff_vector_memcpy(&pv2, v2); } /* Exchange elements */ for(j=0; jstride; bpx2 = px2.data + i2*px->stride; SWAP(*bpx1, *bpx2); if (flag_mfx) { bpx1 = pv1.data + i1*pv->stride; bpx2 = pv2.data + i2*pv->stride; SWAP(*bpx1, *bpx2); } } return; } nipy-0.3.0/libcstat/fff/fff_twosample_stat.h000066400000000000000000000050231210344137400210760ustar00rootroot00000000000000/*! \file fff_twosample_stat.h \brief One-sample test statistics \author Alexis Roche \date 2008 */ #ifndef FFF_TWOSAMPLE_STAT #define FFF_TWOSAMPLE_STAT #ifdef __cplusplus extern "C" { #endif #include "fff_vector.h" /* Two-sample stat flag */ typedef enum { FFF_TWOSAMPLE_STUDENT = 2, FFF_TWOSAMPLE_WILCOXON = 6, FFF_TWOSAMPLE_STUDENT_MFX = 12 } fff_twosample_stat_flag; /*! \struct fff_twosample_stat \brief General structure for two-sample test statistics */ typedef struct{ unsigned int n1; /*!< number of subjects in first group */ unsigned int n2; /*!< number of subjects in second group */ fff_twosample_stat_flag flag; /*!< statistic's identifier */ void* params; double (*compute_stat)(void*, const fff_vector*, unsigned int); /*!< actual statistic implementation */ } fff_twosample_stat; extern fff_twosample_stat* fff_twosample_stat_new(unsigned int n1, unsigned int n2, fff_twosample_stat_flag flag); extern void fff_twosample_stat_delete(fff_twosample_stat* thisone); extern double fff_twosample_stat_eval(fff_twosample_stat* thisone, const fff_vector* x); /** MFX **/ /*! \struct fff_twosample_stat_mfx \brief General structure for two-sample test statistics */ typedef struct{ unsigned int n1; /*!< number of subjects in first group */ unsigned int n2; /*!< number of subjects in second group */ fff_twosample_stat_flag flag; /*!< statistic's identifier */ unsigned int niter; void* params; /*! 
auxiliary structures */ double (*compute_stat)(void*, const fff_vector*, const fff_vector*, unsigned int); /*!< actual statistic implementation */ } fff_twosample_stat_mfx; extern fff_twosample_stat_mfx* fff_twosample_stat_mfx_new(unsigned int n1, unsigned int n2, fff_twosample_stat_flag flag); extern void fff_twosample_stat_mfx_delete(fff_twosample_stat_mfx* thisone); extern double fff_twosample_stat_mfx_eval(fff_twosample_stat_mfx* thisone, const fff_vector* x, const fff_vector* vx); /** Label permutations **/ extern unsigned int fff_twosample_permutation(unsigned int* idx1, unsigned int* idx2, unsigned int n1, unsigned int n2, double* magic); extern void fff_twosample_apply_permutation(fff_vector* px, fff_vector* pv, const fff_vector* x1, const fff_vector* v1, const fff_vector* x2, const fff_vector* v2, unsigned int i, const unsigned int* idx1, const unsigned int* idx2); #ifdef __cplusplus } #endif #endif nipy-0.3.0/libcstat/fff/fff_vector.c000066400000000000000000000262021210344137400173270ustar00rootroot00000000000000#include "fff_base.h" #include "fff_vector.h" #include "fff_array.h" #include #include #include #include #include /* Declaration of static functions */ static double _fff_pth_element(double* x, size_t p, size_t stride, size_t size); static void _fff_pth_interval(double* am, double* aM, double* x, size_t p, size_t stride, size_t size); /* Constructor */ fff_vector* fff_vector_new(size_t size) { fff_vector* thisone; thisone = (fff_vector*)calloc(1, sizeof(fff_vector)); if (thisone == NULL) { FFF_ERROR("Allocation failed", ENOMEM); return NULL; } thisone->data = (double*)calloc(size, sizeof(double)); if (thisone->data == NULL) FFF_ERROR("Allocation failed", ENOMEM); thisone->size = size; thisone->stride = 1; thisone->owner = 1; return thisone; } /* Destructor */ void fff_vector_delete(fff_vector* thisone) { if (thisone->owner) if (thisone->data != NULL) free(thisone->data); free(thisone); return; } /* View */ fff_vector fff_vector_view(const double* data, size_t size, size_t stride) { fff_vector x; x.size = size; x.stride = stride; x.owner = 0; x.data = (double*)data; return x; } #define CHECK_SIZE(x,y) \ if ((x->size) != (y->size)) FFF_ERROR("Vectors have different sizes", EDOM) /* Vector copy. 
If both vectors are contiguous in memory, we use memcpy, otherwise we perform a loop */ void fff_vector_memcpy(fff_vector* x, const fff_vector* y) { CHECK_SIZE(x, y); if ((x->stride == 1) && (y->stride == 1)) memcpy((void*)x->data, (void*)y->data, x->size*sizeof(double)); else { size_t i; double *bx, *by; for(i=0, bx=x->data, by=y->data; i<x->size; i++, bx+=x->stride, by+=y->stride) *bx = *by; } return; } /* Copy buffer with arbitrary type */ void fff_vector_fetch(fff_vector* x, const void* data, fff_datatype datatype, size_t stride) { fff_array a = fff_array_view1d(datatype, (void*)data, x->size, stride); fff_array b = fff_array_view1d(FFF_DOUBLE, x->data, x->size, x->stride); fff_array_copy(&b, &a); return; } /* Get an element */ double fff_vector_get (const fff_vector * x, size_t i) { return(x->data[ i * x->stride ]); } /* Set an element */ void fff_vector_set (fff_vector * x, size_t i, double a) { x->data[ i * x->stride ] = a; return; } /* Set all elements */ void fff_vector_set_all (fff_vector * x, double a) { size_t i; double *buf; for(i=0, buf=x->data; i<x->size; i++, buf+=x->stride) *buf = a; return; } /* Add two vectors */ void fff_vector_add (fff_vector * x, const fff_vector * y) { size_t i; double *bx, *by; CHECK_SIZE(x, y); for(i=0, bx=x->data, by=y->data; i<x->size; i++, bx+=x->stride, by+=y->stride) *bx += *by; return; } /* Compute: x = x - y */ void fff_vector_sub (fff_vector * x, const fff_vector * y) { size_t i; double *bx, *by; CHECK_SIZE(x, y); for(i=0, bx=x->data, by=y->data; i<x->size; i++, bx+=x->stride, by+=y->stride) *bx -= *by; return; } /* Element-wise product */ void fff_vector_mul (fff_vector * x, const fff_vector * y) { size_t i; double *bx, *by; CHECK_SIZE(x, y); for(i=0, bx=x->data, by=y->data; i<x->size; i++, bx+=x->stride, by+=y->stride) *bx *= *by; return; } /* Element-wise division */ void fff_vector_div (fff_vector * x, const fff_vector * y) { size_t i; double *bx, *by; CHECK_SIZE(x, y); for(i=0, bx=x->data, by=y->data; i<x->size; i++, bx+=x->stride, by+=y->stride) *bx /= *by; return; } /* Scale by a constant */ void fff_vector_scale (fff_vector * x, double a) { size_t i; double *bx; for(i=0, bx=x->data; i<x->size; i++, bx+=x->stride) *bx *= a; return; } /* Add a constant */ void fff_vector_add_constant (fff_vector * x, double a) { size_t i; double *bx; for(i=0, bx=x->data; i<x->size; i++, bx+=x->stride) *bx += a; return; } /* Sum up elements */ long double fff_vector_sum(const fff_vector* x) { long double sum = 0.0; double* buf = x->data; size_t i; for(i=0; i<x->size; i++, buf+=x->stride) sum += *buf; return sum; } /* Mean */ double fff_vector_mean(const fff_vector* x) { return((double)(fff_vector_sum(x) / (double)x->size)); } /* SSD We use Konig formula: SUM[(x-a)^2] = SUM[(x-m)^2] + n*(a-m)^2 where m is the mean. 
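(Expanding (x-a)^2 = ((x-m) + (m-a))^2 and noting that the cross term sums to zero gives this identity; the fixed_offset branch below applies it with a = *m, while the default branch returns the sample mean in *m.) 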
*/ long double fff_vector_ssd(const fff_vector* x, double* m, int fixed_offset) { long double ssd = 0.0; long double sum = 0.0; long double n = (long double)x->size; double aux; double* buf = x->data; size_t i; for(i=0; i<x->size; i++, buf+=x->stride) { aux = *buf; sum += aux; ssd += FFF_SQR(aux); } sum /= n; if (fixed_offset) { aux = *m - sum; ssd += n * (FFF_SQR(aux) - FFF_SQR(sum)); } else{ *m = sum; ssd -= n * FFF_SQR(sum); } return ssd; } long double fff_vector_wsum(const fff_vector* x, const fff_vector* w, long double* sumw) { long double wsum=0.0, aux=0.0; double *bufx=x->data, *bufw=w->data; size_t i; CHECK_SIZE(x, w); for(i=0; i<x->size; i++, bufx+=x->stride, bufw+=w->stride) { wsum += (*bufw) * (*bufx); aux += *bufw; } *sumw = aux; return wsum; } long double fff_vector_sad(const fff_vector* x, double m) { long double sad=0.0; double aux; double *buf=x->data; size_t i; for(i=0; i<x->size; i++, buf+=x->stride) { aux = *buf-m; sad += FFF_ABS(aux); } return sad; } /* Median (modify input vector) */ double fff_vector_median(fff_vector* x) { double m; double* data = x->data; size_t stride = x->stride, size = x->size; if (FFF_IS_ODD(size)) m = _fff_pth_element(data, size>>1, stride, size); else{ double mm; _fff_pth_interval(&m, &mm, data, (size>>1)-1, stride, size); m = .5*(m+mm); } return m; } /* Quantile. Given a sample x, this function computes a value q so that the number of sample values that are greater or equal to q is smaller or equal to (1-r) * sample size. */ double fff_vector_quantile(fff_vector* x, double r, int interp) { double m, pp; double* data = x->data; size_t p, stride = x->stride, size = x->size; if ((r<0) || (r>1)){ FFF_WARNING("Ratio must be in [0,1], returning zero"); return 0.0; } if (size == 1) return data[0]; /* Find the smallest index p so that p >= r * size */ if (!interp) { pp = r * size; p = FFF_UNSIGNED_CEIL(pp); if (p == size) return FFF_POSINF; m = _fff_pth_element(data, p, stride, size); } else { double wm, wM; pp = r * (size-1); p = FFF_UNSIGNED_FLOOR(pp); wM = pp - (double)p; wm = 1.0 - wM; if (wM <= 0) m = _fff_pth_element(data, p, stride, size); else { double am, aM; _fff_pth_interval(&am, &aM, data, p, stride, size); m = wm*am + wM*aM; } } return m; } /*** STATIC FUNCTIONS ***/ /* BEWARE: the input array x gets modified! */ /* Pick up the sample value a so that: (p+1) sample values are <= a AND the remaining sample values are >= a */ #define SWAP(a, b) {tmp=(a); (a)=(b); (b)=tmp;} static double _fff_pth_element(double* x, size_t p, size_t stride, size_t n) { double a, tmp; double *bufl, *bufr; size_t i, j, il, jr, stop1, stop2; int same_extremities; stop1 = 0; il = 0; jr = n-1; while (stop1 == 0) { same_extremities = 0; bufl = x + stride*il; bufr = x + stride*jr; if (*bufl > *bufr) SWAP(*bufl, *bufr) else if (*bufl == *bufr) same_extremities = 1; a = *bufl; if (il == jr) return a; bufl += stride; i = il + 1; j = jr; stop2 = 0; while (stop2 == 0) { while (*bufl < a) { i ++; bufl += stride; } while (*bufr > a) { j --; bufr -= stride; } if (j <= i) stop2 = 1; else { SWAP(*bufl, *bufr) j --; bufr -= stride; i ++; bufl += stride; } /* Avoids infinite loops in samples with redundant values. 
This situation can only occur with i == j */ if ((same_extremities) && (j==jr)) { j --; bufr -= stride; SWAP(x[il*stride], *bufr) stop2 = 1; } } /* At this point, we know that il <= j <= i; moreover: if k <= j, x(j) <= a and if k > j, x(j) >= a if k < i, x(i) <= a and if k >= i, x(i) >= a We hence have: (j+1) values <= a and the remaining (n-j-1) >= a i values <= a and the remaining (n-i) >= a */ if (j > p) jr = j; else if (j < p) il = i; else /* j == p */ stop1 = 1; } return a; } /* BEWARE: the input array x gets modified! */ static void _fff_pth_interval(double* am, double* aM, double* x, size_t p, size_t stride, size_t n) { double a, tmp; double *bufl, *bufr; size_t i, j, il, jr, stop1, stop2, stop3; size_t pp = p+1; int same_extremities = 0; *am = 0.0; *aM = 0.0; stop1 = 0; stop2 = 0; il = 0; jr = n-1; while ((stop1 == 0) || (stop2 == 0)) { same_extremities = 0; bufl = x + stride*il; bufr = x + stride*jr; if (*bufl > *bufr) SWAP(*bufl, *bufr) else if (*bufl == *bufr) same_extremities = 1; a = *bufl; if (il == jr) { *am=a; *aM=a; return; } bufl += stride; i = il + 1; j = jr; stop3 = 0; while (stop3 == 0) { while (*bufl < a) { i ++; bufl += stride; } while (*bufr > a) { j --; bufr -= stride; } if (j <= i) stop3 = 1; else { SWAP(*bufl, *bufr) j --; bufr -= stride; i ++; bufl += stride; } /* Avoids infinite loops in samples with redundant values */ if ((same_extremities) && (j==jr)) { j --; bufr -= stride; SWAP(x[il*stride], *bufr) stop3 = 1; } } /* At this point, we know that there are (j+1) datapoints <=a including a itself, and another (n-j-1) datapoints >=a */ if (j > pp) jr = j; else if (j < p) il = i; /* Case: found percentile at p */ else if (j == p) { il = i; *am = a; stop1 = 1; } /* Case: found percentile at (p+1), ie j==(p+1) */ else { jr = j; *aM = a; stop2 = 1; } } return; } /* Sort x by ascending order and reorder w accordingly. */ double fff_vector_wmedian_from_sorted_data (const fff_vector* x_sorted, const fff_vector* w) { size_t i; double mu, sumW, WW, WW_prev, xx, xx_prev, ww; double *bxx, *bww; /* Compute the sum of weights */ sumW = (double) fff_vector_sum(w); if (sumW <= 0.0) return FFF_NAN; /* Find the smallest index such that the cumulative density > 0.5 */ i = 0; xx = FFF_NEGINF; WW = 0.0; bxx = x_sorted->data; bww = w->data; while (WW <= .5) { xx_prev = xx; WW_prev = WW; xx = *bxx; ww = *bww / sumW; WW += ww; i ++; bxx += x_sorted->stride; bww += w->stride; } /* Linearly interpolated median */ if (i == 1) mu = xx; else mu = .5*(xx_prev+xx) + (.5-WW_prev)*(xx-xx_prev)/ww; return mu; } nipy-0.3.0/libcstat/fff/fff_vector.h000066400000000000000000000116451210344137400173410ustar00rootroot00000000000000/*! \file fff_vector.h \brief fff vector object \author Alexis Roche \date 2003-2008 */ #ifndef FFF_VECTOR #define FFF_VECTOR #ifdef __cplusplus extern "C" { #endif #include "fff_base.h" #include /*! \struct fff_vector \brief The fff vector structure */ typedef struct { size_t size; size_t stride; double* data; int owner; } fff_vector; /*! \brief fff vector constructor \param size vector size */ extern fff_vector* fff_vector_new(size_t size); /*! \brief fff vector destructor \param thisone instance to delete */ extern void fff_vector_delete(fff_vector* thisone); /*! \brief Vector view \param data data array \param size array size \param stride array stride */ extern fff_vector fff_vector_view(const double* data, size_t size, size_t stride); /*! \brief Get an element \param x vector \param i index */ extern double fff_vector_get (const fff_vector * x, size_t i); /*! 
\brief Set an element \param x vector \param i index \param a value to set */ extern void fff_vector_set (fff_vector * x, size_t i, double a); /*! \brief Set all elements to a constant value \param x vector \param a value to set */ extern void fff_vector_set_all (fff_vector * x, double a); extern void fff_vector_scale (fff_vector * x, double a); extern void fff_vector_add_constant (fff_vector * x, double a); /*! \brief Copy a vector \param x input vector \param y output vector */ extern void fff_vector_memcpy( fff_vector* x, const fff_vector* y ); /*! \brief view or copy an existing buffer \param x destination vector \param data pre-allocated buffer \param datatype data type \param stride stride in relative units (1 means contiguous array) */ extern void fff_vector_fetch(fff_vector* x, const void* data, fff_datatype datatype, size_t stride); /*! \brief Add two vectors \param x output vector \param y constant vector */ extern void fff_vector_add (fff_vector * x, const fff_vector * y); /*! \brief Compute the difference x-y \param x output vector \param y constant vector */ extern void fff_vector_sub (fff_vector * x, const fff_vector * y); extern void fff_vector_mul (fff_vector * x, const fff_vector * y); extern void fff_vector_div (fff_vector * x, const fff_vector * y); /*! \brief Sum up vector elements \param x input vector */ extern long double fff_vector_sum( const fff_vector* x ); /*! \brief Sum of squared differences \param x input vector \param m offset value, either fixed or set to the mean \param fixed_offset true if the offset is to be held fixed Compute the sum: \f$ \sum_i (x_i-a)^2 \f$ where \a a is a given offset. */ extern long double fff_vector_ssd( const fff_vector* x, double* m, int fixed_offset ); extern long double fff_vector_wsum( const fff_vector* x, const fff_vector* w, long double* sumw ); extern long double fff_vector_sad( const fff_vector* x, double m ); /*! \brief Fast median from non-const vector \param x input vector Beware that the input array is re-arranged. This function does not require the input array to be sorted in ascending order. It deals itself with sorting the data, and this is done in a partial way, yielding a faster algorithm. */ extern double fff_vector_median( fff_vector* x ); /*! \brief Sample percentile, or quantile from non-const array \param input vector \param r value between 0 and 1 \param interp interpolation flag If \c interp is \c FALSE, this function returns the smallest sample value \a q that is greater than or equal to a proportion \a r of all sample values; more precisely, the number of sample values that are greater or equal to \a q is smaller or equal to \a (1-r) times the sample size. If \c interp is \c TRUE, then the quantile is defined from a linear interpolation of the empirical cumulative distribution. For instance, if \a r = 0.5 and \c interp = \c TRUE, \a q is the usual median; the \c interp flag does not play any role if the sample size is odd. Similarly to \c fff_median_from_temp_data, the array elements are re-arranged. */ extern double fff_vector_quantile( fff_vector* x, double r, int interp ); /*! \brief Weighted median \param x already sorted data \param w weight vector Compute the weighted median of \c x_sorted using the weights in \c w, assuming the elements in \c x_sorted are in ascending order. Notice, the function does not check for negative weights; if the weights sum up to a negative value, \c FFF_NAN is returned. 
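In this library the typical caller first orders the (value, weight) pairs with an indexed sort and then evaluates this function on the sorted copies; see the empirical MFX median statistic in fff_onesample_stat.c for an example of this pattern. 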
*/ extern double fff_vector_wmedian_from_sorted_data ( const fff_vector* x_sorted, const fff_vector* w ); #ifdef __cplusplus } #endif #endif nipy-0.3.0/libcstat/lapack_lite/000077500000000000000000000000001210344137400165455ustar00rootroot00000000000000nipy-0.3.0/libcstat/lapack_lite/blas_lite.c000066400000000000000000004061711210344137400206600ustar00rootroot00000000000000/* NOTE: This is generated code. Look in Misc/lapack_lite for information on remaking this file. */ #include "f2c.h" #ifdef HAVE_CONFIG #include "config.h" #else extern doublereal dlamch_(char *); #define EPSILON dlamch_("Epsilon") #define SAFEMINIMUM dlamch_("Safe minimum") #define PRECISION dlamch_("Precision") #define BASE dlamch_("Base") #endif extern doublereal dlapy2_(doublereal *x, doublereal *y); /* Table of constant values */ static doublereal c_b90 = 1.; static integer c__1 = 1; doublereal dasum_(integer *n, doublereal *dx, integer *incx) { /* System generated locals */ integer i__1, i__2; doublereal ret_val, d__1, d__2, d__3, d__4, d__5, d__6; /* Local variables */ static integer i__, m; static doublereal dtemp; static integer nincx, mp1; /* Purpose ======= takes the sum of the absolute values. jack dongarra, linpack, 3/11/78. modified 3/93 to return if incx .le. 0. modified 12/3/93, array(1) declarations changed to array(*) */ /* Parameter adjustments */ --dx; /* Function Body */ ret_val = 0.; dtemp = 0.; if (*n <= 0 || *incx <= 0) { return ret_val; } if (*incx == 1) { goto L20; } /* code for increment not equal to 1 */ nincx = *n * *incx; i__1 = nincx; i__2 = *incx; for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) { dtemp += (d__1 = dx[i__], abs(d__1)); /* L10: */ } ret_val = dtemp; return ret_val; /* code for increment equal to 1 clean-up loop */ L20: m = *n % 6; if (m == 0) { goto L40; } i__2 = m; for (i__ = 1; i__ <= i__2; ++i__) { dtemp += (d__1 = dx[i__], abs(d__1)); /* L30: */ } if (*n < 6) { goto L60; } L40: mp1 = m + 1; i__2 = *n; for (i__ = mp1; i__ <= i__2; i__ += 6) { dtemp = dtemp + (d__1 = dx[i__], abs(d__1)) + (d__2 = dx[i__ + 1], abs(d__2)) + (d__3 = dx[i__ + 2], abs(d__3)) + (d__4 = dx[i__ + 3], abs(d__4)) + (d__5 = dx[i__ + 4], abs(d__5)) + (d__6 = dx[i__ + 5], abs(d__6)); /* L50: */ } L60: ret_val = dtemp; return ret_val; } /* dasum_ */ /* Subroutine */ int daxpy_(integer *n, doublereal *da, doublereal *dx, integer *incx, doublereal *dy, integer *incy) { /* System generated locals */ integer i__1; /* Local variables */ static integer i__, m, ix, iy, mp1; /* Purpose ======= constant times a vector plus a vector. uses unrolled loops for increments equal to one. jack dongarra, linpack, 3/11/78. modified 12/3/93, array(1) declarations changed to array(*) */ /* Parameter adjustments */ --dy; --dx; /* Function Body */ if (*n <= 0) { return 0; } if (*da == 0.) 
{ return 0; } if (*incx == 1 && *incy == 1) { goto L20; } /* code for unequal increments or equal increments not equal to 1 */ ix = 1; iy = 1; if (*incx < 0) { ix = (-(*n) + 1) * *incx + 1; } if (*incy < 0) { iy = (-(*n) + 1) * *incy + 1; } i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { dy[iy] += *da * dx[ix]; ix += *incx; iy += *incy; /* L10: */ } return 0; /* code for both increments equal to 1 clean-up loop */ L20: m = *n % 4; if (m == 0) { goto L40; } i__1 = m; for (i__ = 1; i__ <= i__1; ++i__) { dy[i__] += *da * dx[i__]; /* L30: */ } if (*n < 4) { return 0; } L40: mp1 = m + 1; i__1 = *n; for (i__ = mp1; i__ <= i__1; i__ += 4) { dy[i__] += *da * dx[i__]; dy[i__ + 1] += *da * dx[i__ + 1]; dy[i__ + 2] += *da * dx[i__ + 2]; dy[i__ + 3] += *da * dx[i__ + 3]; /* L50: */ } return 0; } /* daxpy_ */ doublereal dcabs1_(doublecomplex *z__) { /* System generated locals */ doublereal ret_val, d__1, d__2; /* Builtin functions */ double d_imag(doublecomplex *); /* Purpose ======= DCABS1 computes absolute value of a double complex number */ ret_val = (d__1 = z__->r, abs(d__1)) + (d__2 = d_imag(z__), abs(d__2)); return ret_val; } /* dcabs1_ */ /* Subroutine */ int dcopy_(integer *n, doublereal *dx, integer *incx, doublereal *dy, integer *incy) { /* System generated locals */ integer i__1; /* Local variables */ static integer i__, m, ix, iy, mp1; /* Purpose ======= copies a vector, x, to a vector, y. uses unrolled loops for increments equal to one. jack dongarra, linpack, 3/11/78. modified 12/3/93, array(1) declarations changed to array(*) */ /* Parameter adjustments */ --dy; --dx; /* Function Body */ if (*n <= 0) { return 0; } if (*incx == 1 && *incy == 1) { goto L20; } /* code for unequal increments or equal increments not equal to 1 */ ix = 1; iy = 1; if (*incx < 0) { ix = (-(*n) + 1) * *incx + 1; } if (*incy < 0) { iy = (-(*n) + 1) * *incy + 1; } i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { dy[iy] = dx[ix]; ix += *incx; iy += *incy; /* L10: */ } return 0; /* code for both increments equal to 1 clean-up loop */ L20: m = *n % 7; if (m == 0) { goto L40; } i__1 = m; for (i__ = 1; i__ <= i__1; ++i__) { dy[i__] = dx[i__]; /* L30: */ } if (*n < 7) { return 0; } L40: mp1 = m + 1; i__1 = *n; for (i__ = mp1; i__ <= i__1; i__ += 7) { dy[i__] = dx[i__]; dy[i__ + 1] = dx[i__ + 1]; dy[i__ + 2] = dx[i__ + 2]; dy[i__ + 3] = dx[i__ + 3]; dy[i__ + 4] = dx[i__ + 4]; dy[i__ + 5] = dx[i__ + 5]; dy[i__ + 6] = dx[i__ + 6]; /* L50: */ } return 0; } /* dcopy_ */ doublereal ddot_(integer *n, doublereal *dx, integer *incx, doublereal *dy, integer *incy) { /* System generated locals */ integer i__1; doublereal ret_val; /* Local variables */ static integer i__, m; static doublereal dtemp; static integer ix, iy, mp1; /* Purpose ======= forms the dot product of two vectors. uses unrolled loops for increments equal to one. jack dongarra, linpack, 3/11/78. 
modified 12/3/93, array(1) declarations changed to array(*) */ /* Parameter adjustments */ --dy; --dx; /* Function Body */ ret_val = 0.; dtemp = 0.; if (*n <= 0) { return ret_val; } if (*incx == 1 && *incy == 1) { goto L20; } /* code for unequal increments or equal increments not equal to 1 */ ix = 1; iy = 1; if (*incx < 0) { ix = (-(*n) + 1) * *incx + 1; } if (*incy < 0) { iy = (-(*n) + 1) * *incy + 1; } i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { dtemp += dx[ix] * dy[iy]; ix += *incx; iy += *incy; /* L10: */ } ret_val = dtemp; return ret_val; /* code for both increments equal to 1 clean-up loop */ L20: m = *n % 5; if (m == 0) { goto L40; } i__1 = m; for (i__ = 1; i__ <= i__1; ++i__) { dtemp += dx[i__] * dy[i__]; /* L30: */ } if (*n < 5) { goto L60; } L40: mp1 = m + 1; i__1 = *n; for (i__ = mp1; i__ <= i__1; i__ += 5) { dtemp = dtemp + dx[i__] * dy[i__] + dx[i__ + 1] * dy[i__ + 1] + dx[ i__ + 2] * dy[i__ + 2] + dx[i__ + 3] * dy[i__ + 3] + dx[i__ + 4] * dy[i__ + 4]; /* L50: */ } L60: ret_val = dtemp; return ret_val; } /* ddot_ */ /* Subroutine */ int dgemm_(char *transa, char *transb, integer *m, integer * n, integer *k, doublereal *alpha, doublereal *a, integer *lda, doublereal *b, integer *ldb, doublereal *beta, doublereal *c__, integer *ldc) { /* System generated locals */ integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, i__1, i__2, i__3; /* Local variables */ static integer info; static logical nota, notb; static doublereal temp; static integer i__, j, l, ncola; extern logical lsame_(char *, char *); static integer nrowa, nrowb; extern /* Subroutine */ int xerbla_(char *, integer *); /* Purpose ======= DGEMM performs one of the matrix-matrix operations C := alpha*op( A )*op( B ) + beta*C, where op( X ) is one of op( X ) = X or op( X ) = X', alpha and beta are scalars, and A, B and C are matrices, with op( A ) an m by k matrix, op( B ) a k by n matrix and C an m by n matrix. Arguments ========== TRANSA - CHARACTER*1. On entry, TRANSA specifies the form of op( A ) to be used in the matrix multiplication as follows: TRANSA = 'N' or 'n', op( A ) = A. TRANSA = 'T' or 't', op( A ) = A'. TRANSA = 'C' or 'c', op( A ) = A'. Unchanged on exit. TRANSB - CHARACTER*1. On entry, TRANSB specifies the form of op( B ) to be used in the matrix multiplication as follows: TRANSB = 'N' or 'n', op( B ) = B. TRANSB = 'T' or 't', op( B ) = B'. TRANSB = 'C' or 'c', op( B ) = B'. Unchanged on exit. M - INTEGER. On entry, M specifies the number of rows of the matrix op( A ) and of the matrix C. M must be at least zero. Unchanged on exit. N - INTEGER. On entry, N specifies the number of columns of the matrix op( B ) and the number of columns of the matrix C. N must be at least zero. Unchanged on exit. K - INTEGER. On entry, K specifies the number of columns of the matrix op( A ) and the number of rows of the matrix op( B ). K must be at least zero. Unchanged on exit. ALPHA - DOUBLE PRECISION. On entry, ALPHA specifies the scalar alpha. Unchanged on exit. A - DOUBLE PRECISION array of DIMENSION ( LDA, ka ), where ka is k when TRANSA = 'N' or 'n', and is m otherwise. Before entry with TRANSA = 'N' or 'n', the leading m by k part of the array A must contain the matrix A, otherwise the leading k by m part of the array A must contain the matrix A. Unchanged on exit. LDA - INTEGER. On entry, LDA specifies the first dimension of A as declared in the calling (sub) program. When TRANSA = 'N' or 'n' then LDA must be at least max( 1, m ), otherwise LDA must be at least max( 1, k ). Unchanged on exit. 
B - DOUBLE PRECISION array of DIMENSION ( LDB, kb ), where kb is n when TRANSB = 'N' or 'n', and is k otherwise. Before entry with TRANSB = 'N' or 'n', the leading k by n part of the array B must contain the matrix B, otherwise the leading n by k part of the array B must contain the matrix B. Unchanged on exit. LDB - INTEGER. On entry, LDB specifies the first dimension of B as declared in the calling (sub) program. When TRANSB = 'N' or 'n' then LDB must be at least max( 1, k ), otherwise LDB must be at least max( 1, n ). Unchanged on exit. BETA - DOUBLE PRECISION. On entry, BETA specifies the scalar beta. When BETA is supplied as zero then C need not be set on input. Unchanged on exit. C - DOUBLE PRECISION array of DIMENSION ( LDC, n ). Before entry, the leading m by n part of the array C must contain the matrix C, except when beta is zero, in which case C need not be set on entry. On exit, the array C is overwritten by the m by n matrix ( alpha*op( A )*op( B ) + beta*C ). LDC - INTEGER. On entry, LDC specifies the first dimension of C as declared in the calling (sub) program. LDC must be at least max( 1, m ). Unchanged on exit. Level 3 Blas routine. -- Written on 8-February-1989. Jack Dongarra, Argonne National Laboratory. Iain Duff, AERE Harwell. Jeremy Du Croz, Numerical Algorithms Group Ltd. Sven Hammarling, Numerical Algorithms Group Ltd. Set NOTA and NOTB as true if A and B respectively are not transposed and set NROWA, NCOLA and NROWB as the number of rows and columns of A and the number of rows of B respectively. */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; b_dim1 = *ldb; b_offset = 1 + b_dim1 * 1; b -= b_offset; c_dim1 = *ldc; c_offset = 1 + c_dim1 * 1; c__ -= c_offset; /* Function Body */ nota = lsame_(transa, "N"); notb = lsame_(transb, "N"); if (nota) { nrowa = *m; ncola = *k; } else { nrowa = *k; ncola = *m; } if (notb) { nrowb = *k; } else { nrowb = *n; } /* Test the input parameters. */ info = 0; if (! nota && ! lsame_(transa, "C") && ! lsame_( transa, "T")) { info = 1; } else if (! notb && ! lsame_(transb, "C") && ! lsame_(transb, "T")) { info = 2; } else if (*m < 0) { info = 3; } else if (*n < 0) { info = 4; } else if (*k < 0) { info = 5; } else if (*lda < max(1,nrowa)) { info = 8; } else if (*ldb < max(1,nrowb)) { info = 10; } else if (*ldc < max(1,*m)) { info = 13; } if (info != 0) { xerbla_("DGEMM ", &info); return 0; } /* Quick return if possible. */ if (*m == 0 || *n == 0 || (*alpha == 0. || *k == 0) && *beta == 1.) { return 0; } /* And if alpha.eq.zero. */ if (*alpha == 0.) { if (*beta == 0.) { i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { c__[i__ + j * c_dim1] = 0.; /* L10: */ } /* L20: */ } } else { i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1]; /* L30: */ } /* L40: */ } } return 0; } /* Start the operations. */ if (notb) { if (nota) { /* Form C := alpha*A*B + beta*C. */ i__1 = *n; for (j = 1; j <= i__1; ++j) { if (*beta == 0.) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { c__[i__ + j * c_dim1] = 0.; /* L50: */ } } else if (*beta != 1.) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1]; /* L60: */ } } i__2 = *k; for (l = 1; l <= i__2; ++l) { if (b[l + j * b_dim1] != 0.) 
{ temp = *alpha * b[l + j * b_dim1]; i__3 = *m; for (i__ = 1; i__ <= i__3; ++i__) { c__[i__ + j * c_dim1] += temp * a[i__ + l * a_dim1]; /* L70: */ } } /* L80: */ } /* L90: */ } } else { /* Form C := alpha*A'*B + beta*C */ i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { temp = 0.; i__3 = *k; for (l = 1; l <= i__3; ++l) { temp += a[l + i__ * a_dim1] * b[l + j * b_dim1]; /* L100: */ } if (*beta == 0.) { c__[i__ + j * c_dim1] = *alpha * temp; } else { c__[i__ + j * c_dim1] = *alpha * temp + *beta * c__[ i__ + j * c_dim1]; } /* L110: */ } /* L120: */ } } } else { if (nota) { /* Form C := alpha*A*B' + beta*C */ i__1 = *n; for (j = 1; j <= i__1; ++j) { if (*beta == 0.) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { c__[i__ + j * c_dim1] = 0.; /* L130: */ } } else if (*beta != 1.) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1]; /* L140: */ } } i__2 = *k; for (l = 1; l <= i__2; ++l) { if (b[j + l * b_dim1] != 0.) { temp = *alpha * b[j + l * b_dim1]; i__3 = *m; for (i__ = 1; i__ <= i__3; ++i__) { c__[i__ + j * c_dim1] += temp * a[i__ + l * a_dim1]; /* L150: */ } } /* L160: */ } /* L170: */ } } else { /* Form C := alpha*A'*B' + beta*C */ i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { temp = 0.; i__3 = *k; for (l = 1; l <= i__3; ++l) { temp += a[l + i__ * a_dim1] * b[j + l * b_dim1]; /* L180: */ } if (*beta == 0.) { c__[i__ + j * c_dim1] = *alpha * temp; } else { c__[i__ + j * c_dim1] = *alpha * temp + *beta * c__[ i__ + j * c_dim1]; } /* L190: */ } /* L200: */ } } } return 0; /* End of DGEMM . */ } /* dgemm_ */ /* Subroutine */ int dgemv_(char *trans, integer *m, integer *n, doublereal * alpha, doublereal *a, integer *lda, doublereal *x, integer *incx, doublereal *beta, doublereal *y, integer *incy) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2; /* Local variables */ static integer info; static doublereal temp; static integer lenx, leny, i__, j; extern logical lsame_(char *, char *); static integer ix, iy, jx, jy, kx, ky; extern /* Subroutine */ int xerbla_(char *, integer *); /* Purpose ======= DGEMV performs one of the matrix-vector operations y := alpha*A*x + beta*y, or y := alpha*A'*x + beta*y, where alpha and beta are scalars, x and y are vectors and A is an m by n matrix. Arguments ========== TRANS - CHARACTER*1. On entry, TRANS specifies the operation to be performed as follows: TRANS = 'N' or 'n' y := alpha*A*x + beta*y. TRANS = 'T' or 't' y := alpha*A'*x + beta*y. TRANS = 'C' or 'c' y := alpha*A'*x + beta*y. Unchanged on exit. M - INTEGER. On entry, M specifies the number of rows of the matrix A. M must be at least zero. Unchanged on exit. N - INTEGER. On entry, N specifies the number of columns of the matrix A. N must be at least zero. Unchanged on exit. ALPHA - DOUBLE PRECISION. On entry, ALPHA specifies the scalar alpha. Unchanged on exit. A - DOUBLE PRECISION array of DIMENSION ( LDA, n ). Before entry, the leading m by n part of the array A must contain the matrix of coefficients. Unchanged on exit. LDA - INTEGER. On entry, LDA specifies the first dimension of A as declared in the calling (sub) program. LDA must be at least max( 1, m ). Unchanged on exit. X - DOUBLE PRECISION array of DIMENSION at least ( 1 + ( n - 1 )*abs( INCX ) ) when TRANS = 'N' or 'n' and at least ( 1 + ( m - 1 )*abs( INCX ) ) otherwise. Before entry, the incremented array X must contain the vector x. Unchanged on exit. INCX - INTEGER. 
On entry, INCX specifies the increment for the elements of X. INCX must not be zero. Unchanged on exit. BETA - DOUBLE PRECISION. On entry, BETA specifies the scalar beta. When BETA is supplied as zero then Y need not be set on input. Unchanged on exit. Y - DOUBLE PRECISION array of DIMENSION at least ( 1 + ( m - 1 )*abs( INCY ) ) when TRANS = 'N' or 'n' and at least ( 1 + ( n - 1 )*abs( INCY ) ) otherwise. Before entry with BETA non-zero, the incremented array Y must contain the vector y. On exit, Y is overwritten by the updated vector y. INCY - INTEGER. On entry, INCY specifies the increment for the elements of Y. INCY must not be zero. Unchanged on exit. Level 2 Blas routine. -- Written on 22-October-1986. Jack Dongarra, Argonne National Lab. Jeremy Du Croz, Nag Central Office. Sven Hammarling, Nag Central Office. Richard Hanson, Sandia National Labs. Test the input parameters. */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --x; --y; /* Function Body */ info = 0; if (! lsame_(trans, "N") && ! lsame_(trans, "T") && ! lsame_(trans, "C") ) { info = 1; } else if (*m < 0) { info = 2; } else if (*n < 0) { info = 3; } else if (*lda < max(1,*m)) { info = 6; } else if (*incx == 0) { info = 8; } else if (*incy == 0) { info = 11; } if (info != 0) { xerbla_("DGEMV ", &info); return 0; } /* Quick return if possible. */ if (*m == 0 || *n == 0 || *alpha == 0. && *beta == 1.) { return 0; } /* Set LENX and LENY, the lengths of the vectors x and y, and set up the start points in X and Y. */ if (lsame_(trans, "N")) { lenx = *n; leny = *m; } else { lenx = *m; leny = *n; } if (*incx > 0) { kx = 1; } else { kx = 1 - (lenx - 1) * *incx; } if (*incy > 0) { ky = 1; } else { ky = 1 - (leny - 1) * *incy; } /* Start the operations. In this version the elements of A are accessed sequentially with one pass through A. First form y := beta*y. */ if (*beta != 1.) { if (*incy == 1) { if (*beta == 0.) { i__1 = leny; for (i__ = 1; i__ <= i__1; ++i__) { y[i__] = 0.; /* L10: */ } } else { i__1 = leny; for (i__ = 1; i__ <= i__1; ++i__) { y[i__] = *beta * y[i__]; /* L20: */ } } } else { iy = ky; if (*beta == 0.) { i__1 = leny; for (i__ = 1; i__ <= i__1; ++i__) { y[iy] = 0.; iy += *incy; /* L30: */ } } else { i__1 = leny; for (i__ = 1; i__ <= i__1; ++i__) { y[iy] = *beta * y[iy]; iy += *incy; /* L40: */ } } } } if (*alpha == 0.) { return 0; } if (lsame_(trans, "N")) { /* Form y := alpha*A*x + y. */ jx = kx; if (*incy == 1) { i__1 = *n; for (j = 1; j <= i__1; ++j) { if (x[jx] != 0.) { temp = *alpha * x[jx]; i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { y[i__] += temp * a[i__ + j * a_dim1]; /* L50: */ } } jx += *incx; /* L60: */ } } else { i__1 = *n; for (j = 1; j <= i__1; ++j) { if (x[jx] != 0.) { temp = *alpha * x[jx]; iy = ky; i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { y[iy] += temp * a[i__ + j * a_dim1]; iy += *incy; /* L70: */ } } jx += *incx; /* L80: */ } } } else { /* Form y := alpha*A'*x + y. */ jy = ky; if (*incx == 1) { i__1 = *n; for (j = 1; j <= i__1; ++j) { temp = 0.; i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { temp += a[i__ + j * a_dim1] * x[i__]; /* L90: */ } y[jy] += *alpha * temp; jy += *incy; /* L100: */ } } else { i__1 = *n; for (j = 1; j <= i__1; ++j) { temp = 0.; ix = kx; i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { temp += a[i__ + j * a_dim1] * x[ix]; ix += *incx; /* L110: */ } y[jy] += *alpha * temp; jy += *incy; /* L120: */ } } } return 0; /* End of DGEMV . 
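Editorial usage note (not part of the reference source): a minimal sketch of a
call computing y := alpha*A*x + beta*y, using the f2c typedefs from this file;
all names below are hypothetical caller-side variables.

    integer m = 2, n = 2, lda = 2, incx = 1, incy = 1;
    doublereal alpha = 1., beta = 0.;
    doublereal A[4] = {1., 2., 3., 4.};   (2 x 2, column-major)
    doublereal x[2] = {1., 1.};
    doublereal y[2];                      (receives A*x here, since beta = 0)
    dgemv_("N", &m, &n, &alpha, A, &lda, x, &incx, &beta, y, &incy);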
*/ } /* dgemv_ */ /* Subroutine */ int dger_(integer *m, integer *n, doublereal *alpha, doublereal *x, integer *incx, doublereal *y, integer *incy, doublereal *a, integer *lda) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2; /* Local variables */ static integer info; static doublereal temp; static integer i__, j, ix, jy, kx; extern /* Subroutine */ int xerbla_(char *, integer *); /* Purpose ======= DGER performs the rank 1 operation A := alpha*x*y' + A, where alpha is a scalar, x is an m element vector, y is an n element vector and A is an m by n matrix. Arguments ========== M - INTEGER. On entry, M specifies the number of rows of the matrix A. M must be at least zero. Unchanged on exit. N - INTEGER. On entry, N specifies the number of columns of the matrix A. N must be at least zero. Unchanged on exit. ALPHA - DOUBLE PRECISION. On entry, ALPHA specifies the scalar alpha. Unchanged on exit. X - DOUBLE PRECISION array of dimension at least ( 1 + ( m - 1 )*abs( INCX ) ). Before entry, the incremented array X must contain the m element vector x. Unchanged on exit. INCX - INTEGER. On entry, INCX specifies the increment for the elements of X. INCX must not be zero. Unchanged on exit. Y - DOUBLE PRECISION array of dimension at least ( 1 + ( n - 1 )*abs( INCY ) ). Before entry, the incremented array Y must contain the n element vector y. Unchanged on exit. INCY - INTEGER. On entry, INCY specifies the increment for the elements of Y. INCY must not be zero. Unchanged on exit. A - DOUBLE PRECISION array of DIMENSION ( LDA, n ). Before entry, the leading m by n part of the array A must contain the matrix of coefficients. On exit, A is overwritten by the updated matrix. LDA - INTEGER. On entry, LDA specifies the first dimension of A as declared in the calling (sub) program. LDA must be at least max( 1, m ). Unchanged on exit. Level 2 Blas routine. -- Written on 22-October-1986. Jack Dongarra, Argonne National Lab. Jeremy Du Croz, Nag Central Office. Sven Hammarling, Nag Central Office. Richard Hanson, Sandia National Labs. Test the input parameters. */ /* Parameter adjustments */ --x; --y; a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; /* Function Body */ info = 0; if (*m < 0) { info = 1; } else if (*n < 0) { info = 2; } else if (*incx == 0) { info = 5; } else if (*incy == 0) { info = 7; } else if (*lda < max(1,*m)) { info = 9; } if (info != 0) { xerbla_("DGER ", &info); return 0; } /* Quick return if possible. */ if (*m == 0 || *n == 0 || *alpha == 0.) { return 0; } /* Start the operations. In this version the elements of A are accessed sequentially with one pass through A. */ if (*incy > 0) { jy = 1; } else { jy = 1 - (*n - 1) * *incy; } if (*incx == 1) { i__1 = *n; for (j = 1; j <= i__1; ++j) { if (y[jy] != 0.) { temp = *alpha * y[jy]; i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { a[i__ + j * a_dim1] += x[i__] * temp; /* L10: */ } } jy += *incy; /* L20: */ } } else { if (*incx > 0) { kx = 1; } else { kx = 1 - (*m - 1) * *incx; } i__1 = *n; for (j = 1; j <= i__1; ++j) { if (y[jy] != 0.) { temp = *alpha * y[jy]; ix = kx; i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { a[i__ + j * a_dim1] += x[ix] * temp; ix += *incx; /* L30: */ } } jy += *incy; /* L40: */ } } return 0; /* End of DGER . 
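Editorial usage note (not part of the reference source): a minimal sketch of the
rank-1 update A := alpha*x*y' + A, with hypothetical caller-side variables and
the f2c typedefs from this file.

    integer m = 2, n = 2, incx = 1, incy = 1, lda = 2;
    doublereal alpha = 1.;
    doublereal x[2] = {1., 2.};
    doublereal y[2] = {3., 4.};
    doublereal A[4] = {0., 0., 0., 0.};   (2 x 2, column-major; updated in place)
    dger_(&m, &n, &alpha, x, &incx, y, &incy, A, &lda);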
*/ } /* dger_ */ doublereal dnrm2_(integer *n, doublereal *x, integer *incx) { /* System generated locals */ integer i__1, i__2; doublereal ret_val, d__1; /* Builtin functions */ double sqrt(doublereal); /* Local variables */ static doublereal norm, scale, absxi; static integer ix; static doublereal ssq; /* Purpose ======= DNRM2 returns the euclidean norm of a vector via the function name, so that DNRM2 := sqrt( x'*x ) -- This version written on 25-October-1982. Modified on 14-October-1993 to inline the call to DLASSQ. Sven Hammarling, Nag Ltd. */ /* Parameter adjustments */ --x; /* Function Body */ if (*n < 1 || *incx < 1) { norm = 0.; } else if (*n == 1) { norm = abs(x[1]); } else { scale = 0.; ssq = 1.; /* The following loop is equivalent to this call to the LAPACK auxiliary routine: CALL DLASSQ( N, X, INCX, SCALE, SSQ ) */ i__1 = (*n - 1) * *incx + 1; i__2 = *incx; for (ix = 1; i__2 < 0 ? ix >= i__1 : ix <= i__1; ix += i__2) { if (x[ix] != 0.) { absxi = (d__1 = x[ix], abs(d__1)); if (scale < absxi) { /* Computing 2nd power */ d__1 = scale / absxi; ssq = ssq * (d__1 * d__1) + 1.; scale = absxi; } else { /* Computing 2nd power */ d__1 = absxi / scale; ssq += d__1 * d__1; } } /* L10: */ } norm = scale * sqrt(ssq); } ret_val = norm; return ret_val; /* End of DNRM2. */ } /* dnrm2_ */ /* Subroutine */ int drot_(integer *n, doublereal *dx, integer *incx, doublereal *dy, integer *incy, doublereal *c__, doublereal *s) { /* System generated locals */ integer i__1; /* Local variables */ static integer i__; static doublereal dtemp; static integer ix, iy; /* Purpose ======= applies a plane rotation. jack dongarra, linpack, 3/11/78. modified 12/3/93, array(1) declarations changed to array(*) */ /* Parameter adjustments */ --dy; --dx; /* Function Body */ if (*n <= 0) { return 0; } if (*incx == 1 && *incy == 1) { goto L20; } /* code for unequal increments or equal increments not equal to 1 */ ix = 1; iy = 1; if (*incx < 0) { ix = (-(*n) + 1) * *incx + 1; } if (*incy < 0) { iy = (-(*n) + 1) * *incy + 1; } i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { dtemp = *c__ * dx[ix] + *s * dy[iy]; dy[iy] = *c__ * dy[iy] - *s * dx[ix]; dx[ix] = dtemp; ix += *incx; iy += *incy; /* L10: */ } return 0; /* code for both increments equal to 1 */ L20: i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { dtemp = *c__ * dx[i__] + *s * dy[i__]; dy[i__] = *c__ * dy[i__] - *s * dx[i__]; dx[i__] = dtemp; /* L30: */ } return 0; } /* drot_ */ /* Subroutine */ int drotg_(doublereal *da, doublereal *db, doublereal *c__, doublereal *s) { /* System generated locals */ doublereal d__1, d__2; /* Builtin functions */ double sqrt(doublereal), d_sign(doublereal *, doublereal *); /* Local variables */ static doublereal r__, scale, z__, roe; /* Purpose ======= construct givens plane rotation. jack dongarra, linpack, 3/11/78. */ roe = *db; if (abs(*da) > abs(*db)) { roe = *da; } scale = abs(*da) + abs(*db); if (scale != 0.) { goto L10; } *c__ = 1.; *s = 0.; r__ = 0.; z__ = 0.; goto L20; L10: /* Computing 2nd power */ d__1 = *da / scale; /* Computing 2nd power */ d__2 = *db / scale; r__ = scale * sqrt(d__1 * d__1 + d__2 * d__2); r__ = d_sign(&c_b90, &roe) * r__; *c__ = *da / r__; *s = *db / r__; z__ = 1.; if (abs(*da) > abs(*db)) { z__ = *s; } if (abs(*db) >= abs(*da) && *c__ != 0.) { z__ = 1. 
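/* Editorial note, not part of the reference f2c translation: minimal usage
   sketches for the vector routines documented above, with hypothetical
   caller-side names and the f2c typedefs from this file:

       integer n = 3, incx = 1, incy = 1;
       doublereal x[3] = {3., 0., 4.};
       doublereal nrm = dnrm2_(&n, x, &incx);    (nrm == 5.)

       doublereal dx[3] = {1., 0., 0.}, dy[3] = {0., 1., 0.};
       doublereal c = 0., s = 1.;
       drot_(&n, dx, &incx, dy, &incy, &c, &s);  (applies the plane rotation in place)
*/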
/ *c__; } L20: *da = r__; *db = z__; return 0; } /* drotg_ */ /* Subroutine */ int drotm_(integer *n, doublereal *dx, integer *incx, doublereal *dy, integer *incy, doublereal *dparam) { /* Initialized data */ static doublereal zero = 0.; static doublereal two = 2.; /* System generated locals */ integer i__1, i__2; /* Local variables */ static integer i__; static doublereal dflag, w, z__; static integer kx, ky, nsteps; static doublereal dh11, dh12, dh21, dh22; /* Purpose ======= APPLY THE MODIFIED GIVENS TRANSFORMATION, H, TO THE 2 BY N MATRIX (DX**T) , WHERE **T INDICATES TRANSPOSE. THE ELEMENTS OF DX ARE IN (DY**T) DX(LX+I*INCX), I = 0 TO N-1, WHERE LX = 1 IF INCX .GE. 0, ELSE LX = (-INCX)*N, AND SIMILARLY FOR SY USING LY AND INCY. WITH DPARAM(1)=DFLAG, H HAS ONE OF THE FOLLOWING FORMS.. DFLAG=-1.D0 DFLAG=0.D0 DFLAG=1.D0 DFLAG=-2.D0 (DH11 DH12) (1.D0 DH12) (DH11 1.D0) (1.D0 0.D0) H=( ) ( ) ( ) ( ) (DH21 DH22), (DH21 1.D0), (-1.D0 DH22), (0.D0 1.D0). SEE DROTMG FOR A DESCRIPTION OF DATA STORAGE IN DPARAM. Arguments ========= N (input) INTEGER number of elements in input vector(s) DX (input/output) DOUBLE PRECISION array, dimension N double precision vector with 5 elements INCX (input) INTEGER storage spacing between elements of DX DY (input/output) DOUBLE PRECISION array, dimension N double precision vector with N elements INCY (input) INTEGER storage spacing between elements of DY DPARAM (input/output) DOUBLE PRECISION array, dimension 5 DPARAM(1)=DFLAG DPARAM(2)=DH11 DPARAM(3)=DH21 DPARAM(4)=DH12 DPARAM(5)=DH22 ===================================================================== */ /* Parameter adjustments */ --dparam; --dy; --dx; /* Function Body */ dflag = dparam[1]; if (*n <= 0 || dflag + two == zero) { goto L140; } if (! (*incx == *incy && *incx > 0)) { goto L70; } nsteps = *n * *incx; if (dflag < 0.) { goto L50; } else if (dflag == 0) { goto L10; } else { goto L30; } L10: dh12 = dparam[4]; dh21 = dparam[3]; i__1 = nsteps; i__2 = *incx; for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) { w = dx[i__]; z__ = dy[i__]; dx[i__] = w + z__ * dh12; dy[i__] = w * dh21 + z__; /* L20: */ } goto L140; L30: dh11 = dparam[2]; dh22 = dparam[5]; i__2 = nsteps; i__1 = *incx; for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ += i__1) { w = dx[i__]; z__ = dy[i__]; dx[i__] = w * dh11 + z__; dy[i__] = -w + dh22 * z__; /* L40: */ } goto L140; L50: dh11 = dparam[2]; dh12 = dparam[4]; dh21 = dparam[3]; dh22 = dparam[5]; i__1 = nsteps; i__2 = *incx; for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) { w = dx[i__]; z__ = dy[i__]; dx[i__] = w * dh11 + z__ * dh12; dy[i__] = w * dh21 + z__ * dh22; /* L60: */ } goto L140; L70: kx = 1; ky = 1; if (*incx < 0) { kx = (1 - *n) * *incx + 1; } if (*incy < 0) { ky = (1 - *n) * *incy + 1; } if (dflag < 0.) 
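/* Editorial note, not part of the reference f2c translation: a minimal sketch of
   applying a modified Givens transformation H (encoded in dparam, typically
   produced by drotmg_) to the vectors dx and dy; names are hypothetical.

       integer n = 2, incx = 1, incy = 1;
       doublereal dx[2] = {1., 2.}, dy[2] = {3., 4.};
       doublereal dparam[5] = {-2., 0., 0., 0., 0.};  (dflag = -2: H is the identity)
       drotm_(&n, dx, &incx, dy, &incy, dparam);
*/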
{ goto L120; } else if (dflag == 0) { goto L80; } else { goto L100; } L80: dh12 = dparam[4]; dh21 = dparam[3]; i__2 = *n; for (i__ = 1; i__ <= i__2; ++i__) { w = dx[kx]; z__ = dy[ky]; dx[kx] = w + z__ * dh12; dy[ky] = w * dh21 + z__; kx += *incx; ky += *incy; /* L90: */ } goto L140; L100: dh11 = dparam[2]; dh22 = dparam[5]; i__2 = *n; for (i__ = 1; i__ <= i__2; ++i__) { w = dx[kx]; z__ = dy[ky]; dx[kx] = w * dh11 + z__; dy[ky] = -w + dh22 * z__; kx += *incx; ky += *incy; /* L110: */ } goto L140; L120: dh11 = dparam[2]; dh12 = dparam[4]; dh21 = dparam[3]; dh22 = dparam[5]; i__2 = *n; for (i__ = 1; i__ <= i__2; ++i__) { w = dx[kx]; z__ = dy[ky]; dx[kx] = w * dh11 + z__ * dh12; dy[ky] = w * dh21 + z__ * dh22; kx += *incx; ky += *incy; /* L130: */ } L140: return 0; } /* drotm_ */ /* Subroutine */ int drotmg_(doublereal *dd1, doublereal *dd2, doublereal * dx1, doublereal *dy1, doublereal *dparam) { /* Initialized data */ static doublereal zero = 0.; static doublereal one = 1.; static doublereal two = 2.; static doublereal gam = 4096.; static doublereal gamsq = 16777216.; static doublereal rgamsq = 5.9604645e-8; /* Format strings */ static char fmt_120[] = ""; static char fmt_150[] = ""; static char fmt_180[] = ""; static char fmt_210[] = ""; /* System generated locals */ doublereal d__1; /* Local variables */ static doublereal dflag, dtemp, du, dp1, dp2, dq1, dq2, dh11, dh12, dh21, dh22; static integer igo; /* Assigned format variables */ static char *igo_fmt; /* Purpose ======= CONSTRUCT THE MODIFIED GIVENS TRANSFORMATION MATRIX H WHICH ZEROS THE SECOND COMPONENT OF THE 2-VECTOR (DSQRT(DD1)*DX1,DSQRT(DD2)* DY2)**T. WITH DPARAM(1)=DFLAG, H HAS ONE OF THE FOLLOWING FORMS.. DFLAG=-1.D0 DFLAG=0.D0 DFLAG=1.D0 DFLAG=-2.D0 (DH11 DH12) (1.D0 DH12) (DH11 1.D0) (1.D0 0.D0) H=( ) ( ) ( ) ( ) (DH21 DH22), (DH21 1.D0), (-1.D0 DH22), (0.D0 1.D0). LOCATIONS 2-4 OF DPARAM CONTAIN DH11, DH21, DH12, AND DH22 RESPECTIVELY. (VALUES OF 1.D0, -1.D0, OR 0.D0 IMPLIED BY THE VALUE OF DPARAM(1) ARE NOT STORED IN DPARAM.) THE VALUES OF GAMSQ AND RGAMSQ SET IN THE DATA STATEMENT MAY BE INEXACT. THIS IS OK AS THEY ARE ONLY USED FOR TESTING THE SIZE OF DD1 AND DD2. ALL ACTUAL SCALING OF DATA IS DONE USING GAM. Arguments ========= DD1 (input/output) DOUBLE PRECISION DD2 (input/output) DOUBLE PRECISION DX1 (input/output) DOUBLE PRECISION DY1 (input) DOUBLE PRECISION DPARAM (input/output) DOUBLE PRECISION array, dimension 5 DPARAM(1)=DFLAG DPARAM(2)=DH11 DPARAM(3)=DH21 DPARAM(4)=DH12 DPARAM(5)=DH22 ===================================================================== */ /* Parameter adjustments */ --dparam; /* Function Body */ if (! (*dd1 < zero)) { goto L10; } /* GO ZERO-H-D-AND-DX1.. */ goto L60; L10: /* CASE-DD1-NONNEGATIVE */ dp2 = *dd2 * *dy1; if (! (dp2 == zero)) { goto L20; } dflag = -two; goto L260; /* REGULAR-CASE.. */ L20: dp1 = *dd1 * *dx1; dq2 = dp2 * *dy1; dq1 = dp1 * *dx1; if (! (abs(dq1) > abs(dq2))) { goto L40; } dh21 = -(*dy1) / *dx1; dh12 = dp2 / dp1; du = one - dh12 * dh21; if (! (du <= zero)) { goto L30; } /* GO ZERO-H-D-AND-DX1.. */ goto L60; L30: dflag = zero; *dd1 /= du; *dd2 /= du; *dx1 *= du; /* GO SCALE-CHECK.. */ goto L100; L40: if (! (dq2 < zero)) { goto L50; } /* GO ZERO-H-D-AND-DX1.. */ goto L60; L50: dflag = one; dh11 = dp1 / dp2; dh22 = *dx1 / *dy1; du = one + dh11 * dh22; dtemp = *dd2 / du; *dd2 = *dd1 / du; *dd1 = dtemp; *dx1 = *dy1 * du; /* GO SCALE-CHECK */ goto L100; /* PROCEDURE..ZERO-H-D-AND-DX1.. 
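Editorial usage note (not part of the reference source): drotmg_ is normally
called once to build dparam, which drotm_ then applies; for example, with
hypothetical names: doublereal dd1 = 1., dd2 = 1., dx1 = 3., dy1 = 4., dparam[5];
drotmg_(&dd1, &dd2, &dx1, &dy1, dparam);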
*/ L60: dflag = -one; dh11 = zero; dh12 = zero; dh21 = zero; dh22 = zero; *dd1 = zero; *dd2 = zero; *dx1 = zero; /* RETURN.. */ goto L220; /* PROCEDURE..FIX-H.. */ L70: if (! (dflag >= zero)) { goto L90; } if (! (dflag == zero)) { goto L80; } dh11 = one; dh22 = one; dflag = -one; goto L90; L80: dh21 = -one; dh12 = one; dflag = -one; L90: switch (igo) { case 0: goto L120; case 1: goto L150; case 2: goto L180; case 3: goto L210; } /* PROCEDURE..SCALE-CHECK */ L100: L110: if (! (*dd1 <= rgamsq)) { goto L130; } if (*dd1 == zero) { goto L160; } igo = 0; igo_fmt = fmt_120; /* FIX-H.. */ goto L70; L120: /* Computing 2nd power */ d__1 = gam; *dd1 *= d__1 * d__1; *dx1 /= gam; dh11 /= gam; dh12 /= gam; goto L110; L130: L140: if (! (*dd1 >= gamsq)) { goto L160; } igo = 1; igo_fmt = fmt_150; /* FIX-H.. */ goto L70; L150: /* Computing 2nd power */ d__1 = gam; *dd1 /= d__1 * d__1; *dx1 *= gam; dh11 *= gam; dh12 *= gam; goto L140; L160: L170: if (! (abs(*dd2) <= rgamsq)) { goto L190; } if (*dd2 == zero) { goto L220; } igo = 2; igo_fmt = fmt_180; /* FIX-H.. */ goto L70; L180: /* Computing 2nd power */ d__1 = gam; *dd2 *= d__1 * d__1; dh21 /= gam; dh22 /= gam; goto L170; L190: L200: if (! (abs(*dd2) >= gamsq)) { goto L220; } igo = 3; igo_fmt = fmt_210; /* FIX-H.. */ goto L70; L210: /* Computing 2nd power */ d__1 = gam; *dd2 /= d__1 * d__1; dh21 *= gam; dh22 *= gam; goto L200; L220: if (dflag < 0.) { goto L250; } else if (dflag == 0) { goto L230; } else { goto L240; } L230: dparam[3] = dh21; dparam[4] = dh12; goto L260; L240: dparam[2] = dh11; dparam[5] = dh22; goto L260; L250: dparam[2] = dh11; dparam[3] = dh21; dparam[4] = dh12; dparam[5] = dh22; L260: dparam[1] = dflag; return 0; } /* drotmg_ */ /* Subroutine */ int dscal_(integer *n, doublereal *da, doublereal *dx, integer *incx) { /* System generated locals */ integer i__1, i__2; /* Local variables */ static integer i__, m, nincx, mp1; /* Purpose ======= * scales a vector by a constant. uses unrolled loops for increment equal to one. jack dongarra, linpack, 3/11/78. modified 3/93 to return if incx .le. 0. modified 12/3/93, array(1) declarations changed to array(*) */ /* Parameter adjustments */ --dx; /* Function Body */ if (*n <= 0 || *incx <= 0) { return 0; } if (*incx == 1) { goto L20; } /* code for increment not equal to 1 */ nincx = *n * *incx; i__1 = nincx; i__2 = *incx; for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) { dx[i__] = *da * dx[i__]; /* L10: */ } return 0; /* code for increment equal to 1 clean-up loop */ L20: m = *n % 5; if (m == 0) { goto L40; } i__2 = m; for (i__ = 1; i__ <= i__2; ++i__) { dx[i__] = *da * dx[i__]; /* L30: */ } if (*n < 5) { return 0; } L40: mp1 = m + 1; i__2 = *n; for (i__ = mp1; i__ <= i__2; i__ += 5) { dx[i__] = *da * dx[i__]; dx[i__ + 1] = *da * dx[i__ + 1]; dx[i__ + 2] = *da * dx[i__ + 2]; dx[i__ + 3] = *da * dx[i__ + 3]; dx[i__ + 4] = *da * dx[i__ + 4]; /* L50: */ } return 0; } /* dscal_ */ /* Subroutine */ int dswap_(integer *n, doublereal *dx, integer *incx, doublereal *dy, integer *incy) { /* System generated locals */ integer i__1; /* Local variables */ static integer i__, m; static doublereal dtemp; static integer ix, iy, mp1; /* Purpose ======= interchanges two vectors. uses unrolled loops for increments equal one. jack dongarra, linpack, 3/11/78. 
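Editorial usage note (not part of the reference source), with hypothetical
caller-side names: dscal_ scales a vector in place and dswap_ exchanges two
vectors, e.g.
    integer n = 3, inc = 1;
    doublereal a = 2., x[3] = {1., 2., 3.}, y[3] = {0., 0., 0.};
    dscal_(&n, &a, x, &inc);
    dswap_(&n, x, &inc, y, &inc);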
modified 12/3/93, array(1) declarations changed to array(*) */ /* Parameter adjustments */ --dy; --dx; /* Function Body */ if (*n <= 0) { return 0; } if (*incx == 1 && *incy == 1) { goto L20; } /* code for unequal increments or equal increments not equal to 1 */ ix = 1; iy = 1; if (*incx < 0) { ix = (-(*n) + 1) * *incx + 1; } if (*incy < 0) { iy = (-(*n) + 1) * *incy + 1; } i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { dtemp = dx[ix]; dx[ix] = dy[iy]; dy[iy] = dtemp; ix += *incx; iy += *incy; /* L10: */ } return 0; /* code for both increments equal to 1 clean-up loop */ L20: m = *n % 3; if (m == 0) { goto L40; } i__1 = m; for (i__ = 1; i__ <= i__1; ++i__) { dtemp = dx[i__]; dx[i__] = dy[i__]; dy[i__] = dtemp; /* L30: */ } if (*n < 3) { return 0; } L40: mp1 = m + 1; i__1 = *n; for (i__ = mp1; i__ <= i__1; i__ += 3) { dtemp = dx[i__]; dx[i__] = dy[i__]; dy[i__] = dtemp; dtemp = dx[i__ + 1]; dx[i__ + 1] = dy[i__ + 1]; dy[i__ + 1] = dtemp; dtemp = dx[i__ + 2]; dx[i__ + 2] = dy[i__ + 2]; dy[i__ + 2] = dtemp; /* L50: */ } return 0; } /* dswap_ */ /* Subroutine */ int dsymm_(char *side, char *uplo, integer *m, integer *n, doublereal *alpha, doublereal *a, integer *lda, doublereal *b, integer *ldb, doublereal *beta, doublereal *c__, integer *ldc) { /* System generated locals */ integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, i__1, i__2, i__3; /* Local variables */ static integer info; static doublereal temp1, temp2; static integer i__, j, k; extern logical lsame_(char *, char *); static integer nrowa; static logical upper; extern /* Subroutine */ int xerbla_(char *, integer *); /* Purpose ======= DSYMM performs one of the matrix-matrix operations C := alpha*A*B + beta*C, or C := alpha*B*A + beta*C, where alpha and beta are scalars, A is a symmetric matrix and B and C are m by n matrices. Arguments ========== SIDE - CHARACTER*1. On entry, SIDE specifies whether the symmetric matrix A appears on the left or right in the operation as follows: SIDE = 'L' or 'l' C := alpha*A*B + beta*C, SIDE = 'R' or 'r' C := alpha*B*A + beta*C, Unchanged on exit. UPLO - CHARACTER*1. On entry, UPLO specifies whether the upper or lower triangular part of the symmetric matrix A is to be referenced as follows: UPLO = 'U' or 'u' Only the upper triangular part of the symmetric matrix is to be referenced. UPLO = 'L' or 'l' Only the lower triangular part of the symmetric matrix is to be referenced. Unchanged on exit. M - INTEGER. On entry, M specifies the number of rows of the matrix C. M must be at least zero. Unchanged on exit. N - INTEGER. On entry, N specifies the number of columns of the matrix C. N must be at least zero. Unchanged on exit. ALPHA - DOUBLE PRECISION. On entry, ALPHA specifies the scalar alpha. Unchanged on exit. A - DOUBLE PRECISION array of DIMENSION ( LDA, ka ), where ka is m when SIDE = 'L' or 'l' and is n otherwise. Before entry with SIDE = 'L' or 'l', the m by m part of the array A must contain the symmetric matrix, such that when UPLO = 'U' or 'u', the leading m by m upper triangular part of the array A must contain the upper triangular part of the symmetric matrix and the strictly lower triangular part of A is not referenced, and when UPLO = 'L' or 'l', the leading m by m lower triangular part of the array A must contain the lower triangular part of the symmetric matrix and the strictly upper triangular part of A is not referenced. 
Before entry with SIDE = 'R' or 'r', the n by n part of the array A must contain the symmetric matrix, such that when UPLO = 'U' or 'u', the leading n by n upper triangular part of the array A must contain the upper triangular part of the symmetric matrix and the strictly lower triangular part of A is not referenced, and when UPLO = 'L' or 'l', the leading n by n lower triangular part of the array A must contain the lower triangular part of the symmetric matrix and the strictly upper triangular part of A is not referenced. Unchanged on exit. LDA - INTEGER. On entry, LDA specifies the first dimension of A as declared in the calling (sub) program. When SIDE = 'L' or 'l' then LDA must be at least max( 1, m ), otherwise LDA must be at least max( 1, n ). Unchanged on exit. B - DOUBLE PRECISION array of DIMENSION ( LDB, n ). Before entry, the leading m by n part of the array B must contain the matrix B. Unchanged on exit. LDB - INTEGER. On entry, LDB specifies the first dimension of B as declared in the calling (sub) program. LDB must be at least max( 1, m ). Unchanged on exit. BETA - DOUBLE PRECISION. On entry, BETA specifies the scalar beta. When BETA is supplied as zero then C need not be set on input. Unchanged on exit. C - DOUBLE PRECISION array of DIMENSION ( LDC, n ). Before entry, the leading m by n part of the array C must contain the matrix C, except when beta is zero, in which case C need not be set on entry. On exit, the array C is overwritten by the m by n updated matrix. LDC - INTEGER. On entry, LDC specifies the first dimension of C as declared in the calling (sub) program. LDC must be at least max( 1, m ). Unchanged on exit. Level 3 Blas routine. -- Written on 8-February-1989. Jack Dongarra, Argonne National Laboratory. Iain Duff, AERE Harwell. Jeremy Du Croz, Numerical Algorithms Group Ltd. Sven Hammarling, Numerical Algorithms Group Ltd. Set NROWA as the number of rows of A. */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; b_dim1 = *ldb; b_offset = 1 + b_dim1 * 1; b -= b_offset; c_dim1 = *ldc; c_offset = 1 + c_dim1 * 1; c__ -= c_offset; /* Function Body */ if (lsame_(side, "L")) { nrowa = *m; } else { nrowa = *n; } upper = lsame_(uplo, "U"); /* Test the input parameters. */ info = 0; if (! lsame_(side, "L") && ! lsame_(side, "R")) { info = 1; } else if (! upper && ! lsame_(uplo, "L")) { info = 2; } else if (*m < 0) { info = 3; } else if (*n < 0) { info = 4; } else if (*lda < max(1,nrowa)) { info = 7; } else if (*ldb < max(1,*m)) { info = 9; } else if (*ldc < max(1,*m)) { info = 12; } if (info != 0) { xerbla_("DSYMM ", &info); return 0; } /* Quick return if possible. */ if (*m == 0 || *n == 0 || *alpha == 0. && *beta == 1.) { return 0; } /* And when alpha.eq.zero. */ if (*alpha == 0.) { if (*beta == 0.) { i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { c__[i__ + j * c_dim1] = 0.; /* L10: */ } /* L20: */ } } else { i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1]; /* L30: */ } /* L40: */ } } return 0; } /* Start the operations. */ if (lsame_(side, "L")) { /* Form C := alpha*A*B + beta*C. 
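Editorial usage note (not part of the reference source): a minimal sketch of a
left-sided symmetric multiply C := alpha*A*B + beta*C, with hypothetical names
and only the upper triangle of the symmetric A referenced (UPLO = 'U').

    integer m = 2, n = 2, lda = 2, ldb = 2, ldc = 2;
    doublereal alpha = 1., beta = 0.;
    doublereal A[4] = {2., 0., 1., 3.};   (upper triangle of a 2 x 2 symmetric matrix)
    doublereal B[4] = {1., 0., 0., 1.};
    doublereal C[4];
    dsymm_("L", "U", &m, &n, &alpha, A, &lda, B, &ldb, &beta, C, &ldc);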
*/ if (upper) { i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { temp1 = *alpha * b[i__ + j * b_dim1]; temp2 = 0.; i__3 = i__ - 1; for (k = 1; k <= i__3; ++k) { c__[k + j * c_dim1] += temp1 * a[k + i__ * a_dim1]; temp2 += b[k + j * b_dim1] * a[k + i__ * a_dim1]; /* L50: */ } if (*beta == 0.) { c__[i__ + j * c_dim1] = temp1 * a[i__ + i__ * a_dim1] + *alpha * temp2; } else { c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1] + temp1 * a[i__ + i__ * a_dim1] + *alpha * temp2; } /* L60: */ } /* L70: */ } } else { i__1 = *n; for (j = 1; j <= i__1; ++j) { for (i__ = *m; i__ >= 1; --i__) { temp1 = *alpha * b[i__ + j * b_dim1]; temp2 = 0.; i__2 = *m; for (k = i__ + 1; k <= i__2; ++k) { c__[k + j * c_dim1] += temp1 * a[k + i__ * a_dim1]; temp2 += b[k + j * b_dim1] * a[k + i__ * a_dim1]; /* L80: */ } if (*beta == 0.) { c__[i__ + j * c_dim1] = temp1 * a[i__ + i__ * a_dim1] + *alpha * temp2; } else { c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1] + temp1 * a[i__ + i__ * a_dim1] + *alpha * temp2; } /* L90: */ } /* L100: */ } } } else { /* Form C := alpha*B*A + beta*C. */ i__1 = *n; for (j = 1; j <= i__1; ++j) { temp1 = *alpha * a[j + j * a_dim1]; if (*beta == 0.) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { c__[i__ + j * c_dim1] = temp1 * b[i__ + j * b_dim1]; /* L110: */ } } else { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1] + temp1 * b[i__ + j * b_dim1]; /* L120: */ } } i__2 = j - 1; for (k = 1; k <= i__2; ++k) { if (upper) { temp1 = *alpha * a[k + j * a_dim1]; } else { temp1 = *alpha * a[j + k * a_dim1]; } i__3 = *m; for (i__ = 1; i__ <= i__3; ++i__) { c__[i__ + j * c_dim1] += temp1 * b[i__ + k * b_dim1]; /* L130: */ } /* L140: */ } i__2 = *n; for (k = j + 1; k <= i__2; ++k) { if (upper) { temp1 = *alpha * a[j + k * a_dim1]; } else { temp1 = *alpha * a[k + j * a_dim1]; } i__3 = *m; for (i__ = 1; i__ <= i__3; ++i__) { c__[i__ + j * c_dim1] += temp1 * b[i__ + k * b_dim1]; /* L150: */ } /* L160: */ } /* L170: */ } } return 0; /* End of DSYMM . */ } /* dsymm_ */ /* Subroutine */ int dsymv_(char *uplo, integer *n, doublereal *alpha, doublereal *a, integer *lda, doublereal *x, integer *incx, doublereal *beta, doublereal *y, integer *incy) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2; /* Local variables */ static integer info; static doublereal temp1, temp2; static integer i__, j; extern logical lsame_(char *, char *); static integer ix, iy, jx, jy, kx, ky; extern /* Subroutine */ int xerbla_(char *, integer *); /* Purpose ======= DSYMV performs the matrix-vector operation y := alpha*A*x + beta*y, where alpha and beta are scalars, x and y are n element vectors and A is an n by n symmetric matrix. Arguments ========== UPLO - CHARACTER*1. On entry, UPLO specifies whether the upper or lower triangular part of the array A is to be referenced as follows: UPLO = 'U' or 'u' Only the upper triangular part of A is to be referenced. UPLO = 'L' or 'l' Only the lower triangular part of A is to be referenced. Unchanged on exit. N - INTEGER. On entry, N specifies the order of the matrix A. N must be at least zero. Unchanged on exit. ALPHA - DOUBLE PRECISION. On entry, ALPHA specifies the scalar alpha. Unchanged on exit. A - DOUBLE PRECISION array of DIMENSION ( LDA, n ). Before entry with UPLO = 'U' or 'u', the leading n by n upper triangular part of the array A must contain the upper triangular part of the symmetric matrix and the strictly lower triangular part of A is not referenced. 
Before entry with UPLO = 'L' or 'l', the leading n by n lower triangular part of the array A must contain the lower triangular part of the symmetric matrix and the strictly upper triangular part of A is not referenced. Unchanged on exit. LDA - INTEGER. On entry, LDA specifies the first dimension of A as declared in the calling (sub) program. LDA must be at least max( 1, n ). Unchanged on exit. X - DOUBLE PRECISION array of dimension at least ( 1 + ( n - 1 )*abs( INCX ) ). Before entry, the incremented array X must contain the n element vector x. Unchanged on exit. INCX - INTEGER. On entry, INCX specifies the increment for the elements of X. INCX must not be zero. Unchanged on exit. BETA - DOUBLE PRECISION. On entry, BETA specifies the scalar beta. When BETA is supplied as zero then Y need not be set on input. Unchanged on exit. Y - DOUBLE PRECISION array of dimension at least ( 1 + ( n - 1 )*abs( INCY ) ). Before entry, the incremented array Y must contain the n element vector y. On exit, Y is overwritten by the updated vector y. INCY - INTEGER. On entry, INCY specifies the increment for the elements of Y. INCY must not be zero. Unchanged on exit. Level 2 Blas routine. -- Written on 22-October-1986. Jack Dongarra, Argonne National Lab. Jeremy Du Croz, Nag Central Office. Sven Hammarling, Nag Central Office. Richard Hanson, Sandia National Labs. Test the input parameters. */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --x; --y; /* Function Body */ info = 0; if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) { info = 1; } else if (*n < 0) { info = 2; } else if (*lda < max(1,*n)) { info = 5; } else if (*incx == 0) { info = 7; } else if (*incy == 0) { info = 10; } if (info != 0) { xerbla_("DSYMV ", &info); return 0; } /* Quick return if possible. */ if (*n == 0 || *alpha == 0. && *beta == 1.) { return 0; } /* Set up the start points in X and Y. */ if (*incx > 0) { kx = 1; } else { kx = 1 - (*n - 1) * *incx; } if (*incy > 0) { ky = 1; } else { ky = 1 - (*n - 1) * *incy; } /* Start the operations. In this version the elements of A are accessed sequentially with one pass through the triangular part of A. First form y := beta*y. */ if (*beta != 1.) { if (*incy == 1) { if (*beta == 0.) { i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { y[i__] = 0.; /* L10: */ } } else { i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { y[i__] = *beta * y[i__]; /* L20: */ } } } else { iy = ky; if (*beta == 0.) { i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { y[iy] = 0.; iy += *incy; /* L30: */ } } else { i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { y[iy] = *beta * y[iy]; iy += *incy; /* L40: */ } } } } if (*alpha == 0.) { return 0; } if (lsame_(uplo, "U")) { /* Form y when A is stored in upper triangle. */ if (*incx == 1 && *incy == 1) { i__1 = *n; for (j = 1; j <= i__1; ++j) { temp1 = *alpha * x[j]; temp2 = 0.; i__2 = j - 1; for (i__ = 1; i__ <= i__2; ++i__) { y[i__] += temp1 * a[i__ + j * a_dim1]; temp2 += a[i__ + j * a_dim1] * x[i__]; /* L50: */ } y[j] = y[j] + temp1 * a[j + j * a_dim1] + *alpha * temp2; /* L60: */ } } else { jx = kx; jy = ky; i__1 = *n; for (j = 1; j <= i__1; ++j) { temp1 = *alpha * x[jx]; temp2 = 0.; ix = kx; iy = ky; i__2 = j - 1; for (i__ = 1; i__ <= i__2; ++i__) { y[iy] += temp1 * a[i__ + j * a_dim1]; temp2 += a[i__ + j * a_dim1] * x[ix]; ix += *incx; iy += *incy; /* L70: */ } y[jy] = y[jy] + temp1 * a[j + j * a_dim1] + *alpha * temp2; jx += *incx; jy += *incy; /* L80: */ } } } else { /* Form y when A is stored in lower triangle. 
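Editorial usage note (not part of the reference source): a minimal sketch of
y := alpha*A*x + beta*y for a symmetric A stored in its upper triangle, using
hypothetical caller-side names and the f2c typedefs from this file.

    integer n = 2, lda = 2, incx = 1, incy = 1;
    doublereal alpha = 1., beta = 0.;
    doublereal A[4] = {2., 0., 1., 3.};   (upper triangle referenced with UPLO = 'U')
    doublereal x[2] = {1., 1.};
    doublereal y[2];
    dsymv_("U", &n, &alpha, A, &lda, x, &incx, &beta, y, &incy);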
*/ if (*incx == 1 && *incy == 1) { i__1 = *n; for (j = 1; j <= i__1; ++j) { temp1 = *alpha * x[j]; temp2 = 0.; y[j] += temp1 * a[j + j * a_dim1]; i__2 = *n; for (i__ = j + 1; i__ <= i__2; ++i__) { y[i__] += temp1 * a[i__ + j * a_dim1]; temp2 += a[i__ + j * a_dim1] * x[i__]; /* L90: */ } y[j] += *alpha * temp2; /* L100: */ } } else { jx = kx; jy = ky; i__1 = *n; for (j = 1; j <= i__1; ++j) { temp1 = *alpha * x[jx]; temp2 = 0.; y[jy] += temp1 * a[j + j * a_dim1]; ix = jx; iy = jy; i__2 = *n; for (i__ = j + 1; i__ <= i__2; ++i__) { ix += *incx; iy += *incy; y[iy] += temp1 * a[i__ + j * a_dim1]; temp2 += a[i__ + j * a_dim1] * x[ix]; /* L110: */ } y[jy] += *alpha * temp2; jx += *incx; jy += *incy; /* L120: */ } } } return 0; /* End of DSYMV . */ } /* dsymv_ */ /* Subroutine */ int dsyr_(char *uplo, integer *n, doublereal *alpha, doublereal *x, integer *incx, doublereal *a, integer *lda) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2; /* Local variables */ static integer info; static doublereal temp; static integer i__, j; extern logical lsame_(char *, char *); static integer ix, jx, kx; extern /* Subroutine */ int xerbla_(char *, integer *); /* Purpose ======= DSYR performs the symmetric rank 1 operation A := alpha*x*x' + A, where alpha is a real scalar, x is an n element vector and A is an n by n symmetric matrix. Arguments ========== UPLO - CHARACTER*1. On entry, UPLO specifies whether the upper or lower triangular part of the array A is to be referenced as follows: UPLO = 'U' or 'u' Only the upper triangular part of A is to be referenced. UPLO = 'L' or 'l' Only the lower triangular part of A is to be referenced. Unchanged on exit. N - INTEGER. On entry, N specifies the order of the matrix A. N must be at least zero. Unchanged on exit. ALPHA - DOUBLE PRECISION. On entry, ALPHA specifies the scalar alpha. Unchanged on exit. X - DOUBLE PRECISION array of dimension at least ( 1 + ( n - 1 )*abs( INCX ) ). Before entry, the incremented array X must contain the n element vector x. Unchanged on exit. INCX - INTEGER. On entry, INCX specifies the increment for the elements of X. INCX must not be zero. Unchanged on exit. A - DOUBLE PRECISION array of DIMENSION ( LDA, n ). Before entry with UPLO = 'U' or 'u', the leading n by n upper triangular part of the array A must contain the upper triangular part of the symmetric matrix and the strictly lower triangular part of A is not referenced. On exit, the upper triangular part of the array A is overwritten by the upper triangular part of the updated matrix. Before entry with UPLO = 'L' or 'l', the leading n by n lower triangular part of the array A must contain the lower triangular part of the symmetric matrix and the strictly upper triangular part of A is not referenced. On exit, the lower triangular part of the array A is overwritten by the lower triangular part of the updated matrix. LDA - INTEGER. On entry, LDA specifies the first dimension of A as declared in the calling (sub) program. LDA must be at least max( 1, n ). Unchanged on exit. Level 2 Blas routine. -- Written on 22-October-1986. Jack Dongarra, Argonne National Lab. Jeremy Du Croz, Nag Central Office. Sven Hammarling, Nag Central Office. Richard Hanson, Sandia National Labs. Test the input parameters. */ /* Parameter adjustments */ --x; a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; /* Function Body */ info = 0; if (! lsame_(uplo, "U") && ! 
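/* Editorial note, not part of the reference f2c translation: a minimal sketch of
   the symmetric rank-1 update A := alpha*x*x' + A (hypothetical names):

       integer n = 2, incx = 1, lda = 2;
       doublereal alpha = 1.;
       doublereal x[2] = {1., 2.};
       doublereal A[4] = {0., 0., 0., 0.};  (upper triangle updated with UPLO = 'U')
       dsyr_("U", &n, &alpha, x, &incx, A, &lda);
*/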
lsame_(uplo, "L")) { info = 1; } else if (*n < 0) { info = 2; } else if (*incx == 0) { info = 5; } else if (*lda < max(1,*n)) { info = 7; } if (info != 0) { xerbla_("DSYR ", &info); return 0; } /* Quick return if possible. */ if (*n == 0 || *alpha == 0.) { return 0; } /* Set the start point in X if the increment is not unity. */ if (*incx <= 0) { kx = 1 - (*n - 1) * *incx; } else if (*incx != 1) { kx = 1; } /* Start the operations. In this version the elements of A are accessed sequentially with one pass through the triangular part of A. */ if (lsame_(uplo, "U")) { /* Form A when A is stored in upper triangle. */ if (*incx == 1) { i__1 = *n; for (j = 1; j <= i__1; ++j) { if (x[j] != 0.) { temp = *alpha * x[j]; i__2 = j; for (i__ = 1; i__ <= i__2; ++i__) { a[i__ + j * a_dim1] += x[i__] * temp; /* L10: */ } } /* L20: */ } } else { jx = kx; i__1 = *n; for (j = 1; j <= i__1; ++j) { if (x[jx] != 0.) { temp = *alpha * x[jx]; ix = kx; i__2 = j; for (i__ = 1; i__ <= i__2; ++i__) { a[i__ + j * a_dim1] += x[ix] * temp; ix += *incx; /* L30: */ } } jx += *incx; /* L40: */ } } } else { /* Form A when A is stored in lower triangle. */ if (*incx == 1) { i__1 = *n; for (j = 1; j <= i__1; ++j) { if (x[j] != 0.) { temp = *alpha * x[j]; i__2 = *n; for (i__ = j; i__ <= i__2; ++i__) { a[i__ + j * a_dim1] += x[i__] * temp; /* L50: */ } } /* L60: */ } } else { jx = kx; i__1 = *n; for (j = 1; j <= i__1; ++j) { if (x[jx] != 0.) { temp = *alpha * x[jx]; ix = jx; i__2 = *n; for (i__ = j; i__ <= i__2; ++i__) { a[i__ + j * a_dim1] += x[ix] * temp; ix += *incx; /* L70: */ } } jx += *incx; /* L80: */ } } } return 0; /* End of DSYR . */ } /* dsyr_ */ /* Subroutine */ int dsyr2_(char *uplo, integer *n, doublereal *alpha, doublereal *x, integer *incx, doublereal *y, integer *incy, doublereal *a, integer *lda) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2; /* Local variables */ static integer info; static doublereal temp1, temp2; static integer i__, j; extern logical lsame_(char *, char *); static integer ix, iy, jx, jy, kx, ky; extern /* Subroutine */ int xerbla_(char *, integer *); /* Purpose ======= DSYR2 performs the symmetric rank 2 operation A := alpha*x*y' + alpha*y*x' + A, where alpha is a scalar, x and y are n element vectors and A is an n by n symmetric matrix. Arguments ========== UPLO - CHARACTER*1. On entry, UPLO specifies whether the upper or lower triangular part of the array A is to be referenced as follows: UPLO = 'U' or 'u' Only the upper triangular part of A is to be referenced. UPLO = 'L' or 'l' Only the lower triangular part of A is to be referenced. Unchanged on exit. N - INTEGER. On entry, N specifies the order of the matrix A. N must be at least zero. Unchanged on exit. ALPHA - DOUBLE PRECISION. On entry, ALPHA specifies the scalar alpha. Unchanged on exit. X - DOUBLE PRECISION array of dimension at least ( 1 + ( n - 1 )*abs( INCX ) ). Before entry, the incremented array X must contain the n element vector x. Unchanged on exit. INCX - INTEGER. On entry, INCX specifies the increment for the elements of X. INCX must not be zero. Unchanged on exit. Y - DOUBLE PRECISION array of dimension at least ( 1 + ( n - 1 )*abs( INCY ) ). Before entry, the incremented array Y must contain the n element vector y. Unchanged on exit. INCY - INTEGER. On entry, INCY specifies the increment for the elements of Y. INCY must not be zero. Unchanged on exit. A - DOUBLE PRECISION array of DIMENSION ( LDA, n ). 
Before entry with UPLO = 'U' or 'u', the leading n by n upper triangular part of the array A must contain the upper triangular part of the symmetric matrix and the strictly lower triangular part of A is not referenced. On exit, the upper triangular part of the array A is overwritten by the upper triangular part of the updated matrix. Before entry with UPLO = 'L' or 'l', the leading n by n lower triangular part of the array A must contain the lower triangular part of the symmetric matrix and the strictly upper triangular part of A is not referenced. On exit, the lower triangular part of the array A is overwritten by the lower triangular part of the updated matrix. LDA - INTEGER. On entry, LDA specifies the first dimension of A as declared in the calling (sub) program. LDA must be at least max( 1, n ). Unchanged on exit. Level 2 Blas routine. -- Written on 22-October-1986. Jack Dongarra, Argonne National Lab. Jeremy Du Croz, Nag Central Office. Sven Hammarling, Nag Central Office. Richard Hanson, Sandia National Labs. Test the input parameters. */ /* Parameter adjustments */ --x; --y; a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; /* Function Body */ info = 0; if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) { info = 1; } else if (*n < 0) { info = 2; } else if (*incx == 0) { info = 5; } else if (*incy == 0) { info = 7; } else if (*lda < max(1,*n)) { info = 9; } if (info != 0) { xerbla_("DSYR2 ", &info); return 0; } /* Quick return if possible. */ if (*n == 0 || *alpha == 0.) { return 0; } /* Set up the start points in X and Y if the increments are not both unity. */ if (*incx != 1 || *incy != 1) { if (*incx > 0) { kx = 1; } else { kx = 1 - (*n - 1) * *incx; } if (*incy > 0) { ky = 1; } else { ky = 1 - (*n - 1) * *incy; } jx = kx; jy = ky; } /* Start the operations. In this version the elements of A are accessed sequentially with one pass through the triangular part of A. */ if (lsame_(uplo, "U")) { /* Form A when A is stored in the upper triangle. */ if (*incx == 1 && *incy == 1) { i__1 = *n; for (j = 1; j <= i__1; ++j) { if (x[j] != 0. || y[j] != 0.) { temp1 = *alpha * y[j]; temp2 = *alpha * x[j]; i__2 = j; for (i__ = 1; i__ <= i__2; ++i__) { a[i__ + j * a_dim1] = a[i__ + j * a_dim1] + x[i__] * temp1 + y[i__] * temp2; /* L10: */ } } /* L20: */ } } else { i__1 = *n; for (j = 1; j <= i__1; ++j) { if (x[jx] != 0. || y[jy] != 0.) { temp1 = *alpha * y[jy]; temp2 = *alpha * x[jx]; ix = kx; iy = ky; i__2 = j; for (i__ = 1; i__ <= i__2; ++i__) { a[i__ + j * a_dim1] = a[i__ + j * a_dim1] + x[ix] * temp1 + y[iy] * temp2; ix += *incx; iy += *incy; /* L30: */ } } jx += *incx; jy += *incy; /* L40: */ } } } else { /* Form A when A is stored in the lower triangle. */ if (*incx == 1 && *incy == 1) { i__1 = *n; for (j = 1; j <= i__1; ++j) { if (x[j] != 0. || y[j] != 0.) { temp1 = *alpha * y[j]; temp2 = *alpha * x[j]; i__2 = *n; for (i__ = j; i__ <= i__2; ++i__) { a[i__ + j * a_dim1] = a[i__ + j * a_dim1] + x[i__] * temp1 + y[i__] * temp2; /* L50: */ } } /* L60: */ } } else { i__1 = *n; for (j = 1; j <= i__1; ++j) { if (x[jx] != 0. || y[jy] != 0.) { temp1 = *alpha * y[jy]; temp2 = *alpha * x[jx]; ix = jx; iy = jy; i__2 = *n; for (i__ = j; i__ <= i__2; ++i__) { a[i__ + j * a_dim1] = a[i__ + j * a_dim1] + x[ix] * temp1 + y[iy] * temp2; ix += *incx; iy += *incy; /* L70: */ } } jx += *incx; jy += *incy; /* L80: */ } } } return 0; /* End of DSYR2 . 
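Editorial usage note (not part of the reference source): a minimal sketch of the
symmetric rank-2 update A := alpha*x*y' + alpha*y*x' + A, with hypothetical
caller-side names.

    integer n = 2, incx = 1, incy = 1, lda = 2;
    doublereal alpha = 1.;
    doublereal x[2] = {1., 0.};
    doublereal y[2] = {0., 1.};
    doublereal A[4] = {0., 0., 0., 0.};   (lower triangle updated with UPLO = 'L')
    dsyr2_("L", &n, &alpha, x, &incx, y, &incy, A, &lda);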
*/ } /* dsyr2_ */ /* Subroutine */ int dsyr2k_(char *uplo, char *trans, integer *n, integer *k, doublereal *alpha, doublereal *a, integer *lda, doublereal *b, integer *ldb, doublereal *beta, doublereal *c__, integer *ldc) { /* System generated locals */ integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, i__1, i__2, i__3; /* Local variables */ static integer info; static doublereal temp1, temp2; static integer i__, j, l; extern logical lsame_(char *, char *); static integer nrowa; static logical upper; extern /* Subroutine */ int xerbla_(char *, integer *); /* Purpose ======= DSYR2K performs one of the symmetric rank 2k operations C := alpha*A*B' + alpha*B*A' + beta*C, or C := alpha*A'*B + alpha*B'*A + beta*C, where alpha and beta are scalars, C is an n by n symmetric matrix and A and B are n by k matrices in the first case and k by n matrices in the second case. Arguments ========== UPLO - CHARACTER*1. On entry, UPLO specifies whether the upper or lower triangular part of the array C is to be referenced as follows: UPLO = 'U' or 'u' Only the upper triangular part of C is to be referenced. UPLO = 'L' or 'l' Only the lower triangular part of C is to be referenced. Unchanged on exit. TRANS - CHARACTER*1. On entry, TRANS specifies the operation to be performed as follows: TRANS = 'N' or 'n' C := alpha*A*B' + alpha*B*A' + beta*C. TRANS = 'T' or 't' C := alpha*A'*B + alpha*B'*A + beta*C. TRANS = 'C' or 'c' C := alpha*A'*B + alpha*B'*A + beta*C. Unchanged on exit. N - INTEGER. On entry, N specifies the order of the matrix C. N must be at least zero. Unchanged on exit. K - INTEGER. On entry with TRANS = 'N' or 'n', K specifies the number of columns of the matrices A and B, and on entry with TRANS = 'T' or 't' or 'C' or 'c', K specifies the number of rows of the matrices A and B. K must be at least zero. Unchanged on exit. ALPHA - DOUBLE PRECISION. On entry, ALPHA specifies the scalar alpha. Unchanged on exit. A - DOUBLE PRECISION array of DIMENSION ( LDA, ka ), where ka is k when TRANS = 'N' or 'n', and is n otherwise. Before entry with TRANS = 'N' or 'n', the leading n by k part of the array A must contain the matrix A, otherwise the leading k by n part of the array A must contain the matrix A. Unchanged on exit. LDA - INTEGER. On entry, LDA specifies the first dimension of A as declared in the calling (sub) program. When TRANS = 'N' or 'n' then LDA must be at least max( 1, n ), otherwise LDA must be at least max( 1, k ). Unchanged on exit. B - DOUBLE PRECISION array of DIMENSION ( LDB, kb ), where kb is k when TRANS = 'N' or 'n', and is n otherwise. Before entry with TRANS = 'N' or 'n', the leading n by k part of the array B must contain the matrix B, otherwise the leading k by n part of the array B must contain the matrix B. Unchanged on exit. LDB - INTEGER. On entry, LDB specifies the first dimension of B as declared in the calling (sub) program. When TRANS = 'N' or 'n' then LDB must be at least max( 1, n ), otherwise LDB must be at least max( 1, k ). Unchanged on exit. BETA - DOUBLE PRECISION. On entry, BETA specifies the scalar beta. Unchanged on exit. C - DOUBLE PRECISION array of DIMENSION ( LDC, n ). Before entry with UPLO = 'U' or 'u', the leading n by n upper triangular part of the array C must contain the upper triangular part of the symmetric matrix and the strictly lower triangular part of C is not referenced. On exit, the upper triangular part of the array C is overwritten by the upper triangular part of the updated matrix. 
Before entry with UPLO = 'L' or 'l', the leading n by n lower triangular part of the array C must contain the lower triangular part of the symmetric matrix and the strictly upper triangular part of C is not referenced. On exit, the lower triangular part of the array C is overwritten by the lower triangular part of the updated matrix. LDC - INTEGER. On entry, LDC specifies the first dimension of C as declared in the calling (sub) program. LDC must be at least max( 1, n ). Unchanged on exit. Level 3 Blas routine. -- Written on 8-February-1989. Jack Dongarra, Argonne National Laboratory. Iain Duff, AERE Harwell. Jeremy Du Croz, Numerical Algorithms Group Ltd. Sven Hammarling, Numerical Algorithms Group Ltd. Test the input parameters. */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; b_dim1 = *ldb; b_offset = 1 + b_dim1 * 1; b -= b_offset; c_dim1 = *ldc; c_offset = 1 + c_dim1 * 1; c__ -= c_offset; /* Function Body */ if (lsame_(trans, "N")) { nrowa = *n; } else { nrowa = *k; } upper = lsame_(uplo, "U"); info = 0; if (! upper && ! lsame_(uplo, "L")) { info = 1; } else if (! lsame_(trans, "N") && ! lsame_(trans, "T") && ! lsame_(trans, "C")) { info = 2; } else if (*n < 0) { info = 3; } else if (*k < 0) { info = 4; } else if (*lda < max(1,nrowa)) { info = 7; } else if (*ldb < max(1,nrowa)) { info = 9; } else if (*ldc < max(1,*n)) { info = 12; } if (info != 0) { xerbla_("DSYR2K", &info); return 0; } /* Quick return if possible. */ if (*n == 0 || (*alpha == 0. || *k == 0) && *beta == 1.) { return 0; } /* And when alpha.eq.zero. */ if (*alpha == 0.) { if (upper) { if (*beta == 0.) { i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = j; for (i__ = 1; i__ <= i__2; ++i__) { c__[i__ + j * c_dim1] = 0.; /* L10: */ } /* L20: */ } } else { i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = j; for (i__ = 1; i__ <= i__2; ++i__) { c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1]; /* L30: */ } /* L40: */ } } } else { if (*beta == 0.) { i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = *n; for (i__ = j; i__ <= i__2; ++i__) { c__[i__ + j * c_dim1] = 0.; /* L50: */ } /* L60: */ } } else { i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = *n; for (i__ = j; i__ <= i__2; ++i__) { c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1]; /* L70: */ } /* L80: */ } } } return 0; } /* Start the operations. */ if (lsame_(trans, "N")) { /* Form C := alpha*A*B' + alpha*B*A' + C. */ if (upper) { i__1 = *n; for (j = 1; j <= i__1; ++j) { if (*beta == 0.) { i__2 = j; for (i__ = 1; i__ <= i__2; ++i__) { c__[i__ + j * c_dim1] = 0.; /* L90: */ } } else if (*beta != 1.) { i__2 = j; for (i__ = 1; i__ <= i__2; ++i__) { c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1]; /* L100: */ } } i__2 = *k; for (l = 1; l <= i__2; ++l) { if (a[j + l * a_dim1] != 0. || b[j + l * b_dim1] != 0.) { temp1 = *alpha * b[j + l * b_dim1]; temp2 = *alpha * a[j + l * a_dim1]; i__3 = j; for (i__ = 1; i__ <= i__3; ++i__) { c__[i__ + j * c_dim1] = c__[i__ + j * c_dim1] + a[ i__ + l * a_dim1] * temp1 + b[i__ + l * b_dim1] * temp2; /* L110: */ } } /* L120: */ } /* L130: */ } } else { i__1 = *n; for (j = 1; j <= i__1; ++j) { if (*beta == 0.) { i__2 = *n; for (i__ = j; i__ <= i__2; ++i__) { c__[i__ + j * c_dim1] = 0.; /* L140: */ } } else if (*beta != 1.) { i__2 = *n; for (i__ = j; i__ <= i__2; ++i__) { c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1]; /* L150: */ } } i__2 = *k; for (l = 1; l <= i__2; ++l) { if (a[j + l * a_dim1] != 0. || b[j + l * b_dim1] != 0.) 
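/* Editorial note, not part of the reference f2c translation: a minimal sketch of
   the rank-2k update C := alpha*A*B' + alpha*B*A' + beta*C with TRANS = 'N'
   (hypothetical caller-side names):

       integer n = 2, k = 1, lda = 2, ldb = 2, ldc = 2;
       doublereal alpha = 1., beta = 0.;
       doublereal A[2] = {1., 2.};   (2 x 1)
       doublereal B[2] = {3., 4.};   (2 x 1)
       doublereal C[4];              (only the 'U' triangle is written)
       dsyr2k_("U", "N", &n, &k, &alpha, A, &lda, B, &ldb, &beta, C, &ldc);
*/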
{ temp1 = *alpha * b[j + l * b_dim1]; temp2 = *alpha * a[j + l * a_dim1]; i__3 = *n; for (i__ = j; i__ <= i__3; ++i__) { c__[i__ + j * c_dim1] = c__[i__ + j * c_dim1] + a[ i__ + l * a_dim1] * temp1 + b[i__ + l * b_dim1] * temp2; /* L160: */ } } /* L170: */ } /* L180: */ } } } else { /* Form C := alpha*A'*B + alpha*B'*A + C. */ if (upper) { i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = j; for (i__ = 1; i__ <= i__2; ++i__) { temp1 = 0.; temp2 = 0.; i__3 = *k; for (l = 1; l <= i__3; ++l) { temp1 += a[l + i__ * a_dim1] * b[l + j * b_dim1]; temp2 += b[l + i__ * b_dim1] * a[l + j * a_dim1]; /* L190: */ } if (*beta == 0.) { c__[i__ + j * c_dim1] = *alpha * temp1 + *alpha * temp2; } else { c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1] + *alpha * temp1 + *alpha * temp2; } /* L200: */ } /* L210: */ } } else { i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = *n; for (i__ = j; i__ <= i__2; ++i__) { temp1 = 0.; temp2 = 0.; i__3 = *k; for (l = 1; l <= i__3; ++l) { temp1 += a[l + i__ * a_dim1] * b[l + j * b_dim1]; temp2 += b[l + i__ * b_dim1] * a[l + j * a_dim1]; /* L220: */ } if (*beta == 0.) { c__[i__ + j * c_dim1] = *alpha * temp1 + *alpha * temp2; } else { c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1] + *alpha * temp1 + *alpha * temp2; } /* L230: */ } /* L240: */ } } } return 0; /* End of DSYR2K. */ } /* dsyr2k_ */ /* Subroutine */ int dsyrk_(char *uplo, char *trans, integer *n, integer *k, doublereal *alpha, doublereal *a, integer *lda, doublereal *beta, doublereal *c__, integer *ldc) { /* System generated locals */ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3; /* Local variables */ static integer info; static doublereal temp; static integer i__, j, l; extern logical lsame_(char *, char *); static integer nrowa; static logical upper; extern /* Subroutine */ int xerbla_(char *, integer *); /* Purpose ======= DSYRK performs one of the symmetric rank k operations C := alpha*A*A' + beta*C, or C := alpha*A'*A + beta*C, where alpha and beta are scalars, C is an n by n symmetric matrix and A is an n by k matrix in the first case and a k by n matrix in the second case. Arguments ========== UPLO - CHARACTER*1. On entry, UPLO specifies whether the upper or lower triangular part of the array C is to be referenced as follows: UPLO = 'U' or 'u' Only the upper triangular part of C is to be referenced. UPLO = 'L' or 'l' Only the lower triangular part of C is to be referenced. Unchanged on exit. TRANS - CHARACTER*1. On entry, TRANS specifies the operation to be performed as follows: TRANS = 'N' or 'n' C := alpha*A*A' + beta*C. TRANS = 'T' or 't' C := alpha*A'*A + beta*C. TRANS = 'C' or 'c' C := alpha*A'*A + beta*C. Unchanged on exit. N - INTEGER. On entry, N specifies the order of the matrix C. N must be at least zero. Unchanged on exit. K - INTEGER. On entry with TRANS = 'N' or 'n', K specifies the number of columns of the matrix A, and on entry with TRANS = 'T' or 't' or 'C' or 'c', K specifies the number of rows of the matrix A. K must be at least zero. Unchanged on exit. ALPHA - DOUBLE PRECISION. On entry, ALPHA specifies the scalar alpha. Unchanged on exit. A - DOUBLE PRECISION array of DIMENSION ( LDA, ka ), where ka is k when TRANS = 'N' or 'n', and is n otherwise. Before entry with TRANS = 'N' or 'n', the leading n by k part of the array A must contain the matrix A, otherwise the leading k by n part of the array A must contain the matrix A. Unchanged on exit. LDA - INTEGER. On entry, LDA specifies the first dimension of A as declared in the calling (sub) program. 
When TRANS = 'N' or 'n' then LDA must be at least max( 1, n ), otherwise LDA must be at least max( 1, k ). Unchanged on exit. BETA - DOUBLE PRECISION. On entry, BETA specifies the scalar beta. Unchanged on exit. C - DOUBLE PRECISION array of DIMENSION ( LDC, n ). Before entry with UPLO = 'U' or 'u', the leading n by n upper triangular part of the array C must contain the upper triangular part of the symmetric matrix and the strictly lower triangular part of C is not referenced. On exit, the upper triangular part of the array C is overwritten by the upper triangular part of the updated matrix. Before entry with UPLO = 'L' or 'l', the leading n by n lower triangular part of the array C must contain the lower triangular part of the symmetric matrix and the strictly upper triangular part of C is not referenced. On exit, the lower triangular part of the array C is overwritten by the lower triangular part of the updated matrix. LDC - INTEGER. On entry, LDC specifies the first dimension of C as declared in the calling (sub) program. LDC must be at least max( 1, n ). Unchanged on exit. Level 3 Blas routine. -- Written on 8-February-1989. Jack Dongarra, Argonne National Laboratory. Iain Duff, AERE Harwell. Jeremy Du Croz, Numerical Algorithms Group Ltd. Sven Hammarling, Numerical Algorithms Group Ltd. Test the input parameters. */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; c_dim1 = *ldc; c_offset = 1 + c_dim1 * 1; c__ -= c_offset; /* Function Body */ if (lsame_(trans, "N")) { nrowa = *n; } else { nrowa = *k; } upper = lsame_(uplo, "U"); info = 0; if (! upper && ! lsame_(uplo, "L")) { info = 1; } else if (! lsame_(trans, "N") && ! lsame_(trans, "T") && ! lsame_(trans, "C")) { info = 2; } else if (*n < 0) { info = 3; } else if (*k < 0) { info = 4; } else if (*lda < max(1,nrowa)) { info = 7; } else if (*ldc < max(1,*n)) { info = 10; } if (info != 0) { xerbla_("DSYRK ", &info); return 0; } /* Quick return if possible. */ if (*n == 0 || (*alpha == 0. || *k == 0) && *beta == 1.) { return 0; } /* And when alpha.eq.zero. */ if (*alpha == 0.) { if (upper) { if (*beta == 0.) { i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = j; for (i__ = 1; i__ <= i__2; ++i__) { c__[i__ + j * c_dim1] = 0.; /* L10: */ } /* L20: */ } } else { i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = j; for (i__ = 1; i__ <= i__2; ++i__) { c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1]; /* L30: */ } /* L40: */ } } } else { if (*beta == 0.) { i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = *n; for (i__ = j; i__ <= i__2; ++i__) { c__[i__ + j * c_dim1] = 0.; /* L50: */ } /* L60: */ } } else { i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = *n; for (i__ = j; i__ <= i__2; ++i__) { c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1]; /* L70: */ } /* L80: */ } } } return 0; } /* Start the operations. */ if (lsame_(trans, "N")) { /* Form C := alpha*A*A' + beta*C. */ if (upper) { i__1 = *n; for (j = 1; j <= i__1; ++j) { if (*beta == 0.) { i__2 = j; for (i__ = 1; i__ <= i__2; ++i__) { c__[i__ + j * c_dim1] = 0.; /* L90: */ } } else if (*beta != 1.) { i__2 = j; for (i__ = 1; i__ <= i__2; ++i__) { c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1]; /* L100: */ } } i__2 = *k; for (l = 1; l <= i__2; ++l) { if (a[j + l * a_dim1] != 0.) { temp = *alpha * a[j + l * a_dim1]; i__3 = j; for (i__ = 1; i__ <= i__3; ++i__) { c__[i__ + j * c_dim1] += temp * a[i__ + l * a_dim1]; /* L110: */ } } /* L120: */ } /* L130: */ } } else { i__1 = *n; for (j = 1; j <= i__1; ++j) { if (*beta == 0.) 
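/* Editorial note, not part of the reference f2c translation: a minimal sketch of
   the rank-k update C := alpha*A*A' + beta*C with TRANS = 'N' (hypothetical
   caller-side names):

       integer n = 2, k = 1, lda = 2, ldc = 2;
       doublereal alpha = 1., beta = 0.;
       doublereal A[2] = {1., 2.};   (2 x 1)
       doublereal C[4];              (only the 'L' triangle is written)
       dsyrk_("L", "N", &n, &k, &alpha, A, &lda, &beta, C, &ldc);
*/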
{ i__2 = *n; for (i__ = j; i__ <= i__2; ++i__) { c__[i__ + j * c_dim1] = 0.; /* L140: */ } } else if (*beta != 1.) { i__2 = *n; for (i__ = j; i__ <= i__2; ++i__) { c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1]; /* L150: */ } } i__2 = *k; for (l = 1; l <= i__2; ++l) { if (a[j + l * a_dim1] != 0.) { temp = *alpha * a[j + l * a_dim1]; i__3 = *n; for (i__ = j; i__ <= i__3; ++i__) { c__[i__ + j * c_dim1] += temp * a[i__ + l * a_dim1]; /* L160: */ } } /* L170: */ } /* L180: */ } } } else { /* Form C := alpha*A'*A + beta*C. */ if (upper) { i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = j; for (i__ = 1; i__ <= i__2; ++i__) { temp = 0.; i__3 = *k; for (l = 1; l <= i__3; ++l) { temp += a[l + i__ * a_dim1] * a[l + j * a_dim1]; /* L190: */ } if (*beta == 0.) { c__[i__ + j * c_dim1] = *alpha * temp; } else { c__[i__ + j * c_dim1] = *alpha * temp + *beta * c__[ i__ + j * c_dim1]; } /* L200: */ } /* L210: */ } } else { i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = *n; for (i__ = j; i__ <= i__2; ++i__) { temp = 0.; i__3 = *k; for (l = 1; l <= i__3; ++l) { temp += a[l + i__ * a_dim1] * a[l + j * a_dim1]; /* L220: */ } if (*beta == 0.) { c__[i__ + j * c_dim1] = *alpha * temp; } else { c__[i__ + j * c_dim1] = *alpha * temp + *beta * c__[ i__ + j * c_dim1]; } /* L230: */ } /* L240: */ } } } return 0; /* End of DSYRK . */ } /* dsyrk_ */ /* Subroutine */ int dtrmm_(char *side, char *uplo, char *transa, char *diag, integer *m, integer *n, doublereal *alpha, doublereal *a, integer * lda, doublereal *b, integer *ldb) { /* System generated locals */ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3; /* Local variables */ static integer info; static doublereal temp; static integer i__, j, k; static logical lside; extern logical lsame_(char *, char *); static integer nrowa; static logical upper; extern /* Subroutine */ int xerbla_(char *, integer *); static logical nounit; /* Purpose ======= DTRMM performs one of the matrix-matrix operations B := alpha*op( A )*B, or B := alpha*B*op( A ), where alpha is a scalar, B is an m by n matrix, A is a unit, or non-unit, upper or lower triangular matrix and op( A ) is one of op( A ) = A or op( A ) = A'. Arguments ========== SIDE - CHARACTER*1. On entry, SIDE specifies whether op( A ) multiplies B from the left or right as follows: SIDE = 'L' or 'l' B := alpha*op( A )*B. SIDE = 'R' or 'r' B := alpha*B*op( A ). Unchanged on exit. UPLO - CHARACTER*1. On entry, UPLO specifies whether the matrix A is an upper or lower triangular matrix as follows: UPLO = 'U' or 'u' A is an upper triangular matrix. UPLO = 'L' or 'l' A is a lower triangular matrix. Unchanged on exit. TRANSA - CHARACTER*1. On entry, TRANSA specifies the form of op( A ) to be used in the matrix multiplication as follows: TRANSA = 'N' or 'n' op( A ) = A. TRANSA = 'T' or 't' op( A ) = A'. TRANSA = 'C' or 'c' op( A ) = A'. Unchanged on exit. DIAG - CHARACTER*1. On entry, DIAG specifies whether or not A is unit triangular as follows: DIAG = 'U' or 'u' A is assumed to be unit triangular. DIAG = 'N' or 'n' A is not assumed to be unit triangular. Unchanged on exit. M - INTEGER. On entry, M specifies the number of rows of B. M must be at least zero. Unchanged on exit. N - INTEGER. On entry, N specifies the number of columns of B. N must be at least zero. Unchanged on exit. ALPHA - DOUBLE PRECISION. On entry, ALPHA specifies the scalar alpha. When alpha is zero then A is not referenced and B need not be set before entry. Unchanged on exit. 
A - DOUBLE PRECISION array of DIMENSION ( LDA, k ), where k is m when SIDE = 'L' or 'l' and is n when SIDE = 'R' or 'r'. Before entry with UPLO = 'U' or 'u', the leading k by k upper triangular part of the array A must contain the upper triangular matrix and the strictly lower triangular part of A is not referenced. Before entry with UPLO = 'L' or 'l', the leading k by k lower triangular part of the array A must contain the lower triangular matrix and the strictly upper triangular part of A is not referenced. Note that when DIAG = 'U' or 'u', the diagonal elements of A are not referenced either, but are assumed to be unity. Unchanged on exit. LDA - INTEGER. On entry, LDA specifies the first dimension of A as declared in the calling (sub) program. When SIDE = 'L' or 'l' then LDA must be at least max( 1, m ), when SIDE = 'R' or 'r' then LDA must be at least max( 1, n ). Unchanged on exit. B - DOUBLE PRECISION array of DIMENSION ( LDB, n ). Before entry, the leading m by n part of the array B must contain the matrix B, and on exit is overwritten by the transformed matrix. LDB - INTEGER. On entry, LDB specifies the first dimension of B as declared in the calling (sub) program. LDB must be at least max( 1, m ). Unchanged on exit. Level 3 Blas routine. -- Written on 8-February-1989. Jack Dongarra, Argonne National Laboratory. Iain Duff, AERE Harwell. Jeremy Du Croz, Numerical Algorithms Group Ltd. Sven Hammarling, Numerical Algorithms Group Ltd. Test the input parameters. */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; b_dim1 = *ldb; b_offset = 1 + b_dim1 * 1; b -= b_offset; /* Function Body */ lside = lsame_(side, "L"); if (lside) { nrowa = *m; } else { nrowa = *n; } nounit = lsame_(diag, "N"); upper = lsame_(uplo, "U"); info = 0; if (! lside && ! lsame_(side, "R")) { info = 1; } else if (! upper && ! lsame_(uplo, "L")) { info = 2; } else if (! lsame_(transa, "N") && ! lsame_(transa, "T") && ! lsame_(transa, "C")) { info = 3; } else if (! lsame_(diag, "U") && ! lsame_(diag, "N")) { info = 4; } else if (*m < 0) { info = 5; } else if (*n < 0) { info = 6; } else if (*lda < max(1,nrowa)) { info = 9; } else if (*ldb < max(1,*m)) { info = 11; } if (info != 0) { xerbla_("DTRMM ", &info); return 0; } /* Quick return if possible. */ if (*n == 0) { return 0; } /* And when alpha.eq.zero. */ if (*alpha == 0.) { i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { b[i__ + j * b_dim1] = 0.; /* L10: */ } /* L20: */ } return 0; } /* Start the operations. */ if (lside) { if (lsame_(transa, "N")) { /* Form B := alpha*A*B. */ if (upper) { i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = *m; for (k = 1; k <= i__2; ++k) { if (b[k + j * b_dim1] != 0.) { temp = *alpha * b[k + j * b_dim1]; i__3 = k - 1; for (i__ = 1; i__ <= i__3; ++i__) { b[i__ + j * b_dim1] += temp * a[i__ + k * a_dim1]; /* L30: */ } if (nounit) { temp *= a[k + k * a_dim1]; } b[k + j * b_dim1] = temp; } /* L40: */ } /* L50: */ } } else { i__1 = *n; for (j = 1; j <= i__1; ++j) { for (k = *m; k >= 1; --k) { if (b[k + j * b_dim1] != 0.) { temp = *alpha * b[k + j * b_dim1]; b[k + j * b_dim1] = temp; if (nounit) { b[k + j * b_dim1] *= a[k + k * a_dim1]; } i__2 = *m; for (i__ = k + 1; i__ <= i__2; ++i__) { b[i__ + j * b_dim1] += temp * a[i__ + k * a_dim1]; /* L60: */ } } /* L70: */ } /* L80: */ } } } else { /* Form B := alpha*A'*B. 
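Each entry is formed as a dot product: in the upper case B(i,j) := alpha*( A(1:i,i)'*B(1:i,j) ) with i running from M down to 1, in the lower case B(i,j) := alpha*( A(i:M,i)'*B(i:M,j) ) with i running upwards, so that the entries of B still to be read have not yet been overwritten (A(i,i) is taken as one when DIAG = 'U').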
*/ if (upper) { i__1 = *n; for (j = 1; j <= i__1; ++j) { for (i__ = *m; i__ >= 1; --i__) { temp = b[i__ + j * b_dim1]; if (nounit) { temp *= a[i__ + i__ * a_dim1]; } i__2 = i__ - 1; for (k = 1; k <= i__2; ++k) { temp += a[k + i__ * a_dim1] * b[k + j * b_dim1]; /* L90: */ } b[i__ + j * b_dim1] = *alpha * temp; /* L100: */ } /* L110: */ } } else { i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { temp = b[i__ + j * b_dim1]; if (nounit) { temp *= a[i__ + i__ * a_dim1]; } i__3 = *m; for (k = i__ + 1; k <= i__3; ++k) { temp += a[k + i__ * a_dim1] * b[k + j * b_dim1]; /* L120: */ } b[i__ + j * b_dim1] = *alpha * temp; /* L130: */ } /* L140: */ } } } } else { if (lsame_(transa, "N")) { /* Form B := alpha*B*A. */ if (upper) { for (j = *n; j >= 1; --j) { temp = *alpha; if (nounit) { temp *= a[j + j * a_dim1]; } i__1 = *m; for (i__ = 1; i__ <= i__1; ++i__) { b[i__ + j * b_dim1] = temp * b[i__ + j * b_dim1]; /* L150: */ } i__1 = j - 1; for (k = 1; k <= i__1; ++k) { if (a[k + j * a_dim1] != 0.) { temp = *alpha * a[k + j * a_dim1]; i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { b[i__ + j * b_dim1] += temp * b[i__ + k * b_dim1]; /* L160: */ } } /* L170: */ } /* L180: */ } } else { i__1 = *n; for (j = 1; j <= i__1; ++j) { temp = *alpha; if (nounit) { temp *= a[j + j * a_dim1]; } i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { b[i__ + j * b_dim1] = temp * b[i__ + j * b_dim1]; /* L190: */ } i__2 = *n; for (k = j + 1; k <= i__2; ++k) { if (a[k + j * a_dim1] != 0.) { temp = *alpha * a[k + j * a_dim1]; i__3 = *m; for (i__ = 1; i__ <= i__3; ++i__) { b[i__ + j * b_dim1] += temp * b[i__ + k * b_dim1]; /* L200: */ } } /* L210: */ } /* L220: */ } } } else { /* Form B := alpha*B*A'. */ if (upper) { i__1 = *n; for (k = 1; k <= i__1; ++k) { i__2 = k - 1; for (j = 1; j <= i__2; ++j) { if (a[j + k * a_dim1] != 0.) { temp = *alpha * a[j + k * a_dim1]; i__3 = *m; for (i__ = 1; i__ <= i__3; ++i__) { b[i__ + j * b_dim1] += temp * b[i__ + k * b_dim1]; /* L230: */ } } /* L240: */ } temp = *alpha; if (nounit) { temp *= a[k + k * a_dim1]; } if (temp != 1.) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { b[i__ + k * b_dim1] = temp * b[i__ + k * b_dim1]; /* L250: */ } } /* L260: */ } } else { for (k = *n; k >= 1; --k) { i__1 = *n; for (j = k + 1; j <= i__1; ++j) { if (a[j + k * a_dim1] != 0.) { temp = *alpha * a[j + k * a_dim1]; i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { b[i__ + j * b_dim1] += temp * b[i__ + k * b_dim1]; /* L270: */ } } /* L280: */ } temp = *alpha; if (nounit) { temp *= a[k + k * a_dim1]; } if (temp != 1.) { i__1 = *m; for (i__ = 1; i__ <= i__1; ++i__) { b[i__ + k * b_dim1] = temp * b[i__ + k * b_dim1]; /* L290: */ } } /* L300: */ } } } } return 0; /* End of DTRMM . */ } /* dtrmm_ */ /* Subroutine */ int dtrmv_(char *uplo, char *trans, char *diag, integer *n, doublereal *a, integer *lda, doublereal *x, integer *incx) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2; /* Local variables */ static integer info; static doublereal temp; static integer i__, j; extern logical lsame_(char *, char *); static integer ix, jx, kx; extern /* Subroutine */ int xerbla_(char *, integer *); static logical nounit; /* Purpose ======= DTRMV performs one of the matrix-vector operations x := A*x, or x := A'*x, where x is an n element vector and A is an n by n unit, or non-unit, upper or lower triangular matrix. Arguments ========== UPLO - CHARACTER*1. 
On entry, UPLO specifies whether the matrix is an upper or lower triangular matrix as follows: UPLO = 'U' or 'u' A is an upper triangular matrix. UPLO = 'L' or 'l' A is a lower triangular matrix. Unchanged on exit. TRANS - CHARACTER*1. On entry, TRANS specifies the operation to be performed as follows: TRANS = 'N' or 'n' x := A*x. TRANS = 'T' or 't' x := A'*x. TRANS = 'C' or 'c' x := A'*x. Unchanged on exit. DIAG - CHARACTER*1. On entry, DIAG specifies whether or not A is unit triangular as follows: DIAG = 'U' or 'u' A is assumed to be unit triangular. DIAG = 'N' or 'n' A is not assumed to be unit triangular. Unchanged on exit. N - INTEGER. On entry, N specifies the order of the matrix A. N must be at least zero. Unchanged on exit. A - DOUBLE PRECISION array of DIMENSION ( LDA, n ). Before entry with UPLO = 'U' or 'u', the leading n by n upper triangular part of the array A must contain the upper triangular matrix and the strictly lower triangular part of A is not referenced. Before entry with UPLO = 'L' or 'l', the leading n by n lower triangular part of the array A must contain the lower triangular matrix and the strictly upper triangular part of A is not referenced. Note that when DIAG = 'U' or 'u', the diagonal elements of A are not referenced either, but are assumed to be unity. Unchanged on exit. LDA - INTEGER. On entry, LDA specifies the first dimension of A as declared in the calling (sub) program. LDA must be at least max( 1, n ). Unchanged on exit. X - DOUBLE PRECISION array of dimension at least ( 1 + ( n - 1 )*abs( INCX ) ). Before entry, the incremented array X must contain the n element vector x. On exit, X is overwritten with the tranformed vector x. INCX - INTEGER. On entry, INCX specifies the increment for the elements of X. INCX must not be zero. Unchanged on exit. Level 2 Blas routine. -- Written on 22-October-1986. Jack Dongarra, Argonne National Lab. Jeremy Du Croz, Nag Central Office. Sven Hammarling, Nag Central Office. Richard Hanson, Sandia National Labs. Test the input parameters. */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --x; /* Function Body */ info = 0; if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) { info = 1; } else if (! lsame_(trans, "N") && ! lsame_(trans, "T") && ! lsame_(trans, "C")) { info = 2; } else if (! lsame_(diag, "U") && ! lsame_(diag, "N")) { info = 3; } else if (*n < 0) { info = 4; } else if (*lda < max(1,*n)) { info = 6; } else if (*incx == 0) { info = 8; } if (info != 0) { xerbla_("DTRMV ", &info); return 0; } /* Quick return if possible. */ if (*n == 0) { return 0; } nounit = lsame_(diag, "N"); /* Set up the start point in X if the increment is not unity. This will be ( N - 1 )*INCX too small for descending loops. */ if (*incx <= 0) { kx = 1 - (*n - 1) * *incx; } else if (*incx != 1) { kx = 1; } /* Start the operations. In this version the elements of A are accessed sequentially with one pass through A. */ if (lsame_(trans, "N")) { /* Form x := A*x. */ if (lsame_(uplo, "U")) { if (*incx == 1) { i__1 = *n; for (j = 1; j <= i__1; ++j) { if (x[j] != 0.) { temp = x[j]; i__2 = j - 1; for (i__ = 1; i__ <= i__2; ++i__) { x[i__] += temp * a[i__ + j * a_dim1]; /* L10: */ } if (nounit) { x[j] *= a[j + j * a_dim1]; } } /* L20: */ } } else { jx = kx; i__1 = *n; for (j = 1; j <= i__1; ++j) { if (x[jx] != 0.) 
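/* Column j contributes x(j)*A(1:j-1,j) to the leading entries of x, walked with stride INCX; the diagonal factor A(j,j) is applied to x(j) last and is skipped when DIAG = 'U'. */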
{ temp = x[jx]; ix = kx; i__2 = j - 1; for (i__ = 1; i__ <= i__2; ++i__) { x[ix] += temp * a[i__ + j * a_dim1]; ix += *incx; /* L30: */ } if (nounit) { x[jx] *= a[j + j * a_dim1]; } } jx += *incx; /* L40: */ } } } else { if (*incx == 1) { for (j = *n; j >= 1; --j) { if (x[j] != 0.) { temp = x[j]; i__1 = j + 1; for (i__ = *n; i__ >= i__1; --i__) { x[i__] += temp * a[i__ + j * a_dim1]; /* L50: */ } if (nounit) { x[j] *= a[j + j * a_dim1]; } } /* L60: */ } } else { kx += (*n - 1) * *incx; jx = kx; for (j = *n; j >= 1; --j) { if (x[jx] != 0.) { temp = x[jx]; ix = kx; i__1 = j + 1; for (i__ = *n; i__ >= i__1; --i__) { x[ix] += temp * a[i__ + j * a_dim1]; ix -= *incx; /* L70: */ } if (nounit) { x[jx] *= a[j + j * a_dim1]; } } jx -= *incx; /* L80: */ } } } } else { /* Form x := A'*x. */ if (lsame_(uplo, "U")) { if (*incx == 1) { for (j = *n; j >= 1; --j) { temp = x[j]; if (nounit) { temp *= a[j + j * a_dim1]; } for (i__ = j - 1; i__ >= 1; --i__) { temp += a[i__ + j * a_dim1] * x[i__]; /* L90: */ } x[j] = temp; /* L100: */ } } else { jx = kx + (*n - 1) * *incx; for (j = *n; j >= 1; --j) { temp = x[jx]; ix = jx; if (nounit) { temp *= a[j + j * a_dim1]; } for (i__ = j - 1; i__ >= 1; --i__) { ix -= *incx; temp += a[i__ + j * a_dim1] * x[ix]; /* L110: */ } x[jx] = temp; jx -= *incx; /* L120: */ } } } else { if (*incx == 1) { i__1 = *n; for (j = 1; j <= i__1; ++j) { temp = x[j]; if (nounit) { temp *= a[j + j * a_dim1]; } i__2 = *n; for (i__ = j + 1; i__ <= i__2; ++i__) { temp += a[i__ + j * a_dim1] * x[i__]; /* L130: */ } x[j] = temp; /* L140: */ } } else { jx = kx; i__1 = *n; for (j = 1; j <= i__1; ++j) { temp = x[jx]; ix = jx; if (nounit) { temp *= a[j + j * a_dim1]; } i__2 = *n; for (i__ = j + 1; i__ <= i__2; ++i__) { ix += *incx; temp += a[i__ + j * a_dim1] * x[ix]; /* L150: */ } x[jx] = temp; jx += *incx; /* L160: */ } } } } return 0; /* End of DTRMV . */ } /* dtrmv_ */ /* Subroutine */ int dtrsm_(char *side, char *uplo, char *transa, char *diag, integer *m, integer *n, doublereal *alpha, doublereal *a, integer * lda, doublereal *b, integer *ldb) { /* System generated locals */ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3; /* Local variables */ static integer info; static doublereal temp; static integer i__, j, k; static logical lside; extern logical lsame_(char *, char *); static integer nrowa; static logical upper; extern /* Subroutine */ int xerbla_(char *, integer *); static logical nounit; /* Purpose ======= DTRSM solves one of the matrix equations op( A )*X = alpha*B, or X*op( A ) = alpha*B, where alpha is a scalar, X and B are m by n matrices, A is a unit, or non-unit, upper or lower triangular matrix and op( A ) is one of op( A ) = A or op( A ) = A'. The matrix X is overwritten on B. Arguments ========== SIDE - CHARACTER*1. On entry, SIDE specifies whether op( A ) appears on the left or right of X as follows: SIDE = 'L' or 'l' op( A )*X = alpha*B. SIDE = 'R' or 'r' X*op( A ) = alpha*B. Unchanged on exit. UPLO - CHARACTER*1. On entry, UPLO specifies whether the matrix A is an upper or lower triangular matrix as follows: UPLO = 'U' or 'u' A is an upper triangular matrix. UPLO = 'L' or 'l' A is a lower triangular matrix. Unchanged on exit. TRANSA - CHARACTER*1. On entry, TRANSA specifies the form of op( A ) to be used in the matrix multiplication as follows: TRANSA = 'N' or 'n' op( A ) = A. TRANSA = 'T' or 't' op( A ) = A'. TRANSA = 'C' or 'c' op( A ) = A'. Unchanged on exit. DIAG - CHARACTER*1. 
On entry, DIAG specifies whether or not A is unit triangular as follows: DIAG = 'U' or 'u' A is assumed to be unit triangular. DIAG = 'N' or 'n' A is not assumed to be unit triangular. Unchanged on exit. M - INTEGER. On entry, M specifies the number of rows of B. M must be at least zero. Unchanged on exit. N - INTEGER. On entry, N specifies the number of columns of B. N must be at least zero. Unchanged on exit. ALPHA - DOUBLE PRECISION. On entry, ALPHA specifies the scalar alpha. When alpha is zero then A is not referenced and B need not be set before entry. Unchanged on exit. A - DOUBLE PRECISION array of DIMENSION ( LDA, k ), where k is m when SIDE = 'L' or 'l' and is n when SIDE = 'R' or 'r'. Before entry with UPLO = 'U' or 'u', the leading k by k upper triangular part of the array A must contain the upper triangular matrix and the strictly lower triangular part of A is not referenced. Before entry with UPLO = 'L' or 'l', the leading k by k lower triangular part of the array A must contain the lower triangular matrix and the strictly upper triangular part of A is not referenced. Note that when DIAG = 'U' or 'u', the diagonal elements of A are not referenced either, but are assumed to be unity. Unchanged on exit. LDA - INTEGER. On entry, LDA specifies the first dimension of A as declared in the calling (sub) program. When SIDE = 'L' or 'l' then LDA must be at least max( 1, m ), when SIDE = 'R' or 'r' then LDA must be at least max( 1, n ). Unchanged on exit. B - DOUBLE PRECISION array of DIMENSION ( LDB, n ). Before entry, the leading m by n part of the array B must contain the right-hand side matrix B, and on exit is overwritten by the solution matrix X. LDB - INTEGER. On entry, LDB specifies the first dimension of B as declared in the calling (sub) program. LDB must be at least max( 1, m ). Unchanged on exit. Level 3 Blas routine. -- Written on 8-February-1989. Jack Dongarra, Argonne National Laboratory. Iain Duff, AERE Harwell. Jeremy Du Croz, Numerical Algorithms Group Ltd. Sven Hammarling, Numerical Algorithms Group Ltd. Test the input parameters. */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; b_dim1 = *ldb; b_offset = 1 + b_dim1 * 1; b -= b_offset; /* Function Body */ lside = lsame_(side, "L"); if (lside) { nrowa = *m; } else { nrowa = *n; } nounit = lsame_(diag, "N"); upper = lsame_(uplo, "U"); info = 0; if (! lside && ! lsame_(side, "R")) { info = 1; } else if (! upper && ! lsame_(uplo, "L")) { info = 2; } else if (! lsame_(transa, "N") && ! lsame_(transa, "T") && ! lsame_(transa, "C")) { info = 3; } else if (! lsame_(diag, "U") && ! lsame_(diag, "N")) { info = 4; } else if (*m < 0) { info = 5; } else if (*n < 0) { info = 6; } else if (*lda < max(1,nrowa)) { info = 9; } else if (*ldb < max(1,*m)) { info = 11; } if (info != 0) { xerbla_("DTRSM ", &info); return 0; } /* Quick return if possible. */ if (*n == 0) { return 0; } /* And when alpha.eq.zero. */ if (*alpha == 0.) { i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { b[i__ + j * b_dim1] = 0.; /* L10: */ } /* L20: */ } return 0; } /* Start the operations. */ if (lside) { if (lsame_(transa, "N")) { /* Form B := alpha*inv( A )*B. */ if (upper) { i__1 = *n; for (j = 1; j <= i__1; ++j) { if (*alpha != 1.) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { b[i__ + j * b_dim1] = *alpha * b[i__ + j * b_dim1] ; /* L30: */ } } for (k = *m; k >= 1; --k) { if (b[k + j * b_dim1] != 0.) 
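/* Backward substitution: B(k,j) is divided by A(k,k) (unless DIAG = 'U'), then its contribution is eliminated from rows 1..k-1 of column j. */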
{ if (nounit) { b[k + j * b_dim1] /= a[k + k * a_dim1]; } i__2 = k - 1; for (i__ = 1; i__ <= i__2; ++i__) { b[i__ + j * b_dim1] -= b[k + j * b_dim1] * a[ i__ + k * a_dim1]; /* L40: */ } } /* L50: */ } /* L60: */ } } else { i__1 = *n; for (j = 1; j <= i__1; ++j) { if (*alpha != 1.) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { b[i__ + j * b_dim1] = *alpha * b[i__ + j * b_dim1] ; /* L70: */ } } i__2 = *m; for (k = 1; k <= i__2; ++k) { if (b[k + j * b_dim1] != 0.) { if (nounit) { b[k + j * b_dim1] /= a[k + k * a_dim1]; } i__3 = *m; for (i__ = k + 1; i__ <= i__3; ++i__) { b[i__ + j * b_dim1] -= b[k + j * b_dim1] * a[ i__ + k * a_dim1]; /* L80: */ } } /* L90: */ } /* L100: */ } } } else { /* Form B := alpha*inv( A' )*B. */ if (upper) { i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { temp = *alpha * b[i__ + j * b_dim1]; i__3 = i__ - 1; for (k = 1; k <= i__3; ++k) { temp -= a[k + i__ * a_dim1] * b[k + j * b_dim1]; /* L110: */ } if (nounit) { temp /= a[i__ + i__ * a_dim1]; } b[i__ + j * b_dim1] = temp; /* L120: */ } /* L130: */ } } else { i__1 = *n; for (j = 1; j <= i__1; ++j) { for (i__ = *m; i__ >= 1; --i__) { temp = *alpha * b[i__ + j * b_dim1]; i__2 = *m; for (k = i__ + 1; k <= i__2; ++k) { temp -= a[k + i__ * a_dim1] * b[k + j * b_dim1]; /* L140: */ } if (nounit) { temp /= a[i__ + i__ * a_dim1]; } b[i__ + j * b_dim1] = temp; /* L150: */ } /* L160: */ } } } } else { if (lsame_(transa, "N")) { /* Form B := alpha*B*inv( A ). */ if (upper) { i__1 = *n; for (j = 1; j <= i__1; ++j) { if (*alpha != 1.) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { b[i__ + j * b_dim1] = *alpha * b[i__ + j * b_dim1] ; /* L170: */ } } i__2 = j - 1; for (k = 1; k <= i__2; ++k) { if (a[k + j * a_dim1] != 0.) { i__3 = *m; for (i__ = 1; i__ <= i__3; ++i__) { b[i__ + j * b_dim1] -= a[k + j * a_dim1] * b[ i__ + k * b_dim1]; /* L180: */ } } /* L190: */ } if (nounit) { temp = 1. / a[j + j * a_dim1]; i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { b[i__ + j * b_dim1] = temp * b[i__ + j * b_dim1]; /* L200: */ } } /* L210: */ } } else { for (j = *n; j >= 1; --j) { if (*alpha != 1.) { i__1 = *m; for (i__ = 1; i__ <= i__1; ++i__) { b[i__ + j * b_dim1] = *alpha * b[i__ + j * b_dim1] ; /* L220: */ } } i__1 = *n; for (k = j + 1; k <= i__1; ++k) { if (a[k + j * a_dim1] != 0.) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { b[i__ + j * b_dim1] -= a[k + j * a_dim1] * b[ i__ + k * b_dim1]; /* L230: */ } } /* L240: */ } if (nounit) { temp = 1. / a[j + j * a_dim1]; i__1 = *m; for (i__ = 1; i__ <= i__1; ++i__) { b[i__ + j * b_dim1] = temp * b[i__ + j * b_dim1]; /* L250: */ } } /* L260: */ } } } else { /* Form B := alpha*B*inv( A' ). */ if (upper) { for (k = *n; k >= 1; --k) { if (nounit) { temp = 1. / a[k + k * a_dim1]; i__1 = *m; for (i__ = 1; i__ <= i__1; ++i__) { b[i__ + k * b_dim1] = temp * b[i__ + k * b_dim1]; /* L270: */ } } i__1 = k - 1; for (j = 1; j <= i__1; ++j) { if (a[j + k * a_dim1] != 0.) { temp = a[j + k * a_dim1]; i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { b[i__ + j * b_dim1] -= temp * b[i__ + k * b_dim1]; /* L280: */ } } /* L290: */ } if (*alpha != 1.) { i__1 = *m; for (i__ = 1; i__ <= i__1; ++i__) { b[i__ + k * b_dim1] = *alpha * b[i__ + k * b_dim1] ; /* L300: */ } } /* L310: */ } } else { i__1 = *n; for (k = 1; k <= i__1; ++k) { if (nounit) { temp = 1. / a[k + k * a_dim1]; i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { b[i__ + k * b_dim1] = temp * b[i__ + k * b_dim1]; /* L320: */ } } i__2 = *n; for (j = k + 1; j <= i__2; ++j) { if (a[j + k * a_dim1] != 0.) 
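/* Column k of B, already divided by A(k,k) above when DIAG = 'N', updates the earlier column j: B(:,j) := B(:,j) - A(j,k)*B(:,k). */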
{ temp = a[j + k * a_dim1]; i__3 = *m; for (i__ = 1; i__ <= i__3; ++i__) { b[i__ + j * b_dim1] -= temp * b[i__ + k * b_dim1]; /* L330: */ } } /* L340: */ } if (*alpha != 1.) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { b[i__ + k * b_dim1] = *alpha * b[i__ + k * b_dim1] ; /* L350: */ } } /* L360: */ } } } } return 0; /* End of DTRSM . */ } /* dtrsm_ */ /* Subroutine */ int dtrsv_(char *uplo, char *trans, char *diag, integer *n, doublereal *a, integer *lda, doublereal *x, integer *incx) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2; /* Local variables */ static integer info; static doublereal temp; static integer i__, j; extern logical lsame_(char *, char *); static integer ix, jx, kx; extern /* Subroutine */ int xerbla_(char *, integer *); static logical nounit; /* Purpose ======= DTRSV solves one of the systems of equations A*x = b, or A'*x = b, where b and x are n element vectors and A is an n by n unit, or non-unit, upper or lower triangular matrix. No test for singularity or near-singularity is included in this routine. Such tests must be performed before calling this routine. Arguments ========== UPLO - CHARACTER*1. On entry, UPLO specifies whether the matrix is an upper or lower triangular matrix as follows: UPLO = 'U' or 'u' A is an upper triangular matrix. UPLO = 'L' or 'l' A is a lower triangular matrix. Unchanged on exit. TRANS - CHARACTER*1. On entry, TRANS specifies the equations to be solved as follows: TRANS = 'N' or 'n' A*x = b. TRANS = 'T' or 't' A'*x = b. TRANS = 'C' or 'c' A'*x = b. Unchanged on exit. DIAG - CHARACTER*1. On entry, DIAG specifies whether or not A is unit triangular as follows: DIAG = 'U' or 'u' A is assumed to be unit triangular. DIAG = 'N' or 'n' A is not assumed to be unit triangular. Unchanged on exit. N - INTEGER. On entry, N specifies the order of the matrix A. N must be at least zero. Unchanged on exit. A - DOUBLE PRECISION array of DIMENSION ( LDA, n ). Before entry with UPLO = 'U' or 'u', the leading n by n upper triangular part of the array A must contain the upper triangular matrix and the strictly lower triangular part of A is not referenced. Before entry with UPLO = 'L' or 'l', the leading n by n lower triangular part of the array A must contain the lower triangular matrix and the strictly upper triangular part of A is not referenced. Note that when DIAG = 'U' or 'u', the diagonal elements of A are not referenced either, but are assumed to be unity. Unchanged on exit. LDA - INTEGER. On entry, LDA specifies the first dimension of A as declared in the calling (sub) program. LDA must be at least max( 1, n ). Unchanged on exit. X - DOUBLE PRECISION array of dimension at least ( 1 + ( n - 1 )*abs( INCX ) ). Before entry, the incremented array X must contain the n element right-hand side vector b. On exit, X is overwritten with the solution vector x. INCX - INTEGER. On entry, INCX specifies the increment for the elements of X. INCX must not be zero. Unchanged on exit. Level 2 Blas routine. -- Written on 22-October-1986. Jack Dongarra, Argonne National Lab. Jeremy Du Croz, Nag Central Office. Sven Hammarling, Nag Central Office. Richard Hanson, Sandia National Labs. Test the input parameters. */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --x; /* Function Body */ info = 0; if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) { info = 1; } else if (! lsame_(trans, "N") && ! lsame_(trans, "T") && ! lsame_(trans, "C")) { info = 2; } else if (! lsame_(diag, "U") && ! 
lsame_(diag, "N")) { info = 3; } else if (*n < 0) { info = 4; } else if (*lda < max(1,*n)) { info = 6; } else if (*incx == 0) { info = 8; } if (info != 0) { xerbla_("DTRSV ", &info); return 0; } /* Quick return if possible. */ if (*n == 0) { return 0; } nounit = lsame_(diag, "N"); /* Set up the start point in X if the increment is not unity. This will be ( N - 1 )*INCX too small for descending loops. */ if (*incx <= 0) { kx = 1 - (*n - 1) * *incx; } else if (*incx != 1) { kx = 1; } /* Start the operations. In this version the elements of A are accessed sequentially with one pass through A. */ if (lsame_(trans, "N")) { /* Form x := inv( A )*x. */ if (lsame_(uplo, "U")) { if (*incx == 1) { for (j = *n; j >= 1; --j) { if (x[j] != 0.) { if (nounit) { x[j] /= a[j + j * a_dim1]; } temp = x[j]; for (i__ = j - 1; i__ >= 1; --i__) { x[i__] -= temp * a[i__ + j * a_dim1]; /* L10: */ } } /* L20: */ } } else { jx = kx + (*n - 1) * *incx; for (j = *n; j >= 1; --j) { if (x[jx] != 0.) { if (nounit) { x[jx] /= a[j + j * a_dim1]; } temp = x[jx]; ix = jx; for (i__ = j - 1; i__ >= 1; --i__) { ix -= *incx; x[ix] -= temp * a[i__ + j * a_dim1]; /* L30: */ } } jx -= *incx; /* L40: */ } } } else { if (*incx == 1) { i__1 = *n; for (j = 1; j <= i__1; ++j) { if (x[j] != 0.) { if (nounit) { x[j] /= a[j + j * a_dim1]; } temp = x[j]; i__2 = *n; for (i__ = j + 1; i__ <= i__2; ++i__) { x[i__] -= temp * a[i__ + j * a_dim1]; /* L50: */ } } /* L60: */ } } else { jx = kx; i__1 = *n; for (j = 1; j <= i__1; ++j) { if (x[jx] != 0.) { if (nounit) { x[jx] /= a[j + j * a_dim1]; } temp = x[jx]; ix = jx; i__2 = *n; for (i__ = j + 1; i__ <= i__2; ++i__) { ix += *incx; x[ix] -= temp * a[i__ + j * a_dim1]; /* L70: */ } } jx += *incx; /* L80: */ } } } } else { /* Form x := inv( A' )*x. */ if (lsame_(uplo, "U")) { if (*incx == 1) { i__1 = *n; for (j = 1; j <= i__1; ++j) { temp = x[j]; i__2 = j - 1; for (i__ = 1; i__ <= i__2; ++i__) { temp -= a[i__ + j * a_dim1] * x[i__]; /* L90: */ } if (nounit) { temp /= a[j + j * a_dim1]; } x[j] = temp; /* L100: */ } } else { jx = kx; i__1 = *n; for (j = 1; j <= i__1; ++j) { temp = x[jx]; ix = kx; i__2 = j - 1; for (i__ = 1; i__ <= i__2; ++i__) { temp -= a[i__ + j * a_dim1] * x[ix]; ix += *incx; /* L110: */ } if (nounit) { temp /= a[j + j * a_dim1]; } x[jx] = temp; jx += *incx; /* L120: */ } } } else { if (*incx == 1) { for (j = *n; j >= 1; --j) { temp = x[j]; i__1 = j + 1; for (i__ = *n; i__ >= i__1; --i__) { temp -= a[i__ + j * a_dim1] * x[i__]; /* L130: */ } if (nounit) { temp /= a[j + j * a_dim1]; } x[j] = temp; /* L140: */ } } else { kx += (*n - 1) * *incx; jx = kx; for (j = *n; j >= 1; --j) { temp = x[jx]; ix = kx; i__1 = j + 1; for (i__ = *n; i__ >= i__1; --i__) { temp -= a[i__ + j * a_dim1] * x[ix]; ix -= *incx; /* L150: */ } if (nounit) { temp /= a[j + j * a_dim1]; } x[jx] = temp; jx -= *incx; /* L160: */ } } } } return 0; /* End of DTRSV . */ } /* dtrsv_ */ integer idamax_(integer *n, doublereal *dx, integer *incx) { /* System generated locals */ integer ret_val, i__1; doublereal d__1; /* Local variables */ static doublereal dmax__; static integer i__, ix; /* Purpose ======= finds the index of element having max. absolute value. jack dongarra, linpack, 3/11/78. modified 3/93 to return if incx .le. 0. 
modified 12/3/93, array(1) declarations changed to array(*) */ /* Parameter adjustments */ --dx; /* Function Body */ ret_val = 0; if (*n < 1 || *incx <= 0) { return ret_val; } ret_val = 1; if (*n == 1) { return ret_val; } if (*incx == 1) { goto L20; } /* code for increment not equal to 1 */ ix = 1; dmax__ = abs(dx[1]); ix += *incx; i__1 = *n; for (i__ = 2; i__ <= i__1; ++i__) { if ((d__1 = dx[ix], abs(d__1)) <= dmax__) { goto L5; } ret_val = i__; dmax__ = (d__1 = dx[ix], abs(d__1)); L5: ix += *incx; /* L10: */ } return ret_val; /* code for increment equal to 1 */ L20: dmax__ = abs(dx[1]); i__1 = *n; for (i__ = 2; i__ <= i__1; ++i__) { if ((d__1 = dx[i__], abs(d__1)) <= dmax__) { goto L30; } ret_val = i__; dmax__ = (d__1 = dx[i__], abs(d__1)); L30: ; } return ret_val; } /* idamax_ */ logical lsame_(char *ca, char *cb) { /* System generated locals */ logical ret_val; /* Local variables */ static integer inta, intb, zcode; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= LSAME returns .TRUE. if CA is the same letter as CB regardless of case. Arguments ========= CA (input) CHARACTER*1 CB (input) CHARACTER*1 CA and CB specify the single characters to be compared. ===================================================================== Test if the characters are equal */ ret_val = *(unsigned char *)ca == *(unsigned char *)cb; if (ret_val) { return ret_val; } /* Now test for equivalence if both characters are alphabetic. */ zcode = 'Z'; /* Use 'Z' rather than 'A' so that ASCII can be detected on Prime machines, on which ICHAR returns a value with bit 8 set. ICHAR('A') on Prime machines returns 193 which is the same as ICHAR('A') on an EBCDIC machine. */ inta = *(unsigned char *)ca; intb = *(unsigned char *)cb; if (zcode == 90 || zcode == 122) { /* ASCII is assumed - ZCODE is the ASCII code of either lower or upper case 'Z'. */ if (inta >= 97 && inta <= 122) { inta += -32; } if (intb >= 97 && intb <= 122) { intb += -32; } } else if (zcode == 233 || zcode == 169) { /* EBCDIC is assumed - ZCODE is the EBCDIC code of either lower or upper case 'Z'. */ if (inta >= 129 && inta <= 137 || inta >= 145 && inta <= 153 || inta >= 162 && inta <= 169) { inta += 64; } if (intb >= 129 && intb <= 137 || intb >= 145 && intb <= 153 || intb >= 162 && intb <= 169) { intb += 64; } } else if (zcode == 218 || zcode == 250) { /* ASCII is assumed, on Prime machines - ZCODE is the ASCII code plus 128 of either lower or upper case 'Z'. */ if (inta >= 225 && inta <= 250) { inta += -32; } if (intb >= 225 && intb <= 250) { intb += -32; } } ret_val = inta == intb; /* RETURN End of LSAME */ return ret_val; } /* lsame_ */ /* Subroutine */ int xerbla_(char *srname, integer *info) { /* Format strings */ static char fmt_9999[] = "(\002 ** On entry to \002,a6,\002 parameter nu" "mber \002,i2,\002 had \002,\002an illegal value\002)"; /* Builtin functions */ integer s_wsfe(cilist *), do_fio(integer *, char *, ftnlen), e_wsfe(void); /* Subroutine */ int s_stop(char *, ftnlen); /* Fortran I/O blocks */ static cilist io___197 = { 0, 6, 0, fmt_9999, 0 }; /* -- LAPACK auxiliary routine (preliminary version) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= XERBLA is an error handler for the LAPACK routines. It is called by an LAPACK routine if an input parameter has an invalid value. A message is printed and execution stops. 
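For example, the call xerbla_("DTRSV ", &info) made above when DTRSV is given INCX = 0 (so that INFO = 8) prints a message of the form: ** On entry to DTRSV parameter number 8 had an illegal value.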
Installers may consider modifying the STOP statement in order to call system-specific exception-handling facilities. Arguments ========= SRNAME (input) CHARACTER*6 The name of the routine which called XERBLA. INFO (input) INTEGER The position of the invalid parameter in the parameter list of the calling routine. */ s_wsfe(&io___197); do_fio(&c__1, srname, (ftnlen)6); do_fio(&c__1, (char *)&(*info), (ftnlen)sizeof(integer)); e_wsfe(); s_stop("", (ftnlen)0); /* End of XERBLA */ return 0; } /* xerbla_ */ nipy-0.3.0/libcstat/lapack_lite/dlamch.c000066400000000000000000000576101210344137400201520ustar00rootroot00000000000000#include #include "f2c.h" /* If config.h is available, we only need dlamc3 */ #ifndef HAVE_CONFIG doublereal dlamch_(char *cmach) { /* -- LAPACK auxiliary routine (version 3.0) -- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., Courant Institute, Argonne National Lab, and Rice University October 31, 1992 Purpose ======= DLAMCH determines double precision machine parameters. Arguments ========= CMACH (input) CHARACTER*1 Specifies the value to be returned by DLAMCH: = 'E' or 'e', DLAMCH := eps = 'S' or 's , DLAMCH := sfmin = 'B' or 'b', DLAMCH := base = 'P' or 'p', DLAMCH := eps*base = 'N' or 'n', DLAMCH := t = 'R' or 'r', DLAMCH := rnd = 'M' or 'm', DLAMCH := emin = 'U' or 'u', DLAMCH := rmin = 'L' or 'l', DLAMCH := emax = 'O' or 'o', DLAMCH := rmax where eps = relative machine precision sfmin = safe minimum, such that 1/sfmin does not overflow base = base of the machine prec = eps*base t = number of (base) digits in the mantissa rnd = 1.0 when rounding occurs in addition, 0.0 otherwise emin = minimum exponent before (gradual) underflow rmin = underflow threshold - base**(emin-1) emax = largest exponent before overflow rmax = overflow threshold - (base**emax)*(1-eps) ===================================================================== */ /* >>Start of File<< Initialized data */ static logical first = TRUE_; /* System generated locals */ integer i__1; doublereal ret_val; /* Builtin functions */ double pow_di(doublereal *, integer *); /* Local variables */ static doublereal base; static integer beta; static doublereal emin, prec, emax; static integer imin, imax; static logical lrnd; static doublereal rmin, rmax, t, rmach; extern logical lsame_(char *, char *); static doublereal small, sfmin; extern /* Subroutine */ int dlamc2_(integer *, integer *, logical *, doublereal *, integer *, doublereal *, integer *, doublereal *); static integer it; static doublereal rnd, eps; if (first) { first = FALSE_; dlamc2_(&beta, &it, &lrnd, &eps, &imin, &rmin, &imax, &rmax); base = (doublereal) beta; t = (doublereal) it; if (lrnd) { rnd = 1.; i__1 = 1 - it; eps = pow_di(&base, &i__1) / 2; } else { rnd = 0.; i__1 = 1 - it; eps = pow_di(&base, &i__1); } prec = eps * base; emin = (doublereal) imin; emax = (doublereal) imax; sfmin = rmin; small = 1. / rmax; if (small >= sfmin) { /* Use SMALL plus a bit, to avoid the possibility of rou nding causing overflow when computing 1/sfmin. 
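(For IEEE double precision this adjustment is not needed: SMALL = 1/RMAX is about 5.6e-309, which is below RMIN = 2**(-1022), so SFMIN remains RMIN.)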
*/ sfmin = small * (eps + 1.); } } if (lsame_(cmach, "E")) { rmach = eps; } else if (lsame_(cmach, "S")) { rmach = sfmin; } else if (lsame_(cmach, "B")) { rmach = base; } else if (lsame_(cmach, "P")) { rmach = prec; } else if (lsame_(cmach, "N")) { rmach = t; } else if (lsame_(cmach, "R")) { rmach = rnd; } else if (lsame_(cmach, "M")) { rmach = emin; } else if (lsame_(cmach, "U")) { rmach = rmin; } else if (lsame_(cmach, "L")) { rmach = emax; } else if (lsame_(cmach, "O")) { rmach = rmax; } ret_val = rmach; return ret_val; /* End of DLAMCH */ } /* dlamch_ */ /* Subroutine */ int dlamc1_(integer *beta, integer *t, logical *rnd, logical *ieee1) { /* -- LAPACK auxiliary routine (version 3.0) -- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., Courant Institute, Argonne National Lab, and Rice University October 31, 1992 Purpose ======= DLAMC1 determines the machine parameters given by BETA, T, RND, and IEEE1. Arguments ========= BETA (output) INTEGER The base of the machine. T (output) INTEGER The number of ( BETA ) digits in the mantissa. RND (output) LOGICAL Specifies whether proper rounding ( RND = .TRUE. ) or chopping ( RND = .FALSE. ) occurs in addition. This may not be a reliable guide to the way in which the machine performs its arithmetic. IEEE1 (output) LOGICAL Specifies whether rounding appears to be done in the IEEE 'round to nearest' style. Further Details =============== The routine is based on the routine ENVRON by Malcolm and incorporates suggestions by Gentleman and Marovich. See Malcolm M. A. (1972) Algorithms to reveal properties of floating-point arithmetic. Comms. of the ACM, 15, 949-951. Gentleman W. M. and Marovich S. B. (1974) More on algorithms that reveal properties of floating point arithmetic units. Comms. of the ACM, 17, 276-277. ===================================================================== */ /* Initialized data */ static logical first = TRUE_; /* System generated locals */ doublereal d__1, d__2; /* Local variables */ static logical lrnd; static doublereal a, b, c, f; static integer lbeta; static doublereal savec; extern doublereal dlamc3_(doublereal *, doublereal *); static logical lieee1; static doublereal t1, t2; static integer lt; static doublereal one, qtr; if (first) { first = FALSE_; one = 1.; /* LBETA, LIEEE1, LT and LRND are the local values of BE TA, IEEE1, T and RND. Throughout this routine we use the function DLAMC3 to ens ure that relevant values are stored and not held in registers, or are not affected by optimizers. Compute a = 2.0**m with the smallest positive integer m s uch that fl( a + 1.0 ) = a. */ a = 1.; c = 1.; /* + WHILE( C.EQ.ONE )LOOP */ L10: if (c == one) { a *= 2; c = dlamc3_(&a, &one); d__1 = -a; c = dlamc3_(&c, &d__1); goto L10; } /* + END WHILE Now compute b = 2.0**m with the smallest positive integer m such that fl( a + b ) .gt. a. */ b = 1.; c = dlamc3_(&a, &b); /* + WHILE( C.EQ.A )LOOP */ L20: if (c == a) { b *= 2; c = dlamc3_(&a, &b); goto L20; } /* + END WHILE Now compute the base. a and c are neighbouring floating po int numbers in the interval ( beta**t, beta**( t + 1 ) ) and so their difference is beta. Adding 0.25 to c is to ensure that it is truncated to beta and not ( beta - 1 ). */ qtr = one / 4; savec = c; d__1 = -a; c = dlamc3_(&c, &d__1); lbeta = (integer) (c + qtr); /* Now determine whether rounding or chopping occurs, by addin g a bit less than beta/2 and a bit more than beta/2 to a. 
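For example, on an IEEE double precision machine a = 2**53 and beta = 2: fl( a + 0.98 ) equals a while fl( a + 1.02 ) equals a + 2, so LRND is set to .TRUE.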
*/ b = (doublereal) lbeta; d__1 = b / 2; d__2 = -b / 100; f = dlamc3_(&d__1, &d__2); c = dlamc3_(&f, &a); if (c == a) { lrnd = TRUE_; } else { lrnd = FALSE_; } d__1 = b / 2; d__2 = b / 100; f = dlamc3_(&d__1, &d__2); c = dlamc3_(&f, &a); if (lrnd && c == a) { lrnd = FALSE_; } /* Try and decide whether rounding is done in the IEEE 'round to nearest' style. B/2 is half a unit in the last place of the two numbers A and SAVEC. Furthermore, A is even, i.e. has last bit zero, and SAVEC is odd. Thus adding B/2 to A should not cha nge A, but adding B/2 to SAVEC should change SAVEC. */ d__1 = b / 2; t1 = dlamc3_(&d__1, &a); d__1 = b / 2; t2 = dlamc3_(&d__1, &savec); lieee1 = t1 == a && t2 > savec && lrnd; /* Now find the mantissa, t. It should be the integer part of log to the base beta of a, however it is safer to determine t by powering. So we find t as the smallest positive integer for which fl( beta**t + 1.0 ) = 1.0. */ lt = 0; a = 1.; c = 1.; /* + WHILE( C.EQ.ONE )LOOP */ L30: if (c == one) { ++lt; a *= lbeta; c = dlamc3_(&a, &one); d__1 = -a; c = dlamc3_(&c, &d__1); goto L30; } /* + END WHILE */ } *beta = lbeta; *t = lt; *rnd = lrnd; *ieee1 = lieee1; return 0; /* End of DLAMC1 */ } /* dlamc1_ */ /* Subroutine */ int dlamc2_(integer *beta, integer *t, logical *rnd, doublereal *eps, integer *emin, doublereal *rmin, integer *emax, doublereal *rmax) { /* -- LAPACK auxiliary routine (version 3.0) -- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., Courant Institute, Argonne National Lab, and Rice University October 31, 1992 Purpose ======= DLAMC2 determines the machine parameters specified in its argument list. Arguments ========= BETA (output) INTEGER The base of the machine. T (output) INTEGER The number of ( BETA ) digits in the mantissa. RND (output) LOGICAL Specifies whether proper rounding ( RND = .TRUE. ) or chopping ( RND = .FALSE. ) occurs in addition. This may not be a reliable guide to the way in which the machine performs its arithmetic. EPS (output) DOUBLE PRECISION The smallest positive number such that fl( 1.0 - EPS ) .LT. 1.0, where fl denotes the computed value. EMIN (output) INTEGER The minimum exponent before (gradual) underflow occurs. RMIN (output) DOUBLE PRECISION The smallest normalized number for the machine, given by BASE**( EMIN - 1 ), where BASE is the floating point value of BETA. EMAX (output) INTEGER The maximum exponent before overflow occurs. RMAX (output) DOUBLE PRECISION The largest positive number for the machine, given by BASE**EMAX * ( 1 - EPS ), where BASE is the floating point value of BETA. Further Details =============== The computation of EPS is based on a routine PARANOIA by W. Kahan of the University of California at Berkeley. 
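For reference, on an IEEE double precision machine these parameters work out to BETA = 2, T = 53, RND = .TRUE., EPS = 2**(-53) (about 1.1e-16), EMIN = -1021, RMIN = 2**(-1022), EMAX = 1024 and RMAX = ( 1 - EPS )*2**1024 (about 1.8e+308).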
===================================================================== */ /* Initialized data */ static logical first = TRUE_; static logical iwarn = FALSE_; /* System generated locals */ integer i__1; doublereal d__1, d__2, d__3, d__4, d__5; /* Builtin functions */ double pow_di(doublereal *, integer *); /* Local variables */ static logical ieee; static doublereal half; static logical lrnd; static doublereal leps, zero, a, b, c; static integer i, lbeta; static doublereal rbase; static integer lemin, lemax, gnmin; static doublereal small; static integer gpmin; static doublereal third, lrmin, lrmax, sixth; extern /* Subroutine */ int dlamc1_(integer *, integer *, logical *, logical *); extern doublereal dlamc3_(doublereal *, doublereal *); static logical lieee1; extern /* Subroutine */ int dlamc4_(integer *, doublereal *, integer *), dlamc5_(integer *, integer *, integer *, logical *, integer *, doublereal *); static integer lt, ngnmin, ngpmin; static doublereal one, two; if (first) { first = FALSE_; zero = 0.; one = 1.; two = 2.; /* LBETA, LT, LRND, LEPS, LEMIN and LRMIN are the local values of BETA, T, RND, EPS, EMIN and RMIN. Throughout this routine we use the function DLAMC3 to ens ure that relevant values are stored and not held in registers, or are not affected by optimizers. DLAMC1 returns the parameters LBETA, LT, LRND and LIEEE1. */ dlamc1_(&lbeta, <, &lrnd, &lieee1); /* Start to find EPS. */ b = (doublereal) lbeta; i__1 = -lt; a = pow_di(&b, &i__1); leps = a; /* Try some tricks to see whether or not this is the correct E PS. */ b = two / 3; half = one / 2; d__1 = -half; sixth = dlamc3_(&b, &d__1); third = dlamc3_(&sixth, &sixth); d__1 = -half; b = dlamc3_(&third, &d__1); b = dlamc3_(&b, &sixth); b = abs(b); if (b < leps) { b = leps; } leps = 1.; /* + WHILE( ( LEPS.GT.B ).AND.( B.GT.ZERO ) )LOOP */ L10: if (leps > b && b > zero) { leps = b; d__1 = half * leps; /* Computing 5th power */ d__3 = two, d__4 = d__3, d__3 *= d__3; /* Computing 2nd power */ d__5 = leps; d__2 = d__4 * (d__3 * d__3) * (d__5 * d__5); c = dlamc3_(&d__1, &d__2); d__1 = -c; c = dlamc3_(&half, &d__1); b = dlamc3_(&half, &c); d__1 = -b; c = dlamc3_(&half, &d__1); b = dlamc3_(&half, &c); goto L10; } /* + END WHILE */ if (a < leps) { leps = a; } /* Computation of EPS complete. Now find EMIN. Let A = + or - 1, and + or - (1 + BASE**(-3 )). Keep dividing A by BETA until (gradual) underflow occurs. T his is detected when we cannot recover the previous A. 
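The four probes computed below (NGPMIN, NGNMIN, GPMIN and GNMIN) record the exponent at which the value can no longer be recovered when +1, -1, +(1 + BASE**(-3)) and -(1 + BASE**(-3)) are repeatedly divided by BETA; comparing them distinguishes gradual (IEEE-style) underflow from abrupt underflow and twos-complement exponent handling.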
*/ rbase = one / lbeta; small = one; for (i = 1; i <= 3; ++i) { d__1 = small * rbase; small = dlamc3_(&d__1, &zero); /* L20: */ } a = dlamc3_(&one, &small); dlamc4_(&ngpmin, &one, &lbeta); d__1 = -one; dlamc4_(&ngnmin, &d__1, &lbeta); dlamc4_(&gpmin, &a, &lbeta); d__1 = -a; dlamc4_(&gnmin, &d__1, &lbeta); ieee = FALSE_; if (ngpmin == ngnmin && gpmin == gnmin) { if (ngpmin == gpmin) { lemin = ngpmin; /* ( Non twos-complement machines, no gradual under flow; e.g., VAX ) */ } else if (gpmin - ngpmin == 3) { lemin = ngpmin - 1 + lt; ieee = TRUE_; /* ( Non twos-complement machines, with gradual und erflow; e.g., IEEE standard followers ) */ } else { lemin = min(ngpmin,gpmin); /* ( A guess; no known machine ) */ iwarn = TRUE_; } } else if (ngpmin == gpmin && ngnmin == gnmin) { if ((i__1 = ngpmin - ngnmin, abs(i__1)) == 1) { lemin = max(ngpmin,ngnmin); /* ( Twos-complement machines, no gradual underflow ; e.g., CYBER 205 ) */ } else { lemin = min(ngpmin,ngnmin); /* ( A guess; no known machine ) */ iwarn = TRUE_; } } else if ((i__1 = ngpmin - ngnmin, abs(i__1)) == 1 && gpmin == gnmin) { if (gpmin - min(ngpmin,ngnmin) == 3) { lemin = max(ngpmin,ngnmin) - 1 + lt; /* ( Twos-complement machines with gradual underflo w; no known machine ) */ } else { lemin = min(ngpmin,ngnmin); /* ( A guess; no known machine ) */ iwarn = TRUE_; } } else { /* Computing MIN */ i__1 = min(ngpmin,ngnmin), i__1 = min(i__1,gpmin); lemin = min(i__1,gnmin); /* ( A guess; no known machine ) */ iwarn = TRUE_; } /* ** Comment out this if block if EMIN is ok */ if (iwarn) { first = TRUE_; printf("\n\n WARNING. The value EMIN may be incorrect:- "); printf("EMIN = %8i\n",lemin); printf("If, after inspection, the value EMIN looks acceptable"); printf("please comment out \n the IF block as marked within the"); printf("code of routine DLAMC2, \n otherwise supply EMIN"); printf("explicitly.\n"); } /* ** Assume IEEE arithmetic if we found denormalised numbers abo ve, or if arithmetic seems to round in the IEEE style, determi ned in routine DLAMC1. A true IEEE machine should have both thi ngs true; however, faulty machines may have one or the other. */ ieee = ieee || lieee1; /* Compute RMIN by successive division by BETA. We could comp ute RMIN as BASE**( EMIN - 1 ), but some machines underflow dur ing this computation. */ lrmin = 1.; i__1 = 1 - lemin; for (i = 1; i <= 1-lemin; ++i) { d__1 = lrmin * rbase; lrmin = dlamc3_(&d__1, &zero); /* L30: */ } /* Finally, call DLAMC5 to compute EMAX and RMAX. */ dlamc5_(&lbeta, <, &lemin, &ieee, &lemax, &lrmax); } *beta = lbeta; *t = lt; *rnd = lrnd; *eps = leps; *emin = lemin; *rmin = lrmin; *emax = lemax; *rmax = lrmax; return 0; /* End of DLAMC2 */ } /* dlamc2_ */ #endif doublereal dlamc3_(doublereal *a, doublereal *b) { /* -- LAPACK auxiliary routine (version 3.0) -- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., Courant Institute, Argonne National Lab, and Rice University October 31, 1992 Purpose ======= DLAMC3 is intended to force A and B to be stored prior to doing the addition of A and B , for use in situations where optimizers might hold one of these in a register. Arguments ========= A, B (input) DOUBLE PRECISION The values A and B. 
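In this C translation the sum is returned through a volatile local variable, which prevents the compiler from keeping A + B in a wider register and thereby defeating the test.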
===================================================================== */ /* >>Start of File<< System generated locals */ volatile doublereal ret_val; ret_val = *a + *b; return ret_val; /* End of DLAMC3 */ } /* dlamc3_ */ #ifndef HAVE_CONFIG /* Subroutine */ int dlamc4_(integer *emin, doublereal *start, integer *base) { /* -- LAPACK auxiliary routine (version 2.0) -- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., Courant Institute, Argonne National Lab, and Rice University October 31, 1992 Purpose ======= DLAMC4 is a service routine for DLAMC2. Arguments ========= EMIN (output) EMIN The minimum exponent before (gradual) underflow, computed by setting A = START and dividing by BASE until the previous A can not be recovered. START (input) DOUBLE PRECISION The starting point for determining EMIN. BASE (input) INTEGER The base of the machine. ===================================================================== */ /* System generated locals */ integer i__1; doublereal d__1; /* Local variables */ static doublereal zero, a; static integer i; static doublereal rbase, b1, b2, c1, c2, d1, d2; extern doublereal dlamc3_(doublereal *, doublereal *); static doublereal one; a = *start; one = 1.; rbase = one / *base; zero = 0.; *emin = 1; d__1 = a * rbase; b1 = dlamc3_(&d__1, &zero); c1 = a; c2 = a; d1 = a; d2 = a; /* + WHILE( ( C1.EQ.A ).AND.( C2.EQ.A ).AND. $ ( D1.EQ.A ).AND.( D2.EQ.A ) )LOOP */ L10: if (c1 == a && c2 == a && d1 == a && d2 == a) { --(*emin); a = b1; d__1 = a / *base; b1 = dlamc3_(&d__1, &zero); d__1 = b1 * *base; c1 = dlamc3_(&d__1, &zero); d1 = zero; i__1 = *base; for (i = 1; i <= *base; ++i) { d1 += b1; /* L20: */ } d__1 = a * rbase; b2 = dlamc3_(&d__1, &zero); d__1 = b2 / rbase; c2 = dlamc3_(&d__1, &zero); d2 = zero; i__1 = *base; for (i = 1; i <= *base; ++i) { d2 += b2; /* L30: */ } goto L10; } /* + END WHILE */ return 0; /* End of DLAMC4 */ } /* dlamc4_ */ /* Subroutine */ int dlamc5_(integer *beta, integer *p, integer *emin, logical *ieee, integer *emax, doublereal *rmax) { /* -- LAPACK auxiliary routine (version 3.0) -- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., Courant Institute, Argonne National Lab, and Rice University October 31, 1992 Purpose ======= DLAMC5 attempts to compute RMAX, the largest machine floating-point number, without overflow. It assumes that EMAX + abs(EMIN) sum approximately to a power of 2. It will fail on machines where this assumption does not hold, for example, the Cyber 205 (EMIN = -28625, EMAX = 28718). It will also fail if the value supplied for EMIN is too large (i.e. too close to zero), probably with overflow. Arguments ========= BETA (input) INTEGER The base of floating-point arithmetic. P (input) INTEGER The number of base BETA digits in the mantissa of a floating-point value. EMIN (input) INTEGER The minimum exponent before (gradual) underflow. IEEE (input) LOGICAL A logical flag specifying whether or not the arithmetic system is thought to comply with the IEEE standard. EMAX (output) INTEGER The largest exponent before overflow RMAX (output) DOUBLE PRECISION The largest machine floating-point number. ===================================================================== First compute LEXP and UEXP, two powers of 2 that bound abs(EMIN). We then assume that EMAX + abs(EMIN) will sum approximately to the bound that is closest to abs(EMIN). (EMAX is the exponent of the required number RMAX). 
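For IEEE double precision (BETA = 2, P = 53, EMIN = -1021) this gives EXBITS = 11 and EXPSUM = 2048; the implicit-bit and infinity/NaN adjustments below then reduce EMAX to 1024, and RMAX = ( 1 - 2**(-53) )*2**1024, about 1.8e+308.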
*/ /* Table of constant values */ static doublereal c_b5 = 0.; /* System generated locals */ integer i__1; doublereal d__1; /* Local variables */ static integer lexp; static doublereal oldy; static integer uexp, i; static doublereal y, z; static integer nbits; extern doublereal dlamc3_(doublereal *, doublereal *); static doublereal recbas; static integer exbits, expsum, try__; lexp = 1; exbits = 1; L10: try__ = lexp << 1; if (try__ <= -(*emin)) { lexp = try__; ++exbits; goto L10; } if (lexp == -(*emin)) { uexp = lexp; } else { uexp = try__; ++exbits; } /* Now -LEXP is less than or equal to EMIN, and -UEXP is greater than or equal to EMIN. EXBITS is the number of bits needed to store the exponent. */ if (uexp + *emin > -lexp - *emin) { expsum = lexp << 1; } else { expsum = uexp << 1; } /* EXPSUM is the exponent range, approximately equal to EMAX - EMIN + 1 . */ *emax = expsum + *emin - 1; nbits = exbits + 1 + *p; /* NBITS is the total number of bits needed to store a floating-point number. */ if (nbits % 2 == 1 && *beta == 2) { /* Either there are an odd number of bits used to store a floating-point number, which is unlikely, or some bits are not used in the representation of numbers, which is possible , (e.g. Cray machines) or the mantissa has an implicit bit, (e.g. IEEE machines, Dec Vax machines), which is perhaps the most likely. We have to assume the last alternative. If this is true, then we need to reduce EMAX by one because there must be some way of representing zero in an implicit-b it system. On machines like Cray, we are reducing EMAX by one unnecessarily. */ --(*emax); } if (*ieee) { /* Assume we are on an IEEE machine which reserves one exponent for infinity and NaN. */ --(*emax); } /* Now create RMAX, the largest machine number, which should be equal to (1.0 - BETA**(-P)) * BETA**EMAX . First compute 1.0 - BETA**(-P), being careful that the result is less than 1.0 . */ recbas = 1. / *beta; z = *beta - 1.; y = 0.; i__1 = *p; for (i = 1; i <= *p; ++i) { z *= recbas; if (y < 1.) { oldy = y; } y = dlamc3_(&y, &z); /* L20: */ } if (y >= 1.) { y = oldy; } /* Now multiply by BETA**EMAX to get RMAX. */ i__1 = *emax; for (i = 1; i <= *emax; ++i) { d__1 = y * *beta; y = dlamc3_(&d__1, &c_b5); /* L30: */ } *rmax = y; return 0; /* End of DLAMC5 */ } /* dlamc5_ */ #endif nipy-0.3.0/libcstat/lapack_lite/dlapack_lite.c000066400000000000000000043447051210344137400213460ustar00rootroot00000000000000/* NOTE: This is generated code. Look in Misc/lapack_lite for information on remaking this file. 
*/ #include "f2c.h" #ifdef HAVE_CONFIG #include "config.h" #else extern doublereal dlamch_(char *); #define EPSILON dlamch_("Epsilon") #define SAFEMINIMUM dlamch_("Safe minimum") #define PRECISION dlamch_("Precision") #define BASE dlamch_("Base") #endif extern doublereal dlapy2_(doublereal *x, doublereal *y); /* Table of constant values */ static integer c__9 = 9; static integer c__0 = 0; static doublereal c_b15 = 1.; static integer c__1 = 1; static doublereal c_b29 = 0.; static doublereal c_b94 = -.125; static doublereal c_b151 = -1.; static integer c_n1 = -1; static integer c__3 = 3; static integer c__2 = 2; static integer c__65 = 65; static integer c__6 = 6; static integer c__12 = 12; static integer c__49 = 49; static integer c__4 = 4; static logical c_false = FALSE_; static integer c__13 = 13; static integer c__15 = 15; static integer c__14 = 14; static integer c__16 = 16; static logical c_true = TRUE_; static integer c__10 = 10; static integer c__11 = 11; static doublereal c_b3176 = 2.; static real c_b4270 = 0.f; static real c_b4271 = 1.f; /* Subroutine */ int dbdsdc_(char *uplo, char *compq, integer *n, doublereal * d__, doublereal *e, doublereal *u, integer *ldu, doublereal *vt, integer *ldvt, doublereal *q, integer *iq, doublereal *work, integer * iwork, integer *info) { /* System generated locals */ integer u_dim1, u_offset, vt_dim1, vt_offset, i__1, i__2; doublereal d__1; /* Builtin functions */ double d_sign(doublereal *, doublereal *), log(doublereal); /* Local variables */ static integer difl, difr, ierr, perm, mlvl, sqre, i__, j, k; static doublereal p, r__; static integer z__; extern logical lsame_(char *, char *); extern /* Subroutine */ int dlasr_(char *, char *, char *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *), dcopy_(integer *, doublereal *, integer * , doublereal *, integer *), dswap_(integer *, doublereal *, integer *, doublereal *, integer *); static integer poles, iuplo, nsize, start; extern /* Subroutine */ int dlasd0_(integer *, integer *, doublereal *, doublereal *, doublereal *, integer *, doublereal *, integer *, integer *, integer *, doublereal *, integer *); static integer ic, ii, kk; static doublereal cs; extern /* Subroutine */ int dlasda_(integer *, integer *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, doublereal *, doublereal *, integer *, integer *, integer *, integer *, doublereal *, doublereal *, doublereal *, doublereal *, integer *, integer *); static integer is, iu; static doublereal sn; extern /* Subroutine */ int dlascl_(char *, integer *, integer *, doublereal *, doublereal *, integer *, integer *, doublereal *, integer *, integer *), dlasdq_(char *, integer *, integer *, integer *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *), dlaset_(char *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *), dlartg_(doublereal *, doublereal *, doublereal *, doublereal *, doublereal *); extern integer ilaenv_(integer *, char *, char *, integer *, integer *, integer *, integer *, ftnlen, ftnlen); extern /* Subroutine */ int xerbla_(char *, integer *); static integer givcol; extern doublereal dlanst_(char *, integer *, doublereal *, doublereal *); static integer icompq; static doublereal orgnrm; static integer givnum, givptr, nm1, qstart, smlsiz, wstart, smlszp; static doublereal eps; static integer ivt; /* -- LAPACK 
routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DBDSDC computes the singular value decomposition (SVD) of a real N-by-N (upper or lower) bidiagonal matrix B: B = U * S * VT, using a divide and conquer method, where S is a diagonal matrix with non-negative diagonal elements (the singular values of B), and U and VT are orthogonal matrices of left and right singular vectors, respectively. DBDSDC can be used to compute all singular values, and optionally, singular vectors or singular vectors in compact form. This code makes very mild assumptions about floating point arithmetic. It will work on machines with a guard digit in add/subtract, or on those binary machines without guard digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or Cray-2. It could conceivably fail on hexadecimal or decimal machines without guard digits, but we know of none. See DLASD3 for details. The code currently calls DLASDQ if singular values only are desired. However, it can be slightly modified to compute singular values using the divide and conquer method. Arguments ========= UPLO (input) CHARACTER*1 = 'U': B is upper bidiagonal. = 'L': B is lower bidiagonal. COMPQ (input) CHARACTER*1 Specifies whether singular vectors are to be computed as follows: = 'N': Compute singular values only; = 'P': Compute singular values and compute singular vectors in compact form; = 'I': Compute singular values and singular vectors. N (input) INTEGER The order of the matrix B. N >= 0. D (input/output) DOUBLE PRECISION array, dimension (N) On entry, the n diagonal elements of the bidiagonal matrix B. On exit, if INFO=0, the singular values of B. E (input/output) DOUBLE PRECISION array, dimension (N-1) On entry, the elements of E contain the offdiagonal elements of the bidiagonal matrix whose SVD is desired. On exit, E has been destroyed. U (output) DOUBLE PRECISION array, dimension (LDU,N) If COMPQ = 'I', then: On exit, if INFO = 0, U contains the left singular vectors of the bidiagonal matrix. For other values of COMPQ, U is not referenced. LDU (input) INTEGER The leading dimension of the array U. LDU >= 1. If singular vectors are desired, then LDU >= max( 1, N ). VT (output) DOUBLE PRECISION array, dimension (LDVT,N) If COMPQ = 'I', then: On exit, if INFO = 0, VT' contains the right singular vectors of the bidiagonal matrix. For other values of COMPQ, VT is not referenced. LDVT (input) INTEGER The leading dimension of the array VT. LDVT >= 1. If singular vectors are desired, then LDVT >= max( 1, N ). Q (output) DOUBLE PRECISION array, dimension (LDQ) If COMPQ = 'P', then: On exit, if INFO = 0, Q and IQ contain the left and right singular vectors in a compact form, requiring O(N log N) space instead of 2*N**2. In particular, Q contains all the DOUBLE PRECISION data in LDQ >= N*(11 + 2*SMLSIZ + 8*INT(LOG_2(N/(SMLSIZ+1)))) words of memory, where SMLSIZ is returned by ILAENV and is equal to the maximum size of the subproblems at the bottom of the computation tree (usually about 25). For other values of COMPQ, Q is not referenced. IQ (output) INTEGER array, dimension (LDIQ) If COMPQ = 'P', then: On exit, if INFO = 0, Q and IQ contain the left and right singular vectors in a compact form, requiring O(N log N) space instead of 2*N**2. 
In particular, IQ contains all INTEGER data in LDIQ >= N*(3 + 3*INT(LOG_2(N/(SMLSIZ+1)))) words of memory, where SMLSIZ is returned by ILAENV and is equal to the maximum size of the subproblems at the bottom of the computation tree (usually about 25). For other values of COMPQ, IQ is not referenced. WORK (workspace) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) If COMPQ = 'N' then LWORK >= (4 * N). If COMPQ = 'P' then LWORK >= (6 * N). If COMPQ = 'I' then LWORK >= (3 * N**2 + 4 * N). IWORK (workspace) INTEGER array, dimension (8*N) INFO (output) INTEGER = 0: successful exit. < 0: if INFO = -i, the i-th argument had an illegal value. > 0: The algorithm failed to compute an singular value. The update process of divide and conquer failed. Further Details =============== Based on contributions by Ming Gu and Huan Ren, Computer Science Division, University of California at Berkeley, USA ===================================================================== Changed dimension statement in comment describing E from (N) to (N-1). Sven, 17 Feb 05. ===================================================================== Test the input parameters. */ /* Parameter adjustments */ --d__; --e; u_dim1 = *ldu; u_offset = 1 + u_dim1 * 1; u -= u_offset; vt_dim1 = *ldvt; vt_offset = 1 + vt_dim1 * 1; vt -= vt_offset; --q; --iq; --work; --iwork; /* Function Body */ *info = 0; iuplo = 0; if (lsame_(uplo, "U")) { iuplo = 1; } if (lsame_(uplo, "L")) { iuplo = 2; } if (lsame_(compq, "N")) { icompq = 0; } else if (lsame_(compq, "P")) { icompq = 1; } else if (lsame_(compq, "I")) { icompq = 2; } else { icompq = -1; } if (iuplo == 0) { *info = -1; } else if (icompq < 0) { *info = -2; } else if (*n < 0) { *info = -3; } else if (*ldu < 1 || icompq == 2 && *ldu < *n) { *info = -7; } else if (*ldvt < 1 || icompq == 2 && *ldvt < *n) { *info = -9; } if (*info != 0) { i__1 = -(*info); xerbla_("DBDSDC", &i__1); return 0; } /* Quick return if possible */ if (*n == 0) { return 0; } smlsiz = ilaenv_(&c__9, "DBDSDC", " ", &c__0, &c__0, &c__0, &c__0, ( ftnlen)6, (ftnlen)1); if (*n == 1) { if (icompq == 1) { q[1] = d_sign(&c_b15, &d__[1]); q[smlsiz * *n + 1] = 1.; } else if (icompq == 2) { u[u_dim1 + 1] = d_sign(&c_b15, &d__[1]); vt[vt_dim1 + 1] = 1.; } d__[1] = abs(d__[1]); return 0; } nm1 = *n - 1; /* If matrix lower bidiagonal, rotate to be upper bidiagonal by applying Givens rotations on the left */ wstart = 1; qstart = 3; if (icompq == 1) { dcopy_(n, &d__[1], &c__1, &q[1], &c__1); i__1 = *n - 1; dcopy_(&i__1, &e[1], &c__1, &q[*n + 1], &c__1); } if (iuplo == 2) { qstart = 5; wstart = (*n << 1) - 1; i__1 = *n - 1; for (i__ = 1; i__ <= i__1; ++i__) { dlartg_(&d__[i__], &e[i__], &cs, &sn, &r__); d__[i__] = r__; e[i__] = sn * d__[i__ + 1]; d__[i__ + 1] = cs * d__[i__ + 1]; if (icompq == 1) { q[i__ + (*n << 1)] = cs; q[i__ + *n * 3] = sn; } else if (icompq == 2) { work[i__] = cs; work[nm1 + i__] = -sn; } /* L10: */ } } /* If ICOMPQ = 0, use DLASDQ to compute the singular values. */ if (icompq == 0) { dlasdq_("U", &c__0, n, &c__0, &c__0, &c__0, &d__[1], &e[1], &vt[ vt_offset], ldvt, &u[u_offset], ldu, &u[u_offset], ldu, &work[ wstart], info); goto L40; } /* If N is smaller than the minimum divide size SMLSIZ, then solve the problem with another solver. 
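       (In that case the QR-based solver DLASDQ below is applied directly
       to the full bidiagonal matrix; SMLSIZ is obtained from ILAENV and
       is usually about 25.)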
*/ if (*n <= smlsiz) { if (icompq == 2) { dlaset_("A", n, n, &c_b29, &c_b15, &u[u_offset], ldu); dlaset_("A", n, n, &c_b29, &c_b15, &vt[vt_offset], ldvt); dlasdq_("U", &c__0, n, n, n, &c__0, &d__[1], &e[1], &vt[vt_offset] , ldvt, &u[u_offset], ldu, &u[u_offset], ldu, &work[ wstart], info); } else if (icompq == 1) { iu = 1; ivt = iu + *n; dlaset_("A", n, n, &c_b29, &c_b15, &q[iu + (qstart - 1) * *n], n); dlaset_("A", n, n, &c_b29, &c_b15, &q[ivt + (qstart - 1) * *n], n); dlasdq_("U", &c__0, n, n, n, &c__0, &d__[1], &e[1], &q[ivt + ( qstart - 1) * *n], n, &q[iu + (qstart - 1) * *n], n, &q[ iu + (qstart - 1) * *n], n, &work[wstart], info); } goto L40; } if (icompq == 2) { dlaset_("A", n, n, &c_b29, &c_b15, &u[u_offset], ldu); dlaset_("A", n, n, &c_b29, &c_b15, &vt[vt_offset], ldvt); } /* Scale. */ orgnrm = dlanst_("M", n, &d__[1], &e[1]); if (orgnrm == 0.) { return 0; } dlascl_("G", &c__0, &c__0, &orgnrm, &c_b15, n, &c__1, &d__[1], n, &ierr); dlascl_("G", &c__0, &c__0, &orgnrm, &c_b15, &nm1, &c__1, &e[1], &nm1, & ierr); eps = EPSILON; mlvl = (integer) (log((doublereal) (*n) / (doublereal) (smlsiz + 1)) / log(2.)) + 1; smlszp = smlsiz + 1; if (icompq == 1) { iu = 1; ivt = smlsiz + 1; difl = ivt + smlszp; difr = difl + mlvl; z__ = difr + (mlvl << 1); ic = z__ + mlvl; is = ic + 1; poles = is + 1; givnum = poles + (mlvl << 1); k = 1; givptr = 2; perm = 3; givcol = perm + mlvl; } i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { if ((d__1 = d__[i__], abs(d__1)) < eps) { d__[i__] = d_sign(&eps, &d__[i__]); } /* L20: */ } start = 1; sqre = 0; i__1 = nm1; for (i__ = 1; i__ <= i__1; ++i__) { if ((d__1 = e[i__], abs(d__1)) < eps || i__ == nm1) { /* Subproblem found. First determine its size and then apply divide and conquer on it. */ if (i__ < nm1) { /* A subproblem with E(I) small for I < NM1. */ nsize = i__ - start + 1; } else if ((d__1 = e[i__], abs(d__1)) >= eps) { /* A subproblem with E(NM1) not too small but I = NM1. */ nsize = *n - start + 1; } else { /* A subproblem with E(NM1) small. This implies an 1-by-1 subproblem at D(N). Solve this 1-by-1 problem first. 
*/ nsize = i__ - start + 1; if (icompq == 2) { u[*n + *n * u_dim1] = d_sign(&c_b15, &d__[*n]); vt[*n + *n * vt_dim1] = 1.; } else if (icompq == 1) { q[*n + (qstart - 1) * *n] = d_sign(&c_b15, &d__[*n]); q[*n + (smlsiz + qstart - 1) * *n] = 1.; } d__[*n] = (d__1 = d__[*n], abs(d__1)); } if (icompq == 2) { dlasd0_(&nsize, &sqre, &d__[start], &e[start], &u[start + start * u_dim1], ldu, &vt[start + start * vt_dim1], ldvt, &smlsiz, &iwork[1], &work[wstart], info); } else { dlasda_(&icompq, &smlsiz, &nsize, &sqre, &d__[start], &e[ start], &q[start + (iu + qstart - 2) * *n], n, &q[ start + (ivt + qstart - 2) * *n], &iq[start + k * *n], &q[start + (difl + qstart - 2) * *n], &q[start + ( difr + qstart - 2) * *n], &q[start + (z__ + qstart - 2) * *n], &q[start + (poles + qstart - 2) * *n], &iq[ start + givptr * *n], &iq[start + givcol * *n], n, & iq[start + perm * *n], &q[start + (givnum + qstart - 2) * *n], &q[start + (ic + qstart - 2) * *n], &q[ start + (is + qstart - 2) * *n], &work[wstart], & iwork[1], info); if (*info != 0) { return 0; } } start = i__ + 1; } /* L30: */ } /* Unscale */ dlascl_("G", &c__0, &c__0, &c_b15, &orgnrm, n, &c__1, &d__[1], n, &ierr); L40: /* Use Selection Sort to minimize swaps of singular vectors */ i__1 = *n; for (ii = 2; ii <= i__1; ++ii) { i__ = ii - 1; kk = i__; p = d__[i__]; i__2 = *n; for (j = ii; j <= i__2; ++j) { if (d__[j] > p) { kk = j; p = d__[j]; } /* L50: */ } if (kk != i__) { d__[kk] = d__[i__]; d__[i__] = p; if (icompq == 1) { iq[i__] = kk; } else if (icompq == 2) { dswap_(n, &u[i__ * u_dim1 + 1], &c__1, &u[kk * u_dim1 + 1], & c__1); dswap_(n, &vt[i__ + vt_dim1], ldvt, &vt[kk + vt_dim1], ldvt); } } else if (icompq == 1) { iq[i__] = i__; } /* L60: */ } /* If ICOMPQ = 1, use IQ(N,1) as the indicator for UPLO */ if (icompq == 1) { if (iuplo == 1) { iq[*n] = 1; } else { iq[*n] = 0; } } /* If B is lower bidiagonal, update U by those Givens rotations which rotated B to be upper bidiagonal */ if (iuplo == 2 && icompq == 2) { dlasr_("L", "V", "B", n, n, &work[1], &work[*n], &u[u_offset], ldu); } return 0; /* End of DBDSDC */ } /* dbdsdc_ */ /* Subroutine */ int dbdsqr_(char *uplo, integer *n, integer *ncvt, integer * nru, integer *ncc, doublereal *d__, doublereal *e, doublereal *vt, integer *ldvt, doublereal *u, integer *ldu, doublereal *c__, integer * ldc, doublereal *work, integer *info) { /* System generated locals */ integer c_dim1, c_offset, u_dim1, u_offset, vt_dim1, vt_offset, i__1, i__2; doublereal d__1, d__2, d__3, d__4; /* Builtin functions */ double pow_dd(doublereal *, doublereal *), sqrt(doublereal), d_sign( doublereal *, doublereal *); /* Local variables */ static doublereal abse; static integer idir; static doublereal abss; static integer oldm; static doublereal cosl; static integer isub, iter; static doublereal unfl, sinl, cosr, smin, smax, sinr; extern /* Subroutine */ int drot_(integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *), dlas2_( doublereal *, doublereal *, doublereal *, doublereal *, doublereal *); static doublereal f, g, h__; static integer i__, j, m; static doublereal r__; extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, integer *); extern logical lsame_(char *, char *); static doublereal oldcs; extern /* Subroutine */ int dlasr_(char *, char *, char *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *); static integer oldll; static doublereal shift, sigmn, oldsn; extern /* Subroutine */ int dswap_(integer *, doublereal *, integer *, doublereal *, 
integer *); static integer maxit; static doublereal sminl, sigmx; static logical lower; extern /* Subroutine */ int dlasq1_(integer *, doublereal *, doublereal *, doublereal *, integer *), dlasv2_(doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *); static doublereal cs; static integer ll; static doublereal sn, mu; extern /* Subroutine */ int dlartg_(doublereal *, doublereal *, doublereal *, doublereal *, doublereal *), xerbla_(char *, integer *); static doublereal sminoa, thresh; static logical rotate; static integer nm1; static doublereal tolmul; static integer nm12, nm13, lll; static doublereal eps, sll, tol; /* -- LAPACK routine (version 3.1.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. January 2007 Purpose ======= DBDSQR computes the singular values and, optionally, the right and/or left singular vectors from the singular value decomposition (SVD) of a real N-by-N (upper or lower) bidiagonal matrix B using the implicit zero-shift QR algorithm. The SVD of B has the form B = Q * S * P**T where S is the diagonal matrix of singular values, Q is an orthogonal matrix of left singular vectors, and P is an orthogonal matrix of right singular vectors. If left singular vectors are requested, this subroutine actually returns U*Q instead of Q, and, if right singular vectors are requested, this subroutine returns P**T*VT instead of P**T, for given real input matrices U and VT. When U and VT are the orthogonal matrices that reduce a general matrix A to bidiagonal form: A = U*B*VT, as computed by DGEBRD, then A = (U*Q) * S * (P**T*VT) is the SVD of A. Optionally, the subroutine may also compute Q**T*C for a given real input matrix C. See "Computing Small Singular Values of Bidiagonal Matrices With Guaranteed High Relative Accuracy," by J. Demmel and W. Kahan, LAPACK Working Note #3 (or SIAM J. Sci. Statist. Comput. vol. 11, no. 5, pp. 873-912, Sept 1990) and "Accurate singular values and differential qd algorithms," by B. Parlett and V. Fernando, Technical Report CPAM-554, Mathematics Department, University of California at Berkeley, July 1992 for a detailed description of the algorithm. Arguments ========= UPLO (input) CHARACTER*1 = 'U': B is upper bidiagonal; = 'L': B is lower bidiagonal. N (input) INTEGER The order of the matrix B. N >= 0. NCVT (input) INTEGER The number of columns of the matrix VT. NCVT >= 0. NRU (input) INTEGER The number of rows of the matrix U. NRU >= 0. NCC (input) INTEGER The number of columns of the matrix C. NCC >= 0. D (input/output) DOUBLE PRECISION array, dimension (N) On entry, the n diagonal elements of the bidiagonal matrix B. On exit, if INFO=0, the singular values of B in decreasing order. E (input/output) DOUBLE PRECISION array, dimension (N-1) On entry, the N-1 offdiagonal elements of the bidiagonal matrix B. On exit, if INFO = 0, E is destroyed; if INFO > 0, D and E will contain the diagonal and superdiagonal elements of a bidiagonal matrix orthogonally equivalent to the one given as input. VT (input/output) DOUBLE PRECISION array, dimension (LDVT, NCVT) On entry, an N-by-NCVT matrix VT. On exit, VT is overwritten by P**T * VT. Not referenced if NCVT = 0. LDVT (input) INTEGER The leading dimension of the array VT. LDVT >= max(1,N) if NCVT > 0; LDVT >= 1 if NCVT = 0. U (input/output) DOUBLE PRECISION array, dimension (LDU, N) On entry, an NRU-by-N matrix U. On exit, U is overwritten by U * Q. Not referenced if NRU = 0. 
LDU (input) INTEGER The leading dimension of the array U. LDU >= max(1,NRU). C (input/output) DOUBLE PRECISION array, dimension (LDC, NCC) On entry, an N-by-NCC matrix C. On exit, C is overwritten by Q**T * C. Not referenced if NCC = 0. LDC (input) INTEGER The leading dimension of the array C. LDC >= max(1,N) if NCC > 0; LDC >=1 if NCC = 0. WORK (workspace) DOUBLE PRECISION array, dimension (2*N) if NCVT = NRU = NCC = 0, (max(1, 4*N)) otherwise INFO (output) INTEGER = 0: successful exit < 0: If INFO = -i, the i-th argument had an illegal value > 0: the algorithm did not converge; D and E contain the elements of a bidiagonal matrix which is orthogonally similar to the input matrix B; if INFO = i, i elements of E have not converged to zero. Internal Parameters =================== TOLMUL DOUBLE PRECISION, default = max(10,min(100,EPS**(-1/8))) TOLMUL controls the convergence criterion of the QR loop. If it is positive, TOLMUL*EPS is the desired relative precision in the computed singular values. If it is negative, abs(TOLMUL*EPS*sigma_max) is the desired absolute accuracy in the computed singular values (corresponds to relative accuracy abs(TOLMUL*EPS) in the largest singular value. abs(TOLMUL) should be between 1 and 1/EPS, and preferably between 10 (for fast convergence) and .1/EPS (for there to be some accuracy in the results). Default is to lose at either one eighth or 2 of the available decimal digits in each computed singular value (whichever is smaller). MAXITR INTEGER, default = 6 MAXITR controls the maximum number of passes of the algorithm through its inner loop. The algorithms stops (and so fails to converge) if the number of passes through the inner loop exceeds MAXITR*N**2. ===================================================================== Test the input parameters. */ /* Parameter adjustments */ --d__; --e; vt_dim1 = *ldvt; vt_offset = 1 + vt_dim1 * 1; vt -= vt_offset; u_dim1 = *ldu; u_offset = 1 + u_dim1 * 1; u -= u_offset; c_dim1 = *ldc; c_offset = 1 + c_dim1 * 1; c__ -= c_offset; --work; /* Function Body */ *info = 0; lower = lsame_(uplo, "L"); if (! lsame_(uplo, "U") && ! lower) { *info = -1; } else if (*n < 0) { *info = -2; } else if (*ncvt < 0) { *info = -3; } else if (*nru < 0) { *info = -4; } else if (*ncc < 0) { *info = -5; } else if (*ncvt == 0 && *ldvt < 1 || *ncvt > 0 && *ldvt < max(1,*n)) { *info = -9; } else if (*ldu < max(1,*nru)) { *info = -11; } else if (*ncc == 0 && *ldc < 1 || *ncc > 0 && *ldc < max(1,*n)) { *info = -13; } if (*info != 0) { i__1 = -(*info); xerbla_("DBDSQR", &i__1); return 0; } if (*n == 0) { return 0; } if (*n == 1) { goto L160; } /* ROTATE is true if any singular vectors desired, false otherwise */ rotate = *ncvt > 0 || *nru > 0 || *ncc > 0; /* If no singular vectors desired, use qd algorithm */ if (! 
rotate) { dlasq1_(n, &d__[1], &e[1], &work[1], info); return 0; } nm1 = *n - 1; nm12 = nm1 + nm1; nm13 = nm12 + nm1; idir = 0; /* Get machine constants */ eps = EPSILON; unfl = SAFEMINIMUM; /* If matrix lower bidiagonal, rotate to be upper bidiagonal by applying Givens rotations on the left */ if (lower) { i__1 = *n - 1; for (i__ = 1; i__ <= i__1; ++i__) { dlartg_(&d__[i__], &e[i__], &cs, &sn, &r__); d__[i__] = r__; e[i__] = sn * d__[i__ + 1]; d__[i__ + 1] = cs * d__[i__ + 1]; work[i__] = cs; work[nm1 + i__] = sn; /* L10: */ } /* Update singular vectors if desired */ if (*nru > 0) { dlasr_("R", "V", "F", nru, n, &work[1], &work[*n], &u[u_offset], ldu); } if (*ncc > 0) { dlasr_("L", "V", "F", n, ncc, &work[1], &work[*n], &c__[c_offset], ldc); } } /* Compute singular values to relative accuracy TOL (By setting TOL to be negative, algorithm will compute singular values to absolute accuracy ABS(TOL)*norm(input matrix)) Computing MAX Computing MIN */ d__3 = 100., d__4 = pow_dd(&eps, &c_b94); d__1 = 10., d__2 = min(d__3,d__4); tolmul = max(d__1,d__2); tol = tolmul * eps; /* Compute approximate maximum, minimum singular values */ smax = 0.; i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { /* Computing MAX */ d__2 = smax, d__3 = (d__1 = d__[i__], abs(d__1)); smax = max(d__2,d__3); /* L20: */ } i__1 = *n - 1; for (i__ = 1; i__ <= i__1; ++i__) { /* Computing MAX */ d__2 = smax, d__3 = (d__1 = e[i__], abs(d__1)); smax = max(d__2,d__3); /* L30: */ } sminl = 0.; if (tol >= 0.) { /* Relative accuracy desired */ sminoa = abs(d__[1]); if (sminoa == 0.) { goto L50; } mu = sminoa; i__1 = *n; for (i__ = 2; i__ <= i__1; ++i__) { mu = (d__2 = d__[i__], abs(d__2)) * (mu / (mu + (d__1 = e[i__ - 1] , abs(d__1)))); sminoa = min(sminoa,mu); if (sminoa == 0.) { goto L50; } /* L40: */ } L50: sminoa /= sqrt((doublereal) (*n)); /* Computing MAX */ d__1 = tol * sminoa, d__2 = *n * 6 * *n * unfl; thresh = max(d__1,d__2); } else { /* Absolute accuracy desired Computing MAX */ d__1 = abs(tol) * smax, d__2 = *n * 6 * *n * unfl; thresh = max(d__1,d__2); } /* Prepare for main iteration loop for the singular values (MAXIT is the maximum number of passes through the inner loop permitted before nonconvergence signalled.) */ maxit = *n * 6 * *n; iter = 0; oldll = -1; oldm = -1; /* M points to last element of unconverged part of matrix */ m = *n; /* Begin main iteration loop */ L60: /* Check for convergence or exceeding iteration count */ if (m <= 1) { goto L160; } if (iter > maxit) { goto L200; } /* Find diagonal block of matrix to work on */ if (tol < 0. && (d__1 = d__[m], abs(d__1)) <= thresh) { d__[m] = 0.; } smax = (d__1 = d__[m], abs(d__1)); smin = smax; i__1 = m - 1; for (lll = 1; lll <= i__1; ++lll) { ll = m - lll; abss = (d__1 = d__[ll], abs(d__1)); abse = (d__1 = e[ll], abs(d__1)); if (tol < 0. 
&& abss <= thresh) { d__[ll] = 0.; } if (abse <= thresh) { goto L80; } smin = min(smin,abss); /* Computing MAX */ d__1 = max(smax,abss); smax = max(d__1,abse); /* L70: */ } ll = 0; goto L90; L80: e[ll] = 0.; /* Matrix splits since E(LL) = 0 */ if (ll == m - 1) { /* Convergence of bottom singular value, return to top of loop */ --m; goto L60; } L90: ++ll; /* E(LL) through E(M-1) are nonzero, E(LL-1) is zero */ if (ll == m - 1) { /* 2 by 2 block, handle separately */ dlasv2_(&d__[m - 1], &e[m - 1], &d__[m], &sigmn, &sigmx, &sinr, &cosr, &sinl, &cosl); d__[m - 1] = sigmx; e[m - 1] = 0.; d__[m] = sigmn; /* Compute singular vectors, if desired */ if (*ncvt > 0) { drot_(ncvt, &vt[m - 1 + vt_dim1], ldvt, &vt[m + vt_dim1], ldvt, & cosr, &sinr); } if (*nru > 0) { drot_(nru, &u[(m - 1) * u_dim1 + 1], &c__1, &u[m * u_dim1 + 1], & c__1, &cosl, &sinl); } if (*ncc > 0) { drot_(ncc, &c__[m - 1 + c_dim1], ldc, &c__[m + c_dim1], ldc, & cosl, &sinl); } m += -2; goto L60; } /* If working on new submatrix, choose shift direction (from larger end diagonal element towards smaller) */ if (ll > oldm || m < oldll) { if ((d__1 = d__[ll], abs(d__1)) >= (d__2 = d__[m], abs(d__2))) { /* Chase bulge from top (big end) to bottom (small end) */ idir = 1; } else { /* Chase bulge from bottom (big end) to top (small end) */ idir = 2; } } /* Apply convergence tests */ if (idir == 1) { /* Run convergence test in forward direction First apply standard test to bottom of matrix */ if ((d__2 = e[m - 1], abs(d__2)) <= abs(tol) * (d__1 = d__[m], abs( d__1)) || tol < 0. && (d__3 = e[m - 1], abs(d__3)) <= thresh) { e[m - 1] = 0.; goto L60; } if (tol >= 0.) { /* If relative accuracy desired, apply convergence criterion forward */ mu = (d__1 = d__[ll], abs(d__1)); sminl = mu; i__1 = m - 1; for (lll = ll; lll <= i__1; ++lll) { if ((d__1 = e[lll], abs(d__1)) <= tol * mu) { e[lll] = 0.; goto L60; } mu = (d__2 = d__[lll + 1], abs(d__2)) * (mu / (mu + (d__1 = e[ lll], abs(d__1)))); sminl = min(sminl,mu); /* L100: */ } } } else { /* Run convergence test in backward direction First apply standard test to top of matrix */ if ((d__2 = e[ll], abs(d__2)) <= abs(tol) * (d__1 = d__[ll], abs(d__1) ) || tol < 0. && (d__3 = e[ll], abs(d__3)) <= thresh) { e[ll] = 0.; goto L60; } if (tol >= 0.) { /* If relative accuracy desired, apply convergence criterion backward */ mu = (d__1 = d__[m], abs(d__1)); sminl = mu; i__1 = ll; for (lll = m - 1; lll >= i__1; --lll) { if ((d__1 = e[lll], abs(d__1)) <= tol * mu) { e[lll] = 0.; goto L60; } mu = (d__2 = d__[lll], abs(d__2)) * (mu / (mu + (d__1 = e[lll] , abs(d__1)))); sminl = min(sminl,mu); /* L110: */ } } } oldll = ll; oldm = m; /* Compute shift. First, test if shifting would ruin relative accuracy, and if so set the shift to zero. Computing MAX */ d__1 = eps, d__2 = tol * .01; if (tol >= 0. && *n * tol * (sminl / smax) <= max(d__1,d__2)) { /* Use a zero shift to avoid loss of relative accuracy */ shift = 0.; } else { /* Compute the shift from 2-by-2 block at end of matrix */ if (idir == 1) { sll = (d__1 = d__[ll], abs(d__1)); dlas2_(&d__[m - 1], &e[m - 1], &d__[m], &shift, &r__); } else { sll = (d__1 = d__[m], abs(d__1)); dlas2_(&d__[ll], &e[ll], &d__[ll + 1], &shift, &r__); } /* Test if shift negligible, and if so set to zero */ if (sll > 0.) { /* Computing 2nd power */ d__1 = shift / sll; if (d__1 * d__1 < eps) { shift = 0.; } } } /* Increment iteration count */ iter = iter + m - ll; /* If SHIFT = 0, do simplified QR iteration */ if (shift == 0.) 
{ if (idir == 1) { /* Chase bulge from top to bottom Save cosines and sines for later singular vector updates */ cs = 1.; oldcs = 1.; i__1 = m - 1; for (i__ = ll; i__ <= i__1; ++i__) { d__1 = d__[i__] * cs; dlartg_(&d__1, &e[i__], &cs, &sn, &r__); if (i__ > ll) { e[i__ - 1] = oldsn * r__; } d__1 = oldcs * r__; d__2 = d__[i__ + 1] * sn; dlartg_(&d__1, &d__2, &oldcs, &oldsn, &d__[i__]); work[i__ - ll + 1] = cs; work[i__ - ll + 1 + nm1] = sn; work[i__ - ll + 1 + nm12] = oldcs; work[i__ - ll + 1 + nm13] = oldsn; /* L120: */ } h__ = d__[m] * cs; d__[m] = h__ * oldcs; e[m - 1] = h__ * oldsn; /* Update singular vectors */ if (*ncvt > 0) { i__1 = m - ll + 1; dlasr_("L", "V", "F", &i__1, ncvt, &work[1], &work[*n], &vt[ ll + vt_dim1], ldvt); } if (*nru > 0) { i__1 = m - ll + 1; dlasr_("R", "V", "F", nru, &i__1, &work[nm12 + 1], &work[nm13 + 1], &u[ll * u_dim1 + 1], ldu); } if (*ncc > 0) { i__1 = m - ll + 1; dlasr_("L", "V", "F", &i__1, ncc, &work[nm12 + 1], &work[nm13 + 1], &c__[ll + c_dim1], ldc); } /* Test convergence */ if ((d__1 = e[m - 1], abs(d__1)) <= thresh) { e[m - 1] = 0.; } } else { /* Chase bulge from bottom to top Save cosines and sines for later singular vector updates */ cs = 1.; oldcs = 1.; i__1 = ll + 1; for (i__ = m; i__ >= i__1; --i__) { d__1 = d__[i__] * cs; dlartg_(&d__1, &e[i__ - 1], &cs, &sn, &r__); if (i__ < m) { e[i__] = oldsn * r__; } d__1 = oldcs * r__; d__2 = d__[i__ - 1] * sn; dlartg_(&d__1, &d__2, &oldcs, &oldsn, &d__[i__]); work[i__ - ll] = cs; work[i__ - ll + nm1] = -sn; work[i__ - ll + nm12] = oldcs; work[i__ - ll + nm13] = -oldsn; /* L130: */ } h__ = d__[ll] * cs; d__[ll] = h__ * oldcs; e[ll] = h__ * oldsn; /* Update singular vectors */ if (*ncvt > 0) { i__1 = m - ll + 1; dlasr_("L", "V", "B", &i__1, ncvt, &work[nm12 + 1], &work[ nm13 + 1], &vt[ll + vt_dim1], ldvt); } if (*nru > 0) { i__1 = m - ll + 1; dlasr_("R", "V", "B", nru, &i__1, &work[1], &work[*n], &u[ll * u_dim1 + 1], ldu); } if (*ncc > 0) { i__1 = m - ll + 1; dlasr_("L", "V", "B", &i__1, ncc, &work[1], &work[*n], &c__[ ll + c_dim1], ldc); } /* Test convergence */ if ((d__1 = e[ll], abs(d__1)) <= thresh) { e[ll] = 0.; } } } else { /* Use nonzero shift */ if (idir == 1) { /* Chase bulge from top to bottom Save cosines and sines for later singular vector updates */ f = ((d__1 = d__[ll], abs(d__1)) - shift) * (d_sign(&c_b15, &d__[ ll]) + shift / d__[ll]); g = e[ll]; i__1 = m - 1; for (i__ = ll; i__ <= i__1; ++i__) { dlartg_(&f, &g, &cosr, &sinr, &r__); if (i__ > ll) { e[i__ - 1] = r__; } f = cosr * d__[i__] + sinr * e[i__]; e[i__] = cosr * e[i__] - sinr * d__[i__]; g = sinr * d__[i__ + 1]; d__[i__ + 1] = cosr * d__[i__ + 1]; dlartg_(&f, &g, &cosl, &sinl, &r__); d__[i__] = r__; f = cosl * e[i__] + sinl * d__[i__ + 1]; d__[i__ + 1] = cosl * d__[i__ + 1] - sinl * e[i__]; if (i__ < m - 1) { g = sinl * e[i__ + 1]; e[i__ + 1] = cosl * e[i__ + 1]; } work[i__ - ll + 1] = cosr; work[i__ - ll + 1 + nm1] = sinr; work[i__ - ll + 1 + nm12] = cosl; work[i__ - ll + 1 + nm13] = sinl; /* L140: */ } e[m - 1] = f; /* Update singular vectors */ if (*ncvt > 0) { i__1 = m - ll + 1; dlasr_("L", "V", "F", &i__1, ncvt, &work[1], &work[*n], &vt[ ll + vt_dim1], ldvt); } if (*nru > 0) { i__1 = m - ll + 1; dlasr_("R", "V", "F", nru, &i__1, &work[nm12 + 1], &work[nm13 + 1], &u[ll * u_dim1 + 1], ldu); } if (*ncc > 0) { i__1 = m - ll + 1; dlasr_("L", "V", "F", &i__1, ncc, &work[nm12 + 1], &work[nm13 + 1], &c__[ll + c_dim1], ldc); } /* Test convergence */ if ((d__1 = e[m - 1], abs(d__1)) <= thresh) { e[m - 1] = 0.; } } else { /* Chase bulge 
from bottom to top Save cosines and sines for later singular vector updates */ f = ((d__1 = d__[m], abs(d__1)) - shift) * (d_sign(&c_b15, &d__[m] ) + shift / d__[m]); g = e[m - 1]; i__1 = ll + 1; for (i__ = m; i__ >= i__1; --i__) { dlartg_(&f, &g, &cosr, &sinr, &r__); if (i__ < m) { e[i__] = r__; } f = cosr * d__[i__] + sinr * e[i__ - 1]; e[i__ - 1] = cosr * e[i__ - 1] - sinr * d__[i__]; g = sinr * d__[i__ - 1]; d__[i__ - 1] = cosr * d__[i__ - 1]; dlartg_(&f, &g, &cosl, &sinl, &r__); d__[i__] = r__; f = cosl * e[i__ - 1] + sinl * d__[i__ - 1]; d__[i__ - 1] = cosl * d__[i__ - 1] - sinl * e[i__ - 1]; if (i__ > ll + 1) { g = sinl * e[i__ - 2]; e[i__ - 2] = cosl * e[i__ - 2]; } work[i__ - ll] = cosr; work[i__ - ll + nm1] = -sinr; work[i__ - ll + nm12] = cosl; work[i__ - ll + nm13] = -sinl; /* L150: */ } e[ll] = f; /* Test convergence */ if ((d__1 = e[ll], abs(d__1)) <= thresh) { e[ll] = 0.; } /* Update singular vectors if desired */ if (*ncvt > 0) { i__1 = m - ll + 1; dlasr_("L", "V", "B", &i__1, ncvt, &work[nm12 + 1], &work[ nm13 + 1], &vt[ll + vt_dim1], ldvt); } if (*nru > 0) { i__1 = m - ll + 1; dlasr_("R", "V", "B", nru, &i__1, &work[1], &work[*n], &u[ll * u_dim1 + 1], ldu); } if (*ncc > 0) { i__1 = m - ll + 1; dlasr_("L", "V", "B", &i__1, ncc, &work[1], &work[*n], &c__[ ll + c_dim1], ldc); } } } /* QR iteration finished, go back and check convergence */ goto L60; /* All singular values converged, so make them positive */ L160: i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { if (d__[i__] < 0.) { d__[i__] = -d__[i__]; /* Change sign of singular vectors, if desired */ if (*ncvt > 0) { dscal_(ncvt, &c_b151, &vt[i__ + vt_dim1], ldvt); } } /* L170: */ } /* Sort the singular values into decreasing order (insertion sort on singular values, but only one transposition per singular vector) */ i__1 = *n - 1; for (i__ = 1; i__ <= i__1; ++i__) { /* Scan for smallest D(I) */ isub = 1; smin = d__[1]; i__2 = *n + 1 - i__; for (j = 2; j <= i__2; ++j) { if (d__[j] <= smin) { isub = j; smin = d__[j]; } /* L180: */ } if (isub != *n + 1 - i__) { /* Swap singular values and vectors */ d__[isub] = d__[*n + 1 - i__]; d__[*n + 1 - i__] = smin; if (*ncvt > 0) { dswap_(ncvt, &vt[isub + vt_dim1], ldvt, &vt[*n + 1 - i__ + vt_dim1], ldvt); } if (*nru > 0) { dswap_(nru, &u[isub * u_dim1 + 1], &c__1, &u[(*n + 1 - i__) * u_dim1 + 1], &c__1); } if (*ncc > 0) { dswap_(ncc, &c__[isub + c_dim1], ldc, &c__[*n + 1 - i__ + c_dim1], ldc); } } /* L190: */ } goto L220; /* Maximum number of iterations exceeded, failure to converge */ L200: *info = 0; i__1 = *n - 1; for (i__ = 1; i__ <= i__1; ++i__) { if (e[i__] != 0.) { ++(*info); } /* L210: */ } L220: return 0; /* End of DBDSQR */ } /* dbdsqr_ */ /* Subroutine */ int dgebak_(char *job, char *side, integer *n, integer *ilo, integer *ihi, doublereal *scale, integer *m, doublereal *v, integer * ldv, integer *info) { /* System generated locals */ integer v_dim1, v_offset, i__1; /* Local variables */ static integer i__, k; static doublereal s; extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, integer *); extern logical lsame_(char *, char *); extern /* Subroutine */ int dswap_(integer *, doublereal *, integer *, doublereal *, integer *); static logical leftv; static integer ii; extern /* Subroutine */ int xerbla_(char *, integer *); static logical rightv; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. 
November 2006 Purpose ======= DGEBAK forms the right or left eigenvectors of a real general matrix by backward transformation on the computed eigenvectors of the balanced matrix output by DGEBAL. Arguments ========= JOB (input) CHARACTER*1 Specifies the type of backward transformation required: = 'N', do nothing, return immediately; = 'P', do backward transformation for permutation only; = 'S', do backward transformation for scaling only; = 'B', do backward transformations for both permutation and scaling. JOB must be the same as the argument JOB supplied to DGEBAL. SIDE (input) CHARACTER*1 = 'R': V contains right eigenvectors; = 'L': V contains left eigenvectors. N (input) INTEGER The number of rows of the matrix V. N >= 0. ILO (input) INTEGER IHI (input) INTEGER The integers ILO and IHI determined by DGEBAL. 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0. SCALE (input) DOUBLE PRECISION array, dimension (N) Details of the permutation and scaling factors, as returned by DGEBAL. M (input) INTEGER The number of columns of the matrix V. M >= 0. V (input/output) DOUBLE PRECISION array, dimension (LDV,M) On entry, the matrix of right or left eigenvectors to be transformed, as returned by DHSEIN or DTREVC. On exit, V is overwritten by the transformed eigenvectors. LDV (input) INTEGER The leading dimension of the array V. LDV >= max(1,N). INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value. ===================================================================== Decode and Test the input parameters */ /* Parameter adjustments */ --scale; v_dim1 = *ldv; v_offset = 1 + v_dim1 * 1; v -= v_offset; /* Function Body */ rightv = lsame_(side, "R"); leftv = lsame_(side, "L"); *info = 0; if (! lsame_(job, "N") && ! lsame_(job, "P") && ! lsame_(job, "S") && ! lsame_(job, "B")) { *info = -1; } else if (! rightv && ! leftv) { *info = -2; } else if (*n < 0) { *info = -3; } else if (*ilo < 1 || *ilo > max(1,*n)) { *info = -4; } else if (*ihi < min(*ilo,*n) || *ihi > *n) { *info = -5; } else if (*m < 0) { *info = -7; } else if (*ldv < max(1,*n)) { *info = -9; } if (*info != 0) { i__1 = -(*info); xerbla_("DGEBAK", &i__1); return 0; } /* Quick return if possible */ if (*n == 0) { return 0; } if (*m == 0) { return 0; } if (lsame_(job, "N")) { return 0; } if (*ilo == *ihi) { goto L30; } /* Backward balance */ if (lsame_(job, "S") || lsame_(job, "B")) { if (rightv) { i__1 = *ihi; for (i__ = *ilo; i__ <= i__1; ++i__) { s = scale[i__]; dscal_(m, &s, &v[i__ + v_dim1], ldv); /* L10: */ } } if (leftv) { i__1 = *ihi; for (i__ = *ilo; i__ <= i__1; ++i__) { s = 1. 
/ scale[i__]; dscal_(m, &s, &v[i__ + v_dim1], ldv); /* L20: */ } } } /* Backward permutation For I = ILO-1 step -1 until 1, IHI+1 step 1 until N do -- */ L30: if (lsame_(job, "P") || lsame_(job, "B")) { if (rightv) { i__1 = *n; for (ii = 1; ii <= i__1; ++ii) { i__ = ii; if (i__ >= *ilo && i__ <= *ihi) { goto L40; } if (i__ < *ilo) { i__ = *ilo - ii; } k = (integer) scale[i__]; if (k == i__) { goto L40; } dswap_(m, &v[i__ + v_dim1], ldv, &v[k + v_dim1], ldv); L40: ; } } if (leftv) { i__1 = *n; for (ii = 1; ii <= i__1; ++ii) { i__ = ii; if (i__ >= *ilo && i__ <= *ihi) { goto L50; } if (i__ < *ilo) { i__ = *ilo - ii; } k = (integer) scale[i__]; if (k == i__) { goto L50; } dswap_(m, &v[i__ + v_dim1], ldv, &v[k + v_dim1], ldv); L50: ; } } } return 0; /* End of DGEBAK */ } /* dgebak_ */ /* Subroutine */ int dgebal_(char *job, integer *n, doublereal *a, integer * lda, integer *ilo, integer *ihi, doublereal *scale, integer *info) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2; doublereal d__1, d__2; /* Local variables */ static integer iexc; static doublereal c__, f, g; static integer i__, j, k, l, m; static doublereal r__, s; extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, integer *); extern logical lsame_(char *, char *); extern /* Subroutine */ int dswap_(integer *, doublereal *, integer *, doublereal *, integer *); static doublereal sfmin1, sfmin2, sfmax1, sfmax2, ca, ra; extern integer idamax_(integer *, doublereal *, integer *); extern /* Subroutine */ int xerbla_(char *, integer *); static logical noconv; static integer ica, ira; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DGEBAL balances a general real matrix A. This involves, first, permuting A by a similarity transformation to isolate eigenvalues in the first 1 to ILO-1 and last IHI+1 to N elements on the diagonal; and second, applying a diagonal similarity transformation to rows and columns ILO to IHI to make the rows and columns as close in norm as possible. Both steps are optional. Balancing may reduce the 1-norm of the matrix, and improve the accuracy of the computed eigenvalues and/or eigenvectors. Arguments ========= JOB (input) CHARACTER*1 Specifies the operations to be performed on A: = 'N': none: simply set ILO = 1, IHI = N, SCALE(I) = 1.0 for i = 1,...,N; = 'P': permute only; = 'S': scale only; = 'B': both permute and scale. N (input) INTEGER The order of the matrix A. N >= 0. A (input/output) DOUBLE PRECISION array, dimension (LDA,N) On entry, the input matrix A. On exit, A is overwritten by the balanced matrix. If JOB = 'N', A is not referenced. See Further Details. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,N). ILO (output) INTEGER IHI (output) INTEGER ILO and IHI are set to integers such that on exit A(i,j) = 0 if i > j and j = 1,...,ILO-1 or I = IHI+1,...,N. If JOB = 'N' or 'S', ILO = 1 and IHI = N. SCALE (output) DOUBLE PRECISION array, dimension (N) Details of the permutations and scaling factors applied to A. If P(j) is the index of the row and column interchanged with row and column j and D(j) is the scaling factor applied to row and column j, then SCALE(j) = P(j) for j = 1,...,ILO-1 = D(j) for j = ILO,...,IHI = P(j) for j = IHI+1,...,N. The order in which the interchanges are made is N to IHI+1, then 1 to ILO-1. INFO (output) INTEGER = 0: successful exit. < 0: if INFO = -i, the i-th argument had an illegal value. 
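    Example (editor's illustrative sketch, not part of the reference
    LAPACK documentation; it assumes the f2c typedefs from f2c.h and a
    column-major N-by-N matrix already stored in A):

        integer n = 4, lda = 4, ilo, ihi, info;
        doublereal a[16];
        doublereal scale[4];

        dgebal_("B", &n, a, &lda, &ilo, &ihi, scale, &info);

    On success (INFO = 0) the balanced matrix overwrites A, ILO and IHI
    mark the rows and columns that were actually balanced, and SCALE
    records the permutations and scaling factors.  Eigenvectors computed
    from the balanced matrix are transformed back with DGEBAK, which
    takes the same ILO, IHI and SCALE.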
Further Details =============== The permutations consist of row and column interchanges which put the matrix in the form ( T1 X Y ) P A P = ( 0 B Z ) ( 0 0 T2 ) where T1 and T2 are upper triangular matrices whose eigenvalues lie along the diagonal. The column indices ILO and IHI mark the starting and ending columns of the submatrix B. Balancing consists of applying a diagonal similarity transformation inv(D) * B * D to make the 1-norms of each row of B and its corresponding column nearly equal. The output matrix is ( T1 X*D Y ) ( 0 inv(D)*B*D inv(D)*Z ). ( 0 0 T2 ) Information about the permutations P and the diagonal matrix D is returned in the vector SCALE. This subroutine is based on the EISPACK routine BALANC. Modified by Tzu-Yi Chen, Computer Science Division, University of California at Berkeley, USA ===================================================================== Test the input parameters */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --scale; /* Function Body */ *info = 0; if (! lsame_(job, "N") && ! lsame_(job, "P") && ! lsame_(job, "S") && ! lsame_(job, "B")) { *info = -1; } else if (*n < 0) { *info = -2; } else if (*lda < max(1,*n)) { *info = -4; } if (*info != 0) { i__1 = -(*info); xerbla_("DGEBAL", &i__1); return 0; } k = 1; l = *n; if (*n == 0) { goto L210; } if (lsame_(job, "N")) { i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { scale[i__] = 1.; /* L10: */ } goto L210; } if (lsame_(job, "S")) { goto L120; } /* Permutation to isolate eigenvalues if possible */ goto L50; /* Row and column exchange. */ L20: scale[m] = (doublereal) j; if (j == m) { goto L30; } dswap_(&l, &a[j * a_dim1 + 1], &c__1, &a[m * a_dim1 + 1], &c__1); i__1 = *n - k + 1; dswap_(&i__1, &a[j + k * a_dim1], lda, &a[m + k * a_dim1], lda); L30: switch (iexc) { case 1: goto L40; case 2: goto L80; } /* Search for rows isolating an eigenvalue and push them down. */ L40: if (l == 1) { goto L210; } --l; L50: for (j = l; j >= 1; --j) { i__1 = l; for (i__ = 1; i__ <= i__1; ++i__) { if (i__ == j) { goto L60; } if (a[j + i__ * a_dim1] != 0.) { goto L70; } L60: ; } m = l; iexc = 1; goto L20; L70: ; } goto L90; /* Search for columns isolating an eigenvalue and push them left. */ L80: ++k; L90: i__1 = l; for (j = k; j <= i__1; ++j) { i__2 = l; for (i__ = k; i__ <= i__2; ++i__) { if (i__ == j) { goto L100; } if (a[i__ + j * a_dim1] != 0.) { goto L110; } L100: ; } m = k; iexc = 2; goto L20; L110: ; } L120: i__1 = l; for (i__ = k; i__ <= i__1; ++i__) { scale[i__] = 1.; /* L130: */ } if (lsame_(job, "P")) { goto L210; } /* Balance the submatrix in rows K to L. Iterative loop for norm reduction */ sfmin1 = SAFEMINIMUM / PRECISION; sfmax1 = 1. / sfmin1; sfmin2 = sfmin1 * 2.; sfmax2 = 1. / sfmin2; L140: noconv = FALSE_; i__1 = l; for (i__ = k; i__ <= i__1; ++i__) { c__ = 0.; r__ = 0.; i__2 = l; for (j = k; j <= i__2; ++j) { if (j == i__) { goto L150; } c__ += (d__1 = a[j + i__ * a_dim1], abs(d__1)); r__ += (d__1 = a[i__ + j * a_dim1], abs(d__1)); L150: ; } ica = idamax_(&l, &a[i__ * a_dim1 + 1], &c__1); ca = (d__1 = a[ica + i__ * a_dim1], abs(d__1)); i__2 = *n - k + 1; ira = idamax_(&i__2, &a[i__ + k * a_dim1], lda); ra = (d__1 = a[i__ + (ira + k - 1) * a_dim1], abs(d__1)); /* Guard against zero C or R due to underflow. */ if (c__ == 0. || r__ == 0.) 
{ goto L200; } g = r__ / 2.; f = 1.; s = c__ + r__; L160: /* Computing MAX */ d__1 = max(f,c__); /* Computing MIN */ d__2 = min(r__,g); if (c__ >= g || max(d__1,ca) >= sfmax2 || min(d__2,ra) <= sfmin2) { goto L170; } f *= 2.; c__ *= 2.; ca *= 2.; r__ /= 2.; g /= 2.; ra /= 2.; goto L160; L170: g = c__ / 2.; L180: /* Computing MIN */ d__1 = min(f,c__), d__1 = min(d__1,g); if (g < r__ || max(r__,ra) >= sfmax2 || min(d__1,ca) <= sfmin2) { goto L190; } f /= 2.; c__ /= 2.; g /= 2.; ca /= 2.; r__ *= 2.; ra *= 2.; goto L180; /* Now balance. */ L190: if (c__ + r__ >= s * .95) { goto L200; } if (f < 1. && scale[i__] < 1.) { if (f * scale[i__] <= sfmin1) { goto L200; } } if (f > 1. && scale[i__] > 1.) { if (scale[i__] >= sfmax1 / f) { goto L200; } } g = 1. / f; scale[i__] *= f; noconv = TRUE_; i__2 = *n - k + 1; dscal_(&i__2, &g, &a[i__ + k * a_dim1], lda); dscal_(&l, &f, &a[i__ * a_dim1 + 1], &c__1); L200: ; } if (noconv) { goto L140; } L210: *ilo = k; *ihi = l; return 0; /* End of DGEBAL */ } /* dgebal_ */ /* Subroutine */ int dgebd2_(integer *m, integer *n, doublereal *a, integer * lda, doublereal *d__, doublereal *e, doublereal *tauq, doublereal * taup, doublereal *work, integer *info) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2, i__3; /* Local variables */ static integer i__; extern /* Subroutine */ int dlarf_(char *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, doublereal *), dlarfg_(integer *, doublereal *, doublereal *, integer *, doublereal *), xerbla_(char *, integer *); /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DGEBD2 reduces a real general m by n matrix A to upper or lower bidiagonal form B by an orthogonal transformation: Q' * A * P = B. If m >= n, B is upper bidiagonal; if m < n, B is lower bidiagonal. Arguments ========= M (input) INTEGER The number of rows in the matrix A. M >= 0. N (input) INTEGER The number of columns in the matrix A. N >= 0. A (input/output) DOUBLE PRECISION array, dimension (LDA,N) On entry, the m by n general matrix to be reduced. On exit, if m >= n, the diagonal and the first superdiagonal are overwritten with the upper bidiagonal matrix B; the elements below the diagonal, with the array TAUQ, represent the orthogonal matrix Q as a product of elementary reflectors, and the elements above the first superdiagonal, with the array TAUP, represent the orthogonal matrix P as a product of elementary reflectors; if m < n, the diagonal and the first subdiagonal are overwritten with the lower bidiagonal matrix B; the elements below the first subdiagonal, with the array TAUQ, represent the orthogonal matrix Q as a product of elementary reflectors, and the elements above the diagonal, with the array TAUP, represent the orthogonal matrix P as a product of elementary reflectors. See Further Details. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,M). D (output) DOUBLE PRECISION array, dimension (min(M,N)) The diagonal elements of the bidiagonal matrix B: D(i) = A(i,i). E (output) DOUBLE PRECISION array, dimension (min(M,N)-1) The off-diagonal elements of the bidiagonal matrix B: if m >= n, E(i) = A(i,i+1) for i = 1,2,...,n-1; if m < n, E(i) = A(i+1,i) for i = 1,2,...,m-1. TAUQ (output) DOUBLE PRECISION array dimension (min(M,N)) The scalar factors of the elementary reflectors which represent the orthogonal matrix Q. See Further Details. 
TAUP (output) DOUBLE PRECISION array, dimension (min(M,N)) The scalar factors of the elementary reflectors which represent the orthogonal matrix P. See Further Details. WORK (workspace) DOUBLE PRECISION array, dimension (max(M,N)) INFO (output) INTEGER = 0: successful exit. < 0: if INFO = -i, the i-th argument had an illegal value. Further Details =============== The matrices Q and P are represented as products of elementary reflectors: If m >= n, Q = H(1) H(2) . . . H(n) and P = G(1) G(2) . . . G(n-1) Each H(i) and G(i) has the form: H(i) = I - tauq * v * v' and G(i) = I - taup * u * u' where tauq and taup are real scalars, and v and u are real vectors; v(1:i-1) = 0, v(i) = 1, and v(i+1:m) is stored on exit in A(i+1:m,i); u(1:i) = 0, u(i+1) = 1, and u(i+2:n) is stored on exit in A(i,i+2:n); tauq is stored in TAUQ(i) and taup in TAUP(i). If m < n, Q = H(1) H(2) . . . H(m-1) and P = G(1) G(2) . . . G(m) Each H(i) and G(i) has the form: H(i) = I - tauq * v * v' and G(i) = I - taup * u * u' where tauq and taup are real scalars, and v and u are real vectors; v(1:i) = 0, v(i+1) = 1, and v(i+2:m) is stored on exit in A(i+2:m,i); u(1:i-1) = 0, u(i) = 1, and u(i+1:n) is stored on exit in A(i,i+1:n); tauq is stored in TAUQ(i) and taup in TAUP(i). The contents of A on exit are illustrated by the following examples: m = 6 and n = 5 (m > n): m = 5 and n = 6 (m < n): ( d e u1 u1 u1 ) ( d u1 u1 u1 u1 u1 ) ( v1 d e u2 u2 ) ( e d u2 u2 u2 u2 ) ( v1 v2 d e u3 ) ( v1 e d u3 u3 u3 ) ( v1 v2 v3 d e ) ( v1 v2 e d u4 u4 ) ( v1 v2 v3 v4 d ) ( v1 v2 v3 e d u5 ) ( v1 v2 v3 v4 v5 ) where d and e denote diagonal and off-diagonal elements of B, vi denotes an element of the vector defining H(i), and ui an element of the vector defining G(i). ===================================================================== Test the input parameters */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --d__; --e; --tauq; --taup; --work; /* Function Body */ *info = 0; if (*m < 0) { *info = -1; } else if (*n < 0) { *info = -2; } else if (*lda < max(1,*m)) { *info = -4; } if (*info < 0) { i__1 = -(*info); xerbla_("DGEBD2", &i__1); return 0; } if (*m >= *n) { /* Reduce to upper bidiagonal form */ i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { /* Generate elementary reflector H(i) to annihilate A(i+1:m,i) */ i__2 = *m - i__ + 1; /* Computing MIN */ i__3 = i__ + 1; dlarfg_(&i__2, &a[i__ + i__ * a_dim1], &a[min(i__3,*m) + i__ * a_dim1], &c__1, &tauq[i__]); d__[i__] = a[i__ + i__ * a_dim1]; a[i__ + i__ * a_dim1] = 1.; /* Apply H(i) to A(i:m,i+1:n) from the left */ if (i__ < *n) { i__2 = *m - i__ + 1; i__3 = *n - i__; dlarf_("Left", &i__2, &i__3, &a[i__ + i__ * a_dim1], &c__1, & tauq[i__], &a[i__ + (i__ + 1) * a_dim1], lda, &work[1] ); } a[i__ + i__ * a_dim1] = d__[i__]; if (i__ < *n) { /* Generate elementary reflector G(i) to annihilate A(i,i+2:n) */ i__2 = *n - i__; /* Computing MIN */ i__3 = i__ + 2; dlarfg_(&i__2, &a[i__ + (i__ + 1) * a_dim1], &a[i__ + min( i__3,*n) * a_dim1], lda, &taup[i__]); e[i__] = a[i__ + (i__ + 1) * a_dim1]; a[i__ + (i__ + 1) * a_dim1] = 1.; /* Apply G(i) to A(i+1:m,i+1:n) from the right */ i__2 = *m - i__; i__3 = *n - i__; dlarf_("Right", &i__2, &i__3, &a[i__ + (i__ + 1) * a_dim1], lda, &taup[i__], &a[i__ + 1 + (i__ + 1) * a_dim1], lda, &work[1]); a[i__ + (i__ + 1) * a_dim1] = e[i__]; } else { taup[i__] = 0.; } /* L10: */ } } else { /* Reduce to lower bidiagonal form */ i__1 = *m; for (i__ = 1; i__ <= i__1; ++i__) { /* Generate elementary reflector G(i) to annihilate A(i,i+1:n) 
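                   (As described in Further Details above, each such G(i)
                   has the form G(i) = I - taup * u * u' with u(1:i-1) = 0,
                   u(i) = 1, and u(i+1:n) stored on exit in A(i,i+1:n).)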
*/ i__2 = *n - i__ + 1; /* Computing MIN */ i__3 = i__ + 1; dlarfg_(&i__2, &a[i__ + i__ * a_dim1], &a[i__ + min(i__3,*n) * a_dim1], lda, &taup[i__]); d__[i__] = a[i__ + i__ * a_dim1]; a[i__ + i__ * a_dim1] = 1.; /* Apply G(i) to A(i+1:m,i:n) from the right */ if (i__ < *m) { i__2 = *m - i__; i__3 = *n - i__ + 1; dlarf_("Right", &i__2, &i__3, &a[i__ + i__ * a_dim1], lda, & taup[i__], &a[i__ + 1 + i__ * a_dim1], lda, &work[1]); } a[i__ + i__ * a_dim1] = d__[i__]; if (i__ < *m) { /* Generate elementary reflector H(i) to annihilate A(i+2:m,i) */ i__2 = *m - i__; /* Computing MIN */ i__3 = i__ + 2; dlarfg_(&i__2, &a[i__ + 1 + i__ * a_dim1], &a[min(i__3,*m) + i__ * a_dim1], &c__1, &tauq[i__]); e[i__] = a[i__ + 1 + i__ * a_dim1]; a[i__ + 1 + i__ * a_dim1] = 1.; /* Apply H(i) to A(i+1:m,i+1:n) from the left */ i__2 = *m - i__; i__3 = *n - i__; dlarf_("Left", &i__2, &i__3, &a[i__ + 1 + i__ * a_dim1], & c__1, &tauq[i__], &a[i__ + 1 + (i__ + 1) * a_dim1], lda, &work[1]); a[i__ + 1 + i__ * a_dim1] = e[i__]; } else { tauq[i__] = 0.; } /* L20: */ } } return 0; /* End of DGEBD2 */ } /* dgebd2_ */ /* Subroutine */ int dgebrd_(integer *m, integer *n, doublereal *a, integer * lda, doublereal *d__, doublereal *e, doublereal *tauq, doublereal * taup, doublereal *work, integer *lwork, integer *info) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2, i__3, i__4; /* Local variables */ static integer i__, j; extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *); static integer nbmin, iinfo, minmn; extern /* Subroutine */ int dgebd2_(integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, integer *); static integer nb; extern /* Subroutine */ int dlabrd_(integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, integer *, doublereal *, integer *); static integer nx; static doublereal ws; extern /* Subroutine */ int xerbla_(char *, integer *); extern integer ilaenv_(integer *, char *, char *, integer *, integer *, integer *, integer *, ftnlen, ftnlen); static integer ldwrkx, ldwrky, lwkopt; static logical lquery; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DGEBRD reduces a general real M-by-N matrix A to upper or lower bidiagonal form B by an orthogonal transformation: Q**T * A * P = B. If m >= n, B is upper bidiagonal; if m < n, B is lower bidiagonal. Arguments ========= M (input) INTEGER The number of rows in the matrix A. M >= 0. N (input) INTEGER The number of columns in the matrix A. N >= 0. A (input/output) DOUBLE PRECISION array, dimension (LDA,N) On entry, the M-by-N general matrix to be reduced. 
On exit, if m >= n, the diagonal and the first superdiagonal are overwritten with the upper bidiagonal matrix B; the elements below the diagonal, with the array TAUQ, represent the orthogonal matrix Q as a product of elementary reflectors, and the elements above the first superdiagonal, with the array TAUP, represent the orthogonal matrix P as a product of elementary reflectors; if m < n, the diagonal and the first subdiagonal are overwritten with the lower bidiagonal matrix B; the elements below the first subdiagonal, with the array TAUQ, represent the orthogonal matrix Q as a product of elementary reflectors, and the elements above the diagonal, with the array TAUP, represent the orthogonal matrix P as a product of elementary reflectors. See Further Details. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,M). D (output) DOUBLE PRECISION array, dimension (min(M,N)) The diagonal elements of the bidiagonal matrix B: D(i) = A(i,i). E (output) DOUBLE PRECISION array, dimension (min(M,N)-1) The off-diagonal elements of the bidiagonal matrix B: if m >= n, E(i) = A(i,i+1) for i = 1,2,...,n-1; if m < n, E(i) = A(i+1,i) for i = 1,2,...,m-1. TAUQ (output) DOUBLE PRECISION array dimension (min(M,N)) The scalar factors of the elementary reflectors which represent the orthogonal matrix Q. See Further Details. TAUP (output) DOUBLE PRECISION array, dimension (min(M,N)) The scalar factors of the elementary reflectors which represent the orthogonal matrix P. See Further Details. WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) On exit, if INFO = 0, WORK(1) returns the optimal LWORK. LWORK (input) INTEGER The length of the array WORK. LWORK >= max(1,M,N). For optimum performance LWORK >= (M+N)*NB, where NB is the optimal blocksize. If LWORK = -1, then a workspace query is assumed; the routine only calculates the optimal size of the WORK array, returns this value as the first entry of the WORK array, and no error message related to LWORK is issued by XERBLA. INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value. Further Details =============== The matrices Q and P are represented as products of elementary reflectors: If m >= n, Q = H(1) H(2) . . . H(n) and P = G(1) G(2) . . . G(n-1) Each H(i) and G(i) has the form: H(i) = I - tauq * v * v' and G(i) = I - taup * u * u' where tauq and taup are real scalars, and v and u are real vectors; v(1:i-1) = 0, v(i) = 1, and v(i+1:m) is stored on exit in A(i+1:m,i); u(1:i) = 0, u(i+1) = 1, and u(i+2:n) is stored on exit in A(i,i+2:n); tauq is stored in TAUQ(i) and taup in TAUP(i). If m < n, Q = H(1) H(2) . . . H(m-1) and P = G(1) G(2) . . . G(m) Each H(i) and G(i) has the form: H(i) = I - tauq * v * v' and G(i) = I - taup * u * u' where tauq and taup are real scalars, and v and u are real vectors; v(1:i) = 0, v(i+1) = 1, and v(i+2:m) is stored on exit in A(i+2:m,i); u(1:i-1) = 0, u(i) = 1, and u(i+1:n) is stored on exit in A(i,i+1:n); tauq is stored in TAUQ(i) and taup in TAUP(i). The contents of A on exit are illustrated by the following examples: m = 6 and n = 5 (m > n): m = 5 and n = 6 (m < n): ( d e u1 u1 u1 ) ( d u1 u1 u1 u1 u1 ) ( v1 d e u2 u2 ) ( e d u2 u2 u2 u2 ) ( v1 v2 d e u3 ) ( v1 e d u3 u3 u3 ) ( v1 v2 v3 d e ) ( v1 v2 e d u4 u4 ) ( v1 v2 v3 v4 d ) ( v1 v2 v3 e d u5 ) ( v1 v2 v3 v4 v5 ) where d and e denote diagonal and off-diagonal elements of B, vi denotes an element of the vector defining H(i), and ui an element of the vector defining G(i). 
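    Example (editor's illustrative sketch, not part of the reference
    LAPACK documentation; it assumes the f2c typedefs from f2c.h, a
    column-major 6-by-5 matrix already stored in A, and stdlib.h for
    malloc).  The first call uses LWORK = -1 as a workspace query; the
    optimal size comes back in WKOPT:

        integer m = 6, n = 5, lda = 6, lwork = -1, info;
        doublereal a[30], d[5], e[4], tauq[5], taup[5], wkopt, *work;

        dgebrd_(&m, &n, a, &lda, d, e, tauq, taup, &wkopt, &lwork, &info);
        lwork = (integer) wkopt;
        work = (doublereal *) malloc(lwork * sizeof(doublereal));
        dgebrd_(&m, &n, a, &lda, d, e, tauq, taup, work, &lwork, &info);
        free(work);

    After the second call (INFO = 0), D and E hold the bidiagonal matrix
    B, while the reflectors left in A together with TAUQ and TAUP define
    the orthogonal factors Q and P with Q**T * A * P = B, as described
    above.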
===================================================================== Test the input parameters */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --d__; --e; --tauq; --taup; --work; /* Function Body */ *info = 0; /* Computing MAX */ i__1 = 1, i__2 = ilaenv_(&c__1, "DGEBRD", " ", m, n, &c_n1, &c_n1, ( ftnlen)6, (ftnlen)1); nb = max(i__1,i__2); lwkopt = (*m + *n) * nb; work[1] = (doublereal) lwkopt; lquery = *lwork == -1; if (*m < 0) { *info = -1; } else if (*n < 0) { *info = -2; } else if (*lda < max(1,*m)) { *info = -4; } else /* if(complicated condition) */ { /* Computing MAX */ i__1 = max(1,*m); if (*lwork < max(i__1,*n) && ! lquery) { *info = -10; } } if (*info < 0) { i__1 = -(*info); xerbla_("DGEBRD", &i__1); return 0; } else if (lquery) { return 0; } /* Quick return if possible */ minmn = min(*m,*n); if (minmn == 0) { work[1] = 1.; return 0; } ws = (doublereal) max(*m,*n); ldwrkx = *m; ldwrky = *n; if (nb > 1 && nb < minmn) { /* Set the crossover point NX. Computing MAX */ i__1 = nb, i__2 = ilaenv_(&c__3, "DGEBRD", " ", m, n, &c_n1, &c_n1, ( ftnlen)6, (ftnlen)1); nx = max(i__1,i__2); /* Determine when to switch from blocked to unblocked code. */ if (nx < minmn) { ws = (doublereal) ((*m + *n) * nb); if ((doublereal) (*lwork) < ws) { /* Not enough work space for the optimal NB, consider using a smaller block size. */ nbmin = ilaenv_(&c__2, "DGEBRD", " ", m, n, &c_n1, &c_n1, ( ftnlen)6, (ftnlen)1); if (*lwork >= (*m + *n) * nbmin) { nb = *lwork / (*m + *n); } else { nb = 1; nx = minmn; } } } } else { nx = minmn; } i__1 = minmn - nx; i__2 = nb; for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) { /* Reduce rows and columns i:i+nb-1 to bidiagonal form and return the matrices X and Y which are needed to update the unreduced part of the matrix */ i__3 = *m - i__ + 1; i__4 = *n - i__ + 1; dlabrd_(&i__3, &i__4, &nb, &a[i__ + i__ * a_dim1], lda, &d__[i__], &e[ i__], &tauq[i__], &taup[i__], &work[1], &ldwrkx, &work[ldwrkx * nb + 1], &ldwrky); /* Update the trailing submatrix A(i+nb:m,i+nb:n), using an update of the form A := A - V*Y' - X*U' */ i__3 = *m - i__ - nb + 1; i__4 = *n - i__ - nb + 1; dgemm_("No transpose", "Transpose", &i__3, &i__4, &nb, &c_b151, &a[ i__ + nb + i__ * a_dim1], lda, &work[ldwrkx * nb + nb + 1], & ldwrky, &c_b15, &a[i__ + nb + (i__ + nb) * a_dim1], lda); i__3 = *m - i__ - nb + 1; i__4 = *n - i__ - nb + 1; dgemm_("No transpose", "No transpose", &i__3, &i__4, &nb, &c_b151, & work[nb + 1], &ldwrkx, &a[i__ + (i__ + nb) * a_dim1], lda, & c_b15, &a[i__ + nb + (i__ + nb) * a_dim1], lda); /* Copy diagonal and off-diagonal elements of B back into A */ if (*m >= *n) { i__3 = i__ + nb - 1; for (j = i__; j <= i__3; ++j) { a[j + j * a_dim1] = d__[j]; a[j + (j + 1) * a_dim1] = e[j]; /* L10: */ } } else { i__3 = i__ + nb - 1; for (j = i__; j <= i__3; ++j) { a[j + j * a_dim1] = d__[j]; a[j + 1 + j * a_dim1] = e[j]; /* L20: */ } } /* L30: */ } /* Use unblocked code to reduce the remainder of the matrix */ i__2 = *m - i__ + 1; i__1 = *n - i__ + 1; dgebd2_(&i__2, &i__1, &a[i__ + i__ * a_dim1], lda, &d__[i__], &e[i__], & tauq[i__], &taup[i__], &work[1], &iinfo); work[1] = ws; return 0; /* End of DGEBRD */ } /* dgebrd_ */ /* Subroutine */ int dgeev_(char *jobvl, char *jobvr, integer *n, doublereal * a, integer *lda, doublereal *wr, doublereal *wi, doublereal *vl, integer *ldvl, doublereal *vr, integer *ldvr, doublereal *work, integer *lwork, integer *info) { /* System generated locals */ integer a_dim1, a_offset, vl_dim1, vl_offset, 
vr_dim1, vr_offset, i__1, i__2, i__3; doublereal d__1, d__2; /* Builtin functions */ double sqrt(doublereal); /* Local variables */ static integer ibal; static char side[1]; static doublereal anrm; static integer ierr, itau; extern /* Subroutine */ int drot_(integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *); static integer iwrk, nout; extern doublereal dnrm2_(integer *, doublereal *, integer *); static integer i__, k; static doublereal r__; extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, integer *); extern logical lsame_(char *, char *); extern doublereal dlapy2_(doublereal *, doublereal *); extern /* Subroutine */ int dlabad_(doublereal *, doublereal *), dgebak_( char *, char *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, integer *, integer *), dgebal_(char *, integer *, doublereal *, integer *, integer *, integer *, doublereal *, integer *); static doublereal cs; static logical scalea; static doublereal cscale; extern doublereal dlange_(char *, integer *, integer *, doublereal *, integer *, doublereal *); extern /* Subroutine */ int dgehrd_(integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, integer *); static doublereal sn; extern /* Subroutine */ int dlascl_(char *, integer *, integer *, doublereal *, doublereal *, integer *, integer *, doublereal *, integer *, integer *); extern integer idamax_(integer *, doublereal *, integer *); extern /* Subroutine */ int dlacpy_(char *, integer *, integer *, doublereal *, integer *, doublereal *, integer *), dlartg_(doublereal *, doublereal *, doublereal *, doublereal *, doublereal *), xerbla_(char *, integer *); static logical select[1]; extern integer ilaenv_(integer *, char *, char *, integer *, integer *, integer *, integer *, ftnlen, ftnlen); static doublereal bignum; extern /* Subroutine */ int dorghr_(integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, integer *), dhseqr_(char *, char *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, doublereal *, integer *, doublereal *, integer *, integer *), dtrevc_(char *, char *, logical *, integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *, integer *, integer *, doublereal *, integer *); static integer minwrk, maxwrk; static logical wantvl; static doublereal smlnum; static integer hswork; static logical lquery, wantvr; static integer ihi; static doublereal scl; static integer ilo; static doublereal dum[1], eps; /* -- LAPACK driver routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DGEEV computes for an N-by-N real nonsymmetric matrix A, the eigenvalues and, optionally, the left and/or right eigenvectors. The right eigenvector v(j) of A satisfies A * v(j) = lambda(j) * v(j) where lambda(j) is its eigenvalue. The left eigenvector u(j) of A satisfies u(j)**H * A = lambda(j) * u(j)**H where u(j)**H denotes the conjugate transpose of u(j). The computed eigenvectors are normalized to have Euclidean norm equal to 1 and largest component real. Arguments ========= JOBVL (input) CHARACTER*1 = 'N': left eigenvectors of A are not computed; = 'V': left eigenvectors of A are computed. JOBVR (input) CHARACTER*1 = 'N': right eigenvectors of A are not computed; = 'V': right eigenvectors of A are computed. N (input) INTEGER The order of the matrix A. N >= 0. 
A (input/output) DOUBLE PRECISION array, dimension (LDA,N) On entry, the N-by-N matrix A. On exit, A has been overwritten. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,N). WR (output) DOUBLE PRECISION array, dimension (N) WI (output) DOUBLE PRECISION array, dimension (N) WR and WI contain the real and imaginary parts, respectively, of the computed eigenvalues. Complex conjugate pairs of eigenvalues appear consecutively with the eigenvalue having the positive imaginary part first. VL (output) DOUBLE PRECISION array, dimension (LDVL,N) If JOBVL = 'V', the left eigenvectors u(j) are stored one after another in the columns of VL, in the same order as their eigenvalues. If JOBVL = 'N', VL is not referenced. If the j-th eigenvalue is real, then u(j) = VL(:,j), the j-th column of VL. If the j-th and (j+1)-st eigenvalues form a complex conjugate pair, then u(j) = VL(:,j) + i*VL(:,j+1) and u(j+1) = VL(:,j) - i*VL(:,j+1). LDVL (input) INTEGER The leading dimension of the array VL. LDVL >= 1; if JOBVL = 'V', LDVL >= N. VR (output) DOUBLE PRECISION array, dimension (LDVR,N) If JOBVR = 'V', the right eigenvectors v(j) are stored one after another in the columns of VR, in the same order as their eigenvalues. If JOBVR = 'N', VR is not referenced. If the j-th eigenvalue is real, then v(j) = VR(:,j), the j-th column of VR. If the j-th and (j+1)-st eigenvalues form a complex conjugate pair, then v(j) = VR(:,j) + i*VR(:,j+1) and v(j+1) = VR(:,j) - i*VR(:,j+1). LDVR (input) INTEGER The leading dimension of the array VR. LDVR >= 1; if JOBVR = 'V', LDVR >= N. WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) On exit, if INFO = 0, WORK(1) returns the optimal LWORK. LWORK (input) INTEGER The dimension of the array WORK. LWORK >= max(1,3*N), and if JOBVL = 'V' or JOBVR = 'V', LWORK >= 4*N. For good performance, LWORK must generally be larger. If LWORK = -1, then a workspace query is assumed; the routine only calculates the optimal size of the WORK array, returns this value as the first entry of the WORK array, and no error message related to LWORK is issued by XERBLA. INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value. > 0: if INFO = i, the QR algorithm failed to compute all the eigenvalues, and no eigenvectors have been computed; elements i+1:N of WR and WI contain eigenvalues which have converged. ===================================================================== Test the input arguments */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --wr; --wi; vl_dim1 = *ldvl; vl_offset = 1 + vl_dim1 * 1; vl -= vl_offset; vr_dim1 = *ldvr; vr_offset = 1 + vr_dim1 * 1; vr -= vr_offset; --work; /* Function Body */ *info = 0; lquery = *lwork == -1; wantvl = lsame_(jobvl, "V"); wantvr = lsame_(jobvr, "V"); if (! wantvl && ! lsame_(jobvl, "N")) { *info = -1; } else if (! wantvr && ! lsame_(jobvr, "N")) { *info = -2; } else if (*n < 0) { *info = -3; } else if (*lda < max(1,*n)) { *info = -5; } else if (*ldvl < 1 || wantvl && *ldvl < *n) { *info = -9; } else if (*ldvr < 1 || wantvr && *ldvr < *n) { *info = -11; } /* Compute workspace (Note: Comments in the code beginning "Workspace:" describe the minimal amount of workspace needed at that point in the code, as well as the preferred amount for good performance. NB refers to the optimal block size for the immediately following subroutine, as returned by ILAENV. HSWORK refers to the workspace preferred by DHSEQR, as calculated below. 
HSWORK is computed assuming ILO=1 and IHI=N, the worst case.) */ if (*info == 0) { if (*n == 0) { minwrk = 1; maxwrk = 1; } else { maxwrk = (*n << 1) + *n * ilaenv_(&c__1, "DGEHRD", " ", n, &c__1, n, &c__0, (ftnlen)6, (ftnlen)1); if (wantvl) { minwrk = *n << 2; /* Computing MAX */ i__1 = maxwrk, i__2 = (*n << 1) + (*n - 1) * ilaenv_(&c__1, "DORGHR", " ", n, &c__1, n, &c_n1, (ftnlen)6, (ftnlen) 1); maxwrk = max(i__1,i__2); dhseqr_("S", "V", n, &c__1, n, &a[a_offset], lda, &wr[1], &wi[ 1], &vl[vl_offset], ldvl, &work[1], &c_n1, info); hswork = (integer) work[1]; /* Computing MAX */ i__1 = maxwrk, i__2 = *n + 1, i__1 = max(i__1,i__2), i__2 = * n + hswork; maxwrk = max(i__1,i__2); /* Computing MAX */ i__1 = maxwrk, i__2 = *n << 2; maxwrk = max(i__1,i__2); } else if (wantvr) { minwrk = *n << 2; /* Computing MAX */ i__1 = maxwrk, i__2 = (*n << 1) + (*n - 1) * ilaenv_(&c__1, "DORGHR", " ", n, &c__1, n, &c_n1, (ftnlen)6, (ftnlen) 1); maxwrk = max(i__1,i__2); dhseqr_("S", "V", n, &c__1, n, &a[a_offset], lda, &wr[1], &wi[ 1], &vr[vr_offset], ldvr, &work[1], &c_n1, info); hswork = (integer) work[1]; /* Computing MAX */ i__1 = maxwrk, i__2 = *n + 1, i__1 = max(i__1,i__2), i__2 = * n + hswork; maxwrk = max(i__1,i__2); /* Computing MAX */ i__1 = maxwrk, i__2 = *n << 2; maxwrk = max(i__1,i__2); } else { minwrk = *n * 3; dhseqr_("E", "N", n, &c__1, n, &a[a_offset], lda, &wr[1], &wi[ 1], &vr[vr_offset], ldvr, &work[1], &c_n1, info); hswork = (integer) work[1]; /* Computing MAX */ i__1 = maxwrk, i__2 = *n + 1, i__1 = max(i__1,i__2), i__2 = * n + hswork; maxwrk = max(i__1,i__2); } maxwrk = max(maxwrk,minwrk); } work[1] = (doublereal) maxwrk; if (*lwork < minwrk && ! lquery) { *info = -13; } } if (*info != 0) { i__1 = -(*info); xerbla_("DGEEV ", &i__1); return 0; } else if (lquery) { return 0; } /* Quick return if possible */ if (*n == 0) { return 0; } /* Get machine constants */ eps = PRECISION; smlnum = SAFEMINIMUM; bignum = 1. / smlnum; dlabad_(&smlnum, &bignum); smlnum = sqrt(smlnum) / eps; bignum = 1. / smlnum; /* Scale A if max element outside range [SMLNUM,BIGNUM] */ anrm = dlange_("M", n, n, &a[a_offset], lda, dum); scalea = FALSE_; if (anrm > 0. 
&& anrm < smlnum) { scalea = TRUE_; cscale = smlnum; } else if (anrm > bignum) { scalea = TRUE_; cscale = bignum; } if (scalea) { dlascl_("G", &c__0, &c__0, &anrm, &cscale, n, n, &a[a_offset], lda, & ierr); } /* Balance the matrix (Workspace: need N) */ ibal = 1; dgebal_("B", n, &a[a_offset], lda, &ilo, &ihi, &work[ibal], &ierr); /* Reduce to upper Hessenberg form (Workspace: need 3*N, prefer 2*N+N*NB) */ itau = ibal + *n; iwrk = itau + *n; i__1 = *lwork - iwrk + 1; dgehrd_(n, &ilo, &ihi, &a[a_offset], lda, &work[itau], &work[iwrk], &i__1, &ierr); if (wantvl) { /* Want left eigenvectors Copy Householder vectors to VL */ *(unsigned char *)side = 'L'; dlacpy_("L", n, n, &a[a_offset], lda, &vl[vl_offset], ldvl) ; /* Generate orthogonal matrix in VL (Workspace: need 3*N-1, prefer 2*N+(N-1)*NB) */ i__1 = *lwork - iwrk + 1; dorghr_(n, &ilo, &ihi, &vl[vl_offset], ldvl, &work[itau], &work[iwrk], &i__1, &ierr); /* Perform QR iteration, accumulating Schur vectors in VL (Workspace: need N+1, prefer N+HSWORK (see comments) ) */ iwrk = itau; i__1 = *lwork - iwrk + 1; dhseqr_("S", "V", n, &ilo, &ihi, &a[a_offset], lda, &wr[1], &wi[1], & vl[vl_offset], ldvl, &work[iwrk], &i__1, info); if (wantvr) { /* Want left and right eigenvectors Copy Schur vectors to VR */ *(unsigned char *)side = 'B'; dlacpy_("F", n, n, &vl[vl_offset], ldvl, &vr[vr_offset], ldvr); } } else if (wantvr) { /* Want right eigenvectors Copy Householder vectors to VR */ *(unsigned char *)side = 'R'; dlacpy_("L", n, n, &a[a_offset], lda, &vr[vr_offset], ldvr) ; /* Generate orthogonal matrix in VR (Workspace: need 3*N-1, prefer 2*N+(N-1)*NB) */ i__1 = *lwork - iwrk + 1; dorghr_(n, &ilo, &ihi, &vr[vr_offset], ldvr, &work[itau], &work[iwrk], &i__1, &ierr); /* Perform QR iteration, accumulating Schur vectors in VR (Workspace: need N+1, prefer N+HSWORK (see comments) ) */ iwrk = itau; i__1 = *lwork - iwrk + 1; dhseqr_("S", "V", n, &ilo, &ihi, &a[a_offset], lda, &wr[1], &wi[1], & vr[vr_offset], ldvr, &work[iwrk], &i__1, info); } else { /* Compute eigenvalues only (Workspace: need N+1, prefer N+HSWORK (see comments) ) */ iwrk = itau; i__1 = *lwork - iwrk + 1; dhseqr_("E", "N", n, &ilo, &ihi, &a[a_offset], lda, &wr[1], &wi[1], & vr[vr_offset], ldvr, &work[iwrk], &i__1, info); } /* If INFO > 0 from DHSEQR, then quit */ if (*info > 0) { goto L50; } if (wantvl || wantvr) { /* Compute left and/or right eigenvectors (Workspace: need 4*N) */ dtrevc_(side, "B", select, n, &a[a_offset], lda, &vl[vl_offset], ldvl, &vr[vr_offset], ldvr, n, &nout, &work[iwrk], &ierr); } if (wantvl) { /* Undo balancing of left eigenvectors (Workspace: need N) */ dgebak_("B", "L", n, &ilo, &ihi, &work[ibal], n, &vl[vl_offset], ldvl, &ierr); /* Normalize left eigenvectors and make largest component real */ i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { if (wi[i__] == 0.) { scl = 1. / dnrm2_(n, &vl[i__ * vl_dim1 + 1], &c__1); dscal_(n, &scl, &vl[i__ * vl_dim1 + 1], &c__1); } else if (wi[i__] > 0.) { d__1 = dnrm2_(n, &vl[i__ * vl_dim1 + 1], &c__1); d__2 = dnrm2_(n, &vl[(i__ + 1) * vl_dim1 + 1], &c__1); scl = 1. 
/ dlapy2_(&d__1, &d__2); dscal_(n, &scl, &vl[i__ * vl_dim1 + 1], &c__1); dscal_(n, &scl, &vl[(i__ + 1) * vl_dim1 + 1], &c__1); i__2 = *n; for (k = 1; k <= i__2; ++k) { /* Computing 2nd power */ d__1 = vl[k + i__ * vl_dim1]; /* Computing 2nd power */ d__2 = vl[k + (i__ + 1) * vl_dim1]; work[iwrk + k - 1] = d__1 * d__1 + d__2 * d__2; /* L10: */ } k = idamax_(n, &work[iwrk], &c__1); dlartg_(&vl[k + i__ * vl_dim1], &vl[k + (i__ + 1) * vl_dim1], &cs, &sn, &r__); drot_(n, &vl[i__ * vl_dim1 + 1], &c__1, &vl[(i__ + 1) * vl_dim1 + 1], &c__1, &cs, &sn); vl[k + (i__ + 1) * vl_dim1] = 0.; } /* L20: */ } } if (wantvr) { /* Undo balancing of right eigenvectors (Workspace: need N) */ dgebak_("B", "R", n, &ilo, &ihi, &work[ibal], n, &vr[vr_offset], ldvr, &ierr); /* Normalize right eigenvectors and make largest component real */ i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { if (wi[i__] == 0.) { scl = 1. / dnrm2_(n, &vr[i__ * vr_dim1 + 1], &c__1); dscal_(n, &scl, &vr[i__ * vr_dim1 + 1], &c__1); } else if (wi[i__] > 0.) { d__1 = dnrm2_(n, &vr[i__ * vr_dim1 + 1], &c__1); d__2 = dnrm2_(n, &vr[(i__ + 1) * vr_dim1 + 1], &c__1); scl = 1. / dlapy2_(&d__1, &d__2); dscal_(n, &scl, &vr[i__ * vr_dim1 + 1], &c__1); dscal_(n, &scl, &vr[(i__ + 1) * vr_dim1 + 1], &c__1); i__2 = *n; for (k = 1; k <= i__2; ++k) { /* Computing 2nd power */ d__1 = vr[k + i__ * vr_dim1]; /* Computing 2nd power */ d__2 = vr[k + (i__ + 1) * vr_dim1]; work[iwrk + k - 1] = d__1 * d__1 + d__2 * d__2; /* L30: */ } k = idamax_(n, &work[iwrk], &c__1); dlartg_(&vr[k + i__ * vr_dim1], &vr[k + (i__ + 1) * vr_dim1], &cs, &sn, &r__); drot_(n, &vr[i__ * vr_dim1 + 1], &c__1, &vr[(i__ + 1) * vr_dim1 + 1], &c__1, &cs, &sn); vr[k + (i__ + 1) * vr_dim1] = 0.; } /* L40: */ } } /* Undo scaling if necessary */ L50: if (scalea) { i__1 = *n - *info; /* Computing MAX */ i__3 = *n - *info; i__2 = max(i__3,1); dlascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &wr[*info + 1], &i__2, &ierr); i__1 = *n - *info; /* Computing MAX */ i__3 = *n - *info; i__2 = max(i__3,1); dlascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &wi[*info + 1], &i__2, &ierr); if (*info > 0) { i__1 = ilo - 1; dlascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &wr[1], n, &ierr); i__1 = ilo - 1; dlascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &wi[1], n, &ierr); } } work[1] = (doublereal) maxwrk; return 0; /* End of DGEEV */ } /* dgeev_ */ /* Subroutine */ int dgehd2_(integer *n, integer *ilo, integer *ihi, doublereal *a, integer *lda, doublereal *tau, doublereal *work, integer *info) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2, i__3; /* Local variables */ static integer i__; extern /* Subroutine */ int dlarf_(char *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, doublereal *), dlarfg_(integer *, doublereal *, doublereal *, integer *, doublereal *), xerbla_(char *, integer *); static doublereal aii; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DGEHD2 reduces a real general matrix A to upper Hessenberg form H by an orthogonal similarity transformation: Q' * A * Q = H . Arguments ========= N (input) INTEGER The order of the matrix A. N >= 0. ILO (input) INTEGER IHI (input) INTEGER It is assumed that A is already upper triangular in rows and columns 1:ILO-1 and IHI+1:N. ILO and IHI are normally set by a previous call to DGEBAL; otherwise they should be set to 1 and N respectively. See Further Details. 1 <= ILO <= IHI <= max(1,N). 
A (input/output) DOUBLE PRECISION array, dimension (LDA,N) On entry, the n by n general matrix to be reduced. On exit, the upper triangle and the first subdiagonal of A are overwritten with the upper Hessenberg matrix H, and the elements below the first subdiagonal, with the array TAU, represent the orthogonal matrix Q as a product of elementary reflectors. See Further Details. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,N). TAU (output) DOUBLE PRECISION array, dimension (N-1) The scalar factors of the elementary reflectors (see Further Details). WORK (workspace) DOUBLE PRECISION array, dimension (N) INFO (output) INTEGER = 0: successful exit. < 0: if INFO = -i, the i-th argument had an illegal value. Further Details =============== The matrix Q is represented as a product of (ihi-ilo) elementary reflectors Q = H(ilo) H(ilo+1) . . . H(ihi-1). Each H(i) has the form H(i) = I - tau * v * v' where tau is a real scalar, and v is a real vector with v(1:i) = 0, v(i+1) = 1 and v(ihi+1:n) = 0; v(i+2:ihi) is stored on exit in A(i+2:ihi,i), and tau in TAU(i). The contents of A are illustrated by the following example, with n = 7, ilo = 2 and ihi = 6: on entry, on exit, ( a a a a a a a ) ( a a h h h h a ) ( a a a a a a ) ( a h h h h a ) ( a a a a a a ) ( h h h h h h ) ( a a a a a a ) ( v2 h h h h h ) ( a a a a a a ) ( v2 v3 h h h h ) ( a a a a a a ) ( v2 v3 v4 h h h ) ( a ) ( a ) where a denotes an element of the original matrix A, h denotes a modified element of the upper Hessenberg matrix H, and vi denotes an element of the vector defining H(i). ===================================================================== Test the input parameters */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --tau; --work; /* Function Body */ *info = 0; if (*n < 0) { *info = -1; } else if (*ilo < 1 || *ilo > max(1,*n)) { *info = -2; } else if (*ihi < min(*ilo,*n) || *ihi > *n) { *info = -3; } else if (*lda < max(1,*n)) { *info = -5; } if (*info != 0) { i__1 = -(*info); xerbla_("DGEHD2", &i__1); return 0; } i__1 = *ihi - 1; for (i__ = *ilo; i__ <= i__1; ++i__) { /* Compute elementary reflector H(i) to annihilate A(i+2:ihi,i) */ i__2 = *ihi - i__; /* Computing MIN */ i__3 = i__ + 2; dlarfg_(&i__2, &a[i__ + 1 + i__ * a_dim1], &a[min(i__3,*n) + i__ * a_dim1], &c__1, &tau[i__]); aii = a[i__ + 1 + i__ * a_dim1]; a[i__ + 1 + i__ * a_dim1] = 1.; /* Apply H(i) to A(1:ihi,i+1:ihi) from the right */ i__2 = *ihi - i__; dlarf_("Right", ihi, &i__2, &a[i__ + 1 + i__ * a_dim1], &c__1, &tau[ i__], &a[(i__ + 1) * a_dim1 + 1], lda, &work[1]); /* Apply H(i) to A(i+1:ihi,i+1:n) from the left */ i__2 = *ihi - i__; i__3 = *n - i__; dlarf_("Left", &i__2, &i__3, &a[i__ + 1 + i__ * a_dim1], &c__1, &tau[ i__], &a[i__ + 1 + (i__ + 1) * a_dim1], lda, &work[1]); a[i__ + 1 + i__ * a_dim1] = aii; /* L10: */ } return 0; /* End of DGEHD2 */ } /* dgehd2_ */ /* Subroutine */ int dgehrd_(integer *n, integer *ilo, integer *ihi, doublereal *a, integer *lda, doublereal *tau, doublereal *work, integer *lwork, integer *info) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2, i__3, i__4; /* Local variables */ static integer i__, j; static doublereal t[4160] /* was [65][64] */; extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *); static integer nbmin, iinfo; extern /* Subroutine */ int dtrmm_(char *, char *, char *, char *, integer *, integer *, 
doublereal *, doublereal *, integer *, doublereal *, integer *), daxpy_( integer *, doublereal *, doublereal *, integer *, doublereal *, integer *), dgehd2_(integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *), dlahr2_( integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *); static integer ib; static doublereal ei; static integer nb, nh; extern /* Subroutine */ int dlarfb_(char *, char *, char *, char *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *); static integer nx; extern /* Subroutine */ int xerbla_(char *, integer *); extern integer ilaenv_(integer *, char *, char *, integer *, integer *, integer *, integer *, ftnlen, ftnlen); static integer ldwork, lwkopt; static logical lquery; static integer iws; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DGEHRD reduces a real general matrix A to upper Hessenberg form H by an orthogonal similarity transformation: Q' * A * Q = H . Arguments ========= N (input) INTEGER The order of the matrix A. N >= 0. ILO (input) INTEGER IHI (input) INTEGER It is assumed that A is already upper triangular in rows and columns 1:ILO-1 and IHI+1:N. ILO and IHI are normally set by a previous call to DGEBAL; otherwise they should be set to 1 and N respectively. See Further Details. 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0. A (input/output) DOUBLE PRECISION array, dimension (LDA,N) On entry, the N-by-N general matrix to be reduced. On exit, the upper triangle and the first subdiagonal of A are overwritten with the upper Hessenberg matrix H, and the elements below the first subdiagonal, with the array TAU, represent the orthogonal matrix Q as a product of elementary reflectors. See Further Details. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,N). TAU (output) DOUBLE PRECISION array, dimension (N-1) The scalar factors of the elementary reflectors (see Further Details). Elements 1:ILO-1 and IHI:N-1 of TAU are set to zero. WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK) On exit, if INFO = 0, WORK(1) returns the optimal LWORK. LWORK (input) INTEGER The length of the array WORK. LWORK >= max(1,N). For optimum performance LWORK >= N*NB, where NB is the optimal blocksize. If LWORK = -1, then a workspace query is assumed; the routine only calculates the optimal size of the WORK array, returns this value as the first entry of the WORK array, and no error message related to LWORK is issued by XERBLA. INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value. Further Details =============== The matrix Q is represented as a product of (ihi-ilo) elementary reflectors Q = H(ilo) H(ilo+1) . . . H(ihi-1). Each H(i) has the form H(i) = I - tau * v * v' where tau is a real scalar, and v is a real vector with v(1:i) = 0, v(i+1) = 1 and v(ihi+1:n) = 0; v(i+2:ihi) is stored on exit in A(i+2:ihi,i), and tau in TAU(i). 
The contents of A are illustrated by the following example, with n = 7, ilo = 2 and ihi = 6: on entry, on exit, ( a a a a a a a ) ( a a h h h h a ) ( a a a a a a ) ( a h h h h a ) ( a a a a a a ) ( h h h h h h ) ( a a a a a a ) ( v2 h h h h h ) ( a a a a a a ) ( v2 v3 h h h h ) ( a a a a a a ) ( v2 v3 v4 h h h ) ( a ) ( a ) where a denotes an element of the original matrix A, h denotes a modified element of the upper Hessenberg matrix H, and vi denotes an element of the vector defining H(i). This file is a slight modification of LAPACK-3.0's DGEHRD subroutine incorporating improvements proposed by Quintana-Orti and Van de Geijn (2005). ===================================================================== Test the input parameters */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --tau; --work; /* Function Body */ *info = 0; /* Computing MIN */ i__1 = 64, i__2 = ilaenv_(&c__1, "DGEHRD", " ", n, ilo, ihi, &c_n1, ( ftnlen)6, (ftnlen)1); nb = min(i__1,i__2); lwkopt = *n * nb; work[1] = (doublereal) lwkopt; lquery = *lwork == -1; if (*n < 0) { *info = -1; } else if (*ilo < 1 || *ilo > max(1,*n)) { *info = -2; } else if (*ihi < min(*ilo,*n) || *ihi > *n) { *info = -3; } else if (*lda < max(1,*n)) { *info = -5; } else if (*lwork < max(1,*n) && ! lquery) { *info = -8; } if (*info != 0) { i__1 = -(*info); xerbla_("DGEHRD", &i__1); return 0; } else if (lquery) { return 0; } /* Set elements 1:ILO-1 and IHI:N-1 of TAU to zero */ i__1 = *ilo - 1; for (i__ = 1; i__ <= i__1; ++i__) { tau[i__] = 0.; /* L10: */ } i__1 = *n - 1; for (i__ = max(1,*ihi); i__ <= i__1; ++i__) { tau[i__] = 0.; /* L20: */ } /* Quick return if possible */ nh = *ihi - *ilo + 1; if (nh <= 1) { work[1] = 1.; return 0; } /* Determine the block size Computing MIN */ i__1 = 64, i__2 = ilaenv_(&c__1, "DGEHRD", " ", n, ilo, ihi, &c_n1, ( ftnlen)6, (ftnlen)1); nb = min(i__1,i__2); nbmin = 2; iws = 1; if (nb > 1 && nb < nh) { /* Determine when to cross over from blocked to unblocked code (last block is always handled by unblocked code) Computing MAX */ i__1 = nb, i__2 = ilaenv_(&c__3, "DGEHRD", " ", n, ilo, ihi, &c_n1, ( ftnlen)6, (ftnlen)1); nx = max(i__1,i__2); if (nx < nh) { /* Determine if workspace is large enough for blocked code */ iws = *n * nb; if (*lwork < iws) { /* Not enough workspace to use optimal NB: determine the minimum value of NB, and reduce NB or force use of unblocked code Computing MAX */ i__1 = 2, i__2 = ilaenv_(&c__2, "DGEHRD", " ", n, ilo, ihi, & c_n1, (ftnlen)6, (ftnlen)1); nbmin = max(i__1,i__2); if (*lwork >= *n * nbmin) { nb = *lwork / *n; } else { nb = 1; } } } } ldwork = *n; if (nb < nbmin || nb >= nh) { /* Use unblocked code below */ i__ = *ilo; } else { /* Use blocked code */ i__1 = *ihi - 1 - nx; i__2 = nb; for (i__ = *ilo; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) { /* Computing MIN */ i__3 = nb, i__4 = *ihi - i__; ib = min(i__3,i__4); /* Reduce columns i:i+ib-1 to Hessenberg form, returning the matrices V and T of the block reflector H = I - V*T*V' which performs the reduction, and also the matrix Y = A*V*T */ dlahr2_(ihi, &i__, &ib, &a[i__ * a_dim1 + 1], lda, &tau[i__], t, & c__65, &work[1], &ldwork); /* Apply the block reflector H to A(1:ihi,i+ib:ihi) from the right, computing A := A - Y * V'. 
V(i+ib,ib-1) must be set to 1 */ ei = a[i__ + ib + (i__ + ib - 1) * a_dim1]; a[i__ + ib + (i__ + ib - 1) * a_dim1] = 1.; i__3 = *ihi - i__ - ib + 1; dgemm_("No transpose", "Transpose", ihi, &i__3, &ib, &c_b151, & work[1], &ldwork, &a[i__ + ib + i__ * a_dim1], lda, & c_b15, &a[(i__ + ib) * a_dim1 + 1], lda); a[i__ + ib + (i__ + ib - 1) * a_dim1] = ei; /* Apply the block reflector H to A(1:i,i+1:i+ib-1) from the right */ i__3 = ib - 1; dtrmm_("Right", "Lower", "Transpose", "Unit", &i__, &i__3, &c_b15, &a[i__ + 1 + i__ * a_dim1], lda, &work[1], &ldwork); i__3 = ib - 2; for (j = 0; j <= i__3; ++j) { daxpy_(&i__, &c_b151, &work[ldwork * j + 1], &c__1, &a[(i__ + j + 1) * a_dim1 + 1], &c__1); /* L30: */ } /* Apply the block reflector H to A(i+1:ihi,i+ib:n) from the left */ i__3 = *ihi - i__; i__4 = *n - i__ - ib + 1; dlarfb_("Left", "Transpose", "Forward", "Columnwise", &i__3, & i__4, &ib, &a[i__ + 1 + i__ * a_dim1], lda, t, &c__65, &a[ i__ + 1 + (i__ + ib) * a_dim1], lda, &work[1], &ldwork); /* L40: */ } } /* Use unblocked code to reduce the rest of the matrix */ dgehd2_(n, &i__, ihi, &a[a_offset], lda, &tau[1], &work[1], &iinfo); work[1] = (doublereal) iws; return 0; /* End of DGEHRD */ } /* dgehrd_ */ /* Subroutine */ int dgelq2_(integer *m, integer *n, doublereal *a, integer * lda, doublereal *tau, doublereal *work, integer *info) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2, i__3; /* Local variables */ static integer i__, k; extern /* Subroutine */ int dlarf_(char *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, doublereal *), dlarfg_(integer *, doublereal *, doublereal *, integer *, doublereal *), xerbla_(char *, integer *); static doublereal aii; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DGELQ2 computes an LQ factorization of a real m by n matrix A: A = L * Q. Arguments ========= M (input) INTEGER The number of rows of the matrix A. M >= 0. N (input) INTEGER The number of columns of the matrix A. N >= 0. A (input/output) DOUBLE PRECISION array, dimension (LDA,N) On entry, the m by n matrix A. On exit, the elements on and below the diagonal of the array contain the m by min(m,n) lower trapezoidal matrix L (L is lower triangular if m <= n); the elements above the diagonal, with the array TAU, represent the orthogonal matrix Q as a product of elementary reflectors (see Further Details). LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,M). TAU (output) DOUBLE PRECISION array, dimension (min(M,N)) The scalar factors of the elementary reflectors (see Further Details). WORK (workspace) DOUBLE PRECISION array, dimension (M) INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value Further Details =============== The matrix Q is represented as a product of elementary reflectors Q = H(k) . . . H(2) H(1), where k = min(m,n). Each H(i) has the form H(i) = I - tau * v * v' where tau is a real scalar, and v is a real vector with v(1:i-1) = 0 and v(i) = 1; v(i+1:n) is stored on exit in A(i,i+1:n), and tau in TAU(i). 
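   Illustrative call sketch (not part of the reference LAPACK text;
   names and sizes are assumptions for a 4-by-6 matrix stored
   column-major in an f2c-style flat array).  DGELQ2 is the unblocked
   kernel used by DGELQF below; it needs only a fixed WORK array of
   length M and takes no LWORK argument:

       integer m = 4, n = 6, lda = 4, info;
       doublereal a[4 * 6], tau[4], work[4];

       (fill a with the matrix to be factored, column by column)

       dgelq2_(&m, &n, a, &lda, tau, work, &info);
       (info == 0 on success; L is the lower trapezoid of a, and Q is
        encoded by the reflectors stored above the diagonal and in tau)
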
===================================================================== Test the input arguments */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --tau; --work; /* Function Body */ *info = 0; if (*m < 0) { *info = -1; } else if (*n < 0) { *info = -2; } else if (*lda < max(1,*m)) { *info = -4; } if (*info != 0) { i__1 = -(*info); xerbla_("DGELQ2", &i__1); return 0; } k = min(*m,*n); i__1 = k; for (i__ = 1; i__ <= i__1; ++i__) { /* Generate elementary reflector H(i) to annihilate A(i,i+1:n) */ i__2 = *n - i__ + 1; /* Computing MIN */ i__3 = i__ + 1; dlarfg_(&i__2, &a[i__ + i__ * a_dim1], &a[i__ + min(i__3,*n) * a_dim1] , lda, &tau[i__]); if (i__ < *m) { /* Apply H(i) to A(i+1:m,i:n) from the right */ aii = a[i__ + i__ * a_dim1]; a[i__ + i__ * a_dim1] = 1.; i__2 = *m - i__; i__3 = *n - i__ + 1; dlarf_("Right", &i__2, &i__3, &a[i__ + i__ * a_dim1], lda, &tau[ i__], &a[i__ + 1 + i__ * a_dim1], lda, &work[1]); a[i__ + i__ * a_dim1] = aii; } /* L10: */ } return 0; /* End of DGELQ2 */ } /* dgelq2_ */ /* Subroutine */ int dgelqf_(integer *m, integer *n, doublereal *a, integer * lda, doublereal *tau, doublereal *work, integer *lwork, integer *info) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2, i__3, i__4; /* Local variables */ static integer i__, k, nbmin, iinfo; extern /* Subroutine */ int dgelq2_(integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *); static integer ib, nb; extern /* Subroutine */ int dlarfb_(char *, char *, char *, char *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *); static integer nx; extern /* Subroutine */ int dlarft_(char *, char *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *); extern integer ilaenv_(integer *, char *, char *, integer *, integer *, integer *, integer *, ftnlen, ftnlen); static integer ldwork, lwkopt; static logical lquery; static integer iws; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DGELQF computes an LQ factorization of a real M-by-N matrix A: A = L * Q. Arguments ========= M (input) INTEGER The number of rows of the matrix A. M >= 0. N (input) INTEGER The number of columns of the matrix A. N >= 0. A (input/output) DOUBLE PRECISION array, dimension (LDA,N) On entry, the M-by-N matrix A. On exit, the elements on and below the diagonal of the array contain the m-by-min(m,n) lower trapezoidal matrix L (L is lower triangular if m <= n); the elements above the diagonal, with the array TAU, represent the orthogonal matrix Q as a product of elementary reflectors (see Further Details). LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,M). TAU (output) DOUBLE PRECISION array, dimension (min(M,N)) The scalar factors of the elementary reflectors (see Further Details). WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) On exit, if INFO = 0, WORK(1) returns the optimal LWORK. LWORK (input) INTEGER The dimension of the array WORK. LWORK >= max(1,M). For optimum performance LWORK >= M*NB, where NB is the optimal blocksize. If LWORK = -1, then a workspace query is assumed; the routine only calculates the optimal size of the WORK array, returns this value as the first entry of the WORK array, and no error message related to LWORK is issued by XERBLA. 
INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value Further Details =============== The matrix Q is represented as a product of elementary reflectors Q = H(k) . . . H(2) H(1), where k = min(m,n). Each H(i) has the form H(i) = I - tau * v * v' where tau is a real scalar, and v is a real vector with v(1:i-1) = 0 and v(i) = 1; v(i+1:n) is stored on exit in A(i,i+1:n), and tau in TAU(i). ===================================================================== Test the input arguments */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --tau; --work; /* Function Body */ *info = 0; nb = ilaenv_(&c__1, "DGELQF", " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen) 1); lwkopt = *m * nb; work[1] = (doublereal) lwkopt; lquery = *lwork == -1; if (*m < 0) { *info = -1; } else if (*n < 0) { *info = -2; } else if (*lda < max(1,*m)) { *info = -4; } else if (*lwork < max(1,*m) && ! lquery) { *info = -7; } if (*info != 0) { i__1 = -(*info); xerbla_("DGELQF", &i__1); return 0; } else if (lquery) { return 0; } /* Quick return if possible */ k = min(*m,*n); if (k == 0) { work[1] = 1.; return 0; } nbmin = 2; nx = 0; iws = *m; if (nb > 1 && nb < k) { /* Determine when to cross over from blocked to unblocked code. Computing MAX */ i__1 = 0, i__2 = ilaenv_(&c__3, "DGELQF", " ", m, n, &c_n1, &c_n1, ( ftnlen)6, (ftnlen)1); nx = max(i__1,i__2); if (nx < k) { /* Determine if workspace is large enough for blocked code. */ ldwork = *m; iws = ldwork * nb; if (*lwork < iws) { /* Not enough workspace to use optimal NB: reduce NB and determine the minimum value of NB. */ nb = *lwork / ldwork; /* Computing MAX */ i__1 = 2, i__2 = ilaenv_(&c__2, "DGELQF", " ", m, n, &c_n1, & c_n1, (ftnlen)6, (ftnlen)1); nbmin = max(i__1,i__2); } } } if (nb >= nbmin && nb < k && nx < k) { /* Use blocked code initially */ i__1 = k - nx; i__2 = nb; for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) { /* Computing MIN */ i__3 = k - i__ + 1; ib = min(i__3,nb); /* Compute the LQ factorization of the current block A(i:i+ib-1,i:n) */ i__3 = *n - i__ + 1; dgelq2_(&ib, &i__3, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[ 1], &iinfo); if (i__ + ib <= *m) { /* Form the triangular factor of the block reflector H = H(i) H(i+1) . . . H(i+ib-1) */ i__3 = *n - i__ + 1; dlarft_("Forward", "Rowwise", &i__3, &ib, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[1], &ldwork); /* Apply H to A(i+ib:m,i:n) from the right */ i__3 = *m - i__ - ib + 1; i__4 = *n - i__ + 1; dlarfb_("Right", "No transpose", "Forward", "Rowwise", &i__3, &i__4, &ib, &a[i__ + i__ * a_dim1], lda, &work[1], & ldwork, &a[i__ + ib + i__ * a_dim1], lda, &work[ib + 1], &ldwork); } /* L10: */ } } else { i__ = 1; } /* Use unblocked code to factor the last or only block. 
*/ if (i__ <= k) { i__2 = *m - i__ + 1; i__1 = *n - i__ + 1; dgelq2_(&i__2, &i__1, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[1] , &iinfo); } work[1] = (doublereal) iws; return 0; /* End of DGELQF */ } /* dgelqf_ */ /* Subroutine */ int dgelsd_(integer *m, integer *n, integer *nrhs, doublereal *a, integer *lda, doublereal *b, integer *ldb, doublereal * s, doublereal *rcond, integer *rank, doublereal *work, integer *lwork, integer *iwork, integer *info) { /* System generated locals */ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3, i__4; /* Builtin functions */ double log(doublereal); /* Local variables */ static doublereal anrm, bnrm; static integer itau, nlvl, iascl, ibscl; static doublereal sfmin; static integer minmn, maxmn, itaup, itauq, mnthr, nwork; extern /* Subroutine */ int dlabad_(doublereal *, doublereal *); static integer ie, il; extern /* Subroutine */ int dgebrd_(integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, integer *, integer *); static integer mm; extern doublereal dlange_(char *, integer *, integer *, doublereal *, integer *, doublereal *); extern /* Subroutine */ int dgelqf_(integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, integer *), dlalsd_(char *, integer *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *, integer *), dlascl_(char *, integer *, integer *, doublereal *, doublereal *, integer *, integer *, doublereal *, integer *, integer *), dgeqrf_( integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, integer *), dlacpy_(char *, integer *, integer *, doublereal *, integer *, doublereal *, integer *), dlaset_(char *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *); extern integer ilaenv_(integer *, char *, char *, integer *, integer *, integer *, integer *, ftnlen, ftnlen); static doublereal bignum; extern /* Subroutine */ int dormbr_(char *, char *, char *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, integer *); static integer wlalsd; extern /* Subroutine */ int dormlq_(char *, char *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, integer *); static integer ldwork; extern /* Subroutine */ int dormqr_(char *, char *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, integer *); static integer minwrk, maxwrk; static doublereal smlnum; static logical lquery; static integer smlsiz; static doublereal eps; /* -- LAPACK driver routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DGELSD computes the minimum-norm solution to a real linear least squares problem: minimize 2-norm(| b - A*x |) using the singular value decomposition (SVD) of A. A is an M-by-N matrix which may be rank-deficient. Several right hand side vectors b and solution vectors x can be handled in a single call; they are stored as the columns of the M-by-NRHS right hand side matrix B and the N-by-NRHS solution matrix X. 
The problem is solved in three steps: (1) Reduce the coefficient matrix A to bidiagonal form with Householder transformations, reducing the original problem into a "bidiagonal least squares problem" (BLS) (2) Solve the BLS using a divide and conquer approach. (3) Apply back all the Householder tranformations to solve the original least squares problem. The effective rank of A is determined by treating as zero those singular values which are less than RCOND times the largest singular value. The divide and conquer algorithm makes very mild assumptions about floating point arithmetic. It will work on machines with a guard digit in add/subtract, or on those binary machines without guard digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or Cray-2. It could conceivably fail on hexadecimal or decimal machines without guard digits, but we know of none. Arguments ========= M (input) INTEGER The number of rows of A. M >= 0. N (input) INTEGER The number of columns of A. N >= 0. NRHS (input) INTEGER The number of right hand sides, i.e., the number of columns of the matrices B and X. NRHS >= 0. A (input) DOUBLE PRECISION array, dimension (LDA,N) On entry, the M-by-N matrix A. On exit, A has been destroyed. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,M). B (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS) On entry, the M-by-NRHS right hand side matrix B. On exit, B is overwritten by the N-by-NRHS solution matrix X. If m >= n and RANK = n, the residual sum-of-squares for the solution in the i-th column is given by the sum of squares of elements n+1:m in that column. LDB (input) INTEGER The leading dimension of the array B. LDB >= max(1,max(M,N)). S (output) DOUBLE PRECISION array, dimension (min(M,N)) The singular values of A in decreasing order. The condition number of A in the 2-norm = S(1)/S(min(m,n)). RCOND (input) DOUBLE PRECISION RCOND is used to determine the effective rank of A. Singular values S(i) <= RCOND*S(1) are treated as zero. If RCOND < 0, machine precision is used instead. RANK (output) INTEGER The effective rank of A, i.e., the number of singular values which are greater than RCOND*S(1). WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) On exit, if INFO = 0, WORK(1) returns the optimal LWORK. LWORK (input) INTEGER The dimension of the array WORK. LWORK must be at least 1. The exact minimum amount of workspace needed depends on M, N and NRHS. As long as LWORK is at least 12*N + 2*N*SMLSIZ + 8*N*NLVL + N*NRHS + (SMLSIZ+1)**2, if M is greater than or equal to N or 12*M + 2*M*SMLSIZ + 8*M*NLVL + M*NRHS + (SMLSIZ+1)**2, if M is less than N, the code will execute correctly. SMLSIZ is returned by ILAENV and is equal to the maximum size of the subproblems at the bottom of the computation tree (usually about 25), and NLVL = MAX( 0, INT( LOG_2( MIN( M,N )/(SMLSIZ+1) ) ) + 1 ) For good performance, LWORK should generally be larger. If LWORK = -1, then a workspace query is assumed; the routine only calculates the optimal size of the WORK array, returns this value as the first entry of the WORK array, and no error message related to LWORK is issued by XERBLA. IWORK (workspace) INTEGER array, dimension (MAX(1,LIWORK)) LIWORK >= 3 * MINMN * NLVL + 11 * MINMN, where MINMN = MIN( M,N ). INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value. 
> 0: the algorithm for computing the SVD failed to converge; if INFO = i, i off-diagonal elements of an intermediate bidiagonal form did not converge to zero. Further Details =============== Based on contributions by Ming Gu and Ren-Cang Li, Computer Science Division, University of California at Berkeley, USA Osni Marques, LBNL/NERSC, USA ===================================================================== Test the input arguments. */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; b_dim1 = *ldb; b_offset = 1 + b_dim1 * 1; b -= b_offset; --s; --work; --iwork; /* Function Body */ *info = 0; minmn = min(*m,*n); maxmn = max(*m,*n); mnthr = ilaenv_(&c__6, "DGELSD", " ", m, n, nrhs, &c_n1, (ftnlen)6, ( ftnlen)1); lquery = *lwork == -1; if (*m < 0) { *info = -1; } else if (*n < 0) { *info = -2; } else if (*nrhs < 0) { *info = -3; } else if (*lda < max(1,*m)) { *info = -5; } else if (*ldb < max(1,maxmn)) { *info = -7; } smlsiz = ilaenv_(&c__9, "DGELSD", " ", &c__0, &c__0, &c__0, &c__0, ( ftnlen)6, (ftnlen)1); /* Compute workspace. (Note: Comments in the code beginning "Workspace:" describe the minimal amount of workspace needed at that point in the code, as well as the preferred amount for good performance. NB refers to the optimal block size for the immediately following subroutine, as returned by ILAENV.) */ minwrk = 1; minmn = max(1,minmn); /* Computing MAX */ i__1 = (integer) (log((doublereal) minmn / (doublereal) (smlsiz + 1)) / log(2.)) + 1; nlvl = max(i__1,0); if (*info == 0) { maxwrk = 0; mm = *m; if (*m >= *n && *m >= mnthr) { /* Path 1a - overdetermined, with many more rows than columns. */ mm = *n; /* Computing MAX */ i__1 = maxwrk, i__2 = *n + *n * ilaenv_(&c__1, "DGEQRF", " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1); maxwrk = max(i__1,i__2); /* Computing MAX */ i__1 = maxwrk, i__2 = *n + *nrhs * ilaenv_(&c__1, "DORMQR", "LT", m, nrhs, n, &c_n1, (ftnlen)6, (ftnlen)2); maxwrk = max(i__1,i__2); } if (*m >= *n) { /* Path 1 - overdetermined or exactly determined. Computing MAX */ i__1 = maxwrk, i__2 = *n * 3 + (mm + *n) * ilaenv_(&c__1, "DGEBRD" , " ", &mm, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1); maxwrk = max(i__1,i__2); /* Computing MAX */ i__1 = maxwrk, i__2 = *n * 3 + *nrhs * ilaenv_(&c__1, "DORMBR", "QLT", &mm, nrhs, n, &c_n1, (ftnlen)6, (ftnlen)3); maxwrk = max(i__1,i__2); /* Computing MAX */ i__1 = maxwrk, i__2 = *n * 3 + (*n - 1) * ilaenv_(&c__1, "DORMBR", "PLN", n, nrhs, n, &c_n1, (ftnlen)6, (ftnlen)3); maxwrk = max(i__1,i__2); /* Computing 2nd power */ i__1 = smlsiz + 1; wlalsd = *n * 9 + (*n << 1) * smlsiz + (*n << 3) * nlvl + *n * * nrhs + i__1 * i__1; /* Computing MAX */ i__1 = maxwrk, i__2 = *n * 3 + wlalsd; maxwrk = max(i__1,i__2); /* Computing MAX */ i__1 = *n * 3 + mm, i__2 = *n * 3 + *nrhs, i__1 = max(i__1,i__2), i__2 = *n * 3 + wlalsd; minwrk = max(i__1,i__2); } if (*n > *m) { /* Computing 2nd power */ i__1 = smlsiz + 1; wlalsd = *m * 9 + (*m << 1) * smlsiz + (*m << 3) * nlvl + *m * * nrhs + i__1 * i__1; if (*n >= mnthr) { /* Path 2a - underdetermined, with many more columns than rows. 
*/ maxwrk = *m + *m * ilaenv_(&c__1, "DGELQF", " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1); /* Computing MAX */ i__1 = maxwrk, i__2 = *m * *m + (*m << 2) + (*m << 1) * ilaenv_(&c__1, "DGEBRD", " ", m, m, &c_n1, &c_n1, ( ftnlen)6, (ftnlen)1); maxwrk = max(i__1,i__2); /* Computing MAX */ i__1 = maxwrk, i__2 = *m * *m + (*m << 2) + *nrhs * ilaenv_(& c__1, "DORMBR", "QLT", m, nrhs, m, &c_n1, (ftnlen)6, ( ftnlen)3); maxwrk = max(i__1,i__2); /* Computing MAX */ i__1 = maxwrk, i__2 = *m * *m + (*m << 2) + (*m - 1) * ilaenv_(&c__1, "DORMBR", "PLN", m, nrhs, m, &c_n1, ( ftnlen)6, (ftnlen)3); maxwrk = max(i__1,i__2); if (*nrhs > 1) { /* Computing MAX */ i__1 = maxwrk, i__2 = *m * *m + *m + *m * *nrhs; maxwrk = max(i__1,i__2); } else { /* Computing MAX */ i__1 = maxwrk, i__2 = *m * *m + (*m << 1); maxwrk = max(i__1,i__2); } /* Computing MAX */ i__1 = maxwrk, i__2 = *m + *nrhs * ilaenv_(&c__1, "DORMLQ", "LT", n, nrhs, m, &c_n1, (ftnlen)6, (ftnlen)2); maxwrk = max(i__1,i__2); /* Computing MAX */ i__1 = maxwrk, i__2 = *m * *m + (*m << 2) + wlalsd; maxwrk = max(i__1,i__2); } else { /* Path 2 - remaining underdetermined cases. */ maxwrk = *m * 3 + (*n + *m) * ilaenv_(&c__1, "DGEBRD", " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1); /* Computing MAX */ i__1 = maxwrk, i__2 = *m * 3 + *nrhs * ilaenv_(&c__1, "DORMBR" , "QLT", m, nrhs, n, &c_n1, (ftnlen)6, (ftnlen)3); maxwrk = max(i__1,i__2); /* Computing MAX */ i__1 = maxwrk, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR", "PLN", n, nrhs, m, &c_n1, (ftnlen)6, (ftnlen)3); maxwrk = max(i__1,i__2); /* Computing MAX */ i__1 = maxwrk, i__2 = *m * 3 + wlalsd; maxwrk = max(i__1,i__2); } /* Computing MAX */ i__1 = *m * 3 + *nrhs, i__2 = *m * 3 + *m, i__1 = max(i__1,i__2), i__2 = *m * 3 + wlalsd; minwrk = max(i__1,i__2); } minwrk = min(minwrk,maxwrk); work[1] = (doublereal) maxwrk; if (*lwork < minwrk && ! lquery) { *info = -12; } } if (*info != 0) { i__1 = -(*info); xerbla_("DGELSD", &i__1); return 0; } else if (lquery) { goto L10; } /* Quick return if possible. */ if (*m == 0 || *n == 0) { *rank = 0; return 0; } /* Get machine parameters. */ eps = PRECISION; sfmin = SAFEMINIMUM; smlnum = sfmin / eps; bignum = 1. / smlnum; dlabad_(&smlnum, &bignum); /* Scale A if max entry outside range [SMLNUM,BIGNUM]. */ anrm = dlange_("M", m, n, &a[a_offset], lda, &work[1]); iascl = 0; if (anrm > 0. && anrm < smlnum) { /* Scale matrix norm up to SMLNUM. */ dlascl_("G", &c__0, &c__0, &anrm, &smlnum, m, n, &a[a_offset], lda, info); iascl = 1; } else if (anrm > bignum) { /* Scale matrix norm down to BIGNUM. */ dlascl_("G", &c__0, &c__0, &anrm, &bignum, m, n, &a[a_offset], lda, info); iascl = 2; } else if (anrm == 0.) { /* Matrix all zero. Return zero solution. */ i__1 = max(*m,*n); dlaset_("F", &i__1, nrhs, &c_b29, &c_b29, &b[b_offset], ldb); dlaset_("F", &minmn, &c__1, &c_b29, &c_b29, &s[1], &c__1); *rank = 0; goto L10; } /* Scale B if max entry outside range [SMLNUM,BIGNUM]. */ bnrm = dlange_("M", m, nrhs, &b[b_offset], ldb, &work[1]); ibscl = 0; if (bnrm > 0. && bnrm < smlnum) { /* Scale matrix norm up to SMLNUM. */ dlascl_("G", &c__0, &c__0, &bnrm, &smlnum, m, nrhs, &b[b_offset], ldb, info); ibscl = 1; } else if (bnrm > bignum) { /* Scale matrix norm down to BIGNUM. */ dlascl_("G", &c__0, &c__0, &bnrm, &bignum, m, nrhs, &b[b_offset], ldb, info); ibscl = 2; } /* If M < N make sure certain entries of B are zero. */ if (*m < *n) { i__1 = *n - *m; dlaset_("F", &i__1, nrhs, &c_b29, &c_b29, &b[*m + 1 + b_dim1], ldb); } /* Overdetermined case. 
*/ if (*m >= *n) { /* Path 1 - overdetermined or exactly determined. */ mm = *m; if (*m >= mnthr) { /* Path 1a - overdetermined, with many more rows than columns. */ mm = *n; itau = 1; nwork = itau + *n; /* Compute A=Q*R. (Workspace: need 2*N, prefer N+N*NB) */ i__1 = *lwork - nwork + 1; dgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &i__1, info); /* Multiply B by transpose(Q). (Workspace: need N+NRHS, prefer N+NRHS*NB) */ i__1 = *lwork - nwork + 1; dormqr_("L", "T", m, nrhs, n, &a[a_offset], lda, &work[itau], &b[ b_offset], ldb, &work[nwork], &i__1, info); /* Zero out below R. */ if (*n > 1) { i__1 = *n - 1; i__2 = *n - 1; dlaset_("L", &i__1, &i__2, &c_b29, &c_b29, &a[a_dim1 + 2], lda); } } ie = 1; itauq = ie + *n; itaup = itauq + *n; nwork = itaup + *n; /* Bidiagonalize R in A. (Workspace: need 3*N+MM, prefer 3*N+(MM+N)*NB) */ i__1 = *lwork - nwork + 1; dgebrd_(&mm, n, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], & work[itaup], &work[nwork], &i__1, info); /* Multiply B by transpose of left bidiagonalizing vectors of R. (Workspace: need 3*N+NRHS, prefer 3*N+NRHS*NB) */ i__1 = *lwork - nwork + 1; dormbr_("Q", "L", "T", &mm, nrhs, n, &a[a_offset], lda, &work[itauq], &b[b_offset], ldb, &work[nwork], &i__1, info); /* Solve the bidiagonal least squares problem. */ dlalsd_("U", &smlsiz, n, nrhs, &s[1], &work[ie], &b[b_offset], ldb, rcond, rank, &work[nwork], &iwork[1], info); if (*info != 0) { goto L10; } /* Multiply B by right bidiagonalizing vectors of R. */ i__1 = *lwork - nwork + 1; dormbr_("P", "L", "N", n, nrhs, n, &a[a_offset], lda, &work[itaup], & b[b_offset], ldb, &work[nwork], &i__1, info); } else /* if(complicated condition) */ { /* Computing MAX */ i__1 = *m, i__2 = (*m << 1) - 4, i__1 = max(i__1,i__2), i__1 = max( i__1,*nrhs), i__2 = *n - *m * 3, i__1 = max(i__1,i__2); if (*n >= mnthr && *lwork >= (*m << 2) + *m * *m + max(i__1,wlalsd)) { /* Path 2a - underdetermined, with many more columns than rows and sufficient workspace for an efficient algorithm. */ ldwork = *m; /* Computing MAX Computing MAX */ i__3 = *m, i__4 = (*m << 1) - 4, i__3 = max(i__3,i__4), i__3 = max(i__3,*nrhs), i__4 = *n - *m * 3; i__1 = (*m << 2) + *m * *lda + max(i__3,i__4), i__2 = *m * *lda + *m + *m * *nrhs, i__1 = max(i__1,i__2), i__2 = (*m << 2) + *m * *lda + wlalsd; if (*lwork >= max(i__1,i__2)) { ldwork = *lda; } itau = 1; nwork = *m + 1; /* Compute A=L*Q. (Workspace: need 2*M, prefer M+M*NB) */ i__1 = *lwork - nwork + 1; dgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &i__1, info); il = nwork; /* Copy L to WORK(IL), zeroing out above its diagonal. */ dlacpy_("L", m, m, &a[a_offset], lda, &work[il], &ldwork); i__1 = *m - 1; i__2 = *m - 1; dlaset_("U", &i__1, &i__2, &c_b29, &c_b29, &work[il + ldwork], & ldwork); ie = il + ldwork * *m; itauq = ie + *m; itaup = itauq + *m; nwork = itaup + *m; /* Bidiagonalize L in WORK(IL). (Workspace: need M*M+5*M, prefer M*M+4*M+2*M*NB) */ i__1 = *lwork - nwork + 1; dgebrd_(m, m, &work[il], &ldwork, &s[1], &work[ie], &work[itauq], &work[itaup], &work[nwork], &i__1, info); /* Multiply B by transpose of left bidiagonalizing vectors of L. (Workspace: need M*M+4*M+NRHS, prefer M*M+4*M+NRHS*NB) */ i__1 = *lwork - nwork + 1; dormbr_("Q", "L", "T", m, nrhs, m, &work[il], &ldwork, &work[ itauq], &b[b_offset], ldb, &work[nwork], &i__1, info); /* Solve the bidiagonal least squares problem. 
*/ dlalsd_("U", &smlsiz, m, nrhs, &s[1], &work[ie], &b[b_offset], ldb, rcond, rank, &work[nwork], &iwork[1], info); if (*info != 0) { goto L10; } /* Multiply B by right bidiagonalizing vectors of L. */ i__1 = *lwork - nwork + 1; dormbr_("P", "L", "N", m, nrhs, m, &work[il], &ldwork, &work[ itaup], &b[b_offset], ldb, &work[nwork], &i__1, info); /* Zero out below first M rows of B. */ i__1 = *n - *m; dlaset_("F", &i__1, nrhs, &c_b29, &c_b29, &b[*m + 1 + b_dim1], ldb); nwork = itau + *m; /* Multiply transpose(Q) by B. (Workspace: need M+NRHS, prefer M+NRHS*NB) */ i__1 = *lwork - nwork + 1; dormlq_("L", "T", n, nrhs, m, &a[a_offset], lda, &work[itau], &b[ b_offset], ldb, &work[nwork], &i__1, info); } else { /* Path 2 - remaining underdetermined cases. */ ie = 1; itauq = ie + *m; itaup = itauq + *m; nwork = itaup + *m; /* Bidiagonalize A. (Workspace: need 3*M+N, prefer 3*M+(M+N)*NB) */ i__1 = *lwork - nwork + 1; dgebrd_(m, n, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], & work[itaup], &work[nwork], &i__1, info); /* Multiply B by transpose of left bidiagonalizing vectors. (Workspace: need 3*M+NRHS, prefer 3*M+NRHS*NB) */ i__1 = *lwork - nwork + 1; dormbr_("Q", "L", "T", m, nrhs, n, &a[a_offset], lda, &work[itauq] , &b[b_offset], ldb, &work[nwork], &i__1, info); /* Solve the bidiagonal least squares problem. */ dlalsd_("L", &smlsiz, m, nrhs, &s[1], &work[ie], &b[b_offset], ldb, rcond, rank, &work[nwork], &iwork[1], info); if (*info != 0) { goto L10; } /* Multiply B by right bidiagonalizing vectors of A. */ i__1 = *lwork - nwork + 1; dormbr_("P", "L", "N", n, nrhs, m, &a[a_offset], lda, &work[itaup] , &b[b_offset], ldb, &work[nwork], &i__1, info); } } /* Undo scaling. */ if (iascl == 1) { dlascl_("G", &c__0, &c__0, &anrm, &smlnum, n, nrhs, &b[b_offset], ldb, info); dlascl_("G", &c__0, &c__0, &smlnum, &anrm, &minmn, &c__1, &s[1], & minmn, info); } else if (iascl == 2) { dlascl_("G", &c__0, &c__0, &anrm, &bignum, n, nrhs, &b[b_offset], ldb, info); dlascl_("G", &c__0, &c__0, &bignum, &anrm, &minmn, &c__1, &s[1], & minmn, info); } if (ibscl == 1) { dlascl_("G", &c__0, &c__0, &smlnum, &bnrm, n, nrhs, &b[b_offset], ldb, info); } else if (ibscl == 2) { dlascl_("G", &c__0, &c__0, &bignum, &bnrm, n, nrhs, &b[b_offset], ldb, info); } L10: work[1] = (doublereal) maxwrk; return 0; /* End of DGELSD */ } /* dgelsd_ */ /* Subroutine */ int dgeqr2_(integer *m, integer *n, doublereal *a, integer * lda, doublereal *tau, doublereal *work, integer *info) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2, i__3; /* Local variables */ static integer i__, k; extern /* Subroutine */ int dlarf_(char *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, doublereal *), dlarfg_(integer *, doublereal *, doublereal *, integer *, doublereal *), xerbla_(char *, integer *); static doublereal aii; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DGEQR2 computes a QR factorization of a real m by n matrix A: A = Q * R. Arguments ========= M (input) INTEGER The number of rows of the matrix A. M >= 0. N (input) INTEGER The number of columns of the matrix A. N >= 0. A (input/output) DOUBLE PRECISION array, dimension (LDA,N) On entry, the m by n matrix A. 
On exit, the elements on and above the diagonal of the array contain the min(m,n) by n upper trapezoidal matrix R (R is upper triangular if m >= n); the elements below the diagonal, with the array TAU, represent the orthogonal matrix Q as a product of elementary reflectors (see Further Details). LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,M). TAU (output) DOUBLE PRECISION array, dimension (min(M,N)) The scalar factors of the elementary reflectors (see Further Details). WORK (workspace) DOUBLE PRECISION array, dimension (N) INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value Further Details =============== The matrix Q is represented as a product of elementary reflectors Q = H(1) H(2) . . . H(k), where k = min(m,n). Each H(i) has the form H(i) = I - tau * v * v' where tau is a real scalar, and v is a real vector with v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i), and tau in TAU(i). ===================================================================== Test the input arguments */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --tau; --work; /* Function Body */ *info = 0; if (*m < 0) { *info = -1; } else if (*n < 0) { *info = -2; } else if (*lda < max(1,*m)) { *info = -4; } if (*info != 0) { i__1 = -(*info); xerbla_("DGEQR2", &i__1); return 0; } k = min(*m,*n); i__1 = k; for (i__ = 1; i__ <= i__1; ++i__) { /* Generate elementary reflector H(i) to annihilate A(i+1:m,i) */ i__2 = *m - i__ + 1; /* Computing MIN */ i__3 = i__ + 1; dlarfg_(&i__2, &a[i__ + i__ * a_dim1], &a[min(i__3,*m) + i__ * a_dim1] , &c__1, &tau[i__]); if (i__ < *n) { /* Apply H(i) to A(i:m,i+1:n) from the left */ aii = a[i__ + i__ * a_dim1]; a[i__ + i__ * a_dim1] = 1.; i__2 = *m - i__ + 1; i__3 = *n - i__; dlarf_("Left", &i__2, &i__3, &a[i__ + i__ * a_dim1], &c__1, &tau[ i__], &a[i__ + (i__ + 1) * a_dim1], lda, &work[1]); a[i__ + i__ * a_dim1] = aii; } /* L10: */ } return 0; /* End of DGEQR2 */ } /* dgeqr2_ */ /* Subroutine */ int dgeqrf_(integer *m, integer *n, doublereal *a, integer * lda, doublereal *tau, doublereal *work, integer *lwork, integer *info) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2, i__3, i__4; /* Local variables */ static integer i__, k, nbmin, iinfo; extern /* Subroutine */ int dgeqr2_(integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *); static integer ib, nb; extern /* Subroutine */ int dlarfb_(char *, char *, char *, char *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *); static integer nx; extern /* Subroutine */ int dlarft_(char *, char *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *); extern integer ilaenv_(integer *, char *, char *, integer *, integer *, integer *, integer *, ftnlen, ftnlen); static integer ldwork, lwkopt; static logical lquery; static integer iws; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DGEQRF computes a QR factorization of a real M-by-N matrix A: A = Q * R. Arguments ========= M (input) INTEGER The number of rows of the matrix A. M >= 0. N (input) INTEGER The number of columns of the matrix A. N >= 0. A (input/output) DOUBLE PRECISION array, dimension (LDA,N) On entry, the M-by-N matrix A. 
On exit, the elements on and above the diagonal of the array contain the min(M,N)-by-N upper trapezoidal matrix R (R is upper triangular if m >= n); the elements below the diagonal, with the array TAU, represent the orthogonal matrix Q as a product of min(m,n) elementary reflectors (see Further Details). LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,M). TAU (output) DOUBLE PRECISION array, dimension (min(M,N)) The scalar factors of the elementary reflectors (see Further Details). WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) On exit, if INFO = 0, WORK(1) returns the optimal LWORK. LWORK (input) INTEGER The dimension of the array WORK. LWORK >= max(1,N). For optimum performance LWORK >= N*NB, where NB is the optimal blocksize. If LWORK = -1, then a workspace query is assumed; the routine only calculates the optimal size of the WORK array, returns this value as the first entry of the WORK array, and no error message related to LWORK is issued by XERBLA. INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value Further Details =============== The matrix Q is represented as a product of elementary reflectors Q = H(1) H(2) . . . H(k), where k = min(m,n). Each H(i) has the form H(i) = I - tau * v * v' where tau is a real scalar, and v is a real vector with v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i), and tau in TAU(i). ===================================================================== Test the input arguments */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --tau; --work; /* Function Body */ *info = 0; nb = ilaenv_(&c__1, "DGEQRF", " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen) 1); lwkopt = *n * nb; work[1] = (doublereal) lwkopt; lquery = *lwork == -1; if (*m < 0) { *info = -1; } else if (*n < 0) { *info = -2; } else if (*lda < max(1,*m)) { *info = -4; } else if (*lwork < max(1,*n) && ! lquery) { *info = -7; } if (*info != 0) { i__1 = -(*info); xerbla_("DGEQRF", &i__1); return 0; } else if (lquery) { return 0; } /* Quick return if possible */ k = min(*m,*n); if (k == 0) { work[1] = 1.; return 0; } nbmin = 2; nx = 0; iws = *n; if (nb > 1 && nb < k) { /* Determine when to cross over from blocked to unblocked code. Computing MAX */ i__1 = 0, i__2 = ilaenv_(&c__3, "DGEQRF", " ", m, n, &c_n1, &c_n1, ( ftnlen)6, (ftnlen)1); nx = max(i__1,i__2); if (nx < k) { /* Determine if workspace is large enough for blocked code. */ ldwork = *n; iws = ldwork * nb; if (*lwork < iws) { /* Not enough workspace to use optimal NB: reduce NB and determine the minimum value of NB. */ nb = *lwork / ldwork; /* Computing MAX */ i__1 = 2, i__2 = ilaenv_(&c__2, "DGEQRF", " ", m, n, &c_n1, & c_n1, (ftnlen)6, (ftnlen)1); nbmin = max(i__1,i__2); } } } if (nb >= nbmin && nb < k && nx < k) { /* Use blocked code initially */ i__1 = k - nx; i__2 = nb; for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) { /* Computing MIN */ i__3 = k - i__ + 1; ib = min(i__3,nb); /* Compute the QR factorization of the current block A(i:m,i:i+ib-1) */ i__3 = *m - i__ + 1; dgeqr2_(&i__3, &ib, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[ 1], &iinfo); if (i__ + ib <= *n) { /* Form the triangular factor of the block reflector H = H(i) H(i+1) . . . 
H(i+ib-1) */ i__3 = *m - i__ + 1; dlarft_("Forward", "Columnwise", &i__3, &ib, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[1], &ldwork); /* Apply H' to A(i:m,i+ib:n) from the left */ i__3 = *m - i__ + 1; i__4 = *n - i__ - ib + 1; dlarfb_("Left", "Transpose", "Forward", "Columnwise", &i__3, & i__4, &ib, &a[i__ + i__ * a_dim1], lda, &work[1], & ldwork, &a[i__ + (i__ + ib) * a_dim1], lda, &work[ib + 1], &ldwork); } /* L10: */ } } else { i__ = 1; } /* Use unblocked code to factor the last or only block. */ if (i__ <= k) { i__2 = *m - i__ + 1; i__1 = *n - i__ + 1; dgeqr2_(&i__2, &i__1, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[1] , &iinfo); } work[1] = (doublereal) iws; return 0; /* End of DGEQRF */ } /* dgeqrf_ */ /* Subroutine */ int dgesdd_(char *jobz, integer *m, integer *n, doublereal * a, integer *lda, doublereal *s, doublereal *u, integer *ldu, doublereal *vt, integer *ldvt, doublereal *work, integer *lwork, integer *iwork, integer *info) { /* System generated locals */ integer a_dim1, a_offset, u_dim1, u_offset, vt_dim1, vt_offset, i__1, i__2, i__3; /* Builtin functions */ double sqrt(doublereal); /* Local variables */ static integer iscl; static doublereal anrm; static integer idum[1], ierr, itau, i__; extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *); extern logical lsame_(char *, char *); static integer chunk, minmn, wrkbl, itaup, itauq, mnthr; static logical wntqa; static integer nwork; static logical wntqn, wntqo, wntqs; static integer ie; extern /* Subroutine */ int dbdsdc_(char *, char *, integer *, doublereal *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *, integer *); static integer il; extern /* Subroutine */ int dgebrd_(integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, integer *, integer *); static integer ir, bdspac; extern doublereal dlange_(char *, integer *, integer *, doublereal *, integer *, doublereal *); static integer iu; extern /* Subroutine */ int dgelqf_(integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, integer *), dlascl_(char *, integer *, integer *, doublereal *, doublereal *, integer *, integer *, doublereal *, integer *, integer *), dgeqrf_(integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, integer *), dlacpy_(char *, integer *, integer *, doublereal *, integer *, doublereal *, integer *), dlaset_(char *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *), dorgbr_(char *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, integer *); extern integer ilaenv_(integer *, char *, char *, integer *, integer *, integer *, integer *, ftnlen, ftnlen); static doublereal bignum; extern /* Subroutine */ int dormbr_(char *, char *, char *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, integer *), dorglq_(integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, integer *), dorgqr_(integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, integer *); static integer ldwrkl, ldwrkr, minwrk, ldwrku, maxwrk, ldwkvt; static doublereal smlnum; static logical wntqas, lquery; static integer 
blk; static doublereal dum[1], eps; static integer ivt; /* -- LAPACK driver routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DGESDD computes the singular value decomposition (SVD) of a real M-by-N matrix A, optionally computing the left and right singular vectors. If singular vectors are desired, it uses a divide-and-conquer algorithm. The SVD is written A = U * SIGMA * transpose(V) where SIGMA is an M-by-N matrix which is zero except for its min(m,n) diagonal elements, U is an M-by-M orthogonal matrix, and V is an N-by-N orthogonal matrix. The diagonal elements of SIGMA are the singular values of A; they are real and non-negative, and are returned in descending order. The first min(m,n) columns of U and V are the left and right singular vectors of A. Note that the routine returns VT = V**T, not V. The divide and conquer algorithm makes very mild assumptions about floating point arithmetic. It will work on machines with a guard digit in add/subtract, or on those binary machines without guard digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or Cray-2. It could conceivably fail on hexadecimal or decimal machines without guard digits, but we know of none. Arguments ========= JOBZ (input) CHARACTER*1 Specifies options for computing all or part of the matrix U: = 'A': all M columns of U and all N rows of V**T are returned in the arrays U and VT; = 'S': the first min(M,N) columns of U and the first min(M,N) rows of V**T are returned in the arrays U and VT; = 'O': If M >= N, the first N columns of U are overwritten on the array A and all rows of V**T are returned in the array VT; otherwise, all columns of U are returned in the array U and the first M rows of V**T are overwritten in the array A; = 'N': no columns of U or rows of V**T are computed. M (input) INTEGER The number of rows of the input matrix A. M >= 0. N (input) INTEGER The number of columns of the input matrix A. N >= 0. A (input/output) DOUBLE PRECISION array, dimension (LDA,N) On entry, the M-by-N matrix A. On exit, if JOBZ = 'O', A is overwritten with the first N columns of U (the left singular vectors, stored columnwise) if M >= N; A is overwritten with the first M rows of V**T (the right singular vectors, stored rowwise) otherwise. if JOBZ .ne. 'O', the contents of A are destroyed. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,M). S (output) DOUBLE PRECISION array, dimension (min(M,N)) The singular values of A, sorted so that S(i) >= S(i+1). U (output) DOUBLE PRECISION array, dimension (LDU,UCOL) UCOL = M if JOBZ = 'A' or JOBZ = 'O' and M < N; UCOL = min(M,N) if JOBZ = 'S'. If JOBZ = 'A' or JOBZ = 'O' and M < N, U contains the M-by-M orthogonal matrix U; if JOBZ = 'S', U contains the first min(M,N) columns of U (the left singular vectors, stored columnwise); if JOBZ = 'O' and M >= N, or JOBZ = 'N', U is not referenced. LDU (input) INTEGER The leading dimension of the array U. LDU >= 1; if JOBZ = 'S' or 'A' or JOBZ = 'O' and M < N, LDU >= M. VT (output) DOUBLE PRECISION array, dimension (LDVT,N) If JOBZ = 'A' or JOBZ = 'O' and M >= N, VT contains the N-by-N orthogonal matrix V**T; if JOBZ = 'S', VT contains the first min(M,N) rows of V**T (the right singular vectors, stored rowwise); if JOBZ = 'O' and M < N, or JOBZ = 'N', VT is not referenced. LDVT (input) INTEGER The leading dimension of the array VT. LDVT >= 1; if JOBZ = 'A' or JOBZ = 'O' and M >= N, LDVT >= N; if JOBZ = 'S', LDVT >= min(M,N). 
WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) On exit, if INFO = 0, WORK(1) returns the optimal LWORK; LWORK (input) INTEGER The dimension of the array WORK. LWORK >= 1. If JOBZ = 'N', LWORK >= 3*min(M,N) + max(max(M,N),7*min(M,N)). If JOBZ = 'O', LWORK >= 3*min(M,N)*min(M,N) + max(max(M,N),5*min(M,N)*min(M,N)+4*min(M,N)). If JOBZ = 'S' or 'A' LWORK >= 3*min(M,N)*min(M,N) + max(max(M,N),4*min(M,N)*min(M,N)+4*min(M,N)). For good performance, LWORK should generally be larger. If LWORK = -1 but other input arguments are legal, WORK(1) returns the optimal LWORK. IWORK (workspace) INTEGER array, dimension (8*min(M,N)) INFO (output) INTEGER = 0: successful exit. < 0: if INFO = -i, the i-th argument had an illegal value. > 0: DBDSDC did not converge, updating process failed. Further Details =============== Based on contributions by Ming Gu and Huan Ren, Computer Science Division, University of California at Berkeley, USA ===================================================================== Test the input arguments */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --s; u_dim1 = *ldu; u_offset = 1 + u_dim1 * 1; u -= u_offset; vt_dim1 = *ldvt; vt_offset = 1 + vt_dim1 * 1; vt -= vt_offset; --work; --iwork; /* Function Body */ *info = 0; minmn = min(*m,*n); wntqa = lsame_(jobz, "A"); wntqs = lsame_(jobz, "S"); wntqas = wntqa || wntqs; wntqo = lsame_(jobz, "O"); wntqn = lsame_(jobz, "N"); lquery = *lwork == -1; if (! (wntqa || wntqs || wntqo || wntqn)) { *info = -1; } else if (*m < 0) { *info = -2; } else if (*n < 0) { *info = -3; } else if (*lda < max(1,*m)) { *info = -5; } else if (*ldu < 1 || wntqas && *ldu < *m || wntqo && *m < *n && *ldu < * m) { *info = -8; } else if (*ldvt < 1 || wntqa && *ldvt < *n || wntqs && *ldvt < minmn || wntqo && *m >= *n && *ldvt < *n) { *info = -10; } /* Compute workspace (Note: Comments in the code beginning "Workspace:" describe the minimal amount of workspace needed at that point in the code, as well as the preferred amount for good performance. NB refers to the optimal block size for the immediately following subroutine, as returned by ILAENV.) */ if (*info == 0) { minwrk = 1; maxwrk = 1; if (*m >= *n && minmn > 0) { /* Compute space needed for DBDSDC */ mnthr = (integer) (minmn * 11. 
/ 6.); if (wntqn) { bdspac = *n * 7; } else { bdspac = *n * 3 * *n + (*n << 2); } if (*m >= mnthr) { if (wntqn) { /* Path 1 (M much larger than N, JOBZ='N') */ wrkbl = *n + *n * ilaenv_(&c__1, "DGEQRF", " ", m, n, & c_n1, &c_n1, (ftnlen)6, (ftnlen)1); /* Computing MAX */ i__1 = wrkbl, i__2 = *n * 3 + (*n << 1) * ilaenv_(&c__1, "DGEBRD", " ", n, n, &c_n1, &c_n1, (ftnlen)6, ( ftnlen)1); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = bdspac + *n; maxwrk = max(i__1,i__2); minwrk = bdspac + *n; } else if (wntqo) { /* Path 2 (M much larger than N, JOBZ='O') */ wrkbl = *n + *n * ilaenv_(&c__1, "DGEQRF", " ", m, n, & c_n1, &c_n1, (ftnlen)6, (ftnlen)1); /* Computing MAX */ i__1 = wrkbl, i__2 = *n + *n * ilaenv_(&c__1, "DORGQR", " ", m, n, n, &c_n1, (ftnlen)6, (ftnlen)1); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = *n * 3 + (*n << 1) * ilaenv_(&c__1, "DGEBRD", " ", n, n, &c_n1, &c_n1, (ftnlen)6, ( ftnlen)1); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR" , "QLN", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR" , "PRT", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = bdspac + *n * 3; wrkbl = max(i__1,i__2); maxwrk = wrkbl + (*n << 1) * *n; minwrk = bdspac + (*n << 1) * *n + *n * 3; } else if (wntqs) { /* Path 3 (M much larger than N, JOBZ='S') */ wrkbl = *n + *n * ilaenv_(&c__1, "DGEQRF", " ", m, n, & c_n1, &c_n1, (ftnlen)6, (ftnlen)1); /* Computing MAX */ i__1 = wrkbl, i__2 = *n + *n * ilaenv_(&c__1, "DORGQR", " ", m, n, n, &c_n1, (ftnlen)6, (ftnlen)1); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = *n * 3 + (*n << 1) * ilaenv_(&c__1, "DGEBRD", " ", n, n, &c_n1, &c_n1, (ftnlen)6, ( ftnlen)1); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR" , "QLN", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR" , "PRT", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = bdspac + *n * 3; wrkbl = max(i__1,i__2); maxwrk = wrkbl + *n * *n; minwrk = bdspac + *n * *n + *n * 3; } else if (wntqa) { /* Path 4 (M much larger than N, JOBZ='A') */ wrkbl = *n + *n * ilaenv_(&c__1, "DGEQRF", " ", m, n, & c_n1, &c_n1, (ftnlen)6, (ftnlen)1); /* Computing MAX */ i__1 = wrkbl, i__2 = *n + *m * ilaenv_(&c__1, "DORGQR", " ", m, m, n, &c_n1, (ftnlen)6, (ftnlen)1); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = *n * 3 + (*n << 1) * ilaenv_(&c__1, "DGEBRD", " ", n, n, &c_n1, &c_n1, (ftnlen)6, ( ftnlen)1); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR" , "QLN", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR" , "PRT", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = bdspac + *n * 3; wrkbl = max(i__1,i__2); maxwrk = wrkbl + *n * *n; minwrk = bdspac + *n * *n + *n * 3; } } else { /* Path 5 (M at least N, but not much larger) */ wrkbl = *n * 3 + (*m + *n) * ilaenv_(&c__1, "DGEBRD", " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1); if (wntqn) { /* Computing MAX */ i__1 = wrkbl, i__2 = bdspac + *n * 3; maxwrk = max(i__1,i__2); minwrk = *n * 3 + max(*m,bdspac); } else if 
(wntqo) { /* Computing MAX */ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR" , "QLN", m, n, n, &c_n1, (ftnlen)6, (ftnlen)3); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR" , "PRT", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = bdspac + *n * 3; wrkbl = max(i__1,i__2); maxwrk = wrkbl + *m * *n; /* Computing MAX */ i__1 = *m, i__2 = *n * *n + bdspac; minwrk = *n * 3 + max(i__1,i__2); } else if (wntqs) { /* Computing MAX */ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR" , "QLN", m, n, n, &c_n1, (ftnlen)6, (ftnlen)3); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR" , "PRT", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = bdspac + *n * 3; maxwrk = max(i__1,i__2); minwrk = *n * 3 + max(*m,bdspac); } else if (wntqa) { /* Computing MAX */ i__1 = wrkbl, i__2 = *n * 3 + *m * ilaenv_(&c__1, "DORMBR" , "QLN", m, m, n, &c_n1, (ftnlen)6, (ftnlen)3); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR" , "PRT", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = maxwrk, i__2 = bdspac + *n * 3; maxwrk = max(i__1,i__2); minwrk = *n * 3 + max(*m,bdspac); } } } else if (minmn > 0) { /* Compute space needed for DBDSDC */ mnthr = (integer) (minmn * 11. / 6.); if (wntqn) { bdspac = *m * 7; } else { bdspac = *m * 3 * *m + (*m << 2); } if (*n >= mnthr) { if (wntqn) { /* Path 1t (N much larger than M, JOBZ='N') */ wrkbl = *m + *m * ilaenv_(&c__1, "DGELQF", " ", m, n, & c_n1, &c_n1, (ftnlen)6, (ftnlen)1); /* Computing MAX */ i__1 = wrkbl, i__2 = *m * 3 + (*m << 1) * ilaenv_(&c__1, "DGEBRD", " ", m, m, &c_n1, &c_n1, (ftnlen)6, ( ftnlen)1); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = bdspac + *m; maxwrk = max(i__1,i__2); minwrk = bdspac + *m; } else if (wntqo) { /* Path 2t (N much larger than M, JOBZ='O') */ wrkbl = *m + *m * ilaenv_(&c__1, "DGELQF", " ", m, n, & c_n1, &c_n1, (ftnlen)6, (ftnlen)1); /* Computing MAX */ i__1 = wrkbl, i__2 = *m + *m * ilaenv_(&c__1, "DORGLQ", " ", m, n, m, &c_n1, (ftnlen)6, (ftnlen)1); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = *m * 3 + (*m << 1) * ilaenv_(&c__1, "DGEBRD", " ", m, m, &c_n1, &c_n1, (ftnlen)6, ( ftnlen)1); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR" , "QLN", m, m, m, &c_n1, (ftnlen)6, (ftnlen)3); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR" , "PRT", m, m, m, &c_n1, (ftnlen)6, (ftnlen)3); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = bdspac + *m * 3; wrkbl = max(i__1,i__2); maxwrk = wrkbl + (*m << 1) * *m; minwrk = bdspac + (*m << 1) * *m + *m * 3; } else if (wntqs) { /* Path 3t (N much larger than M, JOBZ='S') */ wrkbl = *m + *m * ilaenv_(&c__1, "DGELQF", " ", m, n, & c_n1, &c_n1, (ftnlen)6, (ftnlen)1); /* Computing MAX */ i__1 = wrkbl, i__2 = *m + *m * ilaenv_(&c__1, "DORGLQ", " ", m, n, m, &c_n1, (ftnlen)6, (ftnlen)1); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = *m * 3 + (*m << 1) * ilaenv_(&c__1, "DGEBRD", " ", m, m, &c_n1, &c_n1, (ftnlen)6, ( ftnlen)1); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR" , "QLN", m, m, m, &c_n1, (ftnlen)6, (ftnlen)3); wrkbl = max(i__1,i__2); /* Computing MAX 
*/ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR" , "PRT", m, m, m, &c_n1, (ftnlen)6, (ftnlen)3); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = bdspac + *m * 3; wrkbl = max(i__1,i__2); maxwrk = wrkbl + *m * *m; minwrk = bdspac + *m * *m + *m * 3; } else if (wntqa) { /* Path 4t (N much larger than M, JOBZ='A') */ wrkbl = *m + *m * ilaenv_(&c__1, "DGELQF", " ", m, n, & c_n1, &c_n1, (ftnlen)6, (ftnlen)1); /* Computing MAX */ i__1 = wrkbl, i__2 = *m + *n * ilaenv_(&c__1, "DORGLQ", " ", n, n, m, &c_n1, (ftnlen)6, (ftnlen)1); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = *m * 3 + (*m << 1) * ilaenv_(&c__1, "DGEBRD", " ", m, m, &c_n1, &c_n1, (ftnlen)6, ( ftnlen)1); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR" , "QLN", m, m, m, &c_n1, (ftnlen)6, (ftnlen)3); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR" , "PRT", m, m, m, &c_n1, (ftnlen)6, (ftnlen)3); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = bdspac + *m * 3; wrkbl = max(i__1,i__2); maxwrk = wrkbl + *m * *m; minwrk = bdspac + *m * *m + *m * 3; } } else { /* Path 5t (N greater than M, but not much larger) */ wrkbl = *m * 3 + (*m + *n) * ilaenv_(&c__1, "DGEBRD", " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1); if (wntqn) { /* Computing MAX */ i__1 = wrkbl, i__2 = bdspac + *m * 3; maxwrk = max(i__1,i__2); minwrk = *m * 3 + max(*n,bdspac); } else if (wntqo) { /* Computing MAX */ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR" , "QLN", m, m, n, &c_n1, (ftnlen)6, (ftnlen)3); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR" , "PRT", m, n, m, &c_n1, (ftnlen)6, (ftnlen)3); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = bdspac + *m * 3; wrkbl = max(i__1,i__2); maxwrk = wrkbl + *m * *n; /* Computing MAX */ i__1 = *n, i__2 = *m * *m + bdspac; minwrk = *m * 3 + max(i__1,i__2); } else if (wntqs) { /* Computing MAX */ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR" , "QLN", m, m, n, &c_n1, (ftnlen)6, (ftnlen)3); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR" , "PRT", m, n, m, &c_n1, (ftnlen)6, (ftnlen)3); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = bdspac + *m * 3; maxwrk = max(i__1,i__2); minwrk = *m * 3 + max(*n,bdspac); } else if (wntqa) { /* Computing MAX */ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR" , "QLN", m, m, n, &c_n1, (ftnlen)6, (ftnlen)3); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR" , "PRT", n, n, m, &c_n1, (ftnlen)6, (ftnlen)3); wrkbl = max(i__1,i__2); /* Computing MAX */ i__1 = wrkbl, i__2 = bdspac + *m * 3; maxwrk = max(i__1,i__2); minwrk = *m * 3 + max(*n,bdspac); } } } maxwrk = max(maxwrk,minwrk); work[1] = (doublereal) maxwrk; if (*lwork < minwrk && ! lquery) { *info = -12; } } if (*info != 0) { i__1 = -(*info); xerbla_("DGESDD", &i__1); return 0; } else if (lquery) { return 0; } /* Quick return if possible */ if (*m == 0 || *n == 0) { return 0; } /* Get machine constants */ eps = PRECISION; smlnum = sqrt(SAFEMINIMUM) / eps; bignum = 1. / smlnum; /* Scale A if max element outside range [SMLNUM,BIGNUM] */ anrm = dlange_("M", m, n, &a[a_offset], lda, dum); iscl = 0; if (anrm > 0. 
&& anrm < smlnum) { iscl = 1; dlascl_("G", &c__0, &c__0, &anrm, &smlnum, m, n, &a[a_offset], lda, & ierr); } else if (anrm > bignum) { iscl = 1; dlascl_("G", &c__0, &c__0, &anrm, &bignum, m, n, &a[a_offset], lda, & ierr); } if (*m >= *n) { /* A has at least as many rows as columns. If A has sufficiently more rows than columns, first reduce using the QR decomposition (if sufficient workspace available) */ if (*m >= mnthr) { if (wntqn) { /* Path 1 (M much larger than N, JOBZ='N') No singular vectors to be computed */ itau = 1; nwork = itau + *n; /* Compute A=Q*R (Workspace: need 2*N, prefer N+N*NB) */ i__1 = *lwork - nwork + 1; dgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], & i__1, &ierr); /* Zero out below R */ i__1 = *n - 1; i__2 = *n - 1; dlaset_("L", &i__1, &i__2, &c_b29, &c_b29, &a[a_dim1 + 2], lda); ie = 1; itauq = ie + *n; itaup = itauq + *n; nwork = itaup + *n; /* Bidiagonalize R in A (Workspace: need 4*N, prefer 3*N+2*N*NB) */ i__1 = *lwork - nwork + 1; dgebrd_(n, n, &a[a_offset], lda, &s[1], &work[ie], &work[ itauq], &work[itaup], &work[nwork], &i__1, &ierr); nwork = ie + *n; /* Perform bidiagonal SVD, computing singular values only (Workspace: need N+BDSPAC) */ dbdsdc_("U", "N", n, &s[1], &work[ie], dum, &c__1, dum, &c__1, dum, idum, &work[nwork], &iwork[1], info); } else if (wntqo) { /* Path 2 (M much larger than N, JOBZ = 'O') N left singular vectors to be overwritten on A and N right singular vectors to be computed in VT */ ir = 1; /* WORK(IR) is LDWRKR by N */ if (*lwork >= *lda * *n + *n * *n + *n * 3 + bdspac) { ldwrkr = *lda; } else { ldwrkr = (*lwork - *n * *n - *n * 3 - bdspac) / *n; } itau = ir + ldwrkr * *n; nwork = itau + *n; /* Compute A=Q*R (Workspace: need N*N+2*N, prefer N*N+N+N*NB) */ i__1 = *lwork - nwork + 1; dgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], & i__1, &ierr); /* Copy R to WORK(IR), zeroing out below it */ dlacpy_("U", n, n, &a[a_offset], lda, &work[ir], &ldwrkr); i__1 = *n - 1; i__2 = *n - 1; dlaset_("L", &i__1, &i__2, &c_b29, &c_b29, &work[ir + 1], & ldwrkr); /* Generate Q in A (Workspace: need N*N+2*N, prefer N*N+N+N*NB) */ i__1 = *lwork - nwork + 1; dorgqr_(m, n, n, &a[a_offset], lda, &work[itau], &work[nwork], &i__1, &ierr); ie = itau; itauq = ie + *n; itaup = itauq + *n; nwork = itaup + *n; /* Bidiagonalize R in VT, copying result to WORK(IR) (Workspace: need N*N+4*N, prefer N*N+3*N+2*N*NB) */ i__1 = *lwork - nwork + 1; dgebrd_(n, n, &work[ir], &ldwrkr, &s[1], &work[ie], &work[ itauq], &work[itaup], &work[nwork], &i__1, &ierr); /* WORK(IU) is N by N */ iu = nwork; nwork = iu + *n * *n; /* Perform bidiagonal SVD, computing left singular vectors of bidiagonal matrix in WORK(IU) and computing right singular vectors of bidiagonal matrix in VT (Workspace: need N+N*N+BDSPAC) */ dbdsdc_("U", "I", n, &s[1], &work[ie], &work[iu], n, &vt[ vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1], info); /* Overwrite WORK(IU) by left singular vectors of R and VT by right singular vectors of R (Workspace: need 2*N*N+3*N, prefer 2*N*N+2*N+N*NB) */ i__1 = *lwork - nwork + 1; dormbr_("Q", "L", "N", n, n, n, &work[ir], &ldwrkr, &work[ itauq], &work[iu], n, &work[nwork], &i__1, &ierr); i__1 = *lwork - nwork + 1; dormbr_("P", "R", "T", n, n, n, &work[ir], &ldwrkr, &work[ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, & ierr); /* Multiply Q in A by left singular vectors of R in WORK(IU), storing result in WORK(IR) and copying to A (Workspace: need 2*N*N, prefer N*N+M*N) */ i__1 = *m; i__2 = ldwrkr; for (i__ = 1; i__2 < 0 ? 
i__ >= i__1 : i__ <= i__1; i__ += i__2) { /* Computing MIN */ i__3 = *m - i__ + 1; chunk = min(i__3,ldwrkr); dgemm_("N", "N", &chunk, n, n, &c_b15, &a[i__ + a_dim1], lda, &work[iu], n, &c_b29, &work[ir], &ldwrkr); dlacpy_("F", &chunk, n, &work[ir], &ldwrkr, &a[i__ + a_dim1], lda); /* L10: */ } } else if (wntqs) { /* Path 3 (M much larger than N, JOBZ='S') N left singular vectors to be computed in U and N right singular vectors to be computed in VT */ ir = 1; /* WORK(IR) is N by N */ ldwrkr = *n; itau = ir + ldwrkr * *n; nwork = itau + *n; /* Compute A=Q*R (Workspace: need N*N+2*N, prefer N*N+N+N*NB) */ i__2 = *lwork - nwork + 1; dgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], & i__2, &ierr); /* Copy R to WORK(IR), zeroing out below it */ dlacpy_("U", n, n, &a[a_offset], lda, &work[ir], &ldwrkr); i__2 = *n - 1; i__1 = *n - 1; dlaset_("L", &i__2, &i__1, &c_b29, &c_b29, &work[ir + 1], & ldwrkr); /* Generate Q in A (Workspace: need N*N+2*N, prefer N*N+N+N*NB) */ i__2 = *lwork - nwork + 1; dorgqr_(m, n, n, &a[a_offset], lda, &work[itau], &work[nwork], &i__2, &ierr); ie = itau; itauq = ie + *n; itaup = itauq + *n; nwork = itaup + *n; /* Bidiagonalize R in WORK(IR) (Workspace: need N*N+4*N, prefer N*N+3*N+2*N*NB) */ i__2 = *lwork - nwork + 1; dgebrd_(n, n, &work[ir], &ldwrkr, &s[1], &work[ie], &work[ itauq], &work[itaup], &work[nwork], &i__2, &ierr); /* Perform bidiagonal SVD, computing left singular vectors of bidiagoal matrix in U and computing right singular vectors of bidiagonal matrix in VT (Workspace: need N+BDSPAC) */ dbdsdc_("U", "I", n, &s[1], &work[ie], &u[u_offset], ldu, &vt[ vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1], info); /* Overwrite U by left singular vectors of R and VT by right singular vectors of R (Workspace: need N*N+3*N, prefer N*N+2*N+N*NB) */ i__2 = *lwork - nwork + 1; dormbr_("Q", "L", "N", n, n, n, &work[ir], &ldwrkr, &work[ itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr); i__2 = *lwork - nwork + 1; dormbr_("P", "R", "T", n, n, n, &work[ir], &ldwrkr, &work[ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__2, & ierr); /* Multiply Q in A by left singular vectors of R in WORK(IR), storing result in U (Workspace: need N*N) */ dlacpy_("F", n, n, &u[u_offset], ldu, &work[ir], &ldwrkr); dgemm_("N", "N", m, n, n, &c_b15, &a[a_offset], lda, &work[ir] , &ldwrkr, &c_b29, &u[u_offset], ldu); } else if (wntqa) { /* Path 4 (M much larger than N, JOBZ='A') M left singular vectors to be computed in U and N right singular vectors to be computed in VT */ iu = 1; /* WORK(IU) is N by N */ ldwrku = *n; itau = iu + ldwrku * *n; nwork = itau + *n; /* Compute A=Q*R, copying result to U (Workspace: need N*N+2*N, prefer N*N+N+N*NB) */ i__2 = *lwork - nwork + 1; dgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], & i__2, &ierr); dlacpy_("L", m, n, &a[a_offset], lda, &u[u_offset], ldu); /* Generate Q in U (Workspace: need N*N+2*N, prefer N*N+N+N*NB) */ i__2 = *lwork - nwork + 1; dorgqr_(m, m, n, &u[u_offset], ldu, &work[itau], &work[nwork], &i__2, &ierr); /* Produce R in A, zeroing out other entries */ i__2 = *n - 1; i__1 = *n - 1; dlaset_("L", &i__2, &i__1, &c_b29, &c_b29, &a[a_dim1 + 2], lda); ie = itau; itauq = ie + *n; itaup = itauq + *n; nwork = itaup + *n; /* Bidiagonalize R in A (Workspace: need N*N+4*N, prefer N*N+3*N+2*N*NB) */ i__2 = *lwork - nwork + 1; dgebrd_(n, n, &a[a_offset], lda, &s[1], &work[ie], &work[ itauq], &work[itaup], &work[nwork], &i__2, &ierr); /* Perform bidiagonal SVD, computing left singular vectors of bidiagonal matrix in WORK(IU) 
and computing right singular vectors of bidiagonal matrix in VT (Workspace: need N+N*N+BDSPAC) */ dbdsdc_("U", "I", n, &s[1], &work[ie], &work[iu], n, &vt[ vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1], info); /* Overwrite WORK(IU) by left singular vectors of R and VT by right singular vectors of R (Workspace: need N*N+3*N, prefer N*N+2*N+N*NB) */ i__2 = *lwork - nwork + 1; dormbr_("Q", "L", "N", n, n, n, &a[a_offset], lda, &work[ itauq], &work[iu], &ldwrku, &work[nwork], &i__2, & ierr); i__2 = *lwork - nwork + 1; dormbr_("P", "R", "T", n, n, n, &a[a_offset], lda, &work[ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__2, & ierr); /* Multiply Q in U by left singular vectors of R in WORK(IU), storing result in A (Workspace: need N*N) */ dgemm_("N", "N", m, n, n, &c_b15, &u[u_offset], ldu, &work[iu] , &ldwrku, &c_b29, &a[a_offset], lda); /* Copy left singular vectors of A from A to U */ dlacpy_("F", m, n, &a[a_offset], lda, &u[u_offset], ldu); } } else { /* M .LT. MNTHR Path 5 (M at least N, but not much larger) Reduce to bidiagonal form without QR decomposition */ ie = 1; itauq = ie + *n; itaup = itauq + *n; nwork = itaup + *n; /* Bidiagonalize A (Workspace: need 3*N+M, prefer 3*N+(M+N)*NB) */ i__2 = *lwork - nwork + 1; dgebrd_(m, n, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], & work[itaup], &work[nwork], &i__2, &ierr); if (wntqn) { /* Perform bidiagonal SVD, only computing singular values (Workspace: need N+BDSPAC) */ dbdsdc_("U", "N", n, &s[1], &work[ie], dum, &c__1, dum, &c__1, dum, idum, &work[nwork], &iwork[1], info); } else if (wntqo) { iu = nwork; if (*lwork >= *m * *n + *n * 3 + bdspac) { /* WORK( IU ) is M by N */ ldwrku = *m; nwork = iu + ldwrku * *n; dlaset_("F", m, n, &c_b29, &c_b29, &work[iu], &ldwrku); } else { /* WORK( IU ) is N by N */ ldwrku = *n; nwork = iu + ldwrku * *n; /* WORK(IR) is LDWRKR by N */ ir = nwork; ldwrkr = (*lwork - *n * *n - *n * 3) / *n; } nwork = iu + ldwrku * *n; /* Perform bidiagonal SVD, computing left singular vectors of bidiagonal matrix in WORK(IU) and computing right singular vectors of bidiagonal matrix in VT (Workspace: need N+N*N+BDSPAC) */ dbdsdc_("U", "I", n, &s[1], &work[ie], &work[iu], &ldwrku, & vt[vt_offset], ldvt, dum, idum, &work[nwork], &iwork[ 1], info); /* Overwrite VT by right singular vectors of A (Workspace: need N*N+2*N, prefer N*N+N+N*NB) */ i__2 = *lwork - nwork + 1; dormbr_("P", "R", "T", n, n, n, &a[a_offset], lda, &work[ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__2, & ierr); if (*lwork >= *m * *n + *n * 3 + bdspac) { /* Overwrite WORK(IU) by left singular vectors of A (Workspace: need N*N+2*N, prefer N*N+N+N*NB) */ i__2 = *lwork - nwork + 1; dormbr_("Q", "L", "N", m, n, n, &a[a_offset], lda, &work[ itauq], &work[iu], &ldwrku, &work[nwork], &i__2, & ierr); /* Copy left singular vectors of A from WORK(IU) to A */ dlacpy_("F", m, n, &work[iu], &ldwrku, &a[a_offset], lda); } else { /* Generate Q in A (Workspace: need N*N+2*N, prefer N*N+N+N*NB) */ i__2 = *lwork - nwork + 1; dorgbr_("Q", m, n, n, &a[a_offset], lda, &work[itauq], & work[nwork], &i__2, &ierr); /* Multiply Q in A by left singular vectors of bidiagonal matrix in WORK(IU), storing result in WORK(IR) and copying to A (Workspace: need 2*N*N, prefer N*N+M*N) */ i__2 = *m; i__1 = ldwrkr; for (i__ = 1; i__1 < 0 ? 
i__ >= i__2 : i__ <= i__2; i__ += i__1) { /* Computing MIN */ i__3 = *m - i__ + 1; chunk = min(i__3,ldwrkr); dgemm_("N", "N", &chunk, n, n, &c_b15, &a[i__ + a_dim1], lda, &work[iu], &ldwrku, &c_b29, & work[ir], &ldwrkr); dlacpy_("F", &chunk, n, &work[ir], &ldwrkr, &a[i__ + a_dim1], lda); /* L20: */ } } } else if (wntqs) { /* Perform bidiagonal SVD, computing left singular vectors of bidiagonal matrix in U and computing right singular vectors of bidiagonal matrix in VT (Workspace: need N+BDSPAC) */ dlaset_("F", m, n, &c_b29, &c_b29, &u[u_offset], ldu); dbdsdc_("U", "I", n, &s[1], &work[ie], &u[u_offset], ldu, &vt[ vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1], info); /* Overwrite U by left singular vectors of A and VT by right singular vectors of A (Workspace: need 3*N, prefer 2*N+N*NB) */ i__1 = *lwork - nwork + 1; dormbr_("Q", "L", "N", m, n, n, &a[a_offset], lda, &work[ itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr); i__1 = *lwork - nwork + 1; dormbr_("P", "R", "T", n, n, n, &a[a_offset], lda, &work[ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, & ierr); } else if (wntqa) { /* Perform bidiagonal SVD, computing left singular vectors of bidiagonal matrix in U and computing right singular vectors of bidiagonal matrix in VT (Workspace: need N+BDSPAC) */ dlaset_("F", m, m, &c_b29, &c_b29, &u[u_offset], ldu); dbdsdc_("U", "I", n, &s[1], &work[ie], &u[u_offset], ldu, &vt[ vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1], info); /* Set the right corner of U to identity matrix */ if (*m > *n) { i__1 = *m - *n; i__2 = *m - *n; dlaset_("F", &i__1, &i__2, &c_b29, &c_b15, &u[*n + 1 + (* n + 1) * u_dim1], ldu); } /* Overwrite U by left singular vectors of A and VT by right singular vectors of A (Workspace: need N*N+2*N+M, prefer N*N+2*N+M*NB) */ i__1 = *lwork - nwork + 1; dormbr_("Q", "L", "N", m, m, n, &a[a_offset], lda, &work[ itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr); i__1 = *lwork - nwork + 1; dormbr_("P", "R", "T", n, n, m, &a[a_offset], lda, &work[ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, & ierr); } } } else { /* A has more columns than rows. 
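The paths below (1t through 5t) are the transposed analogues of the M >= N paths above, with an LQ factorization of A taking the place of the QR factorization.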
If A has sufficiently more columns than rows, first reduce using the LQ decomposition (if sufficient workspace available) */ if (*n >= mnthr) { if (wntqn) { /* Path 1t (N much larger than M, JOBZ='N') No singular vectors to be computed */ itau = 1; nwork = itau + *m; /* Compute A=L*Q (Workspace: need 2*M, prefer M+M*NB) */ i__1 = *lwork - nwork + 1; dgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], & i__1, &ierr); /* Zero out above L */ i__1 = *m - 1; i__2 = *m - 1; dlaset_("U", &i__1, &i__2, &c_b29, &c_b29, &a[(a_dim1 << 1) + 1], lda); ie = 1; itauq = ie + *m; itaup = itauq + *m; nwork = itaup + *m; /* Bidiagonalize L in A (Workspace: need 4*M, prefer 3*M+2*M*NB) */ i__1 = *lwork - nwork + 1; dgebrd_(m, m, &a[a_offset], lda, &s[1], &work[ie], &work[ itauq], &work[itaup], &work[nwork], &i__1, &ierr); nwork = ie + *m; /* Perform bidiagonal SVD, computing singular values only (Workspace: need M+BDSPAC) */ dbdsdc_("U", "N", m, &s[1], &work[ie], dum, &c__1, dum, &c__1, dum, idum, &work[nwork], &iwork[1], info); } else if (wntqo) { /* Path 2t (N much larger than M, JOBZ='O') M right singular vectors to be overwritten on A and M left singular vectors to be computed in U */ ivt = 1; /* IVT is M by M */ il = ivt + *m * *m; if (*lwork >= *m * *n + *m * *m + *m * 3 + bdspac) { /* WORK(IL) is M by N */ ldwrkl = *m; chunk = *n; } else { ldwrkl = *m; chunk = (*lwork - *m * *m) / *m; } itau = il + ldwrkl * *m; nwork = itau + *m; /* Compute A=L*Q (Workspace: need M*M+2*M, prefer M*M+M+M*NB) */ i__1 = *lwork - nwork + 1; dgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], & i__1, &ierr); /* Copy L to WORK(IL), zeroing about above it */ dlacpy_("L", m, m, &a[a_offset], lda, &work[il], &ldwrkl); i__1 = *m - 1; i__2 = *m - 1; dlaset_("U", &i__1, &i__2, &c_b29, &c_b29, &work[il + ldwrkl], &ldwrkl); /* Generate Q in A (Workspace: need M*M+2*M, prefer M*M+M+M*NB) */ i__1 = *lwork - nwork + 1; dorglq_(m, n, m, &a[a_offset], lda, &work[itau], &work[nwork], &i__1, &ierr); ie = itau; itauq = ie + *m; itaup = itauq + *m; nwork = itaup + *m; /* Bidiagonalize L in WORK(IL) (Workspace: need M*M+4*M, prefer M*M+3*M+2*M*NB) */ i__1 = *lwork - nwork + 1; dgebrd_(m, m, &work[il], &ldwrkl, &s[1], &work[ie], &work[ itauq], &work[itaup], &work[nwork], &i__1, &ierr); /* Perform bidiagonal SVD, computing left singular vectors of bidiagonal matrix in U, and computing right singular vectors of bidiagonal matrix in WORK(IVT) (Workspace: need M+M*M+BDSPAC) */ dbdsdc_("U", "I", m, &s[1], &work[ie], &u[u_offset], ldu, & work[ivt], m, dum, idum, &work[nwork], &iwork[1], info); /* Overwrite U by left singular vectors of L and WORK(IVT) by right singular vectors of L (Workspace: need 2*M*M+3*M, prefer 2*M*M+2*M+M*NB) */ i__1 = *lwork - nwork + 1; dormbr_("Q", "L", "N", m, m, m, &work[il], &ldwrkl, &work[ itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr); i__1 = *lwork - nwork + 1; dormbr_("P", "R", "T", m, m, m, &work[il], &ldwrkl, &work[ itaup], &work[ivt], m, &work[nwork], &i__1, &ierr); /* Multiply right singular vectors of L in WORK(IVT) by Q in A, storing result in WORK(IL) and copying to A (Workspace: need 2*M*M, prefer M*M+M*N) */ i__1 = *n; i__2 = chunk; for (i__ = 1; i__2 < 0 ? 
i__ >= i__1 : i__ <= i__1; i__ += i__2) { /* Computing MIN */ i__3 = *n - i__ + 1; blk = min(i__3,chunk); dgemm_("N", "N", m, &blk, m, &c_b15, &work[ivt], m, &a[ i__ * a_dim1 + 1], lda, &c_b29, &work[il], & ldwrkl); dlacpy_("F", m, &blk, &work[il], &ldwrkl, &a[i__ * a_dim1 + 1], lda); /* L30: */ } } else if (wntqs) { /* Path 3t (N much larger than M, JOBZ='S') M right singular vectors to be computed in VT and M left singular vectors to be computed in U */ il = 1; /* WORK(IL) is M by M */ ldwrkl = *m; itau = il + ldwrkl * *m; nwork = itau + *m; /* Compute A=L*Q (Workspace: need M*M+2*M, prefer M*M+M+M*NB) */ i__2 = *lwork - nwork + 1; dgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], & i__2, &ierr); /* Copy L to WORK(IL), zeroing out above it */ dlacpy_("L", m, m, &a[a_offset], lda, &work[il], &ldwrkl); i__2 = *m - 1; i__1 = *m - 1; dlaset_("U", &i__2, &i__1, &c_b29, &c_b29, &work[il + ldwrkl], &ldwrkl); /* Generate Q in A (Workspace: need M*M+2*M, prefer M*M+M+M*NB) */ i__2 = *lwork - nwork + 1; dorglq_(m, n, m, &a[a_offset], lda, &work[itau], &work[nwork], &i__2, &ierr); ie = itau; itauq = ie + *m; itaup = itauq + *m; nwork = itaup + *m; /* Bidiagonalize L in WORK(IU), copying result to U (Workspace: need M*M+4*M, prefer M*M+3*M+2*M*NB) */ i__2 = *lwork - nwork + 1; dgebrd_(m, m, &work[il], &ldwrkl, &s[1], &work[ie], &work[ itauq], &work[itaup], &work[nwork], &i__2, &ierr); /* Perform bidiagonal SVD, computing left singular vectors of bidiagonal matrix in U and computing right singular vectors of bidiagonal matrix in VT (Workspace: need M+BDSPAC) */ dbdsdc_("U", "I", m, &s[1], &work[ie], &u[u_offset], ldu, &vt[ vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1], info); /* Overwrite U by left singular vectors of L and VT by right singular vectors of L (Workspace: need M*M+3*M, prefer M*M+2*M+M*NB) */ i__2 = *lwork - nwork + 1; dormbr_("Q", "L", "N", m, m, m, &work[il], &ldwrkl, &work[ itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr); i__2 = *lwork - nwork + 1; dormbr_("P", "R", "T", m, m, m, &work[il], &ldwrkl, &work[ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__2, & ierr); /* Multiply right singular vectors of L in WORK(IL) by Q in A, storing result in VT (Workspace: need M*M) */ dlacpy_("F", m, m, &vt[vt_offset], ldvt, &work[il], &ldwrkl); dgemm_("N", "N", m, n, m, &c_b15, &work[il], &ldwrkl, &a[ a_offset], lda, &c_b29, &vt[vt_offset], ldvt); } else if (wntqa) { /* Path 4t (N much larger than M, JOBZ='A') N right singular vectors to be computed in VT and M left singular vectors to be computed in U */ ivt = 1; /* WORK(IVT) is M by M */ ldwkvt = *m; itau = ivt + ldwkvt * *m; nwork = itau + *m; /* Compute A=L*Q, copying result to VT (Workspace: need M*M+2*M, prefer M*M+M+M*NB) */ i__2 = *lwork - nwork + 1; dgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], & i__2, &ierr); dlacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt); /* Generate Q in VT (Workspace: need M*M+2*M, prefer M*M+M+M*NB) */ i__2 = *lwork - nwork + 1; dorglq_(n, n, m, &vt[vt_offset], ldvt, &work[itau], &work[ nwork], &i__2, &ierr); /* Produce L in A, zeroing out other entries */ i__2 = *m - 1; i__1 = *m - 1; dlaset_("U", &i__2, &i__1, &c_b29, &c_b29, &a[(a_dim1 << 1) + 1], lda); ie = itau; itauq = ie + *m; itaup = itauq + *m; nwork = itaup + *m; /* Bidiagonalize L in A (Workspace: need M*M+4*M, prefer M*M+3*M+2*M*NB) */ i__2 = *lwork - nwork + 1; dgebrd_(m, m, &a[a_offset], lda, &s[1], &work[ie], &work[ itauq], &work[itaup], &work[nwork], &i__2, &ierr); /* Perform bidiagonal SVD, 
computing left singular vectors of bidiagonal matrix in U and computing right singular vectors of bidiagonal matrix in WORK(IVT) (Workspace: need M+M*M+BDSPAC) */ dbdsdc_("U", "I", m, &s[1], &work[ie], &u[u_offset], ldu, & work[ivt], &ldwkvt, dum, idum, &work[nwork], &iwork[1] , info); /* Overwrite U by left singular vectors of L and WORK(IVT) by right singular vectors of L (Workspace: need M*M+3*M, prefer M*M+2*M+M*NB) */ i__2 = *lwork - nwork + 1; dormbr_("Q", "L", "N", m, m, m, &a[a_offset], lda, &work[ itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr); i__2 = *lwork - nwork + 1; dormbr_("P", "R", "T", m, m, m, &a[a_offset], lda, &work[ itaup], &work[ivt], &ldwkvt, &work[nwork], &i__2, & ierr); /* Multiply right singular vectors of L in WORK(IVT) by Q in VT, storing result in A (Workspace: need M*M) */ dgemm_("N", "N", m, n, m, &c_b15, &work[ivt], &ldwkvt, &vt[ vt_offset], ldvt, &c_b29, &a[a_offset], lda); /* Copy right singular vectors of A from A to VT */ dlacpy_("F", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt); } } else { /* N .LT. MNTHR Path 5t (N greater than M, but not much larger) Reduce to bidiagonal form without LQ decomposition */ ie = 1; itauq = ie + *m; itaup = itauq + *m; nwork = itaup + *m; /* Bidiagonalize A (Workspace: need 3*M+N, prefer 3*M+(M+N)*NB) */ i__2 = *lwork - nwork + 1; dgebrd_(m, n, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], & work[itaup], &work[nwork], &i__2, &ierr); if (wntqn) { /* Perform bidiagonal SVD, only computing singular values (Workspace: need M+BDSPAC) */ dbdsdc_("L", "N", m, &s[1], &work[ie], dum, &c__1, dum, &c__1, dum, idum, &work[nwork], &iwork[1], info); } else if (wntqo) { ldwkvt = *m; ivt = nwork; if (*lwork >= *m * *n + *m * 3 + bdspac) { /* WORK( IVT ) is M by N */ dlaset_("F", m, n, &c_b29, &c_b29, &work[ivt], &ldwkvt); nwork = ivt + ldwkvt * *n; } else { /* WORK( IVT ) is M by M */ nwork = ivt + ldwkvt * *m; il = nwork; /* WORK(IL) is M by CHUNK */ chunk = (*lwork - *m * *m - *m * 3) / *m; } /* Perform bidiagonal SVD, computing left singular vectors of bidiagonal matrix in U and computing right singular vectors of bidiagonal matrix in WORK(IVT) (Workspace: need M*M+BDSPAC) */ dbdsdc_("L", "I", m, &s[1], &work[ie], &u[u_offset], ldu, & work[ivt], &ldwkvt, dum, idum, &work[nwork], &iwork[1] , info); /* Overwrite U by left singular vectors of A (Workspace: need M*M+2*M, prefer M*M+M+M*NB) */ i__2 = *lwork - nwork + 1; dormbr_("Q", "L", "N", m, m, n, &a[a_offset], lda, &work[ itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr); if (*lwork >= *m * *n + *m * 3 + bdspac) { /* Overwrite WORK(IVT) by left singular vectors of A (Workspace: need M*M+2*M, prefer M*M+M+M*NB) */ i__2 = *lwork - nwork + 1; dormbr_("P", "R", "T", m, n, m, &a[a_offset], lda, &work[ itaup], &work[ivt], &ldwkvt, &work[nwork], &i__2, &ierr); /* Copy right singular vectors of A from WORK(IVT) to A */ dlacpy_("F", m, n, &work[ivt], &ldwkvt, &a[a_offset], lda); } else { /* Generate P**T in A (Workspace: need M*M+2*M, prefer M*M+M+M*NB) */ i__2 = *lwork - nwork + 1; dorgbr_("P", m, n, m, &a[a_offset], lda, &work[itaup], & work[nwork], &i__2, &ierr); /* Multiply Q in A by right singular vectors of bidiagonal matrix in WORK(IVT), storing result in WORK(IL) and copying to A (Workspace: need 2*M*M, prefer M*M+M*N) */ i__2 = *n; i__1 = chunk; for (i__ = 1; i__1 < 0 ? 
i__ >= i__2 : i__ <= i__2; i__ += i__1) { /* Computing MIN */ i__3 = *n - i__ + 1; blk = min(i__3,chunk); dgemm_("N", "N", m, &blk, m, &c_b15, &work[ivt], & ldwkvt, &a[i__ * a_dim1 + 1], lda, &c_b29, & work[il], m); dlacpy_("F", m, &blk, &work[il], m, &a[i__ * a_dim1 + 1], lda); /* L40: */ } } } else if (wntqs) { /* Perform bidiagonal SVD, computing left singular vectors of bidiagonal matrix in U and computing right singular vectors of bidiagonal matrix in VT (Workspace: need M+BDSPAC) */ dlaset_("F", m, n, &c_b29, &c_b29, &vt[vt_offset], ldvt); dbdsdc_("L", "I", m, &s[1], &work[ie], &u[u_offset], ldu, &vt[ vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1], info); /* Overwrite U by left singular vectors of A and VT by right singular vectors of A (Workspace: need 3*M, prefer 2*M+M*NB) */ i__1 = *lwork - nwork + 1; dormbr_("Q", "L", "N", m, m, n, &a[a_offset], lda, &work[ itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr); i__1 = *lwork - nwork + 1; dormbr_("P", "R", "T", m, n, m, &a[a_offset], lda, &work[ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, & ierr); } else if (wntqa) { /* Perform bidiagonal SVD, computing left singular vectors of bidiagonal matrix in U and computing right singular vectors of bidiagonal matrix in VT (Workspace: need M+BDSPAC) */ dlaset_("F", n, n, &c_b29, &c_b29, &vt[vt_offset], ldvt); dbdsdc_("L", "I", m, &s[1], &work[ie], &u[u_offset], ldu, &vt[ vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1], info); /* Set the right corner of VT to identity matrix */ if (*n > *m) { i__1 = *n - *m; i__2 = *n - *m; dlaset_("F", &i__1, &i__2, &c_b29, &c_b15, &vt[*m + 1 + (* m + 1) * vt_dim1], ldvt); } /* Overwrite U by left singular vectors of A and VT by right singular vectors of A (Workspace: need 2*M+N, prefer 2*M+N*NB) */ i__1 = *lwork - nwork + 1; dormbr_("Q", "L", "N", m, m, n, &a[a_offset], lda, &work[ itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr); i__1 = *lwork - nwork + 1; dormbr_("P", "R", "T", n, n, m, &a[a_offset], lda, &work[ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, & ierr); } } } /* Undo scaling if necessary */ if (iscl == 1) { if (anrm > bignum) { dlascl_("G", &c__0, &c__0, &bignum, &anrm, &minmn, &c__1, &s[1], & minmn, &ierr); } if (anrm < smlnum) { dlascl_("G", &c__0, &c__0, &smlnum, &anrm, &minmn, &c__1, &s[1], & minmn, &ierr); } } /* Return optimal workspace in WORK(1) */ work[1] = (doublereal) maxwrk; return 0; /* End of DGESDD */ } /* dgesdd_ */ /* Subroutine */ int dgesv_(integer *n, integer *nrhs, doublereal *a, integer *lda, integer *ipiv, doublereal *b, integer *ldb, integer *info) { /* System generated locals */ integer a_dim1, a_offset, b_dim1, b_offset, i__1; /* Local variables */ extern /* Subroutine */ int dgetrf_(integer *, integer *, doublereal *, integer *, integer *, integer *), xerbla_(char *, integer *), dgetrs_(char *, integer *, integer *, doublereal *, integer *, integer *, doublereal *, integer *, integer *); /* -- LAPACK driver routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DGESV computes the solution to a real system of linear equations A * X = B, where A is an N-by-N matrix and X and B are N-by-NRHS matrices. The LU decomposition with partial pivoting and row interchanges is used to factor A as A = P * L * U, where P is a permutation matrix, L is unit lower triangular, and U is upper triangular. The factored form of A is then used to solve the system of equations A * X = B. 
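Illustrative sketch (not part of the reference LAPACK text): a minimal example of calling this routine from f2c-style C, assuming the integer and doublereal typedefs from f2c.h and column-major storage; the variable names, matrix entries and right-hand side below are made up for illustration only. integer n = 3, nrhs = 1, lda = 3, ldb = 3, info; integer ipiv[3]; doublereal a[9] = { 4., 1., 0., 1., 4., 1., 0., 1., 4. }; doublereal b[3] = { 1., 2., 3. }; dgesv_(&n, &nrhs, a, &lda, ipiv, b, &ldb, &info); On return with info == 0, b holds the solution X and a holds the L and U factors of A.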
Arguments ========= N (input) INTEGER The number of linear equations, i.e., the order of the matrix A. N >= 0. NRHS (input) INTEGER The number of right hand sides, i.e., the number of columns of the matrix B. NRHS >= 0. A (input/output) DOUBLE PRECISION array, dimension (LDA,N) On entry, the N-by-N coefficient matrix A. On exit, the factors L and U from the factorization A = P*L*U; the unit diagonal elements of L are not stored. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,N). IPIV (output) INTEGER array, dimension (N) The pivot indices that define the permutation matrix P; row i of the matrix was interchanged with row IPIV(i). B (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS) On entry, the N-by-NRHS matrix of right hand side matrix B. On exit, if INFO = 0, the N-by-NRHS solution matrix X. LDB (input) INTEGER The leading dimension of the array B. LDB >= max(1,N). INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value > 0: if INFO = i, U(i,i) is exactly zero. The factorization has been completed, but the factor U is exactly singular, so the solution could not be computed. ===================================================================== Test the input parameters. */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --ipiv; b_dim1 = *ldb; b_offset = 1 + b_dim1 * 1; b -= b_offset; /* Function Body */ *info = 0; if (*n < 0) { *info = -1; } else if (*nrhs < 0) { *info = -2; } else if (*lda < max(1,*n)) { *info = -4; } else if (*ldb < max(1,*n)) { *info = -7; } if (*info != 0) { i__1 = -(*info); xerbla_("DGESV ", &i__1); return 0; } /* Compute the LU factorization of A. */ dgetrf_(n, n, &a[a_offset], lda, &ipiv[1], info); if (*info == 0) { /* Solve the system A*X = B, overwriting B with X. */ dgetrs_("No transpose", n, nrhs, &a[a_offset], lda, &ipiv[1], &b[ b_offset], ldb, info); } return 0; /* End of DGESV */ } /* dgesv_ */ /* Subroutine */ int dgetf2_(integer *m, integer *n, doublereal *a, integer * lda, integer *ipiv, integer *info) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2, i__3; doublereal d__1; /* Local variables */ extern /* Subroutine */ int dger_(integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *); static integer i__, j; extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, integer *); static doublereal sfmin; extern /* Subroutine */ int dswap_(integer *, doublereal *, integer *, doublereal *, integer *); static integer jp; extern integer idamax_(integer *, doublereal *, integer *); extern /* Subroutine */ int xerbla_(char *, integer *); /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DGETF2 computes an LU factorization of a general m-by-n matrix A using partial pivoting with row interchanges. The factorization has the form A = P * L * U where P is a permutation matrix, L is lower triangular with unit diagonal elements (lower trapezoidal if m > n), and U is upper triangular (upper trapezoidal if m < n). This is the right-looking Level 2 BLAS version of the algorithm. Arguments ========= M (input) INTEGER The number of rows of the matrix A. M >= 0. N (input) INTEGER The number of columns of the matrix A. N >= 0. A (input/output) DOUBLE PRECISION array, dimension (LDA,N) On entry, the m by n matrix to be factored. 
On exit, the factors L and U from the factorization A = P*L*U; the unit diagonal elements of L are not stored. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,M). IPIV (output) INTEGER array, dimension (min(M,N)) The pivot indices; for 1 <= i <= min(M,N), row i of the matrix was interchanged with row IPIV(i). INFO (output) INTEGER = 0: successful exit < 0: if INFO = -k, the k-th argument had an illegal value > 0: if INFO = k, U(k,k) is exactly zero. The factorization has been completed, but the factor U is exactly singular, and division by zero will occur if it is used to solve a system of equations. ===================================================================== Test the input parameters. */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --ipiv; /* Function Body */ *info = 0; if (*m < 0) { *info = -1; } else if (*n < 0) { *info = -2; } else if (*lda < max(1,*m)) { *info = -4; } if (*info != 0) { i__1 = -(*info); xerbla_("DGETF2", &i__1); return 0; } /* Quick return if possible */ if (*m == 0 || *n == 0) { return 0; } /* Compute machine safe minimum */ sfmin = SAFEMINIMUM; i__1 = min(*m,*n); for (j = 1; j <= i__1; ++j) { /* Find pivot and test for singularity. */ i__2 = *m - j + 1; jp = j - 1 + idamax_(&i__2, &a[j + j * a_dim1], &c__1); ipiv[j] = jp; if (a[jp + j * a_dim1] != 0.) { /* Apply the interchange to columns 1:N. */ if (jp != j) { dswap_(n, &a[j + a_dim1], lda, &a[jp + a_dim1], lda); } /* Compute elements J+1:M of J-th column. */ if (j < *m) { if ((d__1 = a[j + j * a_dim1], abs(d__1)) >= sfmin) { i__2 = *m - j; d__1 = 1. / a[j + j * a_dim1]; dscal_(&i__2, &d__1, &a[j + 1 + j * a_dim1], &c__1); } else { i__2 = *m - j; for (i__ = 1; i__ <= i__2; ++i__) { a[j + i__ + j * a_dim1] /= a[j + j * a_dim1]; /* L20: */ } } } } else if (*info == 0) { *info = j; } if (j < min(*m,*n)) { /* Update trailing submatrix. */ i__2 = *m - j; i__3 = *n - j; dger_(&i__2, &i__3, &c_b151, &a[j + 1 + j * a_dim1], &c__1, &a[j + (j + 1) * a_dim1], lda, &a[j + 1 + (j + 1) * a_dim1], lda); } /* L10: */ } return 0; /* End of DGETF2 */ } /* dgetf2_ */ /* Subroutine */ int dgetrf_(integer *m, integer *n, doublereal *a, integer * lda, integer *ipiv, integer *info) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5; /* Local variables */ static integer i__, j; extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *); static integer iinfo; extern /* Subroutine */ int dtrsm_(char *, char *, char *, char *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *), dgetf2_( integer *, integer *, doublereal *, integer *, integer *, integer *); static integer jb, nb; extern /* Subroutine */ int xerbla_(char *, integer *); extern integer ilaenv_(integer *, char *, char *, integer *, integer *, integer *, integer *, ftnlen, ftnlen); extern /* Subroutine */ int dlaswp_(integer *, doublereal *, integer *, integer *, integer *, integer *, integer *); /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DGETRF computes an LU factorization of a general M-by-N matrix A using partial pivoting with row interchanges. 
The factorization has the form A = P * L * U where P is a permutation matrix, L is lower triangular with unit diagonal elements (lower trapezoidal if m > n), and U is upper triangular (upper trapezoidal if m < n). This is the right-looking Level 3 BLAS version of the algorithm. Arguments ========= M (input) INTEGER The number of rows of the matrix A. M >= 0. N (input) INTEGER The number of columns of the matrix A. N >= 0. A (input/output) DOUBLE PRECISION array, dimension (LDA,N) On entry, the M-by-N matrix to be factored. On exit, the factors L and U from the factorization A = P*L*U; the unit diagonal elements of L are not stored. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,M). IPIV (output) INTEGER array, dimension (min(M,N)) The pivot indices; for 1 <= i <= min(M,N), row i of the matrix was interchanged with row IPIV(i). INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value > 0: if INFO = i, U(i,i) is exactly zero. The factorization has been completed, but the factor U is exactly singular, and division by zero will occur if it is used to solve a system of equations. ===================================================================== Test the input parameters. */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --ipiv; /* Function Body */ *info = 0; if (*m < 0) { *info = -1; } else if (*n < 0) { *info = -2; } else if (*lda < max(1,*m)) { *info = -4; } if (*info != 0) { i__1 = -(*info); xerbla_("DGETRF", &i__1); return 0; } /* Quick return if possible */ if (*m == 0 || *n == 0) { return 0; } /* Determine the block size for this environment. */ nb = ilaenv_(&c__1, "DGETRF", " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen) 1); if (nb <= 1 || nb >= min(*m,*n)) { /* Use unblocked code. */ dgetf2_(m, n, &a[a_offset], lda, &ipiv[1], info); } else { /* Use blocked code. */ i__1 = min(*m,*n); i__2 = nb; for (j = 1; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) { /* Computing MIN */ i__3 = min(*m,*n) - j + 1; jb = min(i__3,nb); /* Factor diagonal and subdiagonal blocks and test for exact singularity. */ i__3 = *m - j + 1; dgetf2_(&i__3, &jb, &a[j + j * a_dim1], lda, &ipiv[j], &iinfo); /* Adjust INFO and the pivot indices. */ if (*info == 0 && iinfo > 0) { *info = iinfo + j - 1; } /* Computing MIN */ i__4 = *m, i__5 = j + jb - 1; i__3 = min(i__4,i__5); for (i__ = j; i__ <= i__3; ++i__) { ipiv[i__] = j - 1 + ipiv[i__]; /* L10: */ } /* Apply interchanges to columns 1:J-1. */ i__3 = j - 1; i__4 = j + jb - 1; dlaswp_(&i__3, &a[a_offset], lda, &j, &i__4, &ipiv[1], &c__1); if (j + jb <= *n) { /* Apply interchanges to columns J+JB:N. */ i__3 = *n - j - jb + 1; i__4 = j + jb - 1; dlaswp_(&i__3, &a[(j + jb) * a_dim1 + 1], lda, &j, &i__4, & ipiv[1], &c__1); /* Compute block row of U. */ i__3 = *n - j - jb + 1; dtrsm_("Left", "Lower", "No transpose", "Unit", &jb, &i__3, & c_b15, &a[j + j * a_dim1], lda, &a[j + (j + jb) * a_dim1], lda); if (j + jb <= *m) { /* Update trailing submatrix. 
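This is the blocked Schur-complement update A(j+jb:m, j+jb:n) := A(j+jb:m, j+jb:n) - A(j+jb:m, j:j+jb-1) * A(j:j+jb-1, j+jb:n), performed by the single DGEMM call that follows.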
*/ i__3 = *m - j - jb + 1; i__4 = *n - j - jb + 1; dgemm_("No transpose", "No transpose", &i__3, &i__4, &jb, &c_b151, &a[j + jb + j * a_dim1], lda, &a[j + (j + jb) * a_dim1], lda, &c_b15, &a[j + jb + (j + jb) * a_dim1], lda); } } /* L20: */ } } return 0; /* End of DGETRF */ } /* dgetrf_ */ /* Subroutine */ int dgetrs_(char *trans, integer *n, integer *nrhs, doublereal *a, integer *lda, integer *ipiv, doublereal *b, integer * ldb, integer *info) { /* System generated locals */ integer a_dim1, a_offset, b_dim1, b_offset, i__1; /* Local variables */ extern logical lsame_(char *, char *); extern /* Subroutine */ int dtrsm_(char *, char *, char *, char *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *), xerbla_( char *, integer *), dlaswp_(integer *, doublereal *, integer *, integer *, integer *, integer *, integer *); static logical notran; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DGETRS solves a system of linear equations A * X = B or A' * X = B with a general N-by-N matrix A using the LU factorization computed by DGETRF. Arguments ========= TRANS (input) CHARACTER*1 Specifies the form of the system of equations: = 'N': A * X = B (No transpose) = 'T': A'* X = B (Transpose) = 'C': A'* X = B (Conjugate transpose = Transpose) N (input) INTEGER The order of the matrix A. N >= 0. NRHS (input) INTEGER The number of right hand sides, i.e., the number of columns of the matrix B. NRHS >= 0. A (input) DOUBLE PRECISION array, dimension (LDA,N) The factors L and U from the factorization A = P*L*U as computed by DGETRF. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,N). IPIV (input) INTEGER array, dimension (N) The pivot indices from DGETRF; for 1<=i<=N, row i of the matrix was interchanged with row IPIV(i). B (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS) On entry, the right hand side matrix B. On exit, the solution matrix X. LDB (input) INTEGER The leading dimension of the array B. LDB >= max(1,N). INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value ===================================================================== Test the input parameters. */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --ipiv; b_dim1 = *ldb; b_offset = 1 + b_dim1 * 1; b -= b_offset; /* Function Body */ *info = 0; notran = lsame_(trans, "N"); if (! notran && ! lsame_(trans, "T") && ! lsame_( trans, "C")) { *info = -1; } else if (*n < 0) { *info = -2; } else if (*nrhs < 0) { *info = -3; } else if (*lda < max(1,*n)) { *info = -5; } else if (*ldb < max(1,*n)) { *info = -8; } if (*info != 0) { i__1 = -(*info); xerbla_("DGETRS", &i__1); return 0; } /* Quick return if possible */ if (*n == 0 || *nrhs == 0) { return 0; } if (notran) { /* Solve A * X = B. Apply row interchanges to the right hand sides. */ dlaswp_(nrhs, &b[b_offset], ldb, &c__1, n, &ipiv[1], &c__1); /* Solve L*X = B, overwriting B with X. */ dtrsm_("Left", "Lower", "No transpose", "Unit", n, nrhs, &c_b15, &a[ a_offset], lda, &b[b_offset], ldb); /* Solve U*X = B, overwriting B with X. */ dtrsm_("Left", "Upper", "No transpose", "Non-unit", n, nrhs, &c_b15, & a[a_offset], lda, &b[b_offset], ldb); } else { /* Solve A' * X = B. Solve U'*X = B, overwriting B with X. */ dtrsm_("Left", "Upper", "Transpose", "Non-unit", n, nrhs, &c_b15, &a[ a_offset], lda, &b[b_offset], ldb); /* Solve L'*X = B, overwriting B with X. 
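For the transposed system the two triangular solves are applied in the opposite order to the no-transpose branch (U' first, then L' below), and the row interchanges recorded in IPIV are applied to the solution afterwards, with DLASWP called with increment -1 so that the interchanges are undone in reverse order.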
*/ dtrsm_("Left", "Lower", "Transpose", "Unit", n, nrhs, &c_b15, &a[ a_offset], lda, &b[b_offset], ldb); /* Apply row interchanges to the solution vectors. */ dlaswp_(nrhs, &b[b_offset], ldb, &c__1, n, &ipiv[1], &c_n1); } return 0; /* End of DGETRS */ } /* dgetrs_ */ /* Subroutine */ int dhseqr_(char *job, char *compz, integer *n, integer *ilo, integer *ihi, doublereal *h__, integer *ldh, doublereal *wr, doublereal *wi, doublereal *z__, integer *ldz, doublereal *work, integer *lwork, integer *info) { /* System generated locals */ address a__1[2]; integer h_dim1, h_offset, z_dim1, z_offset, i__1, i__2[2], i__3; doublereal d__1; char ch__1[2]; /* Builtin functions */ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen); /* Local variables */ static integer kbot, nmin, i__; extern logical lsame_(char *, char *); static logical initz; static doublereal workl[49]; static logical wantt, wantz; extern /* Subroutine */ int dlaqr0_(logical *, logical *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, integer *, doublereal *, integer *, doublereal *, integer *, integer *); static doublereal hl[2401] /* was [49][49] */; extern /* Subroutine */ int dlahqr_(logical *, logical *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, integer *, doublereal *, integer *, integer *), dlacpy_(char *, integer *, integer *, doublereal *, integer *, doublereal *, integer *), dlaset_(char *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *); extern integer ilaenv_(integer *, char *, char *, integer *, integer *, integer *, integer *, ftnlen, ftnlen); extern /* Subroutine */ int xerbla_(char *, integer *); static logical lquery; /* -- LAPACK driver routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DHSEQR computes the eigenvalues of a Hessenberg matrix H and, optionally, the matrices T and Z from the Schur decomposition H = Z T Z**T, where T is an upper quasi-triangular matrix (the Schur form), and Z is the orthogonal matrix of Schur vectors. Optionally Z may be postmultiplied into an input orthogonal matrix Q so that this routine can give the Schur factorization of a matrix A which has been reduced to the Hessenberg form H by the orthogonal matrix Q: A = Q*H*Q**T = (QZ)*T*(QZ)**T. Arguments ========= JOB (input) CHARACTER*1 = 'E': compute eigenvalues only; = 'S': compute eigenvalues and the Schur form T. COMPZ (input) CHARACTER*1 = 'N': no Schur vectors are computed; = 'I': Z is initialized to the unit matrix and the matrix Z of Schur vectors of H is returned; = 'V': Z must contain an orthogonal matrix Q on entry, and the product Q*Z is returned. N (input) INTEGER The order of the matrix H. N .GE. 0. ILO (input) INTEGER IHI (input) INTEGER It is assumed that H is already upper triangular in rows and columns 1:ILO-1 and IHI+1:N. ILO and IHI are normally set by a previous call to DGEBAL, and then passed to DGEHRD when the matrix output by DGEBAL is reduced to Hessenberg form. Otherwise ILO and IHI should be set to 1 and N respectively. If N.GT.0, then 1.LE.ILO.LE.IHI.LE.N. If N = 0, then ILO = 1 and IHI = 0. H (input/output) DOUBLE PRECISION array, dimension (LDH,N) On entry, the upper Hessenberg matrix H. 
On exit, if INFO = 0 and JOB = 'S', then H contains the upper quasi-triangular matrix T from the Schur decomposition (the Schur form); 2-by-2 diagonal blocks (corresponding to complex conjugate pairs of eigenvalues) are returned in standard form, with H(i,i) = H(i+1,i+1) and H(i+1,i)*H(i,i+1).LT.0. If INFO = 0 and JOB = 'E', the contents of H are unspecified on exit. (The output value of H when INFO.GT.0 is given under the description of INFO below.) Unlike earlier versions of DHSEQR, this subroutine may explicitly H(i,j) = 0 for i.GT.j and j = 1, 2, ... ILO-1 or j = IHI+1, IHI+2, ... N. LDH (input) INTEGER The leading dimension of the array H. LDH .GE. max(1,N). WR (output) DOUBLE PRECISION array, dimension (N) WI (output) DOUBLE PRECISION array, dimension (N) The real and imaginary parts, respectively, of the computed eigenvalues. If two eigenvalues are computed as a complex conjugate pair, they are stored in consecutive elements of WR and WI, say the i-th and (i+1)th, with WI(i) .GT. 0 and WI(i+1) .LT. 0. If JOB = 'S', the eigenvalues are stored in the same order as on the diagonal of the Schur form returned in H, with WR(i) = H(i,i) and, if H(i:i+1,i:i+1) is a 2-by-2 diagonal block, WI(i) = sqrt(-H(i+1,i)*H(i,i+1)) and WI(i+1) = -WI(i). Z (input/output) DOUBLE PRECISION array, dimension (LDZ,N) If COMPZ = 'N', Z is not referenced. If COMPZ = 'I', on entry Z need not be set and on exit, if INFO = 0, Z contains the orthogonal matrix Z of the Schur vectors of H. If COMPZ = 'V', on entry Z must contain an N-by-N matrix Q, which is assumed to be equal to the unit matrix except for the submatrix Z(ILO:IHI,ILO:IHI). On exit, if INFO = 0, Z contains Q*Z. Normally Q is the orthogonal matrix generated by DORGHR after the call to DGEHRD which formed the Hessenberg matrix H. (The output value of Z when INFO.GT.0 is given under the description of INFO below.) LDZ (input) INTEGER The leading dimension of the array Z. if COMPZ = 'I' or COMPZ = 'V', then LDZ.GE.MAX(1,N). Otherwize, LDZ.GE.1. WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK) On exit, if INFO = 0, WORK(1) returns an estimate of the optimal value for LWORK. LWORK (input) INTEGER The dimension of the array WORK. LWORK .GE. max(1,N) is sufficient, but LWORK typically as large as 6*N may be required for optimal performance. A workspace query to determine the optimal workspace size is recommended. If LWORK = -1, then DHSEQR does a workspace query. In this case, DHSEQR checks the input parameters and estimates the optimal workspace size for the given values of N, ILO and IHI. The estimate is returned in WORK(1). No error message related to LWORK is issued by XERBLA. Neither H nor Z are accessed. INFO (output) INTEGER = 0: successful exit .LT. 0: if INFO = -i, the i-th argument had an illegal value .GT. 0: if INFO = i, DHSEQR failed to compute all of the eigenvalues. Elements 1:ilo-1 and i+1:n of WR and WI contain those eigenvalues which have been successfully computed. (Failures are rare.) If INFO .GT. 0 and JOB = 'E', then on exit, the remaining unconverged eigenvalues are the eigen- values of the upper Hessenberg matrix rows and columns ILO through INFO of the final, output value of H. If INFO .GT. 0 and JOB = 'S', then on exit (*) (initial value of H)*U = U*(final value of H) where U is an orthogonal matrix. The final value of H is upper Hessenberg and quasi-triangular in rows and columns INFO+1 through IHI. If INFO .GT. 
0 and COMPZ = 'V', then on exit (final value of Z) = (initial value of Z)*U where U is the orthogonal matrix in (*) (regard- less of the value of JOB.) If INFO .GT. 0 and COMPZ = 'I', then on exit (final value of Z) = U where U is the orthogonal matrix in (*) (regard- less of the value of JOB.) If INFO .GT. 0 and COMPZ = 'N', then Z is not accessed. ================================================================ Default values supplied by ILAENV(ISPEC,'DHSEQR',JOB(:1)//COMPZ(:1),N,ILO,IHI,LWORK). It is suggested that these defaults be adjusted in order to attain best performance in each particular computational environment. ISPEC=1: The DLAHQR vs DLAQR0 crossover point. Default: 75. (Must be at least 11.) ISPEC=2: Recommended deflation window size. This depends on ILO, IHI and NS. NS is the number of simultaneous shifts returned by ILAENV(ISPEC=4). (See ISPEC=4 below.) The default for (IHI-ILO+1).LE.500 is NS. The default for (IHI-ILO+1).GT.500 is 3*NS/2. ISPEC=3: Nibble crossover point. (See ILAENV for details.) Default: 14% of deflation window size. ISPEC=4: Number of simultaneous shifts, NS, in a multi-shift QR iteration. If IHI-ILO+1 is ... greater than ...but less ... the or equal to ... than default is 1 30 NS - 2(+) 30 60 NS - 4(+) 60 150 NS = 10(+) 150 590 NS = ** 590 3000 NS = 64 3000 6000 NS = 128 6000 infinity NS = 256 (+) By default some or all matrices of this order are passed to the implicit double shift routine DLAHQR and NS is ignored. See ISPEC=1 above and comments in IPARM for details. The asterisks (**) indicate an ad-hoc function of N increasing from 10 to 64. ISPEC=5: Select structured matrix multiply. (See ILAENV for details.) Default: 3. ================================================================ Based on contributions by Karen Braman and Ralph Byers, Department of Mathematics, University of Kansas, USA ================================================================ References: K. Braman, R. Byers and R. Mathias, The Multi-Shift QR Algorithm Part I: Maintaining Well Focused Shifts, and Level 3 Performance, SIAM Journal of Matrix Analysis, volume 23, pages 929--947, 2002. K. Braman, R. Byers and R. Mathias, The Multi-Shift QR Algorithm Part II: Aggressive Early Deflation, SIAM Journal of Matrix Analysis, volume 23, pages 948--973, 2002. ================================================================ ==== Matrices of order NTINY or smaller must be processed by . DLAHQR because of insufficient subdiagonal scratch space. . (This is a hard limit.) ==== ==== NL allocates some local workspace to help small matrices . through a rare DLAHQR failure. NL .GT. NTINY = 11 is . required and NL .LE. NMIN = ILAENV(ISPEC=1,...) is recom- . mended. (The default value of NMIN is 75.) Using NL = 49 . allows up to six simultaneous shifts and a 16-by-16 . deflation window. ==== ==== Decode and check the input parameters. ==== */ /* Parameter adjustments */ h_dim1 = *ldh; h_offset = 1 + h_dim1 * 1; h__ -= h_offset; --wr; --wi; z_dim1 = *ldz; z_offset = 1 + z_dim1 * 1; z__ -= z_offset; --work; /* Function Body */ wantt = lsame_(job, "S"); initz = lsame_(compz, "I"); wantz = initz || lsame_(compz, "V"); work[1] = (doublereal) max(1,*n); lquery = *lwork == -1; *info = 0; if (! lsame_(job, "E") && ! wantt) { *info = -1; } else if (! lsame_(compz, "N") && ! 
wantz) { *info = -2; } else if (*n < 0) { *info = -3; } else if (*ilo < 1 || *ilo > max(1,*n)) { *info = -4; } else if (*ihi < min(*ilo,*n) || *ihi > *n) { *info = -5; } else if (*ldh < max(1,*n)) { *info = -7; } else if (*ldz < 1 || wantz && *ldz < max(1,*n)) { *info = -11; } else if (*lwork < max(1,*n) && ! lquery) { *info = -13; } if (*info != 0) { /* ==== Quick return in case of invalid argument. ==== */ i__1 = -(*info); xerbla_("DHSEQR", &i__1); return 0; } else if (*n == 0) { /* ==== Quick return in case N = 0; nothing to do. ==== */ return 0; } else if (lquery) { /* ==== Quick return in case of a workspace query ==== */ dlaqr0_(&wantt, &wantz, n, ilo, ihi, &h__[h_offset], ldh, &wr[1], &wi[ 1], ilo, ihi, &z__[z_offset], ldz, &work[1], lwork, info); /* ==== Ensure reported workspace size is backward-compatible with . previous LAPACK versions. ==== Computing MAX */ d__1 = (doublereal) max(1,*n); work[1] = max(d__1,work[1]); return 0; } else { /* ==== copy eigenvalues isolated by DGEBAL ==== */ i__1 = *ilo - 1; for (i__ = 1; i__ <= i__1; ++i__) { wr[i__] = h__[i__ + i__ * h_dim1]; wi[i__] = 0.; /* L10: */ } i__1 = *n; for (i__ = *ihi + 1; i__ <= i__1; ++i__) { wr[i__] = h__[i__ + i__ * h_dim1]; wi[i__] = 0.; /* L20: */ } /* ==== Initialize Z, if requested ==== */ if (initz) { dlaset_("A", n, n, &c_b29, &c_b15, &z__[z_offset], ldz) ; } /* ==== Quick return if possible ==== */ if (*ilo == *ihi) { wr[*ilo] = h__[*ilo + *ilo * h_dim1]; wi[*ilo] = 0.; return 0; } /* ==== DLAHQR/DLAQR0 crossover point ==== Writing concatenation */ i__2[0] = 1, a__1[0] = job; i__2[1] = 1, a__1[1] = compz; s_cat(ch__1, a__1, i__2, &c__2, (ftnlen)2); nmin = ilaenv_(&c__12, "DHSEQR", ch__1, n, ilo, ihi, lwork, (ftnlen)6, (ftnlen)2); nmin = max(11,nmin); /* ==== DLAQR0 for big matrices; DLAHQR for small ones ==== */ if (*n > nmin) { dlaqr0_(&wantt, &wantz, n, ilo, ihi, &h__[h_offset], ldh, &wr[1], &wi[1], ilo, ihi, &z__[z_offset], ldz, &work[1], lwork, info); } else { /* ==== Small matrix ==== */ dlahqr_(&wantt, &wantz, n, ilo, ihi, &h__[h_offset], ldh, &wr[1], &wi[1], ilo, ihi, &z__[z_offset], ldz, info); if (*info > 0) { /* ==== A rare DLAHQR failure! DLAQR0 sometimes succeeds . when DLAHQR fails. ==== */ kbot = *info; if (*n >= 49) { /* ==== Larger matrices have enough subdiagonal scratch . space to call DLAQR0 directly. ==== */ dlaqr0_(&wantt, &wantz, n, ilo, &kbot, &h__[h_offset], ldh, &wr[1], &wi[1], ilo, ihi, &z__[z_offset], ldz, &work[1], lwork, info); } else { /* ==== Tiny matrices don't have enough subdiagonal . scratch space to benefit from DLAQR0. Hence, . tiny matrices must be copied into a larger . array before calling DLAQR0. ==== */ dlacpy_("A", n, n, &h__[h_offset], ldh, hl, &c__49); hl[*n + 1 + *n * 49 - 50] = 0.; i__1 = 49 - *n; dlaset_("A", &c__49, &i__1, &c_b29, &c_b29, &hl[(*n + 1) * 49 - 49], &c__49); dlaqr0_(&wantt, &wantz, &c__49, ilo, &kbot, hl, &c__49, & wr[1], &wi[1], ilo, ihi, &z__[z_offset], ldz, workl, &c__49, info); if (wantt || *info != 0) { dlacpy_("A", n, n, hl, &c__49, &h__[h_offset], ldh); } } } } /* ==== Clear out the trash, if necessary. ==== */ if ((wantt || *info != 0) && *n > 2) { i__1 = *n - 2; i__3 = *n - 2; dlaset_("L", &i__1, &i__3, &c_b29, &c_b29, &h__[h_dim1 + 3], ldh); } /* ==== Ensure reported workspace size is backward-compatible with . previous LAPACK versions. 
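(WORK(1) is raised to at least MAX(1,N), the documented minimum LWORK, so the workspace query never reports a value smaller than earlier DHSEQR interfaces accepted.)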
==== Computing MAX */ d__1 = (doublereal) max(1,*n); work[1] = max(d__1,work[1]); } /* ==== End of DHSEQR ==== */ return 0; } /* dhseqr_ */ /* Subroutine */ int dlabad_(doublereal *small, doublereal *large) { /* Builtin functions */ double d_lg10(doublereal *), sqrt(doublereal); /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLABAD takes as input the values computed by DLAMCH for underflow and overflow, and returns the square root of each of these values if the log of LARGE is sufficiently large. This subroutine is intended to identify machines with a large exponent range, such as the Crays, and redefine the underflow and overflow limits to be the square roots of the values computed by DLAMCH. This subroutine is needed because DLAMCH does not compensate for poor arithmetic in the upper half of the exponent range, as is found on a Cray. Arguments ========= SMALL (input/output) DOUBLE PRECISION On entry, the underflow threshold as computed by DLAMCH. On exit, if LOG10(LARGE) is sufficiently large, the square root of SMALL, otherwise unchanged. LARGE (input/output) DOUBLE PRECISION On entry, the overflow threshold as computed by DLAMCH. On exit, if LOG10(LARGE) is sufficiently large, the square root of LARGE, otherwise unchanged. ===================================================================== If it looks like we're on a Cray, take the square root of SMALL and LARGE to avoid overflow and underflow problems. */ if (d_lg10(large) > 2e3) { *small = sqrt(*small); *large = sqrt(*large); } return 0; /* End of DLABAD */ } /* dlabad_ */ /* Subroutine */ int dlabrd_(integer *m, integer *n, integer *nb, doublereal * a, integer *lda, doublereal *d__, doublereal *e, doublereal *tauq, doublereal *taup, doublereal *x, integer *ldx, doublereal *y, integer *ldy) { /* System generated locals */ integer a_dim1, a_offset, x_dim1, x_offset, y_dim1, y_offset, i__1, i__2, i__3; /* Local variables */ static integer i__; extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, integer *), dgemv_(char *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *), dlarfg_(integer *, doublereal *, doublereal *, integer *, doublereal *); /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLABRD reduces the first NB rows and columns of a real general m by n matrix A to upper or lower bidiagonal form by an orthogonal transformation Q' * A * P, and returns the matrices X and Y which are needed to apply the transformation to the unreduced part of A. If m >= n, A is reduced to upper bidiagonal form; if m < n, to lower bidiagonal form. This is an auxiliary routine called by DGEBRD Arguments ========= M (input) INTEGER The number of rows in the matrix A. N (input) INTEGER The number of columns in the matrix A. NB (input) INTEGER The number of leading rows and columns of A to be reduced. A (input/output) DOUBLE PRECISION array, dimension (LDA,N) On entry, the m by n general matrix to be reduced. On exit, the first NB rows and columns of the matrix are overwritten; the rest of the array is unchanged. 
If m >= n, elements on and below the diagonal in the first NB columns, with the array TAUQ, represent the orthogonal matrix Q as a product of elementary reflectors; and elements above the diagonal in the first NB rows, with the array TAUP, represent the orthogonal matrix P as a product of elementary reflectors. If m < n, elements below the diagonal in the first NB columns, with the array TAUQ, represent the orthogonal matrix Q as a product of elementary reflectors, and elements on and above the diagonal in the first NB rows, with the array TAUP, represent the orthogonal matrix P as a product of elementary reflectors. See Further Details. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,M). D (output) DOUBLE PRECISION array, dimension (NB) The diagonal elements of the first NB rows and columns of the reduced matrix. D(i) = A(i,i). E (output) DOUBLE PRECISION array, dimension (NB) The off-diagonal elements of the first NB rows and columns of the reduced matrix. TAUQ (output) DOUBLE PRECISION array dimension (NB) The scalar factors of the elementary reflectors which represent the orthogonal matrix Q. See Further Details. TAUP (output) DOUBLE PRECISION array, dimension (NB) The scalar factors of the elementary reflectors which represent the orthogonal matrix P. See Further Details. X (output) DOUBLE PRECISION array, dimension (LDX,NB) The m-by-nb matrix X required to update the unreduced part of A. LDX (input) INTEGER The leading dimension of the array X. LDX >= M. Y (output) DOUBLE PRECISION array, dimension (LDY,NB) The n-by-nb matrix Y required to update the unreduced part of A. LDY (input) INTEGER The leading dimension of the array Y. LDY >= N. Further Details =============== The matrices Q and P are represented as products of elementary reflectors: Q = H(1) H(2) . . . H(nb) and P = G(1) G(2) . . . G(nb) Each H(i) and G(i) has the form: H(i) = I - tauq * v * v' and G(i) = I - taup * u * u' where tauq and taup are real scalars, and v and u are real vectors. If m >= n, v(1:i-1) = 0, v(i) = 1, and v(i:m) is stored on exit in A(i:m,i); u(1:i) = 0, u(i+1) = 1, and u(i+1:n) is stored on exit in A(i,i+1:n); tauq is stored in TAUQ(i) and taup in TAUP(i). If m < n, v(1:i) = 0, v(i+1) = 1, and v(i+1:m) is stored on exit in A(i+2:m,i); u(1:i-1) = 0, u(i) = 1, and u(i:n) is stored on exit in A(i,i+1:n); tauq is stored in TAUQ(i) and taup in TAUP(i). The elements of the vectors v and u together form the m-by-nb matrix V and the nb-by-n matrix U' which are needed, with X and Y, to apply the transformation to the unreduced part of the matrix, using a block update of the form: A := A - V*Y' - X*U'. The contents of A on exit are illustrated by the following examples with nb = 2: m = 6 and n = 5 (m > n): m = 5 and n = 6 (m < n): ( 1 1 u1 u1 u1 ) ( 1 u1 u1 u1 u1 u1 ) ( v1 1 1 u2 u2 ) ( 1 1 u2 u2 u2 u2 ) ( v1 v2 a a a ) ( v1 1 a a a a ) ( v1 v2 a a a ) ( v1 v2 a a a a ) ( v1 v2 a a a ) ( v1 v2 a a a a ) ( v1 v2 a a a ) where a denotes an element of the original matrix which is unchanged, vi denotes an element of the vector defining H(i), and ui an element of the vector defining G(i). 
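In the blocked driver DGEBRD, each call to this routine reduces one NB-wide panel; the returned X and Y are then used to update the trailing submatrix with two matrix multiplies of the form A := A - V*Y' - X*U' before the next panel is processed.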
===================================================================== Quick return if possible */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --d__; --e; --tauq; --taup; x_dim1 = *ldx; x_offset = 1 + x_dim1 * 1; x -= x_offset; y_dim1 = *ldy; y_offset = 1 + y_dim1 * 1; y -= y_offset; /* Function Body */ if (*m <= 0 || *n <= 0) { return 0; } if (*m >= *n) { /* Reduce to upper bidiagonal form */ i__1 = *nb; for (i__ = 1; i__ <= i__1; ++i__) { /* Update A(i:m,i) */ i__2 = *m - i__ + 1; i__3 = i__ - 1; dgemv_("No transpose", &i__2, &i__3, &c_b151, &a[i__ + a_dim1], lda, &y[i__ + y_dim1], ldy, &c_b15, &a[i__ + i__ * a_dim1] , &c__1); i__2 = *m - i__ + 1; i__3 = i__ - 1; dgemv_("No transpose", &i__2, &i__3, &c_b151, &x[i__ + x_dim1], ldx, &a[i__ * a_dim1 + 1], &c__1, &c_b15, &a[i__ + i__ * a_dim1], &c__1); /* Generate reflection Q(i) to annihilate A(i+1:m,i) */ i__2 = *m - i__ + 1; /* Computing MIN */ i__3 = i__ + 1; dlarfg_(&i__2, &a[i__ + i__ * a_dim1], &a[min(i__3,*m) + i__ * a_dim1], &c__1, &tauq[i__]); d__[i__] = a[i__ + i__ * a_dim1]; if (i__ < *n) { a[i__ + i__ * a_dim1] = 1.; /* Compute Y(i+1:n,i) */ i__2 = *m - i__ + 1; i__3 = *n - i__; dgemv_("Transpose", &i__2, &i__3, &c_b15, &a[i__ + (i__ + 1) * a_dim1], lda, &a[i__ + i__ * a_dim1], &c__1, &c_b29, &y[i__ + 1 + i__ * y_dim1], &c__1); i__2 = *m - i__ + 1; i__3 = i__ - 1; dgemv_("Transpose", &i__2, &i__3, &c_b15, &a[i__ + a_dim1], lda, &a[i__ + i__ * a_dim1], &c__1, &c_b29, &y[i__ * y_dim1 + 1], &c__1); i__2 = *n - i__; i__3 = i__ - 1; dgemv_("No transpose", &i__2, &i__3, &c_b151, &y[i__ + 1 + y_dim1], ldy, &y[i__ * y_dim1 + 1], &c__1, &c_b15, &y[ i__ + 1 + i__ * y_dim1], &c__1); i__2 = *m - i__ + 1; i__3 = i__ - 1; dgemv_("Transpose", &i__2, &i__3, &c_b15, &x[i__ + x_dim1], ldx, &a[i__ + i__ * a_dim1], &c__1, &c_b29, &y[i__ * y_dim1 + 1], &c__1); i__2 = i__ - 1; i__3 = *n - i__; dgemv_("Transpose", &i__2, &i__3, &c_b151, &a[(i__ + 1) * a_dim1 + 1], lda, &y[i__ * y_dim1 + 1], &c__1, &c_b15, &y[i__ + 1 + i__ * y_dim1], &c__1); i__2 = *n - i__; dscal_(&i__2, &tauq[i__], &y[i__ + 1 + i__ * y_dim1], &c__1); /* Update A(i,i+1:n) */ i__2 = *n - i__; dgemv_("No transpose", &i__2, &i__, &c_b151, &y[i__ + 1 + y_dim1], ldy, &a[i__ + a_dim1], lda, &c_b15, &a[i__ + (i__ + 1) * a_dim1], lda); i__2 = i__ - 1; i__3 = *n - i__; dgemv_("Transpose", &i__2, &i__3, &c_b151, &a[(i__ + 1) * a_dim1 + 1], lda, &x[i__ + x_dim1], ldx, &c_b15, &a[ i__ + (i__ + 1) * a_dim1], lda); /* Generate reflection P(i) to annihilate A(i,i+2:n) */ i__2 = *n - i__; /* Computing MIN */ i__3 = i__ + 2; dlarfg_(&i__2, &a[i__ + (i__ + 1) * a_dim1], &a[i__ + min( i__3,*n) * a_dim1], lda, &taup[i__]); e[i__] = a[i__ + (i__ + 1) * a_dim1]; a[i__ + (i__ + 1) * a_dim1] = 1.; /* Compute X(i+1:m,i) */ i__2 = *m - i__; i__3 = *n - i__; dgemv_("No transpose", &i__2, &i__3, &c_b15, &a[i__ + 1 + ( i__ + 1) * a_dim1], lda, &a[i__ + (i__ + 1) * a_dim1], lda, &c_b29, &x[i__ + 1 + i__ * x_dim1], &c__1); i__2 = *n - i__; dgemv_("Transpose", &i__2, &i__, &c_b15, &y[i__ + 1 + y_dim1], ldy, &a[i__ + (i__ + 1) * a_dim1], lda, &c_b29, &x[ i__ * x_dim1 + 1], &c__1); i__2 = *m - i__; dgemv_("No transpose", &i__2, &i__, &c_b151, &a[i__ + 1 + a_dim1], lda, &x[i__ * x_dim1 + 1], &c__1, &c_b15, &x[ i__ + 1 + i__ * x_dim1], &c__1); i__2 = i__ - 1; i__3 = *n - i__; dgemv_("No transpose", &i__2, &i__3, &c_b15, &a[(i__ + 1) * a_dim1 + 1], lda, &a[i__ + (i__ + 1) * a_dim1], lda, & c_b29, &x[i__ * x_dim1 + 1], &c__1); i__2 = *m - i__; i__3 = i__ - 1; dgemv_("No transpose", 
&i__2, &i__3, &c_b151, &x[i__ + 1 + x_dim1], ldx, &x[i__ * x_dim1 + 1], &c__1, &c_b15, &x[ i__ + 1 + i__ * x_dim1], &c__1); i__2 = *m - i__; dscal_(&i__2, &taup[i__], &x[i__ + 1 + i__ * x_dim1], &c__1); } /* L10: */ } } else { /* Reduce to lower bidiagonal form */ i__1 = *nb; for (i__ = 1; i__ <= i__1; ++i__) { /* Update A(i,i:n) */ i__2 = *n - i__ + 1; i__3 = i__ - 1; dgemv_("No transpose", &i__2, &i__3, &c_b151, &y[i__ + y_dim1], ldy, &a[i__ + a_dim1], lda, &c_b15, &a[i__ + i__ * a_dim1] , lda); i__2 = i__ - 1; i__3 = *n - i__ + 1; dgemv_("Transpose", &i__2, &i__3, &c_b151, &a[i__ * a_dim1 + 1], lda, &x[i__ + x_dim1], ldx, &c_b15, &a[i__ + i__ * a_dim1] , lda); /* Generate reflection P(i) to annihilate A(i,i+1:n) */ i__2 = *n - i__ + 1; /* Computing MIN */ i__3 = i__ + 1; dlarfg_(&i__2, &a[i__ + i__ * a_dim1], &a[i__ + min(i__3,*n) * a_dim1], lda, &taup[i__]); d__[i__] = a[i__ + i__ * a_dim1]; if (i__ < *m) { a[i__ + i__ * a_dim1] = 1.; /* Compute X(i+1:m,i) */ i__2 = *m - i__; i__3 = *n - i__ + 1; dgemv_("No transpose", &i__2, &i__3, &c_b15, &a[i__ + 1 + i__ * a_dim1], lda, &a[i__ + i__ * a_dim1], lda, &c_b29, & x[i__ + 1 + i__ * x_dim1], &c__1); i__2 = *n - i__ + 1; i__3 = i__ - 1; dgemv_("Transpose", &i__2, &i__3, &c_b15, &y[i__ + y_dim1], ldy, &a[i__ + i__ * a_dim1], lda, &c_b29, &x[i__ * x_dim1 + 1], &c__1); i__2 = *m - i__; i__3 = i__ - 1; dgemv_("No transpose", &i__2, &i__3, &c_b151, &a[i__ + 1 + a_dim1], lda, &x[i__ * x_dim1 + 1], &c__1, &c_b15, &x[ i__ + 1 + i__ * x_dim1], &c__1); i__2 = i__ - 1; i__3 = *n - i__ + 1; dgemv_("No transpose", &i__2, &i__3, &c_b15, &a[i__ * a_dim1 + 1], lda, &a[i__ + i__ * a_dim1], lda, &c_b29, &x[ i__ * x_dim1 + 1], &c__1); i__2 = *m - i__; i__3 = i__ - 1; dgemv_("No transpose", &i__2, &i__3, &c_b151, &x[i__ + 1 + x_dim1], ldx, &x[i__ * x_dim1 + 1], &c__1, &c_b15, &x[ i__ + 1 + i__ * x_dim1], &c__1); i__2 = *m - i__; dscal_(&i__2, &taup[i__], &x[i__ + 1 + i__ * x_dim1], &c__1); /* Update A(i+1:m,i) */ i__2 = *m - i__; i__3 = i__ - 1; dgemv_("No transpose", &i__2, &i__3, &c_b151, &a[i__ + 1 + a_dim1], lda, &y[i__ + y_dim1], ldy, &c_b15, &a[i__ + 1 + i__ * a_dim1], &c__1); i__2 = *m - i__; dgemv_("No transpose", &i__2, &i__, &c_b151, &x[i__ + 1 + x_dim1], ldx, &a[i__ * a_dim1 + 1], &c__1, &c_b15, &a[ i__ + 1 + i__ * a_dim1], &c__1); /* Generate reflection Q(i) to annihilate A(i+2:m,i) */ i__2 = *m - i__; /* Computing MIN */ i__3 = i__ + 2; dlarfg_(&i__2, &a[i__ + 1 + i__ * a_dim1], &a[min(i__3,*m) + i__ * a_dim1], &c__1, &tauq[i__]); e[i__] = a[i__ + 1 + i__ * a_dim1]; a[i__ + 1 + i__ * a_dim1] = 1.; /* Compute Y(i+1:n,i) */ i__2 = *m - i__; i__3 = *n - i__; dgemv_("Transpose", &i__2, &i__3, &c_b15, &a[i__ + 1 + (i__ + 1) * a_dim1], lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &c_b29, &y[i__ + 1 + i__ * y_dim1], &c__1); i__2 = *m - i__; i__3 = i__ - 1; dgemv_("Transpose", &i__2, &i__3, &c_b15, &a[i__ + 1 + a_dim1] , lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &c_b29, &y[ i__ * y_dim1 + 1], &c__1); i__2 = *n - i__; i__3 = i__ - 1; dgemv_("No transpose", &i__2, &i__3, &c_b151, &y[i__ + 1 + y_dim1], ldy, &y[i__ * y_dim1 + 1], &c__1, &c_b15, &y[ i__ + 1 + i__ * y_dim1], &c__1); i__2 = *m - i__; dgemv_("Transpose", &i__2, &i__, &c_b15, &x[i__ + 1 + x_dim1], ldx, &a[i__ + 1 + i__ * a_dim1], &c__1, &c_b29, &y[ i__ * y_dim1 + 1], &c__1); i__2 = *n - i__; dgemv_("Transpose", &i__, &i__2, &c_b151, &a[(i__ + 1) * a_dim1 + 1], lda, &y[i__ * y_dim1 + 1], &c__1, &c_b15, &y[i__ + 1 + i__ * y_dim1], &c__1); i__2 = *n - i__; dscal_(&i__2, &tauq[i__], &y[i__ + 1 + i__ * 
y_dim1], &c__1); } /* L20: */ } } return 0; /* End of DLABRD */ } /* dlabrd_ */ /* Subroutine */ int dlacpy_(char *uplo, integer *m, integer *n, doublereal * a, integer *lda, doublereal *b, integer *ldb) { /* System generated locals */ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2; /* Local variables */ static integer i__, j; extern logical lsame_(char *, char *); /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLACPY copies all or part of a two-dimensional matrix A to another matrix B. Arguments ========= UPLO (input) CHARACTER*1 Specifies the part of the matrix A to be copied to B. = 'U': Upper triangular part = 'L': Lower triangular part Otherwise: All of the matrix A M (input) INTEGER The number of rows of the matrix A. M >= 0. N (input) INTEGER The number of columns of the matrix A. N >= 0. A (input) DOUBLE PRECISION array, dimension (LDA,N) The m by n matrix A. If UPLO = 'U', only the upper triangle or trapezoid is accessed; if UPLO = 'L', only the lower triangle or trapezoid is accessed. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,M). B (output) DOUBLE PRECISION array, dimension (LDB,N) On exit, B = A in the locations specified by UPLO. LDB (input) INTEGER The leading dimension of the array B. LDB >= max(1,M). ===================================================================== */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; b_dim1 = *ldb; b_offset = 1 + b_dim1 * 1; b -= b_offset; /* Function Body */ if (lsame_(uplo, "U")) { i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = min(j,*m); for (i__ = 1; i__ <= i__2; ++i__) { b[i__ + j * b_dim1] = a[i__ + j * a_dim1]; /* L10: */ } /* L20: */ } } else if (lsame_(uplo, "L")) { i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = *m; for (i__ = j; i__ <= i__2; ++i__) { b[i__ + j * b_dim1] = a[i__ + j * a_dim1]; /* L30: */ } /* L40: */ } } else { i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { b[i__ + j * b_dim1] = a[i__ + j * a_dim1]; /* L50: */ } /* L60: */ } } return 0; /* End of DLACPY */ } /* dlacpy_ */ /* Subroutine */ int dladiv_(doublereal *a, doublereal *b, doublereal *c__, doublereal *d__, doublereal *p, doublereal *q) { static doublereal e, f; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLADIV performs complex division in real arithmetic a + i*b p + i*q = --------- c + i*d The algorithm is due to Robert L. Smith and can be found in D. Knuth, The art of Computer Programming, Vol.2, p.195 Arguments ========= A (input) DOUBLE PRECISION B (input) DOUBLE PRECISION C (input) DOUBLE PRECISION D (input) DOUBLE PRECISION The scalars a, b, c, and d in the above expression. P (output) DOUBLE PRECISION Q (output) DOUBLE PRECISION The scalars p and q in the above expression. 
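For example, a = 1, b = 2, c = 3, d = 4 (that is, (1 + 2i)/(3 + 4i)): since ABS(D) >= ABS(C), e = c/d = 0.75 and f = d + c*e = 6.25, giving p = (b + a*e)/f = 0.44 and q = (-a + b*e)/f = 0.08, which matches the exact quotient (11 + 2i)/25.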
===================================================================== */ if (abs(*d__) < abs(*c__)) { e = *d__ / *c__; f = *c__ + *d__ * e; *p = (*a + *b * e) / f; *q = (*b - *a * e) / f; } else { e = *c__ / *d__; f = *d__ + *c__ * e; *p = (*b + *a * e) / f; *q = (-(*a) + *b * e) / f; } return 0; /* End of DLADIV */ } /* dladiv_ */ /* Subroutine */ int dlae2_(doublereal *a, doublereal *b, doublereal *c__, doublereal *rt1, doublereal *rt2) { /* System generated locals */ doublereal d__1; /* Builtin functions */ double sqrt(doublereal); /* Local variables */ static doublereal acmn, acmx, ab, df, tb, sm, rt, adf; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLAE2 computes the eigenvalues of a 2-by-2 symmetric matrix [ A B ] [ B C ]. On return, RT1 is the eigenvalue of larger absolute value, and RT2 is the eigenvalue of smaller absolute value. Arguments ========= A (input) DOUBLE PRECISION The (1,1) element of the 2-by-2 matrix. B (input) DOUBLE PRECISION The (1,2) and (2,1) elements of the 2-by-2 matrix. C (input) DOUBLE PRECISION The (2,2) element of the 2-by-2 matrix. RT1 (output) DOUBLE PRECISION The eigenvalue of larger absolute value. RT2 (output) DOUBLE PRECISION The eigenvalue of smaller absolute value. Further Details =============== RT1 is accurate to a few ulps barring over/underflow. RT2 may be inaccurate if there is massive cancellation in the determinant A*C-B*B; higher precision or correctly rounded or correctly truncated arithmetic would be needed to compute RT2 accurately in all cases. Overflow is possible only if RT1 is within a factor of 5 of overflow. Underflow is harmless if the input data is 0 or exceeds underflow_threshold / macheps. ===================================================================== Compute the eigenvalues */ sm = *a + *c__; df = *a - *c__; adf = abs(df); tb = *b + *b; ab = abs(tb); if (abs(*a) > abs(*c__)) { acmx = *a; acmn = *c__; } else { acmx = *c__; acmn = *a; } if (adf > ab) { /* Computing 2nd power */ d__1 = ab / adf; rt = adf * sqrt(d__1 * d__1 + 1.); } else if (adf < ab) { /* Computing 2nd power */ d__1 = adf / ab; rt = ab * sqrt(d__1 * d__1 + 1.); } else { /* Includes case AB=ADF=0 */ rt = ab * sqrt(2.); } if (sm < 0.) { *rt1 = (sm - rt) * .5; /* Order of execution important. To get fully accurate smaller eigenvalue, next line needs to be executed in higher precision. */ *rt2 = acmx / *rt1 * acmn - *b / *rt1 * *b; } else if (sm > 0.) { *rt1 = (sm + rt) * .5; /* Order of execution important. To get fully accurate smaller eigenvalue, next line needs to be executed in higher precision. 
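The grouping ACMX / RT1 * ACMN - B / RT1 * B evaluates RT2 = (A*C - B*B) / RT1 while dividing by RT1 first, so neither A*C nor B*B is formed explicitly; either product could overflow even when RT2 itself is representable.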
*/ *rt2 = acmx / *rt1 * acmn - *b / *rt1 * *b; } else { /* Includes case RT1 = RT2 = 0 */ *rt1 = rt * .5; *rt2 = rt * -.5; } return 0; /* End of DLAE2 */ } /* dlae2_ */ /* Subroutine */ int dlaed0_(integer *icompq, integer *qsiz, integer *n, doublereal *d__, doublereal *e, doublereal *q, integer *ldq, doublereal *qstore, integer *ldqs, doublereal *work, integer *iwork, integer *info) { /* System generated locals */ integer q_dim1, q_offset, qstore_dim1, qstore_offset, i__1, i__2; doublereal d__1; /* Builtin functions */ double log(doublereal); integer pow_ii(integer *, integer *); /* Local variables */ static doublereal temp; static integer curr, i__, j, k; extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *); static integer iperm; extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *, doublereal *, integer *); static integer indxq, iwrem; extern /* Subroutine */ int dlaed1_(integer *, doublereal *, doublereal *, integer *, integer *, doublereal *, integer *, doublereal *, integer *, integer *); static integer iqptr; extern /* Subroutine */ int dlaed7_(integer *, integer *, integer *, integer *, integer *, integer *, doublereal *, doublereal *, integer *, integer *, doublereal *, integer *, doublereal *, integer *, integer *, integer *, integer *, integer *, doublereal *, doublereal *, integer *, integer *); static integer tlvls, iq; extern /* Subroutine */ int dlacpy_(char *, integer *, integer *, doublereal *, integer *, doublereal *, integer *); static integer igivcl; extern /* Subroutine */ int xerbla_(char *, integer *); extern integer ilaenv_(integer *, char *, char *, integer *, integer *, integer *, integer *, ftnlen, ftnlen); static integer igivnm, submat, curprb, subpbs, igivpt; extern /* Subroutine */ int dsteqr_(char *, integer *, doublereal *, doublereal *, doublereal *, integer *, doublereal *, integer *); static integer curlvl, matsiz, iprmpt, smlsiz, lgn, msd2, smm1, spm1, spm2; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLAED0 computes all eigenvalues and corresponding eigenvectors of a symmetric tridiagonal matrix using the divide and conquer method. Arguments ========= ICOMPQ (input) INTEGER = 0: Compute eigenvalues only. = 1: Compute eigenvectors of original dense symmetric matrix also. On entry, Q contains the orthogonal matrix used to reduce the original matrix to tridiagonal form. = 2: Compute eigenvalues and eigenvectors of tridiagonal matrix. QSIZ (input) INTEGER The dimension of the orthogonal matrix used to reduce the full matrix to tridiagonal form. QSIZ >= N if ICOMPQ = 1. N (input) INTEGER The dimension of the symmetric tridiagonal matrix. N >= 0. D (input/output) DOUBLE PRECISION array, dimension (N) On entry, the main diagonal of the tridiagonal matrix. On exit, its eigenvalues. E (input) DOUBLE PRECISION array, dimension (N-1) The off-diagonal elements of the tridiagonal matrix. On exit, E has been destroyed. Q (input/output) DOUBLE PRECISION array, dimension (LDQ, N) On entry, Q must contain an N-by-N orthogonal matrix. If ICOMPQ = 0 Q is not referenced. If ICOMPQ = 1 On entry, Q is a subset of the columns of the orthogonal matrix used to reduce the full matrix to tridiagonal form corresponding to the subset of the full matrix which is being decomposed at this time. If ICOMPQ = 2 On entry, Q will be the identity matrix. 
On exit, Q contains the eigenvectors of the tridiagonal matrix. LDQ (input) INTEGER The leading dimension of the array Q. If eigenvectors are desired, then LDQ >= max(1,N). In any case, LDQ >= 1. QSTORE (workspace) DOUBLE PRECISION array, dimension (LDQS, N) Referenced only when ICOMPQ = 1. Used to store parts of the eigenvector matrix when the updating matrix multiplies take place. LDQS (input) INTEGER The leading dimension of the array QSTORE. If ICOMPQ = 1, then LDQS >= max(1,N). In any case, LDQS >= 1. WORK (workspace) DOUBLE PRECISION array, If ICOMPQ = 0 or 1, the dimension of WORK must be at least 1 + 3*N + 2*N*lg N + 2*N**2 ( lg( N ) = smallest integer k such that 2^k >= N ) If ICOMPQ = 2, the dimension of WORK must be at least 4*N + N**2. IWORK (workspace) INTEGER array, If ICOMPQ = 0 or 1, the dimension of IWORK must be at least 6 + 6*N + 5*N*lg N. ( lg( N ) = smallest integer k such that 2^k >= N ) If ICOMPQ = 2, the dimension of IWORK must be at least 3 + 5*N. INFO (output) INTEGER = 0: successful exit. < 0: if INFO = -i, the i-th argument had an illegal value. > 0: The algorithm failed to compute an eigenvalue while working on the submatrix lying in rows and columns INFO/(N+1) through mod(INFO,N+1). Further Details =============== Based on contributions by Jeff Rutter, Computer Science Division, University of California at Berkeley, USA ===================================================================== Test the input parameters. */ /* Parameter adjustments */ --d__; --e; q_dim1 = *ldq; q_offset = 1 + q_dim1 * 1; q -= q_offset; qstore_dim1 = *ldqs; qstore_offset = 1 + qstore_dim1 * 1; qstore -= qstore_offset; --work; --iwork; /* Function Body */ *info = 0; if (*icompq < 0 || *icompq > 2) { *info = -1; } else if (*icompq == 1 && *qsiz < max(0,*n)) { *info = -2; } else if (*n < 0) { *info = -3; } else if (*ldq < max(1,*n)) { *info = -7; } else if (*ldqs < max(1,*n)) { *info = -9; } if (*info != 0) { i__1 = -(*info); xerbla_("DLAED0", &i__1); return 0; } /* Quick return if possible */ if (*n == 0) { return 0; } smlsiz = ilaenv_(&c__9, "DLAED0", " ", &c__0, &c__0, &c__0, &c__0, ( ftnlen)6, (ftnlen)1); /* Determine the size and placement of the submatrices, and save in the leading elements of IWORK. */ iwork[1] = *n; subpbs = 1; tlvls = 0; L10: if (iwork[subpbs] > smlsiz) { for (j = subpbs; j >= 1; --j) { iwork[j * 2] = (iwork[j] + 1) / 2; iwork[(j << 1) - 1] = iwork[j] / 2; /* L20: */ } ++tlvls; subpbs <<= 1; goto L10; } i__1 = subpbs; for (j = 2; j <= i__1; ++j) { iwork[j] += iwork[j - 1]; /* L30: */ } /* Divide the matrix into SUBPBS submatrices of size at most SMLSIZ+1 using rank-1 modifications (cuts). 
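Each cut subtracts ABS(E(i)) from the two diagonal entries it separates, so the original tridiagonal matrix equals the resulting block-diagonal matrix plus a rank-one correction ABS(E(i))*u*u', where u is zero except for a 1 and SIGN(1,E(i)) in the two rows adjacent to the cut.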
*/ spm1 = subpbs - 1; i__1 = spm1; for (i__ = 1; i__ <= i__1; ++i__) { submat = iwork[i__] + 1; smm1 = submat - 1; d__[smm1] -= (d__1 = e[smm1], abs(d__1)); d__[submat] -= (d__1 = e[smm1], abs(d__1)); /* L40: */ } indxq = (*n << 2) + 3; if (*icompq != 2) { /* Set up workspaces for eigenvalues only/accumulate new vectors routine */ temp = log((doublereal) (*n)) / log(2.); lgn = (integer) temp; if (pow_ii(&c__2, &lgn) < *n) { ++lgn; } if (pow_ii(&c__2, &lgn) < *n) { ++lgn; } iprmpt = indxq + *n + 1; iperm = iprmpt + *n * lgn; iqptr = iperm + *n * lgn; igivpt = iqptr + *n + 2; igivcl = igivpt + *n * lgn; igivnm = 1; iq = igivnm + (*n << 1) * lgn; /* Computing 2nd power */ i__1 = *n; iwrem = iq + i__1 * i__1 + 1; /* Initialize pointers */ i__1 = subpbs; for (i__ = 0; i__ <= i__1; ++i__) { iwork[iprmpt + i__] = 1; iwork[igivpt + i__] = 1; /* L50: */ } iwork[iqptr] = 1; } /* Solve each submatrix eigenproblem at the bottom of the divide and conquer tree. */ curr = 0; i__1 = spm1; for (i__ = 0; i__ <= i__1; ++i__) { if (i__ == 0) { submat = 1; matsiz = iwork[1]; } else { submat = iwork[i__] + 1; matsiz = iwork[i__ + 1] - iwork[i__]; } if (*icompq == 2) { dsteqr_("I", &matsiz, &d__[submat], &e[submat], &q[submat + submat * q_dim1], ldq, &work[1], info); if (*info != 0) { goto L130; } } else { dsteqr_("I", &matsiz, &d__[submat], &e[submat], &work[iq - 1 + iwork[iqptr + curr]], &matsiz, &work[1], info); if (*info != 0) { goto L130; } if (*icompq == 1) { dgemm_("N", "N", qsiz, &matsiz, &matsiz, &c_b15, &q[submat * q_dim1 + 1], ldq, &work[iq - 1 + iwork[iqptr + curr]], &matsiz, &c_b29, &qstore[submat * qstore_dim1 + 1], ldqs); } /* Computing 2nd power */ i__2 = matsiz; iwork[iqptr + curr + 1] = iwork[iqptr + curr] + i__2 * i__2; ++curr; } k = 1; i__2 = iwork[i__ + 1]; for (j = submat; j <= i__2; ++j) { iwork[indxq + j] = k; ++k; /* L60: */ } /* L70: */ } /* Successively merge eigensystems of adjacent submatrices into eigensystem for the corresponding larger matrix. while ( SUBPBS > 1 ) */ curlvl = 1; L80: if (subpbs > 1) { spm2 = subpbs - 2; i__1 = spm2; for (i__ = 0; i__ <= i__1; i__ += 2) { if (i__ == 0) { submat = 1; matsiz = iwork[2]; msd2 = iwork[1]; curprb = 0; } else { submat = iwork[i__] + 1; matsiz = iwork[i__ + 2] - iwork[i__]; msd2 = matsiz / 2; ++curprb; } /* Merge lower order eigensystems (of size MSD2 and MATSIZ - MSD2) into an eigensystem of size MATSIZ. DLAED1 is used only for the full eigensystem of a tridiagonal matrix. DLAED7 handles the cases in which eigenvalues only or eigenvalues and eigenvectors of a full symmetric matrix (which was reduced to tridiagonal form) are desired. */ if (*icompq == 2) { dlaed1_(&matsiz, &d__[submat], &q[submat + submat * q_dim1], ldq, &iwork[indxq + submat], &e[submat + msd2 - 1], & msd2, &work[1], &iwork[subpbs + 1], info); } else { dlaed7_(icompq, &matsiz, qsiz, &tlvls, &curlvl, &curprb, &d__[ submat], &qstore[submat * qstore_dim1 + 1], ldqs, & iwork[indxq + submat], &e[submat + msd2 - 1], &msd2, & work[iq], &iwork[iqptr], &iwork[iprmpt], &iwork[iperm] , &iwork[igivpt], &iwork[igivcl], &work[igivnm], & work[iwrem], &iwork[subpbs + 1], info); } if (*info != 0) { goto L130; } iwork[i__ / 2 + 1] = iwork[i__ + 2]; /* L90: */ } subpbs /= 2; ++curlvl; goto L80; } /* end while Re-merge the eigenvalues/vectors which were deflated at the final merge step. 
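On completion of the merge loop, IWORK(INDXQ+I), I = 1..N, is a permutation such that D(IWORK(INDXQ+I)) is in ascending order; the loops below gather the eigenvalues (and, when ICOMPQ = 1 or 2, the corresponding eigenvector columns) through this permutation into their final sorted positions.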
*/ if (*icompq == 1) { i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { j = iwork[indxq + i__]; work[i__] = d__[j]; dcopy_(qsiz, &qstore[j * qstore_dim1 + 1], &c__1, &q[i__ * q_dim1 + 1], &c__1); /* L100: */ } dcopy_(n, &work[1], &c__1, &d__[1], &c__1); } else if (*icompq == 2) { i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { j = iwork[indxq + i__]; work[i__] = d__[j]; dcopy_(n, &q[j * q_dim1 + 1], &c__1, &work[*n * i__ + 1], &c__1); /* L110: */ } dcopy_(n, &work[1], &c__1, &d__[1], &c__1); dlacpy_("A", n, n, &work[*n + 1], n, &q[q_offset], ldq); } else { i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { j = iwork[indxq + i__]; work[i__] = d__[j]; /* L120: */ } dcopy_(n, &work[1], &c__1, &d__[1], &c__1); } goto L140; L130: *info = submat * (*n + 1) + submat + matsiz - 1; L140: return 0; /* End of DLAED0 */ } /* dlaed0_ */ /* Subroutine */ int dlaed1_(integer *n, doublereal *d__, doublereal *q, integer *ldq, integer *indxq, doublereal *rho, integer *cutpnt, doublereal *work, integer *iwork, integer *info) { /* System generated locals */ integer q_dim1, q_offset, i__1, i__2; /* Local variables */ static integer indx, i__, k, indxc; extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *, doublereal *, integer *); static integer indxp; extern /* Subroutine */ int dlaed2_(integer *, integer *, integer *, doublereal *, doublereal *, integer *, integer *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, integer *, integer *, integer *, integer *, integer *), dlaed3_(integer *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, doublereal *, doublereal *, integer *, integer *, doublereal *, doublereal *, integer *); static integer n1, n2, idlmda, is, iw, iz; extern /* Subroutine */ int dlamrg_(integer *, integer *, doublereal *, integer *, integer *, integer *), xerbla_(char *, integer *); static integer coltyp, iq2, zpp1; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLAED1 computes the updated eigensystem of a diagonal matrix after modification by a rank-one symmetric matrix. This routine is used only for the eigenproblem which requires all eigenvalues and eigenvectors of a tridiagonal matrix. DLAED7 handles the case in which eigenvalues only or eigenvalues and eigenvectors of a full symmetric matrix (which was reduced to tridiagonal form) are desired. T = Q(in) ( D(in) + RHO * Z*Z' ) Q'(in) = Q(out) * D(out) * Q'(out) where Z = Q'u, u is a vector of length N with ones in the CUTPNT and CUTPNT + 1 th elements and zeros elsewhere. The eigenvectors of the original matrix are stored in Q, and the eigenvalues are in D. The algorithm consists of three stages: The first stage consists of deflating the size of the problem when there are multiple eigenvalues or if there is a zero in the Z vector. For each such occurence the dimension of the secular equation problem is reduced by one. This stage is performed by the routine DLAED2. The second stage consists of calculating the updated eigenvalues. This is done by finding the roots of the secular equation via the routine DLAED4 (as called by DLAED3). This routine also calculates the eigenvectors of the current problem. The final stage consists of computing the updated eigenvectors directly using the updated eigenvalues. The eigenvectors for the current problem are multiplied with the eigenvectors from the overall problem. Arguments ========= N (input) INTEGER The dimension of the symmetric tridiagonal matrix. N >= 0. 
D (input/output) DOUBLE PRECISION array, dimension (N) On entry, the eigenvalues of the rank-1-perturbed matrix. On exit, the eigenvalues of the repaired matrix. Q (input/output) DOUBLE PRECISION array, dimension (LDQ,N) On entry, the eigenvectors of the rank-1-perturbed matrix. On exit, the eigenvectors of the repaired tridiagonal matrix. LDQ (input) INTEGER The leading dimension of the array Q. LDQ >= max(1,N). INDXQ (input/output) INTEGER array, dimension (N) On entry, the permutation which separately sorts the two subproblems in D into ascending order. On exit, the permutation which will reintegrate the subproblems back into sorted order, i.e. D( INDXQ( I = 1, N ) ) will be in ascending order. RHO (input) DOUBLE PRECISION The subdiagonal entry used to create the rank-1 modification. CUTPNT (input) INTEGER The location of the last eigenvalue in the leading sub-matrix. min(1,N) <= CUTPNT <= N/2. WORK (workspace) DOUBLE PRECISION array, dimension (4*N + N**2) IWORK (workspace) INTEGER array, dimension (4*N) INFO (output) INTEGER = 0: successful exit. < 0: if INFO = -i, the i-th argument had an illegal value. > 0: if INFO = 1, an eigenvalue did not converge Further Details =============== Based on contributions by Jeff Rutter, Computer Science Division, University of California at Berkeley, USA Modified by Francoise Tisseur, University of Tennessee. ===================================================================== Test the input parameters. */ /* Parameter adjustments */ --d__; q_dim1 = *ldq; q_offset = 1 + q_dim1 * 1; q -= q_offset; --indxq; --work; --iwork; /* Function Body */ *info = 0; if (*n < 0) { *info = -1; } else if (*ldq < max(1,*n)) { *info = -4; } else /* if(complicated condition) */ { /* Computing MIN */ i__1 = 1, i__2 = *n / 2; if (min(i__1,i__2) > *cutpnt || *n / 2 < *cutpnt) { *info = -7; } } if (*info != 0) { i__1 = -(*info); xerbla_("DLAED1", &i__1); return 0; } /* Quick return if possible */ if (*n == 0) { return 0; } /* The following values are integer pointers which indicate the portion of the workspace used by a particular array in DLAED2 and DLAED3. */ iz = 1; idlmda = iz + *n; iw = idlmda + *n; iq2 = iw + *n; indx = 1; indxc = indx + *n; coltyp = indxc + *n; indxp = coltyp + *n; /* Form the z-vector which consists of the last row of Q_1 and the first row of Q_2. */ dcopy_(cutpnt, &q[*cutpnt + q_dim1], ldq, &work[iz], &c__1); zpp1 = *cutpnt + 1; i__1 = *n - *cutpnt; dcopy_(&i__1, &q[zpp1 + zpp1 * q_dim1], ldq, &work[iz + *cutpnt], &c__1); /* Deflate eigenvalues. */ dlaed2_(&k, n, cutpnt, &d__[1], &q[q_offset], ldq, &indxq[1], rho, &work[ iz], &work[idlmda], &work[iw], &work[iq2], &iwork[indx], &iwork[ indxc], &iwork[indxp], &iwork[coltyp], info); if (*info != 0) { goto L20; } /* Solve Secular Equation. */ if (k != 0) { is = (iwork[coltyp] + iwork[coltyp + 1]) * *cutpnt + (iwork[coltyp + 1] + iwork[coltyp + 2]) * (*n - *cutpnt) + iq2; dlaed3_(&k, n, cutpnt, &d__[1], &q[q_offset], ldq, rho, &work[idlmda], &work[iq2], &iwork[indxc], &iwork[coltyp], &work[iw], &work[ is], info); if (*info != 0) { goto L20; } /* Prepare the INDXQ sorting permutation. 
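DLAMRG merges the K secular-equation roots in D(1:K) with the N-K deflated eigenvalues in D(K+1:N) and returns in INDXQ the permutation that places all N eigenvalues in ascending order, i.e. D(INDXQ(I)) is ascending.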
*/ n1 = k; n2 = *n - k; dlamrg_(&n1, &n2, &d__[1], &c__1, &c_n1, &indxq[1]); } else { i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { indxq[i__] = i__; /* L10: */ } } L20: return 0; /* End of DLAED1 */ } /* dlaed1_ */ /* Subroutine */ int dlaed2_(integer *k, integer *n, integer *n1, doublereal * d__, doublereal *q, integer *ldq, integer *indxq, doublereal *rho, doublereal *z__, doublereal *dlamda, doublereal *w, doublereal *q2, integer *indx, integer *indxc, integer *indxp, integer *coltyp, integer *info) { /* System generated locals */ integer q_dim1, q_offset, i__1, i__2; doublereal d__1, d__2, d__3, d__4; /* Builtin functions */ double sqrt(doublereal); /* Local variables */ static integer imax, jmax; extern /* Subroutine */ int drot_(integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *); static integer ctot[4]; static doublereal c__; static integer i__, j; static doublereal s, t; extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, integer *), dcopy_(integer *, doublereal *, integer *, doublereal *, integer *); static integer k2, n2; extern doublereal dlapy2_(doublereal *, doublereal *); static integer ct, nj; static integer pj, js; extern integer idamax_(integer *, doublereal *, integer *); extern /* Subroutine */ int dlamrg_(integer *, integer *, doublereal *, integer *, integer *, integer *), dlacpy_(char *, integer *, integer *, doublereal *, integer *, doublereal *, integer *), xerbla_(char *, integer *); static integer iq1, iq2, n1p1; static doublereal eps, tau, tol; static integer psm[4]; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLAED2 merges the two sets of eigenvalues together into a single sorted set. Then it tries to deflate the size of the problem. There are two ways in which deflation can occur: when two or more eigenvalues are close together or if there is a tiny entry in the Z vector. For each such occurrence the order of the related secular equation problem is reduced by one. Arguments ========= K (output) INTEGER The number of non-deflated eigenvalues, and the order of the related secular equation. 0 <= K <=N. N (input) INTEGER The dimension of the symmetric tridiagonal matrix. N >= 0. N1 (input) INTEGER The location of the last eigenvalue in the leading sub-matrix. min(1,N) <= N1 <= N/2. D (input/output) DOUBLE PRECISION array, dimension (N) On entry, D contains the eigenvalues of the two submatrices to be combined. On exit, D contains the trailing (N-K) updated eigenvalues (those which were deflated) sorted into increasing order. Q (input/output) DOUBLE PRECISION array, dimension (LDQ, N) On entry, Q contains the eigenvectors of two submatrices in the two square blocks with corners at (1,1), (N1,N1) and (N1+1, N1+1), (N,N). On exit, Q contains the trailing (N-K) updated eigenvectors (those which were deflated) in its last N-K columns. LDQ (input) INTEGER The leading dimension of the array Q. LDQ >= max(1,N). INDXQ (input/output) INTEGER array, dimension (N) The permutation which separately sorts the two sub-problems in D into ascending order. Note that elements in the second half of this permutation must first have N1 added to their values. Destroyed on exit. RHO (input/output) DOUBLE PRECISION On entry, the off-diagonal element associated with the rank-1 cut which originally split the two submatrices which are now being recombined. On exit, RHO has been modified to the value required by DLAED3. 
Z (input) DOUBLE PRECISION array, dimension (N) On entry, Z contains the updating vector (the last row of the first sub-eigenvector matrix and the first row of the second sub-eigenvector matrix). On exit, the contents of Z have been destroyed by the updating process. DLAMDA (output) DOUBLE PRECISION array, dimension (N) A copy of the first K eigenvalues which will be used by DLAED3 to form the secular equation. W (output) DOUBLE PRECISION array, dimension (N) The first k values of the final deflation-altered z-vector which will be passed to DLAED3. Q2 (output) DOUBLE PRECISION array, dimension (N1**2+(N-N1)**2) A copy of the first K eigenvectors which will be used by DLAED3 in a matrix multiply (DGEMM) to solve for the new eigenvectors. INDX (workspace) INTEGER array, dimension (N) The permutation used to sort the contents of DLAMDA into ascending order. INDXC (output) INTEGER array, dimension (N) The permutation used to arrange the columns of the deflated Q matrix into three groups: the first group contains non-zero elements only at and above N1, the second contains non-zero elements only below N1, and the third is dense. INDXP (workspace) INTEGER array, dimension (N) The permutation used to place deflated values of D at the end of the array. INDXP(1:K) points to the nondeflated D-values and INDXP(K+1:N) points to the deflated eigenvalues. COLTYP (workspace/output) INTEGER array, dimension (N) During execution, a label which will indicate which of the following types a column in the Q2 matrix is: 1 : non-zero in the upper half only; 2 : dense; 3 : non-zero in the lower half only; 4 : deflated. On exit, COLTYP(i) is the number of columns of type i, for i=1 to 4 only. INFO (output) INTEGER = 0: successful exit. < 0: if INFO = -i, the i-th argument had an illegal value. Further Details =============== Based on contributions by Jeff Rutter, Computer Science Division, University of California at Berkeley, USA Modified by Francoise Tisseur, University of Tennessee. ===================================================================== Test the input parameters. */ /* Parameter adjustments */ --d__; q_dim1 = *ldq; q_offset = 1 + q_dim1 * 1; q -= q_offset; --indxq; --z__; --dlamda; --w; --q2; --indx; --indxc; --indxp; --coltyp; /* Function Body */ *info = 0; if (*n < 0) { *info = -2; } else if (*ldq < max(1,*n)) { *info = -6; } else /* if(complicated condition) */ { /* Computing MIN */ i__1 = 1, i__2 = *n / 2; if (min(i__1,i__2) > *n1 || *n / 2 < *n1) { *info = -3; } } if (*info != 0) { i__1 = -(*info); xerbla_("DLAED2", &i__1); return 0; } /* Quick return if possible */ if (*n == 0) { return 0; } n2 = *n - *n1; n1p1 = *n1 + 1; if (*rho < 0.) { dscal_(&n2, &c_b151, &z__[n1p1], &c__1); } /* Normalize z so that norm(z) = 1. Since z is the concatenation of two normalized vectors, norm2(z) = sqrt(2). */ t = 1. 
/ sqrt(2.); dscal_(n, &t, &z__[1], &c__1); /* RHO = ABS( norm(z)**2 * RHO ) */ *rho = (d__1 = *rho * 2., abs(d__1)); /* Sort the eigenvalues into increasing order */ i__1 = *n; for (i__ = n1p1; i__ <= i__1; ++i__) { indxq[i__] += *n1; /* L10: */ } /* re-integrate the deflated parts from the last pass */ i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { dlamda[i__] = d__[indxq[i__]]; /* L20: */ } dlamrg_(n1, &n2, &dlamda[1], &c__1, &c__1, &indxc[1]); i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { indx[i__] = indxq[indxc[i__]]; /* L30: */ } /* Calculate the allowable deflation tolerance */ imax = idamax_(n, &z__[1], &c__1); jmax = idamax_(n, &d__[1], &c__1); eps = EPSILON; /* Computing MAX */ d__3 = (d__1 = d__[jmax], abs(d__1)), d__4 = (d__2 = z__[imax], abs(d__2)) ; tol = eps * 8. * max(d__3,d__4); /* If the rank-1 modifier is small enough, no more needs to be done except to reorganize Q so that its columns correspond with the elements in D. */ if (*rho * (d__1 = z__[imax], abs(d__1)) <= tol) { *k = 0; iq2 = 1; i__1 = *n; for (j = 1; j <= i__1; ++j) { i__ = indx[j]; dcopy_(n, &q[i__ * q_dim1 + 1], &c__1, &q2[iq2], &c__1); dlamda[j] = d__[i__]; iq2 += *n; /* L40: */ } dlacpy_("A", n, n, &q2[1], n, &q[q_offset], ldq); dcopy_(n, &dlamda[1], &c__1, &d__[1], &c__1); goto L190; } /* If there are multiple eigenvalues then the problem deflates. Here the number of equal eigenvalues are found. As each equal eigenvalue is found, an elementary reflector is computed to rotate the corresponding eigensubspace so that the corresponding components of Z are zero in this new basis. */ i__1 = *n1; for (i__ = 1; i__ <= i__1; ++i__) { coltyp[i__] = 1; /* L50: */ } i__1 = *n; for (i__ = n1p1; i__ <= i__1; ++i__) { coltyp[i__] = 3; /* L60: */ } *k = 0; k2 = *n + 1; i__1 = *n; for (j = 1; j <= i__1; ++j) { nj = indx[j]; if (*rho * (d__1 = z__[nj], abs(d__1)) <= tol) { /* Deflate due to small z component. */ --k2; coltyp[nj] = 4; indxp[k2] = nj; if (j == *n) { goto L100; } } else { pj = nj; goto L80; } /* L70: */ } L80: ++j; nj = indx[j]; if (j > *n) { goto L100; } if (*rho * (d__1 = z__[nj], abs(d__1)) <= tol) { /* Deflate due to small z component. */ --k2; coltyp[nj] = 4; indxp[k2] = nj; } else { /* Check if eigenvalues are close enough to allow deflation. */ s = z__[pj]; c__ = z__[nj]; /* Find sqrt(a**2+b**2) without overflow or destructive underflow. */ tau = dlapy2_(&c__, &s); t = d__[nj] - d__[pj]; c__ /= tau; s = -s / tau; if ((d__1 = t * c__ * s, abs(d__1)) <= tol) { /* Deflation is possible. */ z__[nj] = tau; z__[pj] = 0.; if (coltyp[nj] != coltyp[pj]) { coltyp[nj] = 2; } coltyp[pj] = 4; drot_(n, &q[pj * q_dim1 + 1], &c__1, &q[nj * q_dim1 + 1], &c__1, & c__, &s); /* Computing 2nd power */ d__1 = c__; /* Computing 2nd power */ d__2 = s; t = d__[pj] * (d__1 * d__1) + d__[nj] * (d__2 * d__2); /* Computing 2nd power */ d__1 = s; /* Computing 2nd power */ d__2 = c__; d__[nj] = d__[pj] * (d__1 * d__1) + d__[nj] * (d__2 * d__2); d__[pj] = t; --k2; i__ = 1; L90: if (k2 + i__ <= *n) { if (d__[pj] < d__[indxp[k2 + i__]]) { indxp[k2 + i__ - 1] = indxp[k2 + i__]; indxp[k2 + i__] = pj; ++i__; goto L90; } else { indxp[k2 + i__ - 1] = pj; } } else { indxp[k2 + i__ - 1] = pj; } pj = nj; } else { ++(*k); dlamda[*k] = d__[pj]; w[*k] = z__[pj]; indxp[*k] = pj; pj = nj; } } goto L80; L100: /* Record the last eigenvalue. 
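   (Annotation, derived from the loop above: a candidate was deflated either
   because RHO*|Z(NJ)| is at most TOL, or because the pair of eigenvalues is
   close enough that

      |( D(NJ) - D(PJ) ) * Z(NJ) * Z(PJ)| / ( Z(NJ)**2 + Z(PJ)**2 ) <= TOL,

   in which case a Givens rotation was applied to zero Z(PJ).  The candidate
   still held in PJ has no partner left to test against, so it is counted as
   non-deflated here.)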
*/ ++(*k); dlamda[*k] = d__[pj]; w[*k] = z__[pj]; indxp[*k] = pj; /* Count up the total number of the various types of columns, then form a permutation which positions the four column types into four uniform groups (although one or more of these groups may be empty). */ for (j = 1; j <= 4; ++j) { ctot[j - 1] = 0; /* L110: */ } i__1 = *n; for (j = 1; j <= i__1; ++j) { ct = coltyp[j]; ++ctot[ct - 1]; /* L120: */ } /* PSM(*) = Position in SubMatrix (of types 1 through 4) */ psm[0] = 1; psm[1] = ctot[0] + 1; psm[2] = psm[1] + ctot[1]; psm[3] = psm[2] + ctot[2]; *k = *n - ctot[3]; /* Fill out the INDXC array so that the permutation which it induces will place all type-1 columns first, all type-2 columns next, then all type-3's, and finally all type-4's. */ i__1 = *n; for (j = 1; j <= i__1; ++j) { js = indxp[j]; ct = coltyp[js]; indx[psm[ct - 1]] = js; indxc[psm[ct - 1]] = j; ++psm[ct - 1]; /* L130: */ } /* Sort the eigenvalues and corresponding eigenvectors into DLAMDA and Q2 respectively. The eigenvalues/vectors which were not deflated go into the first K slots of DLAMDA and Q2 respectively, while those which were deflated go into the last N - K slots. */ i__ = 1; iq1 = 1; iq2 = (ctot[0] + ctot[1]) * *n1 + 1; i__1 = ctot[0]; for (j = 1; j <= i__1; ++j) { js = indx[i__]; dcopy_(n1, &q[js * q_dim1 + 1], &c__1, &q2[iq1], &c__1); z__[i__] = d__[js]; ++i__; iq1 += *n1; /* L140: */ } i__1 = ctot[1]; for (j = 1; j <= i__1; ++j) { js = indx[i__]; dcopy_(n1, &q[js * q_dim1 + 1], &c__1, &q2[iq1], &c__1); dcopy_(&n2, &q[*n1 + 1 + js * q_dim1], &c__1, &q2[iq2], &c__1); z__[i__] = d__[js]; ++i__; iq1 += *n1; iq2 += n2; /* L150: */ } i__1 = ctot[2]; for (j = 1; j <= i__1; ++j) { js = indx[i__]; dcopy_(&n2, &q[*n1 + 1 + js * q_dim1], &c__1, &q2[iq2], &c__1); z__[i__] = d__[js]; ++i__; iq2 += n2; /* L160: */ } iq1 = iq2; i__1 = ctot[3]; for (j = 1; j <= i__1; ++j) { js = indx[i__]; dcopy_(n, &q[js * q_dim1 + 1], &c__1, &q2[iq2], &c__1); iq2 += *n; z__[i__] = d__[js]; ++i__; /* L170: */ } /* The deflated eigenvalues and their corresponding vectors go back into the last N - K slots of D and Q respectively. */ dlacpy_("A", n, &ctot[3], &q2[iq1], n, &q[(*k + 1) * q_dim1 + 1], ldq); i__1 = *n - *k; dcopy_(&i__1, &z__[*k + 1], &c__1, &d__[*k + 1], &c__1); /* Copy CTOT into COLTYP for referencing in DLAED3. 
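   (Annotation: after this copy, COLTYP(1:4) holds the number of columns of
   each type, where type 1 is nonzero only in the top N1 rows, type 2 is
   dense, type 3 is nonzero only in the bottom N-N1 rows, and type 4 is
   deflated.  DLAED3 later forms N12 = CTOT(1)+CTOT(2) and N23 =
   CTOT(2)+CTOT(3) and restricts its two DGEMM calls to those column groups,
   so deflated columns never enter the back-multiplication.)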
*/ for (j = 1; j <= 4; ++j) { coltyp[j] = ctot[j - 1]; /* L180: */ } L190: return 0; /* End of DLAED2 */ } /* dlaed2_ */ /* Subroutine */ int dlaed3_(integer *k, integer *n, integer *n1, doublereal * d__, doublereal *q, integer *ldq, doublereal *rho, doublereal *dlamda, doublereal *q2, integer *indx, integer *ctot, doublereal *w, doublereal *s, integer *info) { /* System generated locals */ integer q_dim1, q_offset, i__1, i__2; doublereal d__1; /* Builtin functions */ double sqrt(doublereal), d_sign(doublereal *, doublereal *); /* Local variables */ static doublereal temp; extern doublereal dnrm2_(integer *, doublereal *, integer *); static integer i__, j; extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *), dcopy_(integer *, doublereal *, integer *, doublereal *, integer *), dlaed4_(integer *, integer *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, integer *); static integer n2; extern doublereal dlamc3_(doublereal *, doublereal *); static integer n12, ii, n23; extern /* Subroutine */ int dlacpy_(char *, integer *, integer *, doublereal *, integer *, doublereal *, integer *), dlaset_(char *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *); static integer iq2; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLAED3 finds the roots of the secular equation, as defined by the values in D, W, and RHO, between 1 and K. It makes the appropriate calls to DLAED4 and then updates the eigenvectors by multiplying the matrix of eigenvectors of the pair of eigensystems being combined by the matrix of eigenvectors of the K-by-K system which is solved here. This code makes very mild assumptions about floating point arithmetic. It will work on machines with a guard digit in add/subtract, or on those binary machines without guard digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or Cray-2. It could conceivably fail on hexadecimal or decimal machines without guard digits, but we know of none. Arguments ========= K (input) INTEGER The number of terms in the rational function to be solved by DLAED4. K >= 0. N (input) INTEGER The number of rows and columns in the Q matrix. N >= K (deflation may result in N>K). N1 (input) INTEGER The location of the last eigenvalue in the leading submatrix. min(1,N) <= N1 <= N/2. D (output) DOUBLE PRECISION array, dimension (N) D(I) contains the updated eigenvalues for 1 <= I <= K. Q (output) DOUBLE PRECISION array, dimension (LDQ,N) Initially the first K columns are used as workspace. On output the columns 1 to K contain the updated eigenvectors. LDQ (input) INTEGER The leading dimension of the array Q. LDQ >= max(1,N). RHO (input) DOUBLE PRECISION The value of the parameter in the rank one update equation. RHO >= 0 required. DLAMDA (input/output) DOUBLE PRECISION array, dimension (K) The first K elements of this array contain the old roots of the deflated updating problem. These are the poles of the secular equation. May be changed on output by having lowest order bit set to zero on Cray X-MP, Cray Y-MP, Cray-2, or Cray C-90, as described above. Q2 (input) DOUBLE PRECISION array, dimension (LDQ2, N) The first K columns of this matrix contain the non-deflated eigenvectors for the split problem. 
INDX (input) INTEGER array, dimension (N) The permutation used to arrange the columns of the deflated Q matrix into three groups (see DLAED2). The rows of the eigenvectors found by DLAED4 must be likewise permuted before the matrix multiply can take place. CTOT (input) INTEGER array, dimension (4) A count of the total number of the various types of columns in Q, as described in INDX. The fourth column type is any column which has been deflated. W (input/output) DOUBLE PRECISION array, dimension (K) The first K elements of this array contain the components of the deflation-adjusted updating vector. Destroyed on output. S (workspace) DOUBLE PRECISION array, dimension (N1 + 1)*K Will contain the eigenvectors of the repaired matrix which will be multiplied by the previously accumulated eigenvectors to update the system. LDS (input) INTEGER The leading dimension of S. LDS >= max(1,K). INFO (output) INTEGER = 0: successful exit. < 0: if INFO = -i, the i-th argument had an illegal value. > 0: if INFO = 1, an eigenvalue did not converge Further Details =============== Based on contributions by Jeff Rutter, Computer Science Division, University of California at Berkeley, USA Modified by Francoise Tisseur, University of Tennessee. ===================================================================== Test the input parameters. */ /* Parameter adjustments */ --d__; q_dim1 = *ldq; q_offset = 1 + q_dim1 * 1; q -= q_offset; --dlamda; --q2; --indx; --ctot; --w; --s; /* Function Body */ *info = 0; if (*k < 0) { *info = -1; } else if (*n < *k) { *info = -2; } else if (*ldq < max(1,*n)) { *info = -6; } if (*info != 0) { i__1 = -(*info); xerbla_("DLAED3", &i__1); return 0; } /* Quick return if possible */ if (*k == 0) { return 0; } /* Modify values DLAMDA(i) to make sure all DLAMDA(i)-DLAMDA(j) can be computed with high relative accuracy (barring over/underflow). This is a problem on machines without a guard digit in add/subtract (Cray XMP, Cray YMP, Cray C 90 and Cray 2). The following code replaces DLAMDA(I) by 2*DLAMDA(I)-DLAMDA(I), which on any of these machines zeros out the bottommost bit of DLAMDA(I) if it is 1; this makes the subsequent subtractions DLAMDA(I)-DLAMDA(J) unproblematic when cancellation occurs. On binary machines with a guard digit (almost all machines) it does not change DLAMDA(I) at all. On hexadecimal and decimal machines with a guard digit, it slightly changes the bottommost bits of DLAMDA(I). It does not account for hexadecimal or decimal machines without guard digits (we know of none). We use a subroutine call to compute 2*DLAMBDA(I) to prevent optimizing compilers from eliminating this code. */ i__1 = *k; for (i__ = 1; i__ <= i__1; ++i__) { dlamda[i__] = dlamc3_(&dlamda[i__], &dlamda[i__]) - dlamda[i__]; /* L10: */ } i__1 = *k; for (j = 1; j <= i__1; ++j) { dlaed4_(k, &j, &dlamda[1], &w[1], &q[j * q_dim1 + 1], rho, &d__[j], info); /* If the zero finder fails, the computation is terminated. */ if (*info != 0) { goto L120; } /* L20: */ } if (*k == 1) { goto L110; } if (*k == 2) { i__1 = *k; for (j = 1; j <= i__1; ++j) { w[1] = q[j * q_dim1 + 1]; w[2] = q[j * q_dim1 + 2]; ii = indx[1]; q[j * q_dim1 + 1] = w[ii]; ii = indx[2]; q[j * q_dim1 + 2] = w[ii]; /* L30: */ } goto L110; } /* Compute updated W. 
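   (Annotation: the loops below recompute the updating vector from the
   secular roots rather than reuse the original Z.  After the DLAED4 calls,
   Q(I,J) holds DLAMDA(I) - D(J) for the J-th root D(J), and W is built up
   so that, in effect,

                   prod_{j=1,K}    ( DLAMDA(i) - D(j) )
      W(i)**2 = -  --------------------------------------- ,
                   prod_{j .NE. i} ( DLAMDA(i) - DLAMDA(j) )

   with the sign of W(i) taken from the saved copy S of the original vector.
   This is the device, usually credited to Gu and Eisenstat, for obtaining
   the updating vector to high relative accuracy from the computed roots.)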
*/ dcopy_(k, &w[1], &c__1, &s[1], &c__1); /* Initialize W(I) = Q(I,I) */ i__1 = *ldq + 1; dcopy_(k, &q[q_offset], &i__1, &w[1], &c__1); i__1 = *k; for (j = 1; j <= i__1; ++j) { i__2 = j - 1; for (i__ = 1; i__ <= i__2; ++i__) { w[i__] *= q[i__ + j * q_dim1] / (dlamda[i__] - dlamda[j]); /* L40: */ } i__2 = *k; for (i__ = j + 1; i__ <= i__2; ++i__) { w[i__] *= q[i__ + j * q_dim1] / (dlamda[i__] - dlamda[j]); /* L50: */ } /* L60: */ } i__1 = *k; for (i__ = 1; i__ <= i__1; ++i__) { d__1 = sqrt(-w[i__]); w[i__] = d_sign(&d__1, &s[i__]); /* L70: */ } /* Compute eigenvectors of the modified rank-1 modification. */ i__1 = *k; for (j = 1; j <= i__1; ++j) { i__2 = *k; for (i__ = 1; i__ <= i__2; ++i__) { s[i__] = w[i__] / q[i__ + j * q_dim1]; /* L80: */ } temp = dnrm2_(k, &s[1], &c__1); i__2 = *k; for (i__ = 1; i__ <= i__2; ++i__) { ii = indx[i__]; q[i__ + j * q_dim1] = s[ii] / temp; /* L90: */ } /* L100: */ } /* Compute the updated eigenvectors. */ L110: n2 = *n - *n1; n12 = ctot[1] + ctot[2]; n23 = ctot[2] + ctot[3]; dlacpy_("A", &n23, k, &q[ctot[1] + 1 + q_dim1], ldq, &s[1], &n23); iq2 = *n1 * n12 + 1; if (n23 != 0) { dgemm_("N", "N", &n2, k, &n23, &c_b15, &q2[iq2], &n2, &s[1], &n23, & c_b29, &q[*n1 + 1 + q_dim1], ldq); } else { dlaset_("A", &n2, k, &c_b29, &c_b29, &q[*n1 + 1 + q_dim1], ldq); } dlacpy_("A", &n12, k, &q[q_offset], ldq, &s[1], &n12); if (n12 != 0) { dgemm_("N", "N", n1, k, &n12, &c_b15, &q2[1], n1, &s[1], &n12, &c_b29, &q[q_offset], ldq); } else { dlaset_("A", n1, k, &c_b29, &c_b29, &q[q_dim1 + 1], ldq); } L120: return 0; /* End of DLAED3 */ } /* dlaed3_ */ /* Subroutine */ int dlaed4_(integer *n, integer *i__, doublereal *d__, doublereal *z__, doublereal *delta, doublereal *rho, doublereal *dlam, integer *info) { /* System generated locals */ integer i__1; doublereal d__1; /* Builtin functions */ double sqrt(doublereal); /* Local variables */ static doublereal dphi, dpsi; static integer iter; static doublereal temp, prew, temp1, a, b, c__; static integer j; static doublereal w, dltlb, dltub, midpt; static integer niter; static logical swtch; extern /* Subroutine */ int dlaed5_(integer *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *), dlaed6_(integer *, logical *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, integer *); static logical swtch3; static integer ii; static doublereal dw, zz[3]; static logical orgati; static doublereal erretm, rhoinv; static integer ip1; static doublereal del, eta, phi, eps, tau, psi; static integer iim1, iip1; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= This subroutine computes the I-th updated eigenvalue of a symmetric rank-one modification to a diagonal matrix whose elements are given in the array d, and that D(i) < D(j) for i < j and that RHO > 0. This is arranged by the calling routine, and is no loss in generality. The rank-one modified system is thus diag( D ) + RHO * Z * Z_transpose. where we assume the Euclidean norm of Z is 1. The method consists of approximating the rational functions in the secular equation by simpler interpolating rational functions. Arguments ========= N (input) INTEGER The length of all arrays. I (input) INTEGER The index of the eigenvalue to be computed. 1 <= I <= N. D (input) DOUBLE PRECISION array, dimension (N) The original eigenvalues. It is assumed that they are in order, D(I) < D(J) for I < J. Z (input) DOUBLE PRECISION array, dimension (N) The components of the updating vector. 
DELTA (output) DOUBLE PRECISION array, dimension (N) If N .GT. 2, DELTA contains (D(j) - lambda_I) in its j-th component. If N = 1, then DELTA(1) = 1. If N = 2, see DLAED5 for detail. The vector DELTA contains the information necessary to construct the eigenvectors by DLAED3 and DLAED9. RHO (input) DOUBLE PRECISION The scalar in the symmetric updating formula. DLAM (output) DOUBLE PRECISION The computed lambda_I, the I-th updated eigenvalue. INFO (output) INTEGER = 0: successful exit > 0: if INFO = 1, the updating process failed. Internal Parameters =================== Logical variable ORGATI (origin-at-i?) is used for distinguishing whether D(i) or D(i+1) is treated as the origin. ORGATI = .true. origin at i ORGATI = .false. origin at i+1 Logical variable SWTCH3 (switch-for-3-poles?) is for noting if we are working with THREE poles! MAXIT is the maximum number of iterations allowed for each eigenvalue. Further Details =============== Based on contributions by Ren-Cang Li, Computer Science Division, University of California at Berkeley, USA ===================================================================== Since this routine is called in an inner loop, we do no argument checking. Quick return for N=1 and 2. */ /* Parameter adjustments */ --delta; --z__; --d__; /* Function Body */ *info = 0; if (*n == 1) { /* Presumably, I=1 upon entry */ *dlam = d__[1] + *rho * z__[1] * z__[1]; delta[1] = 1.; return 0; } if (*n == 2) { dlaed5_(i__, &d__[1], &z__[1], &delta[1], rho, dlam); return 0; } /* Compute machine epsilon */ eps = EPSILON; rhoinv = 1. / *rho; /* The case I = N */ if (*i__ == *n) { /* Initialize some basic variables */ ii = *n - 1; niter = 1; /* Calculate initial guess */ midpt = *rho / 2.; /* If ||Z||_2 is not one, then TEMP should be set to RHO * ||Z||_2^2 / TWO */ i__1 = *n; for (j = 1; j <= i__1; ++j) { delta[j] = d__[j] - d__[*i__] - midpt; /* L10: */ } psi = 0.; i__1 = *n - 2; for (j = 1; j <= i__1; ++j) { psi += z__[j] * z__[j] / delta[j]; /* L20: */ } c__ = rhoinv + psi; w = c__ + z__[ii] * z__[ii] / delta[ii] + z__[*n] * z__[*n] / delta[* n]; if (w <= 0.) { temp = z__[*n - 1] * z__[*n - 1] / (d__[*n] - d__[*n - 1] + *rho) + z__[*n] * z__[*n] / *rho; if (c__ <= temp) { tau = *rho; } else { del = d__[*n] - d__[*n - 1]; a = -c__ * del + z__[*n - 1] * z__[*n - 1] + z__[*n] * z__[*n] ; b = z__[*n] * z__[*n] * del; if (a < 0.) { tau = b * 2. / (sqrt(a * a + b * 4. * c__) - a); } else { tau = (a + sqrt(a * a + b * 4. * c__)) / (c__ * 2.); } } /* It can be proved that D(N)+RHO/2 <= LAMBDA(N) < D(N)+TAU <= D(N)+RHO */ dltlb = midpt; dltub = *rho; } else { del = d__[*n] - d__[*n - 1]; a = -c__ * del + z__[*n - 1] * z__[*n - 1] + z__[*n] * z__[*n]; b = z__[*n] * z__[*n] * del; if (a < 0.) { tau = b * 2. / (sqrt(a * a + b * 4. * c__) - a); } else { tau = (a + sqrt(a * a + b * 4. * c__)) / (c__ * 2.); } /* It can be proved that D(N) < D(N)+TAU < LAMBDA(N) < D(N)+RHO/2 */ dltlb = 0.; dltub = midpt; } i__1 = *n; for (j = 1; j <= i__1; ++j) { delta[j] = d__[j] - d__[*i__] - tau; /* L30: */ } /* Evaluate PSI and the derivative DPSI */ dpsi = 0.; psi = 0.; erretm = 0.; i__1 = ii; for (j = 1; j <= i__1; ++j) { temp = z__[j] / delta[j]; psi += z__[j] * temp; dpsi += temp * temp; erretm += psi; /* L40: */ } erretm = abs(erretm); /* Evaluate PHI and the derivative DPHI */ temp = z__[*n] / delta[*n]; phi = z__[*n] * temp; dphi = temp * temp; erretm = (-phi - psi) * 8. 
+ erretm - phi + rhoinv + abs(tau) * (dpsi + dphi); w = rhoinv + phi + psi; /* Test for convergence */ if (abs(w) <= eps * erretm) { *dlam = d__[*i__] + tau; goto L250; } if (w <= 0.) { dltlb = max(dltlb,tau); } else { dltub = min(dltub,tau); } /* Calculate the new step */ ++niter; c__ = w - delta[*n - 1] * dpsi - delta[*n] * dphi; a = (delta[*n - 1] + delta[*n]) * w - delta[*n - 1] * delta[*n] * ( dpsi + dphi); b = delta[*n - 1] * delta[*n] * w; if (c__ < 0.) { c__ = abs(c__); } if (c__ == 0.) { /* ETA = B/A ETA = RHO - TAU */ eta = dltub - tau; } else if (a >= 0.) { eta = (a + sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / (c__ * 2.); } else { eta = b * 2. / (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1))) ); } /* Note, eta should be positive if w is negative, and eta should be negative otherwise. However, if for some reason caused by roundoff, eta*w > 0, we simply use one Newton step instead. This way will guarantee eta*w < 0. */ if (w * eta > 0.) { eta = -w / (dpsi + dphi); } temp = tau + eta; if (temp > dltub || temp < dltlb) { if (w < 0.) { eta = (dltub - tau) / 2.; } else { eta = (dltlb - tau) / 2.; } } i__1 = *n; for (j = 1; j <= i__1; ++j) { delta[j] -= eta; /* L50: */ } tau += eta; /* Evaluate PSI and the derivative DPSI */ dpsi = 0.; psi = 0.; erretm = 0.; i__1 = ii; for (j = 1; j <= i__1; ++j) { temp = z__[j] / delta[j]; psi += z__[j] * temp; dpsi += temp * temp; erretm += psi; /* L60: */ } erretm = abs(erretm); /* Evaluate PHI and the derivative DPHI */ temp = z__[*n] / delta[*n]; phi = z__[*n] * temp; dphi = temp * temp; erretm = (-phi - psi) * 8. + erretm - phi + rhoinv + abs(tau) * (dpsi + dphi); w = rhoinv + phi + psi; /* Main loop to update the values of the array DELTA */ iter = niter + 1; for (niter = iter; niter <= 30; ++niter) { /* Test for convergence */ if (abs(w) <= eps * erretm) { *dlam = d__[*i__] + tau; goto L250; } if (w <= 0.) { dltlb = max(dltlb,tau); } else { dltub = min(dltub,tau); } /* Calculate the new step */ c__ = w - delta[*n - 1] * dpsi - delta[*n] * dphi; a = (delta[*n - 1] + delta[*n]) * w - delta[*n - 1] * delta[*n] * (dpsi + dphi); b = delta[*n - 1] * delta[*n] * w; if (a >= 0.) { eta = (a + sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / ( c__ * 2.); } else { eta = b * 2. / (a - sqrt((d__1 = a * a - b * 4. * c__, abs( d__1)))); } /* Note, eta should be positive if w is negative, and eta should be negative otherwise. However, if for some reason caused by roundoff, eta*w > 0, we simply use one Newton step instead. This way will guarantee eta*w < 0. */ if (w * eta > 0.) { eta = -w / (dpsi + dphi); } temp = tau + eta; if (temp > dltub || temp < dltlb) { if (w < 0.) { eta = (dltub - tau) / 2.; } else { eta = (dltlb - tau) / 2.; } } i__1 = *n; for (j = 1; j <= i__1; ++j) { delta[j] -= eta; /* L70: */ } tau += eta; /* Evaluate PSI and the derivative DPSI */ dpsi = 0.; psi = 0.; erretm = 0.; i__1 = ii; for (j = 1; j <= i__1; ++j) { temp = z__[j] / delta[j]; psi += z__[j] * temp; dpsi += temp * temp; erretm += psi; /* L80: */ } erretm = abs(erretm); /* Evaluate PHI and the derivative DPHI */ temp = z__[*n] / delta[*n]; phi = z__[*n] * temp; dphi = temp * temp; erretm = (-phi - psi) * 8. 
+ erretm - phi + rhoinv + abs(tau) * ( dpsi + dphi); w = rhoinv + phi + psi; /* L90: */ } /* Return with INFO = 1, NITER = MAXIT and not converged */ *info = 1; *dlam = d__[*i__] + tau; goto L250; /* End for the case I = N */ } else { /* The case for I < N */ niter = 1; ip1 = *i__ + 1; /* Calculate initial guess */ del = d__[ip1] - d__[*i__]; midpt = del / 2.; i__1 = *n; for (j = 1; j <= i__1; ++j) { delta[j] = d__[j] - d__[*i__] - midpt; /* L100: */ } psi = 0.; i__1 = *i__ - 1; for (j = 1; j <= i__1; ++j) { psi += z__[j] * z__[j] / delta[j]; /* L110: */ } phi = 0.; i__1 = *i__ + 2; for (j = *n; j >= i__1; --j) { phi += z__[j] * z__[j] / delta[j]; /* L120: */ } c__ = rhoinv + psi + phi; w = c__ + z__[*i__] * z__[*i__] / delta[*i__] + z__[ip1] * z__[ip1] / delta[ip1]; if (w > 0.) { /* d(i)< the ith eigenvalue < (d(i)+d(i+1))/2 We choose d(i) as origin. */ orgati = TRUE_; a = c__ * del + z__[*i__] * z__[*i__] + z__[ip1] * z__[ip1]; b = z__[*i__] * z__[*i__] * del; if (a > 0.) { tau = b * 2. / (a + sqrt((d__1 = a * a - b * 4. * c__, abs( d__1)))); } else { tau = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / ( c__ * 2.); } dltlb = 0.; dltub = midpt; } else { /* (d(i)+d(i+1))/2 <= the ith eigenvalue < d(i+1) We choose d(i+1) as origin. */ orgati = FALSE_; a = c__ * del - z__[*i__] * z__[*i__] - z__[ip1] * z__[ip1]; b = z__[ip1] * z__[ip1] * del; if (a < 0.) { tau = b * 2. / (a - sqrt((d__1 = a * a + b * 4. * c__, abs( d__1)))); } else { tau = -(a + sqrt((d__1 = a * a + b * 4. * c__, abs(d__1)))) / (c__ * 2.); } dltlb = -midpt; dltub = 0.; } if (orgati) { i__1 = *n; for (j = 1; j <= i__1; ++j) { delta[j] = d__[j] - d__[*i__] - tau; /* L130: */ } } else { i__1 = *n; for (j = 1; j <= i__1; ++j) { delta[j] = d__[j] - d__[ip1] - tau; /* L140: */ } } if (orgati) { ii = *i__; } else { ii = *i__ + 1; } iim1 = ii - 1; iip1 = ii + 1; /* Evaluate PSI and the derivative DPSI */ dpsi = 0.; psi = 0.; erretm = 0.; i__1 = iim1; for (j = 1; j <= i__1; ++j) { temp = z__[j] / delta[j]; psi += z__[j] * temp; dpsi += temp * temp; erretm += psi; /* L150: */ } erretm = abs(erretm); /* Evaluate PHI and the derivative DPHI */ dphi = 0.; phi = 0.; i__1 = iip1; for (j = *n; j >= i__1; --j) { temp = z__[j] / delta[j]; phi += z__[j] * temp; dphi += temp * temp; erretm += phi; /* L160: */ } w = rhoinv + phi + psi; /* W is the value of the secular function with its ii-th element removed. */ swtch3 = FALSE_; if (orgati) { if (w < 0.) { swtch3 = TRUE_; } } else { if (w > 0.) { swtch3 = TRUE_; } } if (ii == 1 || ii == *n) { swtch3 = FALSE_; } temp = z__[ii] / delta[ii]; dw = dpsi + dphi + temp * temp; temp = z__[ii] * temp; w += temp; erretm = (phi - psi) * 8. + erretm + rhoinv * 2. + abs(temp) * 3. + abs(tau) * dw; /* Test for convergence */ if (abs(w) <= eps * erretm) { if (orgati) { *dlam = d__[*i__] + tau; } else { *dlam = d__[ip1] + tau; } goto L250; } if (w <= 0.) { dltlb = max(dltlb,tau); } else { dltub = min(dltub,tau); } /* Calculate the new step */ ++niter; if (! swtch3) { if (orgati) { /* Computing 2nd power */ d__1 = z__[*i__] / delta[*i__]; c__ = w - delta[ip1] * dw - (d__[*i__] - d__[ip1]) * (d__1 * d__1); } else { /* Computing 2nd power */ d__1 = z__[ip1] / delta[ip1]; c__ = w - delta[*i__] * dw - (d__[ip1] - d__[*i__]) * (d__1 * d__1); } a = (delta[*i__] + delta[ip1]) * w - delta[*i__] * delta[ip1] * dw; b = delta[*i__] * delta[ip1] * w; if (c__ == 0.) { if (a == 0.) 
{ if (orgati) { a = z__[*i__] * z__[*i__] + delta[ip1] * delta[ip1] * (dpsi + dphi); } else { a = z__[ip1] * z__[ip1] + delta[*i__] * delta[*i__] * (dpsi + dphi); } } eta = b / a; } else if (a <= 0.) { eta = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / ( c__ * 2.); } else { eta = b * 2. / (a + sqrt((d__1 = a * a - b * 4. * c__, abs( d__1)))); } } else { /* Interpolation using THREE most relevant poles */ temp = rhoinv + psi + phi; if (orgati) { temp1 = z__[iim1] / delta[iim1]; temp1 *= temp1; c__ = temp - delta[iip1] * (dpsi + dphi) - (d__[iim1] - d__[ iip1]) * temp1; zz[0] = z__[iim1] * z__[iim1]; zz[2] = delta[iip1] * delta[iip1] * (dpsi - temp1 + dphi); } else { temp1 = z__[iip1] / delta[iip1]; temp1 *= temp1; c__ = temp - delta[iim1] * (dpsi + dphi) - (d__[iip1] - d__[ iim1]) * temp1; zz[0] = delta[iim1] * delta[iim1] * (dpsi + (dphi - temp1)); zz[2] = z__[iip1] * z__[iip1]; } zz[1] = z__[ii] * z__[ii]; dlaed6_(&niter, &orgati, &c__, &delta[iim1], zz, &w, &eta, info); if (*info != 0) { goto L250; } } /* Note, eta should be positive if w is negative, and eta should be negative otherwise. However, if for some reason caused by roundoff, eta*w > 0, we simply use one Newton step instead. This way will guarantee eta*w < 0. */ if (w * eta >= 0.) { eta = -w / dw; } temp = tau + eta; if (temp > dltub || temp < dltlb) { if (w < 0.) { eta = (dltub - tau) / 2.; } else { eta = (dltlb - tau) / 2.; } } prew = w; i__1 = *n; for (j = 1; j <= i__1; ++j) { delta[j] -= eta; /* L180: */ } /* Evaluate PSI and the derivative DPSI */ dpsi = 0.; psi = 0.; erretm = 0.; i__1 = iim1; for (j = 1; j <= i__1; ++j) { temp = z__[j] / delta[j]; psi += z__[j] * temp; dpsi += temp * temp; erretm += psi; /* L190: */ } erretm = abs(erretm); /* Evaluate PHI and the derivative DPHI */ dphi = 0.; phi = 0.; i__1 = iip1; for (j = *n; j >= i__1; --j) { temp = z__[j] / delta[j]; phi += z__[j] * temp; dphi += temp * temp; erretm += phi; /* L200: */ } temp = z__[ii] / delta[ii]; dw = dpsi + dphi + temp * temp; temp = z__[ii] * temp; w = rhoinv + phi + psi + temp; erretm = (phi - psi) * 8. + erretm + rhoinv * 2. + abs(temp) * 3. + ( d__1 = tau + eta, abs(d__1)) * dw; swtch = FALSE_; if (orgati) { if (-w > abs(prew) / 10.) { swtch = TRUE_; } } else { if (w > abs(prew) / 10.) { swtch = TRUE_; } } tau += eta; /* Main loop to update the values of the array DELTA */ iter = niter + 1; for (niter = iter; niter <= 30; ++niter) { /* Test for convergence */ if (abs(w) <= eps * erretm) { if (orgati) { *dlam = d__[*i__] + tau; } else { *dlam = d__[ip1] + tau; } goto L250; } if (w <= 0.) { dltlb = max(dltlb,tau); } else { dltub = min(dltub,tau); } /* Calculate the new step */ if (! swtch3) { if (! swtch) { if (orgati) { /* Computing 2nd power */ d__1 = z__[*i__] / delta[*i__]; c__ = w - delta[ip1] * dw - (d__[*i__] - d__[ip1]) * ( d__1 * d__1); } else { /* Computing 2nd power */ d__1 = z__[ip1] / delta[ip1]; c__ = w - delta[*i__] * dw - (d__[ip1] - d__[*i__]) * (d__1 * d__1); } } else { temp = z__[ii] / delta[ii]; if (orgati) { dpsi += temp * temp; } else { dphi += temp * temp; } c__ = w - delta[*i__] * dpsi - delta[ip1] * dphi; } a = (delta[*i__] + delta[ip1]) * w - delta[*i__] * delta[ip1] * dw; b = delta[*i__] * delta[ip1] * w; if (c__ == 0.) { if (a == 0.) { if (! 
swtch) { if (orgati) { a = z__[*i__] * z__[*i__] + delta[ip1] * delta[ip1] * (dpsi + dphi); } else { a = z__[ip1] * z__[ip1] + delta[*i__] * delta[ *i__] * (dpsi + dphi); } } else { a = delta[*i__] * delta[*i__] * dpsi + delta[ip1] * delta[ip1] * dphi; } } eta = b / a; } else if (a <= 0.) { eta = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / (c__ * 2.); } else { eta = b * 2. / (a + sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))); } } else { /* Interpolation using THREE most relevant poles */ temp = rhoinv + psi + phi; if (swtch) { c__ = temp - delta[iim1] * dpsi - delta[iip1] * dphi; zz[0] = delta[iim1] * delta[iim1] * dpsi; zz[2] = delta[iip1] * delta[iip1] * dphi; } else { if (orgati) { temp1 = z__[iim1] / delta[iim1]; temp1 *= temp1; c__ = temp - delta[iip1] * (dpsi + dphi) - (d__[iim1] - d__[iip1]) * temp1; zz[0] = z__[iim1] * z__[iim1]; zz[2] = delta[iip1] * delta[iip1] * (dpsi - temp1 + dphi); } else { temp1 = z__[iip1] / delta[iip1]; temp1 *= temp1; c__ = temp - delta[iim1] * (dpsi + dphi) - (d__[iip1] - d__[iim1]) * temp1; zz[0] = delta[iim1] * delta[iim1] * (dpsi + (dphi - temp1)); zz[2] = z__[iip1] * z__[iip1]; } } dlaed6_(&niter, &orgati, &c__, &delta[iim1], zz, &w, &eta, info); if (*info != 0) { goto L250; } } /* Note, eta should be positive if w is negative, and eta should be negative otherwise. However, if for some reason caused by roundoff, eta*w > 0, we simply use one Newton step instead. This way will guarantee eta*w < 0. */ if (w * eta >= 0.) { eta = -w / dw; } temp = tau + eta; if (temp > dltub || temp < dltlb) { if (w < 0.) { eta = (dltub - tau) / 2.; } else { eta = (dltlb - tau) / 2.; } } i__1 = *n; for (j = 1; j <= i__1; ++j) { delta[j] -= eta; /* L210: */ } tau += eta; prew = w; /* Evaluate PSI and the derivative DPSI */ dpsi = 0.; psi = 0.; erretm = 0.; i__1 = iim1; for (j = 1; j <= i__1; ++j) { temp = z__[j] / delta[j]; psi += z__[j] * temp; dpsi += temp * temp; erretm += psi; /* L220: */ } erretm = abs(erretm); /* Evaluate PHI and the derivative DPHI */ dphi = 0.; phi = 0.; i__1 = iip1; for (j = *n; j >= i__1; --j) { temp = z__[j] / delta[j]; phi += z__[j] * temp; dphi += temp * temp; erretm += phi; /* L230: */ } temp = z__[ii] / delta[ii]; dw = dpsi + dphi + temp * temp; temp = z__[ii] * temp; w = rhoinv + phi + psi + temp; erretm = (phi - psi) * 8. + erretm + rhoinv * 2. + abs(temp) * 3. + abs(tau) * dw; if (w * prew > 0. && abs(w) > abs(prew) / 10.) { swtch = ! swtch; } /* L240: */ } /* Return with INFO = 1, NITER = MAXIT and not converged */ *info = 1; if (orgati) { *dlam = d__[*i__] + tau; } else { *dlam = d__[ip1] + tau; } } L250: return 0; /* End of DLAED4 */ } /* dlaed4_ */ /* Subroutine */ int dlaed5_(integer *i__, doublereal *d__, doublereal *z__, doublereal *delta, doublereal *rho, doublereal *dlam) { /* System generated locals */ doublereal d__1; /* Builtin functions */ double sqrt(doublereal); /* Local variables */ static doublereal temp, b, c__, w, del, tau; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= This subroutine computes the I-th eigenvalue of a symmetric rank-one modification of a 2-by-2 diagonal matrix diag( D ) + RHO * Z * transpose(Z) . The diagonal elements in the array D are assumed to satisfy D(i) < D(j) for i < j . We also assume RHO > 0 and that the Euclidean norm of the vector Z is one. Arguments ========= I (input) INTEGER The index of the eigenvalue to be computed. I = 1 or I = 2. 
D (input) DOUBLE PRECISION array, dimension (2) The original eigenvalues. We assume D(1) < D(2). Z (input) DOUBLE PRECISION array, dimension (2) The components of the updating vector. DELTA (output) DOUBLE PRECISION array, dimension (2) The vector DELTA contains the information necessary to construct the eigenvectors. RHO (input) DOUBLE PRECISION The scalar in the symmetric updating formula. DLAM (output) DOUBLE PRECISION The computed lambda_I, the I-th updated eigenvalue. Further Details =============== Based on contributions by Ren-Cang Li, Computer Science Division, University of California at Berkeley, USA ===================================================================== */ /* Parameter adjustments */ --delta; --z__; --d__; /* Function Body */ del = d__[2] - d__[1]; if (*i__ == 1) { w = *rho * 2. * (z__[2] * z__[2] - z__[1] * z__[1]) / del + 1.; if (w > 0.) { b = del + *rho * (z__[1] * z__[1] + z__[2] * z__[2]); c__ = *rho * z__[1] * z__[1] * del; /* B > ZERO, always */ tau = c__ * 2. / (b + sqrt((d__1 = b * b - c__ * 4., abs(d__1)))); *dlam = d__[1] + tau; delta[1] = -z__[1] / tau; delta[2] = z__[2] / (del - tau); } else { b = -del + *rho * (z__[1] * z__[1] + z__[2] * z__[2]); c__ = *rho * z__[2] * z__[2] * del; if (b > 0.) { tau = c__ * -2. / (b + sqrt(b * b + c__ * 4.)); } else { tau = (b - sqrt(b * b + c__ * 4.)) / 2.; } *dlam = d__[2] + tau; delta[1] = -z__[1] / (del + tau); delta[2] = -z__[2] / tau; } temp = sqrt(delta[1] * delta[1] + delta[2] * delta[2]); delta[1] /= temp; delta[2] /= temp; } else { /* Now I=2 */ b = -del + *rho * (z__[1] * z__[1] + z__[2] * z__[2]); c__ = *rho * z__[2] * z__[2] * del; if (b > 0.) { tau = (b + sqrt(b * b + c__ * 4.)) / 2.; } else { tau = c__ * 2. / (-b + sqrt(b * b + c__ * 4.)); } *dlam = d__[2] + tau; delta[1] = -z__[1] / (del + tau); delta[2] = -z__[2] / tau; temp = sqrt(delta[1] * delta[1] + delta[2] * delta[2]); delta[1] /= temp; delta[2] /= temp; } return 0; /* End OF DLAED5 */ } /* dlaed5_ */ /* Subroutine */ int dlaed6_(integer *kniter, logical *orgati, doublereal * rho, doublereal *d__, doublereal *z__, doublereal *finit, doublereal * tau, integer *info) { /* System generated locals */ integer i__1; doublereal d__1, d__2, d__3, d__4; /* Builtin functions */ double sqrt(doublereal), log(doublereal), pow_di(doublereal *, integer *); /* Local variables */ static doublereal base; static integer iter; static doublereal temp, temp1, temp2, temp3, temp4, a, b, c__, f; static integer i__; static logical scale; static integer niter; static doublereal small1, small2, fc, df, sminv1, sminv2; static doublereal dscale[3], sclfac, zscale[3], erretm, sclinv, ddf, lbd, eta, ubd, eps; /* -- LAPACK routine (version 3.1.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. February 2007 Purpose ======= DLAED6 computes the positive or negative root (closest to the origin) of z(1) z(2) z(3) f(x) = rho + --------- + ---------- + --------- d(1)-x d(2)-x d(3)-x It is assumed that if ORGATI = .true. the root is between d(2) and d(3); otherwise it is between d(1) and d(2) This routine will be called by DLAED4 when necessary. In most cases, the root sought is the smallest in magnitude, though it might not be in some extremely rare situations. Arguments ========= KNITER (input) INTEGER Refer to DLAED4 for its significance. ORGATI (input) LOGICAL If ORGATI is true, the needed root is between d(2) and d(3); otherwise it is between d(1) and d(2). See DLAED4 for further details. RHO (input) DOUBLE PRECISION Refer to the equation f(x) above. 
D (input) DOUBLE PRECISION array, dimension (3) D satisfies d(1) < d(2) < d(3). Z (input) DOUBLE PRECISION array, dimension (3) Each of the elements in z must be positive. FINIT (input) DOUBLE PRECISION The value of f at 0. It is more accurate than the one evaluated inside this routine (if someone wants to do so). TAU (output) DOUBLE PRECISION The root of the equation f(x). INFO (output) INTEGER = 0: successful exit > 0: if INFO = 1, failure to converge Further Details =============== 30/06/99: Based on contributions by Ren-Cang Li, Computer Science Division, University of California at Berkeley, USA 10/02/03: This version has a few statements commented out for thread safety (machine parameters are computed on each entry). SJH. 05/10/06: Modified from a new version of Ren-Cang Li, use Gragg-Thornton-Warner cubic convergent scheme for better stability. ===================================================================== */ /* Parameter adjustments */ --z__; --d__; /* Function Body */ *info = 0; if (*orgati) { lbd = d__[2]; ubd = d__[3]; } else { lbd = d__[1]; ubd = d__[2]; } if (*finit < 0.) { lbd = 0.; } else { ubd = 0.; } niter = 1; *tau = 0.; if (*kniter == 2) { if (*orgati) { temp = (d__[3] - d__[2]) / 2.; c__ = *rho + z__[1] / (d__[1] - d__[2] - temp); a = c__ * (d__[2] + d__[3]) + z__[2] + z__[3]; b = c__ * d__[2] * d__[3] + z__[2] * d__[3] + z__[3] * d__[2]; } else { temp = (d__[1] - d__[2]) / 2.; c__ = *rho + z__[3] / (d__[3] - d__[2] - temp); a = c__ * (d__[1] + d__[2]) + z__[1] + z__[2]; b = c__ * d__[1] * d__[2] + z__[1] * d__[2] + z__[2] * d__[1]; } /* Computing MAX */ d__1 = abs(a), d__2 = abs(b), d__1 = max(d__1,d__2), d__2 = abs(c__); temp = max(d__1,d__2); a /= temp; b /= temp; c__ /= temp; if (c__ == 0.) { *tau = b / a; } else if (a <= 0.) { *tau = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / ( c__ * 2.); } else { *tau = b * 2. / (a + sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)) )); } if (*tau < lbd || *tau > ubd) { *tau = (lbd + ubd) / 2.; } if (d__[1] == *tau || d__[2] == *tau || d__[3] == *tau) { *tau = 0.; } else { temp = *finit + *tau * z__[1] / (d__[1] * (d__[1] - *tau)) + *tau * z__[2] / (d__[2] * (d__[2] - *tau)) + *tau * z__[3] / ( d__[3] * (d__[3] - *tau)); if (temp <= 0.) { lbd = *tau; } else { ubd = *tau; } if (abs(*finit) <= abs(temp)) { *tau = 0.; } } } /* get machine parameters for possible scaling to avoid overflow modified by Sven: parameters SMALL1, SMINV1, SMALL2, SMINV2, EPS are not SAVEd anymore between one call to the others but recomputed at each call */ eps = EPSILON; base = BASE; i__1 = (integer) (log(SAFEMINIMUM) / log(base) / 3.); small1 = pow_di(&base, &i__1); sminv1 = 1. 
/ small1; small2 = small1 * small1; sminv2 = sminv1 * sminv1; /* Determine if scaling of inputs necessary to avoid overflow when computing 1/TEMP**3 */ if (*orgati) { /* Computing MIN */ d__3 = (d__1 = d__[2] - *tau, abs(d__1)), d__4 = (d__2 = d__[3] - * tau, abs(d__2)); temp = min(d__3,d__4); } else { /* Computing MIN */ d__3 = (d__1 = d__[1] - *tau, abs(d__1)), d__4 = (d__2 = d__[2] - * tau, abs(d__2)); temp = min(d__3,d__4); } scale = FALSE_; if (temp <= small1) { scale = TRUE_; if (temp <= small2) { /* Scale up by power of radix nearest 1/SAFMIN**(2/3) */ sclfac = sminv2; sclinv = small2; } else { /* Scale up by power of radix nearest 1/SAFMIN**(1/3) */ sclfac = sminv1; sclinv = small1; } /* Scaling up safe because D, Z, TAU scaled elsewhere to be O(1) */ for (i__ = 1; i__ <= 3; ++i__) { dscale[i__ - 1] = d__[i__] * sclfac; zscale[i__ - 1] = z__[i__] * sclfac; /* L10: */ } *tau *= sclfac; lbd *= sclfac; ubd *= sclfac; } else { /* Copy D and Z to DSCALE and ZSCALE */ for (i__ = 1; i__ <= 3; ++i__) { dscale[i__ - 1] = d__[i__]; zscale[i__ - 1] = z__[i__]; /* L20: */ } } fc = 0.; df = 0.; ddf = 0.; for (i__ = 1; i__ <= 3; ++i__) { temp = 1. / (dscale[i__ - 1] - *tau); temp1 = zscale[i__ - 1] * temp; temp2 = temp1 * temp; temp3 = temp2 * temp; fc += temp1 / dscale[i__ - 1]; df += temp2; ddf += temp3; /* L30: */ } f = *finit + *tau * fc; if (abs(f) <= 0.) { goto L60; } if (f <= 0.) { lbd = *tau; } else { ubd = *tau; } /* Iteration begins -- Use Gragg-Thornton-Warner cubic convergent scheme It is not hard to see that 1) Iterations will go up monotonically if FINIT < 0; 2) Iterations will go down monotonically if FINIT > 0. */ iter = niter + 1; for (niter = iter; niter <= 40; ++niter) { if (*orgati) { temp1 = dscale[1] - *tau; temp2 = dscale[2] - *tau; } else { temp1 = dscale[0] - *tau; temp2 = dscale[1] - *tau; } a = (temp1 + temp2) * f - temp1 * temp2 * df; b = temp1 * temp2 * f; c__ = f - (temp1 + temp2) * df + temp1 * temp2 * ddf; /* Computing MAX */ d__1 = abs(a), d__2 = abs(b), d__1 = max(d__1,d__2), d__2 = abs(c__); temp = max(d__1,d__2); a /= temp; b /= temp; c__ /= temp; if (c__ == 0.) { eta = b / a; } else if (a <= 0.) { eta = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / (c__ * 2.); } else { eta = b * 2. / (a + sqrt((d__1 = a * a - b * 4. * c__, abs(d__1))) ); } if (f * eta >= 0.) { eta = -f / df; } *tau += eta; if (*tau < lbd || *tau > ubd) { *tau = (lbd + ubd) / 2.; } fc = 0.; erretm = 0.; df = 0.; ddf = 0.; for (i__ = 1; i__ <= 3; ++i__) { temp = 1. / (dscale[i__ - 1] - *tau); temp1 = zscale[i__ - 1] * temp; temp2 = temp1 * temp; temp3 = temp2 * temp; temp4 = temp1 / dscale[i__ - 1]; fc += temp4; erretm += abs(temp4); df += temp2; ddf += temp3; /* L40: */ } f = *finit + *tau * fc; erretm = (abs(*finit) + abs(*tau) * erretm) * 8. + abs(*tau) * df; if (abs(f) <= eps * erretm) { goto L60; } if (f <= 0.) 
{ lbd = *tau; } else { ubd = *tau; } /* L50: */ } *info = 1; L60: /* Undo scaling */ if (scale) { *tau *= sclinv; } return 0; /* End of DLAED6 */ } /* dlaed6_ */ /* Subroutine */ int dlaed7_(integer *icompq, integer *n, integer *qsiz, integer *tlvls, integer *curlvl, integer *curpbm, doublereal *d__, doublereal *q, integer *ldq, integer *indxq, doublereal *rho, integer *cutpnt, doublereal *qstore, integer *qptr, integer *prmptr, integer * perm, integer *givptr, integer *givcol, doublereal *givnum, doublereal *work, integer *iwork, integer *info) { /* System generated locals */ integer q_dim1, q_offset, i__1, i__2; /* Builtin functions */ integer pow_ii(integer *, integer *); /* Local variables */ static integer indx, curr, i__, k; extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *); static integer indxc, indxp, n1, n2; extern /* Subroutine */ int dlaed8_(integer *, integer *, integer *, integer *, doublereal *, doublereal *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, doublereal *, integer *, doublereal *, integer *, integer *, integer *, doublereal *, integer *, integer *, integer *), dlaed9_(integer *, integer *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, doublereal *, doublereal *, doublereal *, integer *, integer *), dlaeda_(integer *, integer *, integer *, integer *, integer *, integer *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, doublereal *, integer *) ; static integer idlmda, is, iw, iz; extern /* Subroutine */ int dlamrg_(integer *, integer *, doublereal *, integer *, integer *, integer *), xerbla_(char *, integer *); static integer coltyp, iq2, ptr, ldq2; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLAED7 computes the updated eigensystem of a diagonal matrix after modification by a rank-one symmetric matrix. This routine is used only for the eigenproblem which requires all eigenvalues and optionally eigenvectors of a dense symmetric matrix that has been reduced to tridiagonal form. DLAED1 handles the case in which all eigenvalues and eigenvectors of a symmetric tridiagonal matrix are desired. T = Q(in) ( D(in) + RHO * Z*Z' ) Q'(in) = Q(out) * D(out) * Q'(out) where Z = Q'u, u is a vector of length N with ones in the CUTPNT and CUTPNT + 1 th elements and zeros elsewhere. The eigenvectors of the original matrix are stored in Q, and the eigenvalues are in D. The algorithm consists of three stages: The first stage consists of deflating the size of the problem when there are multiple eigenvalues or if there is a zero in the Z vector. For each such occurence the dimension of the secular equation problem is reduced by one. This stage is performed by the routine DLAED8. The second stage consists of calculating the updated eigenvalues. This is done by finding the roots of the secular equation via the routine DLAED4 (as called by DLAED9). This routine also calculates the eigenvectors of the current problem. The final stage consists of computing the updated eigenvectors directly using the updated eigenvalues. The eigenvectors for the current problem are multiplied with the eigenvectors from the overall problem. Arguments ========= ICOMPQ (input) INTEGER = 0: Compute eigenvalues only. = 1: Compute eigenvectors of original dense symmetric matrix also. 
On entry, Q contains the orthogonal matrix used to reduce the original matrix to tridiagonal form. N (input) INTEGER The dimension of the symmetric tridiagonal matrix. N >= 0. QSIZ (input) INTEGER The dimension of the orthogonal matrix used to reduce the full matrix to tridiagonal form. QSIZ >= N if ICOMPQ = 1. TLVLS (input) INTEGER The total number of merging levels in the overall divide and conquer tree. CURLVL (input) INTEGER The current level in the overall merge routine, 0 <= CURLVL <= TLVLS. CURPBM (input) INTEGER The current problem in the current level in the overall merge routine (counting from upper left to lower right). D (input/output) DOUBLE PRECISION array, dimension (N) On entry, the eigenvalues of the rank-1-perturbed matrix. On exit, the eigenvalues of the repaired matrix. Q (input/output) DOUBLE PRECISION array, dimension (LDQ, N) On entry, the eigenvectors of the rank-1-perturbed matrix. On exit, the eigenvectors of the repaired tridiagonal matrix. LDQ (input) INTEGER The leading dimension of the array Q. LDQ >= max(1,N). INDXQ (output) INTEGER array, dimension (N) The permutation which will reintegrate the subproblem just solved back into sorted order, i.e., D( INDXQ( I = 1, N ) ) will be in ascending order. RHO (input) DOUBLE PRECISION The subdiagonal element used to create the rank-1 modification. CUTPNT (input) INTEGER Contains the location of the last eigenvalue in the leading sub-matrix. min(1,N) <= CUTPNT <= N. QSTORE (input/output) DOUBLE PRECISION array, dimension (N**2+1) Stores eigenvectors of submatrices encountered during divide and conquer, packed together. QPTR points to beginning of the submatrices. QPTR (input/output) INTEGER array, dimension (N+2) List of indices pointing to beginning of submatrices stored in QSTORE. The submatrices are numbered starting at the bottom left of the divide and conquer tree, from left to right and bottom to top. PRMPTR (input) INTEGER array, dimension (N lg N) Contains a list of pointers which indicate where in PERM a level's permutation is stored. PRMPTR(i+1) - PRMPTR(i) indicates the size of the permutation and also the size of the full, non-deflated problem. PERM (input) INTEGER array, dimension (N lg N) Contains the permutations (from deflation and sorting) to be applied to each eigenblock. GIVPTR (input) INTEGER array, dimension (N lg N) Contains a list of pointers which indicate where in GIVCOL a level's Givens rotations are stored. GIVPTR(i+1) - GIVPTR(i) indicates the number of Givens rotations. GIVCOL (input) INTEGER array, dimension (2, N lg N) Each pair of numbers indicates a pair of columns to take place in a Givens rotation. GIVNUM (input) DOUBLE PRECISION array, dimension (2, N lg N) Each number indicates the S value to be used in the corresponding Givens rotation. WORK (workspace) DOUBLE PRECISION array, dimension (3*N+QSIZ*N) IWORK (workspace) INTEGER array, dimension (4*N) INFO (output) INTEGER = 0: successful exit. < 0: if INFO = -i, the i-th argument had an illegal value. > 0: if INFO = 1, an eigenvalue did not converge Further Details =============== Based on contributions by Jeff Rutter, Computer Science Division, University of California at Berkeley, USA ===================================================================== Test the input parameters. 
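   (Annotation: after the argument checks, the code locates the storage slot
   of the current node of the divide and conquer tree as

      CURR = 2**TLVLS + 1 + CURPBM + sum over i = 1,...,CURLVL-1 of
             2**(TLVLS-i),

   and then uses QPTR(CURR) as the offset of this node's eigenvector block
   inside QSTORE.)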
*/ /* Parameter adjustments */ --d__; q_dim1 = *ldq; q_offset = 1 + q_dim1 * 1; q -= q_offset; --indxq; --qstore; --qptr; --prmptr; --perm; --givptr; givcol -= 3; givnum -= 3; --work; --iwork; /* Function Body */ *info = 0; if (*icompq < 0 || *icompq > 1) { *info = -1; } else if (*n < 0) { *info = -2; } else if (*icompq == 1 && *qsiz < *n) { *info = -4; } else if (*ldq < max(1,*n)) { *info = -9; } else if (min(1,*n) > *cutpnt || *n < *cutpnt) { *info = -12; } if (*info != 0) { i__1 = -(*info); xerbla_("DLAED7", &i__1); return 0; } /* Quick return if possible */ if (*n == 0) { return 0; } /* The following values are for bookkeeping purposes only. They are integer pointers which indicate the portion of the workspace used by a particular array in DLAED8 and DLAED9. */ if (*icompq == 1) { ldq2 = *qsiz; } else { ldq2 = *n; } iz = 1; idlmda = iz + *n; iw = idlmda + *n; iq2 = iw + *n; is = iq2 + *n * ldq2; indx = 1; indxc = indx + *n; coltyp = indxc + *n; indxp = coltyp + *n; /* Form the z-vector which consists of the last row of Q_1 and the first row of Q_2. */ ptr = pow_ii(&c__2, tlvls) + 1; i__1 = *curlvl - 1; for (i__ = 1; i__ <= i__1; ++i__) { i__2 = *tlvls - i__; ptr += pow_ii(&c__2, &i__2); /* L10: */ } curr = ptr + *curpbm; dlaeda_(n, tlvls, curlvl, curpbm, &prmptr[1], &perm[1], &givptr[1], & givcol[3], &givnum[3], &qstore[1], &qptr[1], &work[iz], &work[iz + *n], info); /* When solving the final problem, we no longer need the stored data, so we will overwrite the data from this level onto the previously used storage space. */ if (*curlvl == *tlvls) { qptr[curr] = 1; prmptr[curr] = 1; givptr[curr] = 1; } /* Sort and Deflate eigenvalues. */ dlaed8_(icompq, &k, n, qsiz, &d__[1], &q[q_offset], ldq, &indxq[1], rho, cutpnt, &work[iz], &work[idlmda], &work[iq2], &ldq2, &work[iw], & perm[prmptr[curr]], &givptr[curr + 1], &givcol[(givptr[curr] << 1) + 1], &givnum[(givptr[curr] << 1) + 1], &iwork[indxp], &iwork[ indx], info); prmptr[curr + 1] = prmptr[curr] + *n; givptr[curr + 1] += givptr[curr]; /* Solve Secular Equation. */ if (k != 0) { dlaed9_(&k, &c__1, &k, n, &d__[1], &work[is], &k, rho, &work[idlmda], &work[iw], &qstore[qptr[curr]], &k, info); if (*info != 0) { goto L30; } if (*icompq == 1) { dgemm_("N", "N", qsiz, &k, &k, &c_b15, &work[iq2], &ldq2, &qstore[ qptr[curr]], &k, &c_b29, &q[q_offset], ldq); } /* Computing 2nd power */ i__1 = k; qptr[curr + 1] = qptr[curr] + i__1 * i__1; /* Prepare the INDXQ sorting permutation. 
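   (Annotation: DLAMRG merges two sorted sublists of D, namely the K
   secular-equation roots in D(1:K), which are in ascending order, and the
   N-K deflated eigenvalues in D(K+1:N), which were filled in from the end
   of the array and therefore appear in descending order, hence the -1
   stride for the second sublist.  The resulting INDXQ puts all N
   eigenvalues of this subproblem into ascending order for the parent
   problem.)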
*/ n1 = k; n2 = *n - k; dlamrg_(&n1, &n2, &d__[1], &c__1, &c_n1, &indxq[1]); } else { qptr[curr + 1] = qptr[curr]; i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { indxq[i__] = i__; /* L20: */ } } L30: return 0; /* End of DLAED7 */ } /* dlaed7_ */ /* Subroutine */ int dlaed8_(integer *icompq, integer *k, integer *n, integer *qsiz, doublereal *d__, doublereal *q, integer *ldq, integer *indxq, doublereal *rho, integer *cutpnt, doublereal *z__, doublereal *dlamda, doublereal *q2, integer *ldq2, doublereal *w, integer *perm, integer *givptr, integer *givcol, doublereal *givnum, integer *indxp, integer *indx, integer *info) { /* System generated locals */ integer q_dim1, q_offset, q2_dim1, q2_offset, i__1; doublereal d__1; /* Builtin functions */ double sqrt(doublereal); /* Local variables */ static integer jlam, imax, jmax; extern /* Subroutine */ int drot_(integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *); static doublereal c__; static integer i__, j; static doublereal s, t; extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, integer *), dcopy_(integer *, doublereal *, integer *, doublereal *, integer *); static integer k2, n1, n2; static integer jp; extern integer idamax_(integer *, doublereal *, integer *); extern /* Subroutine */ int dlamrg_(integer *, integer *, doublereal *, integer *, integer *, integer *), dlacpy_(char *, integer *, integer *, doublereal *, integer *, doublereal *, integer *), xerbla_(char *, integer *); static integer n1p1; static doublereal eps, tau, tol; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLAED8 merges the two sets of eigenvalues together into a single sorted set. Then it tries to deflate the size of the problem. There are two ways in which deflation can occur: when two or more eigenvalues are close together or if there is a tiny element in the Z vector. For each such occurrence the order of the related secular equation problem is reduced by one. Arguments ========= ICOMPQ (input) INTEGER = 0: Compute eigenvalues only. = 1: Compute eigenvectors of original dense symmetric matrix also. On entry, Q contains the orthogonal matrix used to reduce the original matrix to tridiagonal form. K (output) INTEGER The number of non-deflated eigenvalues, and the order of the related secular equation. N (input) INTEGER The dimension of the symmetric tridiagonal matrix. N >= 0. QSIZ (input) INTEGER The dimension of the orthogonal matrix used to reduce the full matrix to tridiagonal form. QSIZ >= N if ICOMPQ = 1. D (input/output) DOUBLE PRECISION array, dimension (N) On entry, the eigenvalues of the two submatrices to be combined. On exit, the trailing (N-K) updated eigenvalues (those which were deflated) sorted into increasing order. Q (input/output) DOUBLE PRECISION array, dimension (LDQ,N) If ICOMPQ = 0, Q is not referenced. Otherwise, on entry, Q contains the eigenvectors of the partially solved system which has been previously updated in matrix multiplies with other partially solved eigensystems. On exit, Q contains the trailing (N-K) updated eigenvectors (those which were deflated) in its last N-K columns. LDQ (input) INTEGER The leading dimension of the array Q. LDQ >= max(1,N). INDXQ (input) INTEGER array, dimension (N) The permutation which separately sorts the two sub-problems in D into ascending order. 
Note that elements in the second half of this permutation must first have CUTPNT added to their values in order to be accurate. RHO (input/output) DOUBLE PRECISION On entry, the off-diagonal element associated with the rank-1 cut which originally split the two submatrices which are now being recombined. On exit, RHO has been modified to the value required by DLAED3. CUTPNT (input) INTEGER The location of the last eigenvalue in the leading sub-matrix. min(1,N) <= CUTPNT <= N. Z (input) DOUBLE PRECISION array, dimension (N) On entry, Z contains the updating vector (the last row of the first sub-eigenvector matrix and the first row of the second sub-eigenvector matrix). On exit, the contents of Z are destroyed by the updating process. DLAMDA (output) DOUBLE PRECISION array, dimension (N) A copy of the first K eigenvalues which will be used by DLAED3 to form the secular equation. Q2 (output) DOUBLE PRECISION array, dimension (LDQ2,N) If ICOMPQ = 0, Q2 is not referenced. Otherwise, a copy of the first K eigenvectors which will be used by DLAED7 in a matrix multiply (DGEMM) to update the new eigenvectors. LDQ2 (input) INTEGER The leading dimension of the array Q2. LDQ2 >= max(1,N). W (output) DOUBLE PRECISION array, dimension (N) The first k values of the final deflation-altered z-vector and will be passed to DLAED3. PERM (output) INTEGER array, dimension (N) The permutations (from deflation and sorting) to be applied to each eigenblock. GIVPTR (output) INTEGER The number of Givens rotations which took place in this subproblem. GIVCOL (output) INTEGER array, dimension (2, N) Each pair of numbers indicates a pair of columns to take place in a Givens rotation. GIVNUM (output) DOUBLE PRECISION array, dimension (2, N) Each number indicates the S value to be used in the corresponding Givens rotation. INDXP (workspace) INTEGER array, dimension (N) The permutation used to place deflated values of D at the end of the array. INDXP(1:K) points to the nondeflated D-values and INDXP(K+1:N) points to the deflated eigenvalues. INDX (workspace) INTEGER array, dimension (N) The permutation used to sort the contents of D into ascending order. INFO (output) INTEGER = 0: successful exit. < 0: if INFO = -i, the i-th argument had an illegal value. Further Details =============== Based on contributions by Jeff Rutter, Computer Science Division, University of California at Berkeley, USA ===================================================================== Test the input parameters. */ /* Parameter adjustments */ --d__; q_dim1 = *ldq; q_offset = 1 + q_dim1 * 1; q -= q_offset; --indxq; --z__; --dlamda; q2_dim1 = *ldq2; q2_offset = 1 + q2_dim1 * 1; q2 -= q2_offset; --w; --perm; givcol -= 3; givnum -= 3; --indxp; --indx; /* Function Body */ *info = 0; if (*icompq < 0 || *icompq > 1) { *info = -1; } else if (*n < 0) { *info = -3; } else if (*icompq == 1 && *qsiz < *n) { *info = -4; } else if (*ldq < max(1,*n)) { *info = -7; } else if (*cutpnt < min(1,*n) || *cutpnt > *n) { *info = -10; } else if (*ldq2 < max(1,*n)) { *info = -14; } if (*info != 0) { i__1 = -(*info); xerbla_("DLAED8", &i__1); return 0; } /* Quick return if possible */ if (*n == 0) { return 0; } n1 = *cutpnt; n2 = *n - n1; n1p1 = n1 + 1; if (*rho < 0.) { dscal_(&n2, &c_b151, &z__[n1p1], &c__1); } /* Normalize z so that norm(z) = 1 */ t = 1. 
/ sqrt(2.); i__1 = *n; for (j = 1; j <= i__1; ++j) { indx[j] = j; /* L10: */ } dscal_(n, &t, &z__[1], &c__1); *rho = (d__1 = *rho * 2., abs(d__1)); /* Sort the eigenvalues into increasing order */ i__1 = *n; for (i__ = *cutpnt + 1; i__ <= i__1; ++i__) { indxq[i__] += *cutpnt; /* L20: */ } i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { dlamda[i__] = d__[indxq[i__]]; w[i__] = z__[indxq[i__]]; /* L30: */ } i__ = 1; j = *cutpnt + 1; dlamrg_(&n1, &n2, &dlamda[1], &c__1, &c__1, &indx[1]); i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { d__[i__] = dlamda[indx[i__]]; z__[i__] = w[indx[i__]]; /* L40: */ } /* Calculate the allowable deflation tolerence */ imax = idamax_(n, &z__[1], &c__1); jmax = idamax_(n, &d__[1], &c__1); eps = EPSILON; tol = eps * 8. * (d__1 = d__[jmax], abs(d__1)); /* If the rank-1 modifier is small enough, no more needs to be done except to reorganize Q so that its columns correspond with the elements in D. */ if (*rho * (d__1 = z__[imax], abs(d__1)) <= tol) { *k = 0; if (*icompq == 0) { i__1 = *n; for (j = 1; j <= i__1; ++j) { perm[j] = indxq[indx[j]]; /* L50: */ } } else { i__1 = *n; for (j = 1; j <= i__1; ++j) { perm[j] = indxq[indx[j]]; dcopy_(qsiz, &q[perm[j] * q_dim1 + 1], &c__1, &q2[j * q2_dim1 + 1], &c__1); /* L60: */ } dlacpy_("A", qsiz, n, &q2[q2_dim1 + 1], ldq2, &q[q_dim1 + 1], ldq); } return 0; } /* If there are multiple eigenvalues then the problem deflates. Here the number of equal eigenvalues are found. As each equal eigenvalue is found, an elementary reflector is computed to rotate the corresponding eigensubspace so that the corresponding components of Z are zero in this new basis. */ *k = 0; *givptr = 0; k2 = *n + 1; i__1 = *n; for (j = 1; j <= i__1; ++j) { if (*rho * (d__1 = z__[j], abs(d__1)) <= tol) { /* Deflate due to small z component. */ --k2; indxp[k2] = j; if (j == *n) { goto L110; } } else { jlam = j; goto L80; } /* L70: */ } L80: ++j; if (j > *n) { goto L100; } if (*rho * (d__1 = z__[j], abs(d__1)) <= tol) { /* Deflate due to small z component. */ --k2; indxp[k2] = j; } else { /* Check if eigenvalues are close enough to allow deflation. */ s = z__[jlam]; c__ = z__[j]; /* Find sqrt(a**2+b**2) without overflow or destructive underflow. */ tau = dlapy2_(&c__, &s); t = d__[j] - d__[jlam]; c__ /= tau; s = -s / tau; if ((d__1 = t * c__ * s, abs(d__1)) <= tol) { /* Deflation is possible. */ z__[j] = tau; z__[jlam] = 0.; /* Record the appropriate Givens rotation */ ++(*givptr); givcol[(*givptr << 1) + 1] = indxq[indx[jlam]]; givcol[(*givptr << 1) + 2] = indxq[indx[j]]; givnum[(*givptr << 1) + 1] = c__; givnum[(*givptr << 1) + 2] = s; if (*icompq == 1) { drot_(qsiz, &q[indxq[indx[jlam]] * q_dim1 + 1], &c__1, &q[ indxq[indx[j]] * q_dim1 + 1], &c__1, &c__, &s); } t = d__[jlam] * c__ * c__ + d__[j] * s * s; d__[j] = d__[jlam] * s * s + d__[j] * c__ * c__; d__[jlam] = t; --k2; i__ = 1; L90: if (k2 + i__ <= *n) { if (d__[jlam] < d__[indxp[k2 + i__]]) { indxp[k2 + i__ - 1] = indxp[k2 + i__]; indxp[k2 + i__] = jlam; ++i__; goto L90; } else { indxp[k2 + i__ - 1] = jlam; } } else { indxp[k2 + i__ - 1] = jlam; } jlam = j; } else { ++(*k); w[*k] = z__[jlam]; dlamda[*k] = d__[jlam]; indxp[*k] = jlam; jlam = j; } } goto L80; L100: /* Record the last eigenvalue. */ ++(*k); w[*k] = z__[jlam]; dlamda[*k] = d__[jlam]; indxp[*k] = jlam; L110: /* Sort the eigenvalues and corresponding eigenvectors into DLAMDA and Q2 respectively. 
The eigenvalues/vectors which were not deflated go into the first K slots of DLAMDA and Q2 respectively, while those which were deflated go into the last N - K slots. */ if (*icompq == 0) { i__1 = *n; for (j = 1; j <= i__1; ++j) { jp = indxp[j]; dlamda[j] = d__[jp]; perm[j] = indxq[indx[jp]]; /* L120: */ } } else { i__1 = *n; for (j = 1; j <= i__1; ++j) { jp = indxp[j]; dlamda[j] = d__[jp]; perm[j] = indxq[indx[jp]]; dcopy_(qsiz, &q[perm[j] * q_dim1 + 1], &c__1, &q2[j * q2_dim1 + 1] , &c__1); /* L130: */ } } /* The deflated eigenvalues and their corresponding vectors go back into the last N - K slots of D and Q respectively. */ if (*k < *n) { if (*icompq == 0) { i__1 = *n - *k; dcopy_(&i__1, &dlamda[*k + 1], &c__1, &d__[*k + 1], &c__1); } else { i__1 = *n - *k; dcopy_(&i__1, &dlamda[*k + 1], &c__1, &d__[*k + 1], &c__1); i__1 = *n - *k; dlacpy_("A", qsiz, &i__1, &q2[(*k + 1) * q2_dim1 + 1], ldq2, &q[(* k + 1) * q_dim1 + 1], ldq); } } return 0; /* End of DLAED8 */ } /* dlaed8_ */ /* Subroutine */ int dlaed9_(integer *k, integer *kstart, integer *kstop, integer *n, doublereal *d__, doublereal *q, integer *ldq, doublereal * rho, doublereal *dlamda, doublereal *w, doublereal *s, integer *lds, integer *info) { /* System generated locals */ integer q_dim1, q_offset, s_dim1, s_offset, i__1, i__2; doublereal d__1; /* Builtin functions */ double sqrt(doublereal), d_sign(doublereal *, doublereal *); /* Local variables */ static doublereal temp; extern doublereal dnrm2_(integer *, doublereal *, integer *); static integer i__, j; extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *, doublereal *, integer *), dlaed4_(integer *, integer *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, integer *); extern doublereal dlamc3_(doublereal *, doublereal *); extern /* Subroutine */ int xerbla_(char *, integer *); /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLAED9 finds the roots of the secular equation, as defined by the values in D, Z, and RHO, between KSTART and KSTOP. It makes the appropriate calls to DLAED4 and then stores the new matrix of eigenvectors for use in calculating the next level of Z vectors. Arguments ========= K (input) INTEGER The number of terms in the rational function to be solved by DLAED4. K >= 0. KSTART (input) INTEGER KSTOP (input) INTEGER The updated eigenvalues Lambda(I), KSTART <= I <= KSTOP are to be computed. 1 <= KSTART <= KSTOP <= K. N (input) INTEGER The number of rows and columns in the Q matrix. N >= K (delation may result in N > K). D (output) DOUBLE PRECISION array, dimension (N) D(I) contains the updated eigenvalues for KSTART <= I <= KSTOP. Q (workspace) DOUBLE PRECISION array, dimension (LDQ,N) LDQ (input) INTEGER The leading dimension of the array Q. LDQ >= max( 1, N ). RHO (input) DOUBLE PRECISION The value of the parameter in the rank one update equation. RHO >= 0 required. DLAMDA (input) DOUBLE PRECISION array, dimension (K) The first K elements of this array contain the old roots of the deflated updating problem. These are the poles of the secular equation. W (input) DOUBLE PRECISION array, dimension (K) The first K elements of this array contain the components of the deflation-adjusted updating vector. 
S (output) DOUBLE PRECISION array, dimension (LDS, K) Will contain the eigenvectors of the repaired matrix which will be stored for subsequent Z vector calculation and multiplied by the previously accumulated eigenvectors to update the system. LDS (input) INTEGER The leading dimension of S. LDS >= max( 1, K ). INFO (output) INTEGER = 0: successful exit. < 0: if INFO = -i, the i-th argument had an illegal value. > 0: if INFO = 1, an eigenvalue did not converge Further Details =============== Based on contributions by Jeff Rutter, Computer Science Division, University of California at Berkeley, USA ===================================================================== Test the input parameters. */ /* Parameter adjustments */ --d__; q_dim1 = *ldq; q_offset = 1 + q_dim1 * 1; q -= q_offset; --dlamda; --w; s_dim1 = *lds; s_offset = 1 + s_dim1 * 1; s -= s_offset; /* Function Body */ *info = 0; if (*k < 0) { *info = -1; } else if (*kstart < 1 || *kstart > max(1,*k)) { *info = -2; } else if (max(1,*kstop) < *kstart || *kstop > max(1,*k)) { *info = -3; } else if (*n < *k) { *info = -4; } else if (*ldq < max(1,*k)) { *info = -7; } else if (*lds < max(1,*k)) { *info = -12; } if (*info != 0) { i__1 = -(*info); xerbla_("DLAED9", &i__1); return 0; } /* Quick return if possible */ if (*k == 0) { return 0; } /* Modify values DLAMDA(i) to make sure all DLAMDA(i)-DLAMDA(j) can be computed with high relative accuracy (barring over/underflow). This is a problem on machines without a guard digit in add/subtract (Cray XMP, Cray YMP, Cray C 90 and Cray 2). The following code replaces DLAMDA(I) by 2*DLAMDA(I)-DLAMDA(I), which on any of these machines zeros out the bottommost bit of DLAMDA(I) if it is 1; this makes the subsequent subtractions DLAMDA(I)-DLAMDA(J) unproblematic when cancellation occurs. On binary machines with a guard digit (almost all machines) it does not change DLAMDA(I) at all. On hexadecimal and decimal machines with a guard digit, it slightly changes the bottommost bits of DLAMDA(I). It does not account for hexadecimal or decimal machines without guard digits (we know of none). We use a subroutine call to compute 2*DLAMBDA(I) to prevent optimizing compilers from eliminating this code. */ i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { dlamda[i__] = dlamc3_(&dlamda[i__], &dlamda[i__]) - dlamda[i__]; /* L10: */ } i__1 = *kstop; for (j = *kstart; j <= i__1; ++j) { dlaed4_(k, &j, &dlamda[1], &w[1], &q[j * q_dim1 + 1], rho, &d__[j], info); /* If the zero finder fails, the computation is terminated. */ if (*info != 0) { goto L120; } /* L20: */ } if (*k == 1 || *k == 2) { i__1 = *k; for (i__ = 1; i__ <= i__1; ++i__) { i__2 = *k; for (j = 1; j <= i__2; ++j) { s[j + i__ * s_dim1] = q[j + i__ * q_dim1]; /* L30: */ } /* L40: */ } goto L120; } /* Compute updated W. */ dcopy_(k, &w[1], &c__1, &s[s_offset], &c__1); /* Initialize W(I) = Q(I,I) */ i__1 = *ldq + 1; dcopy_(k, &q[q_offset], &i__1, &w[1], &c__1); i__1 = *k; for (j = 1; j <= i__1; ++j) { i__2 = j - 1; for (i__ = 1; i__ <= i__2; ++i__) { w[i__] *= q[i__ + j * q_dim1] / (dlamda[i__] - dlamda[j]); /* L50: */ } i__2 = *k; for (i__ = j + 1; i__ <= i__2; ++i__) { w[i__] *= q[i__ + j * q_dim1] / (dlamda[i__] - dlamda[j]); /* L60: */ } /* L70: */ } i__1 = *k; for (i__ = 1; i__ <= i__1; ++i__) { d__1 = sqrt(-w[i__]); w[i__] = d_sign(&d__1, &s[i__ + s_dim1]); /* L80: */ } /* Compute eigenvectors of the modified rank-1 modification. 
*/ i__1 = *k; for (j = 1; j <= i__1; ++j) { i__2 = *k; for (i__ = 1; i__ <= i__2; ++i__) { q[i__ + j * q_dim1] = w[i__] / q[i__ + j * q_dim1]; /* L90: */ } temp = dnrm2_(k, &q[j * q_dim1 + 1], &c__1); i__2 = *k; for (i__ = 1; i__ <= i__2; ++i__) { s[i__ + j * s_dim1] = q[i__ + j * q_dim1] / temp; /* L100: */ } /* L110: */ } L120: return 0; /* End of DLAED9 */ } /* dlaed9_ */ /* Subroutine */ int dlaeda_(integer *n, integer *tlvls, integer *curlvl, integer *curpbm, integer *prmptr, integer *perm, integer *givptr, integer *givcol, doublereal *givnum, doublereal *q, integer *qptr, doublereal *z__, doublereal *ztemp, integer *info) { /* System generated locals */ integer i__1, i__2, i__3; /* Builtin functions */ integer pow_ii(integer *, integer *); double sqrt(doublereal); /* Local variables */ extern /* Subroutine */ int drot_(integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *); static integer curr, bsiz1, bsiz2, psiz1, psiz2, i__, k, zptr1; extern /* Subroutine */ int dgemv_(char *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *), dcopy_(integer *, doublereal *, integer *, doublereal *, integer *), xerbla_(char *, integer *); static integer mid, ptr; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLAEDA computes the Z vector corresponding to the merge step in the CURLVLth step of the merge process with TLVLS steps for the CURPBMth problem. Arguments ========= N (input) INTEGER The dimension of the symmetric tridiagonal matrix. N >= 0. TLVLS (input) INTEGER The total number of merging levels in the overall divide and conquer tree. CURLVL (input) INTEGER The current level in the overall merge routine, 0 <= curlvl <= tlvls. CURPBM (input) INTEGER The current problem in the current level in the overall merge routine (counting from upper left to lower right). PRMPTR (input) INTEGER array, dimension (N lg N) Contains a list of pointers which indicate where in PERM a level's permutation is stored. PRMPTR(i+1) - PRMPTR(i) indicates the size of the permutation and incidentally the size of the full, non-deflated problem. PERM (input) INTEGER array, dimension (N lg N) Contains the permutations (from deflation and sorting) to be applied to each eigenblock. GIVPTR (input) INTEGER array, dimension (N lg N) Contains a list of pointers which indicate where in GIVCOL a level's Givens rotations are stored. GIVPTR(i+1) - GIVPTR(i) indicates the number of Givens rotations. GIVCOL (input) INTEGER array, dimension (2, N lg N) Each pair of numbers indicates a pair of columns to take place in a Givens rotation. GIVNUM (input) DOUBLE PRECISION array, dimension (2, N lg N) Each number indicates the S value to be used in the corresponding Givens rotation. Q (input) DOUBLE PRECISION array, dimension (N**2) Contains the square eigenblocks from previous levels, the starting positions for blocks are given by QPTR. QPTR (input) INTEGER array, dimension (N+2) Contains a list of pointers which indicate where in Q an eigenblock is stored. SQRT( QPTR(i+1) - QPTR(i) ) indicates the size of the block. Z (output) DOUBLE PRECISION array, dimension (N) On output this vector contains the updating vector (the last row of the first sub-eigenvector matrix and the first row of the second sub-eigenvector matrix). ZTEMP (workspace) DOUBLE PRECISION array, dimension (N) INFO (output) INTEGER = 0: successful exit. 
< 0: if INFO = -i, the i-th argument had an illegal value. Further Details =============== Based on contributions by Jeff Rutter, Computer Science Division, University of California at Berkeley, USA ===================================================================== Test the input parameters. */ /* Parameter adjustments */ --ztemp; --z__; --qptr; --q; givnum -= 3; givcol -= 3; --givptr; --perm; --prmptr; /* Function Body */ *info = 0; if (*n < 0) { *info = -1; } if (*info != 0) { i__1 = -(*info); xerbla_("DLAEDA", &i__1); return 0; } /* Quick return if possible */ if (*n == 0) { return 0; } /* Determine location of first number in second half. */ mid = *n / 2 + 1; /* Gather last/first rows of appropriate eigenblocks into center of Z */ ptr = 1; /* Determine location of lowest level subproblem in the full storage scheme */ i__1 = *curlvl - 1; curr = ptr + *curpbm * pow_ii(&c__2, curlvl) + pow_ii(&c__2, &i__1) - 1; /* Determine size of these matrices. We add HALF to the value of the SQRT in case the machine underestimates one of these square roots. */ bsiz1 = (integer) (sqrt((doublereal) (qptr[curr + 1] - qptr[curr])) + .5); bsiz2 = (integer) (sqrt((doublereal) (qptr[curr + 2] - qptr[curr + 1])) + .5); i__1 = mid - bsiz1 - 1; for (k = 1; k <= i__1; ++k) { z__[k] = 0.; /* L10: */ } dcopy_(&bsiz1, &q[qptr[curr] + bsiz1 - 1], &bsiz1, &z__[mid - bsiz1], & c__1); dcopy_(&bsiz2, &q[qptr[curr + 1]], &bsiz2, &z__[mid], &c__1); i__1 = *n; for (k = mid + bsiz2; k <= i__1; ++k) { z__[k] = 0.; /* L20: */ } /* Loop thru remaining levels 1 -> CURLVL applying the Givens rotations and permutation and then multiplying the center matrices against the current Z. */ ptr = pow_ii(&c__2, tlvls) + 1; i__1 = *curlvl - 1; for (k = 1; k <= i__1; ++k) { i__2 = *curlvl - k; i__3 = *curlvl - k - 1; curr = ptr + *curpbm * pow_ii(&c__2, &i__2) + pow_ii(&c__2, &i__3) - 1; psiz1 = prmptr[curr + 1] - prmptr[curr]; psiz2 = prmptr[curr + 2] - prmptr[curr + 1]; zptr1 = mid - psiz1; /* Apply Givens at CURR and CURR+1 */ i__2 = givptr[curr + 1] - 1; for (i__ = givptr[curr]; i__ <= i__2; ++i__) { drot_(&c__1, &z__[zptr1 + givcol[(i__ << 1) + 1] - 1], &c__1, & z__[zptr1 + givcol[(i__ << 1) + 2] - 1], &c__1, &givnum[( i__ << 1) + 1], &givnum[(i__ << 1) + 2]); /* L30: */ } i__2 = givptr[curr + 2] - 1; for (i__ = givptr[curr + 1]; i__ <= i__2; ++i__) { drot_(&c__1, &z__[mid - 1 + givcol[(i__ << 1) + 1]], &c__1, &z__[ mid - 1 + givcol[(i__ << 1) + 2]], &c__1, &givnum[(i__ << 1) + 1], &givnum[(i__ << 1) + 2]); /* L40: */ } psiz1 = prmptr[curr + 1] - prmptr[curr]; psiz2 = prmptr[curr + 2] - prmptr[curr + 1]; i__2 = psiz1 - 1; for (i__ = 0; i__ <= i__2; ++i__) { ztemp[i__ + 1] = z__[zptr1 + perm[prmptr[curr] + i__] - 1]; /* L50: */ } i__2 = psiz2 - 1; for (i__ = 0; i__ <= i__2; ++i__) { ztemp[psiz1 + i__ + 1] = z__[mid + perm[prmptr[curr + 1] + i__] - 1]; /* L60: */ } /* Multiply Blocks at CURR and CURR+1 Determine size of these matrices. We add HALF to the value of the SQRT in case the machine underestimates one of these square roots. 
*/ bsiz1 = (integer) (sqrt((doublereal) (qptr[curr + 1] - qptr[curr])) + .5); bsiz2 = (integer) (sqrt((doublereal) (qptr[curr + 2] - qptr[curr + 1]) ) + .5); if (bsiz1 > 0) { dgemv_("T", &bsiz1, &bsiz1, &c_b15, &q[qptr[curr]], &bsiz1, & ztemp[1], &c__1, &c_b29, &z__[zptr1], &c__1); } i__2 = psiz1 - bsiz1; dcopy_(&i__2, &ztemp[bsiz1 + 1], &c__1, &z__[zptr1 + bsiz1], &c__1); if (bsiz2 > 0) { dgemv_("T", &bsiz2, &bsiz2, &c_b15, &q[qptr[curr + 1]], &bsiz2, & ztemp[psiz1 + 1], &c__1, &c_b29, &z__[mid], &c__1); } i__2 = psiz2 - bsiz2; dcopy_(&i__2, &ztemp[psiz1 + bsiz2 + 1], &c__1, &z__[mid + bsiz2], & c__1); i__2 = *tlvls - k; ptr += pow_ii(&c__2, &i__2); /* L70: */ } return 0; /* End of DLAEDA */ } /* dlaeda_ */ /* Subroutine */ int dlaev2_(doublereal *a, doublereal *b, doublereal *c__, doublereal *rt1, doublereal *rt2, doublereal *cs1, doublereal *sn1) { /* System generated locals */ doublereal d__1; /* Builtin functions */ double sqrt(doublereal); /* Local variables */ static doublereal acmn, acmx, ab, df, cs, ct, tb, sm, tn, rt, adf, acs; static integer sgn1, sgn2; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLAEV2 computes the eigendecomposition of a 2-by-2 symmetric matrix [ A B ] [ B C ]. On return, RT1 is the eigenvalue of larger absolute value, RT2 is the eigenvalue of smaller absolute value, and (CS1,SN1) is the unit right eigenvector for RT1, giving the decomposition [ CS1 SN1 ] [ A B ] [ CS1 -SN1 ] = [ RT1 0 ] [-SN1 CS1 ] [ B C ] [ SN1 CS1 ] [ 0 RT2 ]. Arguments ========= A (input) DOUBLE PRECISION The (1,1) element of the 2-by-2 matrix. B (input) DOUBLE PRECISION The (1,2) element and the conjugate of the (2,1) element of the 2-by-2 matrix. C (input) DOUBLE PRECISION The (2,2) element of the 2-by-2 matrix. RT1 (output) DOUBLE PRECISION The eigenvalue of larger absolute value. RT2 (output) DOUBLE PRECISION The eigenvalue of smaller absolute value. CS1 (output) DOUBLE PRECISION SN1 (output) DOUBLE PRECISION The vector (CS1, SN1) is a unit right eigenvector for RT1. Further Details =============== RT1 is accurate to a few ulps barring over/underflow. RT2 may be inaccurate if there is massive cancellation in the determinant A*C-B*B; higher precision or correctly rounded or correctly truncated arithmetic would be needed to compute RT2 accurately in all cases. CS1 and SN1 are accurate to a few ulps barring over/underflow. Overflow is possible only if RT1 is within a factor of 5 of overflow. Underflow is harmless if the input data is 0 or exceeds underflow_threshold / macheps. ===================================================================== Compute the eigenvalues */ sm = *a + *c__; df = *a - *c__; adf = abs(df); tb = *b + *b; ab = abs(tb); if (abs(*a) > abs(*c__)) { acmx = *a; acmn = *c__; } else { acmx = *c__; acmn = *a; } if (adf > ab) { /* Computing 2nd power */ d__1 = ab / adf; rt = adf * sqrt(d__1 * d__1 + 1.); } else if (adf < ab) { /* Computing 2nd power */ d__1 = adf / ab; rt = ab * sqrt(d__1 * d__1 + 1.); } else { /* Includes case AB=ADF=0 */ rt = ab * sqrt(2.); } if (sm < 0.) { *rt1 = (sm - rt) * .5; sgn1 = -1; /* Order of execution important. To get fully accurate smaller eigenvalue, next line needs to be executed in higher precision. */ *rt2 = acmx / *rt1 * acmn - *b / *rt1 * *b; } else if (sm > 0.) { *rt1 = (sm + rt) * .5; sgn1 = 1; /* Order of execution important. To get fully accurate smaller eigenvalue, next line needs to be executed in higher precision. 
*/ *rt2 = acmx / *rt1 * acmn - *b / *rt1 * *b; } else { /* Includes case RT1 = RT2 = 0 */ *rt1 = rt * .5; *rt2 = rt * -.5; sgn1 = 1; } /* Compute the eigenvector */ if (df >= 0.) { cs = df + rt; sgn2 = 1; } else { cs = df - rt; sgn2 = -1; } acs = abs(cs); if (acs > ab) { ct = -tb / cs; *sn1 = 1. / sqrt(ct * ct + 1.); *cs1 = ct * *sn1; } else { if (ab == 0.) { *cs1 = 1.; *sn1 = 0.; } else { tn = -cs / tb; *cs1 = 1. / sqrt(tn * tn + 1.); *sn1 = tn * *cs1; } } if (sgn1 == sgn2) { tn = *cs1; *cs1 = -(*sn1); *sn1 = tn; } return 0; /* End of DLAEV2 */ } /* dlaev2_ */ /* Subroutine */ int dlaexc_(logical *wantq, integer *n, doublereal *t, integer *ldt, doublereal *q, integer *ldq, integer *j1, integer *n1, integer *n2, doublereal *work, integer *info) { /* System generated locals */ integer q_dim1, q_offset, t_dim1, t_offset, i__1; doublereal d__1, d__2, d__3; /* Local variables */ static integer ierr; static doublereal temp; extern /* Subroutine */ int drot_(integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *); static doublereal d__[16] /* was [4][4] */; static integer k; static doublereal u[3], scale, x[4] /* was [2][2] */, dnorm; static integer j2, j3, j4; static doublereal xnorm, u1[3], u2[3]; extern /* Subroutine */ int dlanv2_(doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *), dlasy2_( logical *, logical *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *); static integer nd; static doublereal cs, t11, t22; static doublereal t33; extern doublereal dlange_(char *, integer *, integer *, doublereal *, integer *, doublereal *); extern /* Subroutine */ int dlarfg_(integer *, doublereal *, doublereal *, integer *, doublereal *); static doublereal sn; extern /* Subroutine */ int dlacpy_(char *, integer *, integer *, doublereal *, integer *, doublereal *, integer *), dlartg_(doublereal *, doublereal *, doublereal *, doublereal *, doublereal *), dlarfx_(char *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *, doublereal *); static doublereal thresh, smlnum, wi1, wi2, wr1, wr2, eps, tau, tau1, tau2; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLAEXC swaps adjacent diagonal blocks T11 and T22 of order 1 or 2 in an upper quasi-triangular matrix T by an orthogonal similarity transformation. T must be in Schur canonical form, that is, block upper triangular with 1-by-1 and 2-by-2 diagonal blocks; each 2-by-2 diagonal block has its diagonal elemnts equal and its off-diagonal elements of opposite sign. Arguments ========= WANTQ (input) LOGICAL = .TRUE. : accumulate the transformation in the matrix Q; = .FALSE.: do not accumulate the transformation. N (input) INTEGER The order of the matrix T. N >= 0. T (input/output) DOUBLE PRECISION array, dimension (LDT,N) On entry, the upper quasi-triangular matrix T, in Schur canonical form. On exit, the updated matrix T, again in Schur canonical form. LDT (input) INTEGER The leading dimension of the array T. LDT >= max(1,N). Q (input/output) DOUBLE PRECISION array, dimension (LDQ,N) On entry, if WANTQ is .TRUE., the orthogonal matrix Q. On exit, if WANTQ is .TRUE., the updated matrix Q. If WANTQ is .FALSE., Q is not referenced. LDQ (input) INTEGER The leading dimension of the array Q. 
LDQ >= 1; and if WANTQ is .TRUE., LDQ >= N. J1 (input) INTEGER The index of the first row of the first block T11. N1 (input) INTEGER The order of the first block T11. N1 = 0, 1 or 2. N2 (input) INTEGER The order of the second block T22. N2 = 0, 1 or 2. WORK (workspace) DOUBLE PRECISION array, dimension (N) INFO (output) INTEGER = 0: successful exit = 1: the transformed matrix T would be too far from Schur form; the blocks are not swapped and T and Q are unchanged. ===================================================================== */ /* Parameter adjustments */ t_dim1 = *ldt; t_offset = 1 + t_dim1 * 1; t -= t_offset; q_dim1 = *ldq; q_offset = 1 + q_dim1 * 1; q -= q_offset; --work; /* Function Body */ *info = 0; /* Quick return if possible */ if (*n == 0 || *n1 == 0 || *n2 == 0) { return 0; } if (*j1 + *n1 > *n) { return 0; } j2 = *j1 + 1; j3 = *j1 + 2; j4 = *j1 + 3; if (*n1 == 1 && *n2 == 1) { /* Swap two 1-by-1 blocks. */ t11 = t[*j1 + *j1 * t_dim1]; t22 = t[j2 + j2 * t_dim1]; /* Determine the transformation to perform the interchange. */ d__1 = t22 - t11; dlartg_(&t[*j1 + j2 * t_dim1], &d__1, &cs, &sn, &temp); /* Apply transformation to the matrix T. */ if (j3 <= *n) { i__1 = *n - *j1 - 1; drot_(&i__1, &t[*j1 + j3 * t_dim1], ldt, &t[j2 + j3 * t_dim1], ldt, &cs, &sn); } i__1 = *j1 - 1; drot_(&i__1, &t[*j1 * t_dim1 + 1], &c__1, &t[j2 * t_dim1 + 1], &c__1, &cs, &sn); t[*j1 + *j1 * t_dim1] = t22; t[j2 + j2 * t_dim1] = t11; if (*wantq) { /* Accumulate transformation in the matrix Q. */ drot_(n, &q[*j1 * q_dim1 + 1], &c__1, &q[j2 * q_dim1 + 1], &c__1, &cs, &sn); } } else { /* Swapping involves at least one 2-by-2 block. Copy the diagonal block of order N1+N2 to the local array D and compute its norm. */ nd = *n1 + *n2; dlacpy_("Full", &nd, &nd, &t[*j1 + *j1 * t_dim1], ldt, d__, &c__4); dnorm = dlange_("Max", &nd, &nd, d__, &c__4, &work[1]); /* Compute machine-dependent threshold for test for accepting swap. */ eps = PRECISION; smlnum = SAFEMINIMUM / eps; /* Computing MAX */ d__1 = eps * 10. * dnorm; thresh = max(d__1,smlnum); /* Solve T11*X - X*T22 = scale*T12 for X. */ dlasy2_(&c_false, &c_false, &c_n1, n1, n2, d__, &c__4, &d__[*n1 + 1 + (*n1 + 1 << 2) - 5], &c__4, &d__[(*n1 + 1 << 2) - 4], &c__4, & scale, x, &c__2, &xnorm, &ierr); /* Swap the adjacent diagonal blocks. */ k = *n1 + *n1 + *n2 - 3; switch (k) { case 1: goto L10; case 2: goto L20; case 3: goto L30; } L10: /* N1 = 1, N2 = 2: generate elementary reflector H so that: ( scale, X11, X12 ) H = ( 0, 0, * ) */ u[0] = scale; u[1] = x[0]; u[2] = x[2]; dlarfg_(&c__3, &u[2], u, &c__1, &tau); u[2] = 1.; t11 = t[*j1 + *j1 * t_dim1]; /* Perform swap provisionally on diagonal block in D. */ dlarfx_("L", &c__3, &c__3, u, &tau, d__, &c__4, &work[1]); dlarfx_("R", &c__3, &c__3, u, &tau, d__, &c__4, &work[1]); /* Test whether to reject swap. Computing MAX */ d__2 = abs(d__[2]), d__3 = abs(d__[6]), d__2 = max(d__2,d__3), d__3 = (d__1 = d__[10] - t11, abs(d__1)); if (max(d__2,d__3) > thresh) { goto L50; } /* Accept swap: apply transformation to the entire matrix T. */ i__1 = *n - *j1 + 1; dlarfx_("L", &c__3, &i__1, u, &tau, &t[*j1 + *j1 * t_dim1], ldt, & work[1]); dlarfx_("R", &j2, &c__3, u, &tau, &t[*j1 * t_dim1 + 1], ldt, &work[1]); t[j3 + *j1 * t_dim1] = 0.; t[j3 + j2 * t_dim1] = 0.; t[j3 + j3 * t_dim1] = t11; if (*wantq) { /* Accumulate transformation in the matrix Q. 
*/ dlarfx_("R", n, &c__3, u, &tau, &q[*j1 * q_dim1 + 1], ldq, &work[ 1]); } goto L40; L20: /* N1 = 2, N2 = 1: generate elementary reflector H so that: H ( -X11 ) = ( * ) ( -X21 ) = ( 0 ) ( scale ) = ( 0 ) */ u[0] = -x[0]; u[1] = -x[1]; u[2] = scale; dlarfg_(&c__3, u, &u[1], &c__1, &tau); u[0] = 1.; t33 = t[j3 + j3 * t_dim1]; /* Perform swap provisionally on diagonal block in D. */ dlarfx_("L", &c__3, &c__3, u, &tau, d__, &c__4, &work[1]); dlarfx_("R", &c__3, &c__3, u, &tau, d__, &c__4, &work[1]); /* Test whether to reject swap. Computing MAX */ d__2 = abs(d__[1]), d__3 = abs(d__[2]), d__2 = max(d__2,d__3), d__3 = (d__1 = d__[0] - t33, abs(d__1)); if (max(d__2,d__3) > thresh) { goto L50; } /* Accept swap: apply transformation to the entire matrix T. */ dlarfx_("R", &j3, &c__3, u, &tau, &t[*j1 * t_dim1 + 1], ldt, &work[1]); i__1 = *n - *j1; dlarfx_("L", &c__3, &i__1, u, &tau, &t[*j1 + j2 * t_dim1], ldt, &work[ 1]); t[*j1 + *j1 * t_dim1] = t33; t[j2 + *j1 * t_dim1] = 0.; t[j3 + *j1 * t_dim1] = 0.; if (*wantq) { /* Accumulate transformation in the matrix Q. */ dlarfx_("R", n, &c__3, u, &tau, &q[*j1 * q_dim1 + 1], ldq, &work[ 1]); } goto L40; L30: /* N1 = 2, N2 = 2: generate elementary reflectors H(1) and H(2) so that: H(2) H(1) ( -X11 -X12 ) = ( * * ) ( -X21 -X22 ) ( 0 * ) ( scale 0 ) ( 0 0 ) ( 0 scale ) ( 0 0 ) */ u1[0] = -x[0]; u1[1] = -x[1]; u1[2] = scale; dlarfg_(&c__3, u1, &u1[1], &c__1, &tau1); u1[0] = 1.; temp = -tau1 * (x[2] + u1[1] * x[3]); u2[0] = -temp * u1[1] - x[3]; u2[1] = -temp * u1[2]; u2[2] = scale; dlarfg_(&c__3, u2, &u2[1], &c__1, &tau2); u2[0] = 1.; /* Perform swap provisionally on diagonal block in D. */ dlarfx_("L", &c__3, &c__4, u1, &tau1, d__, &c__4, &work[1]) ; dlarfx_("R", &c__4, &c__3, u1, &tau1, d__, &c__4, &work[1]) ; dlarfx_("L", &c__3, &c__4, u2, &tau2, &d__[1], &c__4, &work[1]); dlarfx_("R", &c__4, &c__3, u2, &tau2, &d__[4], &c__4, &work[1]); /* Test whether to reject swap. Computing MAX */ d__1 = abs(d__[2]), d__2 = abs(d__[6]), d__1 = max(d__1,d__2), d__2 = abs(d__[3]), d__1 = max(d__1,d__2), d__2 = abs(d__[7]); if (max(d__1,d__2) > thresh) { goto L50; } /* Accept swap: apply transformation to the entire matrix T. */ i__1 = *n - *j1 + 1; dlarfx_("L", &c__3, &i__1, u1, &tau1, &t[*j1 + *j1 * t_dim1], ldt, & work[1]); dlarfx_("R", &j4, &c__3, u1, &tau1, &t[*j1 * t_dim1 + 1], ldt, &work[ 1]); i__1 = *n - *j1 + 1; dlarfx_("L", &c__3, &i__1, u2, &tau2, &t[j2 + *j1 * t_dim1], ldt, & work[1]); dlarfx_("R", &j4, &c__3, u2, &tau2, &t[j2 * t_dim1 + 1], ldt, &work[1] ); t[j3 + *j1 * t_dim1] = 0.; t[j3 + j2 * t_dim1] = 0.; t[j4 + *j1 * t_dim1] = 0.; t[j4 + j2 * t_dim1] = 0.; if (*wantq) { /* Accumulate transformation in the matrix Q. 
*/ dlarfx_("R", n, &c__3, u1, &tau1, &q[*j1 * q_dim1 + 1], ldq, & work[1]); dlarfx_("R", n, &c__3, u2, &tau2, &q[j2 * q_dim1 + 1], ldq, &work[ 1]); } L40: if (*n2 == 2) { /* Standardize new 2-by-2 block T11 */ dlanv2_(&t[*j1 + *j1 * t_dim1], &t[*j1 + j2 * t_dim1], &t[j2 + * j1 * t_dim1], &t[j2 + j2 * t_dim1], &wr1, &wi1, &wr2, & wi2, &cs, &sn); i__1 = *n - *j1 - 1; drot_(&i__1, &t[*j1 + (*j1 + 2) * t_dim1], ldt, &t[j2 + (*j1 + 2) * t_dim1], ldt, &cs, &sn); i__1 = *j1 - 1; drot_(&i__1, &t[*j1 * t_dim1 + 1], &c__1, &t[j2 * t_dim1 + 1], & c__1, &cs, &sn); if (*wantq) { drot_(n, &q[*j1 * q_dim1 + 1], &c__1, &q[j2 * q_dim1 + 1], & c__1, &cs, &sn); } } if (*n1 == 2) { /* Standardize new 2-by-2 block T22 */ j3 = *j1 + *n2; j4 = j3 + 1; dlanv2_(&t[j3 + j3 * t_dim1], &t[j3 + j4 * t_dim1], &t[j4 + j3 * t_dim1], &t[j4 + j4 * t_dim1], &wr1, &wi1, &wr2, &wi2, & cs, &sn); if (j3 + 2 <= *n) { i__1 = *n - j3 - 1; drot_(&i__1, &t[j3 + (j3 + 2) * t_dim1], ldt, &t[j4 + (j3 + 2) * t_dim1], ldt, &cs, &sn); } i__1 = j3 - 1; drot_(&i__1, &t[j3 * t_dim1 + 1], &c__1, &t[j4 * t_dim1 + 1], & c__1, &cs, &sn); if (*wantq) { drot_(n, &q[j3 * q_dim1 + 1], &c__1, &q[j4 * q_dim1 + 1], & c__1, &cs, &sn); } } } return 0; /* Exit with INFO = 1 if swap was rejected. */ L50: *info = 1; return 0; /* End of DLAEXC */ } /* dlaexc_ */ /* Subroutine */ int dlahqr_(logical *wantt, logical *wantz, integer *n, integer *ilo, integer *ihi, doublereal *h__, integer *ldh, doublereal *wr, doublereal *wi, integer *iloz, integer *ihiz, doublereal *z__, integer *ldz, integer *info) { /* System generated locals */ integer h_dim1, h_offset, z_dim1, z_offset, i__1, i__2, i__3; doublereal d__1, d__2, d__3, d__4; /* Builtin functions */ double sqrt(doublereal); /* Local variables */ extern /* Subroutine */ int drot_(integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *); static integer i__, j, k, l, m; static doublereal s, v[3]; extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *, doublereal *, integer *); static integer i1, i2; static doublereal t1, t2, t3, v2, v3; extern /* Subroutine */ int dlanv2_(doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *); static doublereal aa, ab, ba, bb; extern /* Subroutine */ int dlabad_(doublereal *, doublereal *); static doublereal h11, h12, h21, h22, cs; static integer nh; extern /* Subroutine */ int dlarfg_(integer *, doublereal *, doublereal *, integer *, doublereal *); static doublereal sn; static integer nr; static doublereal tr; static integer nz; static doublereal safmin, safmax, rtdisc, smlnum, det, h21s; static integer its; static doublereal ulp, sum, tst, rt1i, rt2i, rt1r, rt2r; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLAHQR is an auxiliary routine called by DHSEQR to update the eigenvalues and Schur decomposition already computed by DHSEQR, by dealing with the Hessenberg submatrix in rows and columns ILO to IHI. Arguments ========= WANTT (input) LOGICAL = .TRUE. : the full Schur form T is required; = .FALSE.: only eigenvalues are required. WANTZ (input) LOGICAL = .TRUE. : the matrix of Schur vectors Z is required; = .FALSE.: Schur vectors are not required. N (input) INTEGER The order of the matrix H. N >= 0. 
ILO (input) INTEGER IHI (input) INTEGER It is assumed that H is already upper quasi-triangular in rows and columns IHI+1:N, and that H(ILO,ILO-1) = 0 (unless ILO = 1). DLAHQR works primarily with the Hessenberg submatrix in rows and columns ILO to IHI, but applies transformations to all of H if WANTT is .TRUE.. 1 <= ILO <= max(1,IHI); IHI <= N. H (input/output) DOUBLE PRECISION array, dimension (LDH,N) On entry, the upper Hessenberg matrix H. On exit, if INFO is zero and if WANTT is .TRUE., H is upper quasi-triangular in rows and columns ILO:IHI, with any 2-by-2 diagonal blocks in standard form. If INFO is zero and WANTT is .FALSE., the contents of H are unspecified on exit. The output state of H if INFO is nonzero is given below under the description of INFO. LDH (input) INTEGER The leading dimension of the array H. LDH >= max(1,N). WR (output) DOUBLE PRECISION array, dimension (N) WI (output) DOUBLE PRECISION array, dimension (N) The real and imaginary parts, respectively, of the computed eigenvalues ILO to IHI are stored in the corresponding elements of WR and WI. If two eigenvalues are computed as a complex conjugate pair, they are stored in consecutive elements of WR and WI, say the i-th and (i+1)th, with WI(i) > 0 and WI(i+1) < 0. If WANTT is .TRUE., the eigenvalues are stored in the same order as on the diagonal of the Schur form returned in H, with WR(i) = H(i,i), and, if H(i:i+1,i:i+1) is a 2-by-2 diagonal block, WI(i) = sqrt(H(i+1,i)*H(i,i+1)) and WI(i+1) = -WI(i). ILOZ (input) INTEGER IHIZ (input) INTEGER Specify the rows of Z to which transformations must be applied if WANTZ is .TRUE.. 1 <= ILOZ <= ILO; IHI <= IHIZ <= N. Z (input/output) DOUBLE PRECISION array, dimension (LDZ,N) If WANTZ is .TRUE., on entry Z must contain the current matrix Z of transformations accumulated by DHSEQR, and on exit Z has been updated; transformations are applied only to the submatrix Z(ILOZ:IHIZ,ILO:IHI). If WANTZ is .FALSE., Z is not referenced. LDZ (input) INTEGER The leading dimension of the array Z. LDZ >= max(1,N). INFO (output) INTEGER = 0: successful exit .GT. 0: If INFO = i, DLAHQR failed to compute all the eigenvalues ILO to IHI in a total of 30 iterations per eigenvalue; elements i+1:ihi of WR and WI contain those eigenvalues which have been successfully computed. If INFO .GT. 0 and WANTT is .FALSE., then on exit, the remaining unconverged eigenvalues are the eigenvalues of the upper Hessenberg matrix rows and columns ILO thorugh INFO of the final, output value of H. If INFO .GT. 0 and WANTT is .TRUE., then on exit (*) (initial value of H)*U = U*(final value of H) where U is an orthognal matrix. The final value of H is upper Hessenberg and triangular in rows and columns INFO+1 through IHI. If INFO .GT. 0 and WANTZ is .TRUE., then on exit (final value of Z) = (initial value of Z)*U where U is the orthogonal matrix in (*) (regardless of the value of WANTT.) Further Details =============== 02-96 Based on modifications by David Day, Sandia National Laboratory, USA 12-04 Further modifications by Ralph Byers, University of Kansas, USA This is a modified version of DLAHQR from LAPACK version 3.0. It is (1) more robust against overflow and underflow and (2) adopts the more conservative Ahues & Tisseur stopping criterion (LAWN 122, 1997). 
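   For orientation only, a minimal calling sketch follows.  The variable
   names and their setup are illustrative assumptions, not part of this
   file; in LAPACK this routine is normally reached through the DHSEQR
   driver rather than called directly.

       logical wantt = TRUE_, wantz = TRUE_;
       integer n, ilo, ihi, ldh, iloz, ihiz, ldz, info;
       doublereal *h, *wr, *wi, *z;
       (allocate h as an LDH-by-N upper Hessenberg matrix, z as LDZ-by-N,
        wr and wi of length N, and set ilo, ihi, iloz, ihiz beforehand)
       dlahqr_(&wantt, &wantz, &n, &ilo, &ihi, h, &ldh, wr, wi,
               &iloz, &ihiz, z, &ldz, &info);

   On return, info = 0 indicates success; info .GT. 0 means some of the
   eigenvalues in ILO:IHI failed to converge, as described above.
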
========================================================= */ /* Parameter adjustments */ h_dim1 = *ldh; h_offset = 1 + h_dim1 * 1; h__ -= h_offset; --wr; --wi; z_dim1 = *ldz; z_offset = 1 + z_dim1 * 1; z__ -= z_offset; /* Function Body */ *info = 0; /* Quick return if possible */ if (*n == 0) { return 0; } if (*ilo == *ihi) { wr[*ilo] = h__[*ilo + *ilo * h_dim1]; wi[*ilo] = 0.; return 0; } /* ==== clear out the trash ==== */ i__1 = *ihi - 3; for (j = *ilo; j <= i__1; ++j) { h__[j + 2 + j * h_dim1] = 0.; h__[j + 3 + j * h_dim1] = 0.; /* L10: */ } if (*ilo <= *ihi - 2) { h__[*ihi + (*ihi - 2) * h_dim1] = 0.; } nh = *ihi - *ilo + 1; nz = *ihiz - *iloz + 1; /* Set machine-dependent constants for the stopping criterion. */ safmin = SAFEMINIMUM; safmax = 1. / safmin; dlabad_(&safmin, &safmax); ulp = PRECISION; smlnum = safmin * ((doublereal) nh / ulp); /* I1 and I2 are the indices of the first row and last column of H to which transformations must be applied. If eigenvalues only are being computed, I1 and I2 are set inside the main loop. */ if (*wantt) { i1 = 1; i2 = *n; } /* The main loop begins here. I is the loop index and decreases from IHI to ILO in steps of 1 or 2. Each iteration of the loop works with the active submatrix in rows and columns L to I. Eigenvalues I+1 to IHI have already converged. Either L = ILO or H(L,L-1) is negligible so that the matrix splits. */ i__ = *ihi; L20: l = *ilo; if (i__ < *ilo) { goto L160; } /* Perform QR iterations on rows and columns ILO to I until a submatrix of order 1 or 2 splits off at the bottom because a subdiagonal element has become negligible. */ for (its = 0; its <= 30; ++its) { /* Look for a single small subdiagonal element. */ i__1 = l + 1; for (k = i__; k >= i__1; --k) { if ((d__1 = h__[k + (k - 1) * h_dim1], abs(d__1)) <= smlnum) { goto L40; } tst = (d__1 = h__[k - 1 + (k - 1) * h_dim1], abs(d__1)) + (d__2 = h__[k + k * h_dim1], abs(d__2)); if (tst == 0.) { if (k - 2 >= *ilo) { tst += (d__1 = h__[k - 1 + (k - 2) * h_dim1], abs(d__1)); } if (k + 1 <= *ihi) { tst += (d__1 = h__[k + 1 + k * h_dim1], abs(d__1)); } } /* ==== The following is a conservative small subdiagonal . deflation criterion due to Ahues & Tisseur (LAWN 122, . 1997). It has better mathematical foundation and . improves accuracy in some cases. ==== */ if ((d__1 = h__[k + (k - 1) * h_dim1], abs(d__1)) <= ulp * tst) { /* Computing MAX */ d__3 = (d__1 = h__[k + (k - 1) * h_dim1], abs(d__1)), d__4 = ( d__2 = h__[k - 1 + k * h_dim1], abs(d__2)); ab = max(d__3,d__4); /* Computing MIN */ d__3 = (d__1 = h__[k + (k - 1) * h_dim1], abs(d__1)), d__4 = ( d__2 = h__[k - 1 + k * h_dim1], abs(d__2)); ba = min(d__3,d__4); /* Computing MAX */ d__3 = (d__1 = h__[k + k * h_dim1], abs(d__1)), d__4 = (d__2 = h__[k - 1 + (k - 1) * h_dim1] - h__[k + k * h_dim1], abs(d__2)); aa = max(d__3,d__4); /* Computing MIN */ d__3 = (d__1 = h__[k + k * h_dim1], abs(d__1)), d__4 = (d__2 = h__[k - 1 + (k - 1) * h_dim1] - h__[k + k * h_dim1], abs(d__2)); bb = min(d__3,d__4); s = aa + ab; /* Computing MAX */ d__1 = smlnum, d__2 = ulp * (bb * (aa / s)); if (ba * (ab / s) <= max(d__1,d__2)) { goto L40; } } /* L30: */ } L40: l = k; if (l > *ilo) { /* H(L,L-1) is negligible */ h__[l + (l - 1) * h_dim1] = 0.; } /* Exit from loop if a submatrix of order 1 or 2 has split off. */ if (l >= i__ - 1) { goto L150; } /* Now the active submatrix is in rows and columns L to I. If eigenvalues only are being computed, only the active submatrix need be transformed. */ if (! 
(*wantt)) { i1 = l; i2 = i__; } if (its == 10 || its == 20) { /* Exceptional shift. */ h11 = s * .75 + h__[i__ + i__ * h_dim1]; h12 = s * -.4375; h21 = s; h22 = h11; } else { /* Prepare to use Francis' double shift (i.e. 2nd degree generalized Rayleigh quotient) */ h11 = h__[i__ - 1 + (i__ - 1) * h_dim1]; h21 = h__[i__ + (i__ - 1) * h_dim1]; h12 = h__[i__ - 1 + i__ * h_dim1]; h22 = h__[i__ + i__ * h_dim1]; } s = abs(h11) + abs(h12) + abs(h21) + abs(h22); if (s == 0.) { rt1r = 0.; rt1i = 0.; rt2r = 0.; rt2i = 0.; } else { h11 /= s; h21 /= s; h12 /= s; h22 /= s; tr = (h11 + h22) / 2.; det = (h11 - tr) * (h22 - tr) - h12 * h21; rtdisc = sqrt((abs(det))); if (det >= 0.) { /* ==== complex conjugate shifts ==== */ rt1r = tr * s; rt2r = rt1r; rt1i = rtdisc * s; rt2i = -rt1i; } else { /* ==== real shifts (use only one of them) ==== */ rt1r = tr + rtdisc; rt2r = tr - rtdisc; if ((d__1 = rt1r - h22, abs(d__1)) <= (d__2 = rt2r - h22, abs( d__2))) { rt1r *= s; rt2r = rt1r; } else { rt2r *= s; rt1r = rt2r; } rt1i = 0.; rt2i = 0.; } } /* Look for two consecutive small subdiagonal elements. */ i__1 = l; for (m = i__ - 2; m >= i__1; --m) { /* Determine the effect of starting the double-shift QR iteration at row M, and see if this would make H(M,M-1) negligible. (The following uses scaling to avoid overflows and most underflows.) */ h21s = h__[m + 1 + m * h_dim1]; s = (d__1 = h__[m + m * h_dim1] - rt2r, abs(d__1)) + abs(rt2i) + abs(h21s); h21s = h__[m + 1 + m * h_dim1] / s; v[0] = h21s * h__[m + (m + 1) * h_dim1] + (h__[m + m * h_dim1] - rt1r) * ((h__[m + m * h_dim1] - rt2r) / s) - rt1i * (rt2i / s); v[1] = h21s * (h__[m + m * h_dim1] + h__[m + 1 + (m + 1) * h_dim1] - rt1r - rt2r); v[2] = h21s * h__[m + 2 + (m + 1) * h_dim1]; s = abs(v[0]) + abs(v[1]) + abs(v[2]); v[0] /= s; v[1] /= s; v[2] /= s; if (m == l) { goto L60; } if ((d__1 = h__[m + (m - 1) * h_dim1], abs(d__1)) * (abs(v[1]) + abs(v[2])) <= ulp * abs(v[0]) * ((d__2 = h__[m - 1 + (m - 1) * h_dim1], abs(d__2)) + (d__3 = h__[m + m * h_dim1], abs(d__3)) + (d__4 = h__[m + 1 + (m + 1) * h_dim1], abs( d__4)))) { goto L60; } /* L50: */ } L60: /* Double-shift QR step */ i__1 = i__ - 1; for (k = m; k <= i__1; ++k) { /* The first iteration of this loop determines a reflection G from the vector V and applies it from left and right to H, thus creating a nonzero bulge below the subdiagonal. Each subsequent iteration determines a reflection G to restore the Hessenberg form in the (K-1)th column, and thus chases the bulge one step toward the bottom of the active submatrix. NR is the order of G. Computing MIN */ i__2 = 3, i__3 = i__ - k + 1; nr = min(i__2,i__3); if (k > m) { dcopy_(&nr, &h__[k + (k - 1) * h_dim1], &c__1, v, &c__1); } dlarfg_(&nr, v, &v[1], &c__1, &t1); if (k > m) { h__[k + (k - 1) * h_dim1] = v[0]; h__[k + 1 + (k - 1) * h_dim1] = 0.; if (k < i__ - 1) { h__[k + 2 + (k - 1) * h_dim1] = 0.; } } else if (m > l) { h__[k + (k - 1) * h_dim1] = -h__[k + (k - 1) * h_dim1]; } v2 = v[1]; t2 = t1 * v2; if (nr == 3) { v3 = v[2]; t3 = t1 * v3; /* Apply G from the left to transform the rows of the matrix in columns K to I2. */ i__2 = i2; for (j = k; j <= i__2; ++j) { sum = h__[k + j * h_dim1] + v2 * h__[k + 1 + j * h_dim1] + v3 * h__[k + 2 + j * h_dim1]; h__[k + j * h_dim1] -= sum * t1; h__[k + 1 + j * h_dim1] -= sum * t2; h__[k + 2 + j * h_dim1] -= sum * t3; /* L70: */ } /* Apply G from the right to transform the columns of the matrix in rows I1 to min(K+3,I). 
Computing MIN */ i__3 = k + 3; i__2 = min(i__3,i__); for (j = i1; j <= i__2; ++j) { sum = h__[j + k * h_dim1] + v2 * h__[j + (k + 1) * h_dim1] + v3 * h__[j + (k + 2) * h_dim1]; h__[j + k * h_dim1] -= sum * t1; h__[j + (k + 1) * h_dim1] -= sum * t2; h__[j + (k + 2) * h_dim1] -= sum * t3; /* L80: */ } if (*wantz) { /* Accumulate transformations in the matrix Z */ i__2 = *ihiz; for (j = *iloz; j <= i__2; ++j) { sum = z__[j + k * z_dim1] + v2 * z__[j + (k + 1) * z_dim1] + v3 * z__[j + (k + 2) * z_dim1]; z__[j + k * z_dim1] -= sum * t1; z__[j + (k + 1) * z_dim1] -= sum * t2; z__[j + (k + 2) * z_dim1] -= sum * t3; /* L90: */ } } } else if (nr == 2) { /* Apply G from the left to transform the rows of the matrix in columns K to I2. */ i__2 = i2; for (j = k; j <= i__2; ++j) { sum = h__[k + j * h_dim1] + v2 * h__[k + 1 + j * h_dim1]; h__[k + j * h_dim1] -= sum * t1; h__[k + 1 + j * h_dim1] -= sum * t2; /* L100: */ } /* Apply G from the right to transform the columns of the matrix in rows I1 to min(K+3,I). */ i__2 = i__; for (j = i1; j <= i__2; ++j) { sum = h__[j + k * h_dim1] + v2 * h__[j + (k + 1) * h_dim1] ; h__[j + k * h_dim1] -= sum * t1; h__[j + (k + 1) * h_dim1] -= sum * t2; /* L110: */ } if (*wantz) { /* Accumulate transformations in the matrix Z */ i__2 = *ihiz; for (j = *iloz; j <= i__2; ++j) { sum = z__[j + k * z_dim1] + v2 * z__[j + (k + 1) * z_dim1]; z__[j + k * z_dim1] -= sum * t1; z__[j + (k + 1) * z_dim1] -= sum * t2; /* L120: */ } } } /* L130: */ } /* L140: */ } /* Failure to converge in remaining number of iterations */ *info = i__; return 0; L150: if (l == i__) { /* H(I,I-1) is negligible: one eigenvalue has converged. */ wr[i__] = h__[i__ + i__ * h_dim1]; wi[i__] = 0.; } else if (l == i__ - 1) { /* H(I-1,I-2) is negligible: a pair of eigenvalues have converged. Transform the 2-by-2 submatrix to standard Schur form, and compute and store the eigenvalues. */ dlanv2_(&h__[i__ - 1 + (i__ - 1) * h_dim1], &h__[i__ - 1 + i__ * h_dim1], &h__[i__ + (i__ - 1) * h_dim1], &h__[i__ + i__ * h_dim1], &wr[i__ - 1], &wi[i__ - 1], &wr[i__], &wi[i__], &cs, &sn); if (*wantt) { /* Apply the transformation to the rest of H. */ if (i2 > i__) { i__1 = i2 - i__; drot_(&i__1, &h__[i__ - 1 + (i__ + 1) * h_dim1], ldh, &h__[ i__ + (i__ + 1) * h_dim1], ldh, &cs, &sn); } i__1 = i__ - i1 - 1; drot_(&i__1, &h__[i1 + (i__ - 1) * h_dim1], &c__1, &h__[i1 + i__ * h_dim1], &c__1, &cs, &sn); } if (*wantz) { /* Apply the transformation to Z. */ drot_(&nz, &z__[*iloz + (i__ - 1) * z_dim1], &c__1, &z__[*iloz + i__ * z_dim1], &c__1, &cs, &sn); } } /* return to start of the main loop with new value of I. 
*/ i__ = l - 1; goto L20; L160: return 0; /* End of DLAHQR */ } /* dlahqr_ */ /* Subroutine */ int dlahr2_(integer *n, integer *k, integer *nb, doublereal * a, integer *lda, doublereal *tau, doublereal *t, integer *ldt, doublereal *y, integer *ldy) { /* System generated locals */ integer a_dim1, a_offset, t_dim1, t_offset, y_dim1, y_offset, i__1, i__2, i__3; doublereal d__1; /* Local variables */ static integer i__; extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, integer *), dgemm_(char *, char *, integer *, integer *, integer * , doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *), dgemv_( char *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *), dcopy_(integer *, doublereal *, integer *, doublereal *, integer *), dtrmm_(char *, char *, char *, char *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *), daxpy_(integer *, doublereal *, doublereal *, integer *, doublereal *, integer *), dtrmv_(char *, char *, char *, integer *, doublereal *, integer *, doublereal *, integer *); static doublereal ei; extern /* Subroutine */ int dlarfg_(integer *, doublereal *, doublereal *, integer *, doublereal *), dlacpy_(char *, integer *, integer *, doublereal *, integer *, doublereal *, integer *); /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLAHR2 reduces the first NB columns of A real general n-BY-(n-k+1) matrix A so that elements below the k-th subdiagonal are zero. The reduction is performed by an orthogonal similarity transformation Q' * A * Q. The routine returns the matrices V and T which determine Q as a block reflector I - V*T*V', and also the matrix Y = A * V * T. This is an auxiliary routine called by DGEHRD. Arguments ========= N (input) INTEGER The order of the matrix A. K (input) INTEGER The offset for the reduction. Elements below the k-th subdiagonal in the first NB columns are reduced to zero. K < N. NB (input) INTEGER The number of columns to be reduced. A (input/output) DOUBLE PRECISION array, dimension (LDA,N-K+1) On entry, the n-by-(n-k+1) general matrix A. On exit, the elements on and above the k-th subdiagonal in the first NB columns are overwritten with the corresponding elements of the reduced matrix; the elements below the k-th subdiagonal, with the array TAU, represent the matrix Q as a product of elementary reflectors. The other columns of A are unchanged. See Further Details. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,N). TAU (output) DOUBLE PRECISION array, dimension (NB) The scalar factors of the elementary reflectors. See Further Details. T (output) DOUBLE PRECISION array, dimension (LDT,NB) The upper triangular matrix T. LDT (input) INTEGER The leading dimension of the array T. LDT >= NB. Y (output) DOUBLE PRECISION array, dimension (LDY,NB) The n-by-nb matrix Y. LDY (input) INTEGER The leading dimension of the array Y. LDY >= N. Further Details =============== The matrix Q is represented as a product of nb elementary reflectors Q = H(1) H(2) . . . H(nb). Each H(i) has the form H(i) = I - tau * v * v' where tau is a real scalar, and v is a real vector with v(1:i+k-1) = 0, v(i+k) = 1; v(i+k+1:n) is stored on exit in A(i+k+1:n,i), and tau in TAU(i). 
The elements of the vectors v together form the (n-k+1)-by-nb matrix V which is needed, with T and Y, to apply the transformation to the unreduced part of the matrix, using an update of the form: A := (I - V*T*V') * (A - Y*V'). The contents of A on exit are illustrated by the following example with n = 7, k = 3 and nb = 2: ( a a a a a ) ( a a a a a ) ( a a a a a ) ( h h a a a ) ( v1 h a a a ) ( v1 v2 a a a ) ( v1 v2 a a a ) where a denotes an element of the original matrix A, h denotes a modified element of the upper Hessenberg matrix H, and vi denotes an element of the vector defining H(i). This file is a slight modification of LAPACK-3.0's DLAHRD incorporating improvements proposed by Quintana-Orti and Van de Gejin. Note that the entries of A(1:K,2:NB) differ from those returned by the original LAPACK routine. This function is not backward compatible with LAPACK3.0. ===================================================================== Quick return if possible */ /* Parameter adjustments */ --tau; a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; t_dim1 = *ldt; t_offset = 1 + t_dim1 * 1; t -= t_offset; y_dim1 = *ldy; y_offset = 1 + y_dim1 * 1; y -= y_offset; /* Function Body */ if (*n <= 1) { return 0; } i__1 = *nb; for (i__ = 1; i__ <= i__1; ++i__) { if (i__ > 1) { /* Update A(K+1:N,I) Update I-th column of A - Y * V' */ i__2 = *n - *k; i__3 = i__ - 1; dgemv_("NO TRANSPOSE", &i__2, &i__3, &c_b151, &y[*k + 1 + y_dim1], ldy, &a[*k + i__ - 1 + a_dim1], lda, &c_b15, &a[*k + 1 + i__ * a_dim1], &c__1); /* Apply I - V * T' * V' to this column (call it b) from the left, using the last column of T as workspace Let V = ( V1 ) and b = ( b1 ) (first I-1 rows) ( V2 ) ( b2 ) where V1 is unit lower triangular w := V1' * b1 */ i__2 = i__ - 1; dcopy_(&i__2, &a[*k + 1 + i__ * a_dim1], &c__1, &t[*nb * t_dim1 + 1], &c__1); i__2 = i__ - 1; dtrmv_("Lower", "Transpose", "UNIT", &i__2, &a[*k + 1 + a_dim1], lda, &t[*nb * t_dim1 + 1], &c__1); /* w := w + V2'*b2 */ i__2 = *n - *k - i__ + 1; i__3 = i__ - 1; dgemv_("Transpose", &i__2, &i__3, &c_b15, &a[*k + i__ + a_dim1], lda, &a[*k + i__ + i__ * a_dim1], &c__1, &c_b15, &t[*nb * t_dim1 + 1], &c__1); /* w := T'*w */ i__2 = i__ - 1; dtrmv_("Upper", "Transpose", "NON-UNIT", &i__2, &t[t_offset], ldt, &t[*nb * t_dim1 + 1], &c__1); /* b2 := b2 - V2*w */ i__2 = *n - *k - i__ + 1; i__3 = i__ - 1; dgemv_("NO TRANSPOSE", &i__2, &i__3, &c_b151, &a[*k + i__ + a_dim1], lda, &t[*nb * t_dim1 + 1], &c__1, &c_b15, &a[*k + i__ + i__ * a_dim1], &c__1); /* b1 := b1 - V1*w */ i__2 = i__ - 1; dtrmv_("Lower", "NO TRANSPOSE", "UNIT", &i__2, &a[*k + 1 + a_dim1] , lda, &t[*nb * t_dim1 + 1], &c__1); i__2 = i__ - 1; daxpy_(&i__2, &c_b151, &t[*nb * t_dim1 + 1], &c__1, &a[*k + 1 + i__ * a_dim1], &c__1); a[*k + i__ - 1 + (i__ - 1) * a_dim1] = ei; } /* Generate the elementary reflector H(I) to annihilate A(K+I+1:N,I) */ i__2 = *n - *k - i__ + 1; /* Computing MIN */ i__3 = *k + i__ + 1; dlarfg_(&i__2, &a[*k + i__ + i__ * a_dim1], &a[min(i__3,*n) + i__ * a_dim1], &c__1, &tau[i__]); ei = a[*k + i__ + i__ * a_dim1]; a[*k + i__ + i__ * a_dim1] = 1.; /* Compute Y(K+1:N,I) */ i__2 = *n - *k; i__3 = *n - *k - i__ + 1; dgemv_("NO TRANSPOSE", &i__2, &i__3, &c_b15, &a[*k + 1 + (i__ + 1) * a_dim1], lda, &a[*k + i__ + i__ * a_dim1], &c__1, &c_b29, &y[* k + 1 + i__ * y_dim1], &c__1); i__2 = *n - *k - i__ + 1; i__3 = i__ - 1; dgemv_("Transpose", &i__2, &i__3, &c_b15, &a[*k + i__ + a_dim1], lda, &a[*k + i__ + i__ * a_dim1], &c__1, &c_b29, &t[i__ * t_dim1 + 1], &c__1); i__2 = *n - *k; i__3 = i__ - 1; 
dgemv_("NO TRANSPOSE", &i__2, &i__3, &c_b151, &y[*k + 1 + y_dim1], ldy, &t[i__ * t_dim1 + 1], &c__1, &c_b15, &y[*k + 1 + i__ * y_dim1], &c__1); i__2 = *n - *k; dscal_(&i__2, &tau[i__], &y[*k + 1 + i__ * y_dim1], &c__1); /* Compute T(1:I,I) */ i__2 = i__ - 1; d__1 = -tau[i__]; dscal_(&i__2, &d__1, &t[i__ * t_dim1 + 1], &c__1); i__2 = i__ - 1; dtrmv_("Upper", "No Transpose", "NON-UNIT", &i__2, &t[t_offset], ldt, &t[i__ * t_dim1 + 1], &c__1) ; t[i__ + i__ * t_dim1] = tau[i__]; /* L10: */ } a[*k + *nb + *nb * a_dim1] = ei; /* Compute Y(1:K,1:NB) */ dlacpy_("ALL", k, nb, &a[(a_dim1 << 1) + 1], lda, &y[y_offset], ldy); dtrmm_("RIGHT", "Lower", "NO TRANSPOSE", "UNIT", k, nb, &c_b15, &a[*k + 1 + a_dim1], lda, &y[y_offset], ldy); if (*n > *k + *nb) { i__1 = *n - *k - *nb; dgemm_("NO TRANSPOSE", "NO TRANSPOSE", k, nb, &i__1, &c_b15, &a[(*nb + 2) * a_dim1 + 1], lda, &a[*k + 1 + *nb + a_dim1], lda, & c_b15, &y[y_offset], ldy); } dtrmm_("RIGHT", "Upper", "NO TRANSPOSE", "NON-UNIT", k, nb, &c_b15, &t[ t_offset], ldt, &y[y_offset], ldy); return 0; /* End of DLAHR2 */ } /* dlahr2_ */ /* Subroutine */ int dlaln2_(logical *ltrans, integer *na, integer *nw, doublereal *smin, doublereal *ca, doublereal *a, integer *lda, doublereal *d1, doublereal *d2, doublereal *b, integer *ldb, doublereal *wr, doublereal *wi, doublereal *x, integer *ldx, doublereal *scale, doublereal *xnorm, integer *info) { /* Initialized data */ static logical zswap[4] = { FALSE_,FALSE_,TRUE_,TRUE_ }; static logical rswap[4] = { FALSE_,TRUE_,FALSE_,TRUE_ }; static integer ipivot[16] /* was [4][4] */ = { 1,2,3,4,2,1,4,3,3,4,1,2, 4,3,2,1 }; /* System generated locals */ integer a_dim1, a_offset, b_dim1, b_offset, x_dim1, x_offset; doublereal d__1, d__2, d__3, d__4, d__5, d__6; static doublereal equiv_0[4], equiv_1[4]; /* Local variables */ static doublereal bbnd, cmax, ui11r, ui12s, temp, ur11r, ur12s; static integer j; static doublereal u22abs; static integer icmax; static doublereal bnorm, cnorm, smini; #define ci (equiv_0) #define cr (equiv_1) extern /* Subroutine */ int dladiv_(doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *); static doublereal bignum, bi1, bi2, br1, br2, smlnum, xi1, xi2, xr1, xr2, ci21, ci22, cr21, cr22, li21, csi, ui11, lr21, ui12, ui22; #define civ (equiv_0) static doublereal csr, ur11, ur12, ur22; #define crv (equiv_1) /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLALN2 solves a system of the form (ca A - w D ) X = s B or (ca A' - w D) X = s B with possible scaling ("s") and perturbation of A. (A' means A-transpose.) A is an NA x NA real matrix, ca is a real scalar, D is an NA x NA real diagonal matrix, w is a real or complex value, and X and B are NA x 1 matrices -- real if w is real, complex if w is complex. NA may be 1 or 2. If w is complex, X and B are represented as NA x 2 matrices, the first column of each being the real part and the second being the imaginary part. "s" is a scaling factor (.LE. 1), computed by DLALN2, which is so chosen that X can be computed without overflow. X is further scaled if necessary to assure that norm(ca A - w D)*norm(X) is less than overflow. If both singular values of (ca A - w D) are less than SMIN, SMIN*identity will be used instead of (ca A - w D). If only one singular value is less than SMIN, one element of (ca A - w D) will be perturbed enough to make the smallest singular value roughly SMIN. 
If both singular values are at least SMIN, (ca A - w D) will not be perturbed. In any case, the perturbation will be at most some small multiple of max( SMIN, ulp*norm(ca A - w D) ). The singular values are computed by infinity-norm approximations, and thus will only be correct to a factor of 2 or so. Note: all input quantities are assumed to be smaller than overflow by a reasonable factor. (See BIGNUM.) Arguments ========== LTRANS (input) LOGICAL =.TRUE.: A-transpose will be used. =.FALSE.: A will be used (not transposed.) NA (input) INTEGER The size of the matrix A. It may (only) be 1 or 2. NW (input) INTEGER 1 if "w" is real, 2 if "w" is complex. It may only be 1 or 2. SMIN (input) DOUBLE PRECISION The desired lower bound on the singular values of A. This should be a safe distance away from underflow or overflow, say, between (underflow/machine precision) and (machine precision * overflow ). (See BIGNUM and ULP.) CA (input) DOUBLE PRECISION The coefficient c, which A is multiplied by. A (input) DOUBLE PRECISION array, dimension (LDA,NA) The NA x NA matrix A. LDA (input) INTEGER The leading dimension of A. It must be at least NA. D1 (input) DOUBLE PRECISION The 1,1 element in the diagonal matrix D. D2 (input) DOUBLE PRECISION The 2,2 element in the diagonal matrix D. Not used if NW=1. B (input) DOUBLE PRECISION array, dimension (LDB,NW) The NA x NW matrix B (right-hand side). If NW=2 ("w" is complex), column 1 contains the real part of B and column 2 contains the imaginary part. LDB (input) INTEGER The leading dimension of B. It must be at least NA. WR (input) DOUBLE PRECISION The real part of the scalar "w". WI (input) DOUBLE PRECISION The imaginary part of the scalar "w". Not used if NW=1. X (output) DOUBLE PRECISION array, dimension (LDX,NW) The NA x NW matrix X (unknowns), as computed by DLALN2. If NW=2 ("w" is complex), on exit, column 1 will contain the real part of X and column 2 will contain the imaginary part. LDX (input) INTEGER The leading dimension of X. It must be at least NA. SCALE (output) DOUBLE PRECISION The scale factor that B must be multiplied by to insure that overflow does not occur when computing X. Thus, (ca A - w D) X will be SCALE*B, not B (ignoring perturbations of A.) It will be at most 1. XNORM (output) DOUBLE PRECISION The infinity-norm of X, when X is regarded as an NA x NW real matrix. INFO (output) INTEGER An error flag. It will be set to zero if no error occurs, a negative number if an argument is in error, or a positive number if ca A - w D had to be perturbed. The possible values are: = 0: No error occurred, and (ca A - w D) did not have to be perturbed. = 1: (ca A - w D) had to be perturbed to make its smallest (or only) singular value greater than SMIN. NOTE: In the interests of speed, this routine does not check the inputs for errors. ===================================================================== */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; b_dim1 = *ldb; b_offset = 1 + b_dim1 * 1; b -= b_offset; x_dim1 = *ldx; x_offset = 1 + x_dim1 * 1; x -= x_offset; /* Function Body */ /* Compute BIGNUM */ smlnum = 2. * SAFEMINIMUM; bignum = 1. / smlnum; smini = max(*smin,smlnum); /* Don't check for input errors */ *info = 0; /* Standard Initializations */ *scale = 1.; if (*na == 1) { /* 1 x 1 (i.e., scalar) system C X = B */ if (*nw == 1) { /* Real 1x1 system. 
C = ca A - w D */ csr = *ca * a[a_dim1 + 1] - *wr * *d1; cnorm = abs(csr); /* If | C | < SMINI, use C = SMINI */ if (cnorm < smini) { csr = smini; cnorm = smini; *info = 1; } /* Check scaling for X = B / C */ bnorm = (d__1 = b[b_dim1 + 1], abs(d__1)); if (cnorm < 1. && bnorm > 1.) { if (bnorm > bignum * cnorm) { *scale = 1. / bnorm; } } /* Compute X */ x[x_dim1 + 1] = b[b_dim1 + 1] * *scale / csr; *xnorm = (d__1 = x[x_dim1 + 1], abs(d__1)); } else { /* Complex 1x1 system (w is complex) C = ca A - w D */ csr = *ca * a[a_dim1 + 1] - *wr * *d1; csi = -(*wi) * *d1; cnorm = abs(csr) + abs(csi); /* If | C | < SMINI, use C = SMINI */ if (cnorm < smini) { csr = smini; csi = 0.; cnorm = smini; *info = 1; } /* Check scaling for X = B / C */ bnorm = (d__1 = b[b_dim1 + 1], abs(d__1)) + (d__2 = b[(b_dim1 << 1) + 1], abs(d__2)); if (cnorm < 1. && bnorm > 1.) { if (bnorm > bignum * cnorm) { *scale = 1. / bnorm; } } /* Compute X */ d__1 = *scale * b[b_dim1 + 1]; d__2 = *scale * b[(b_dim1 << 1) + 1]; dladiv_(&d__1, &d__2, &csr, &csi, &x[x_dim1 + 1], &x[(x_dim1 << 1) + 1]); *xnorm = (d__1 = x[x_dim1 + 1], abs(d__1)) + (d__2 = x[(x_dim1 << 1) + 1], abs(d__2)); } } else { /* 2x2 System Compute the real part of C = ca A - w D (or ca A' - w D ) */ cr[0] = *ca * a[a_dim1 + 1] - *wr * *d1; cr[3] = *ca * a[(a_dim1 << 1) + 2] - *wr * *d2; if (*ltrans) { cr[2] = *ca * a[a_dim1 + 2]; cr[1] = *ca * a[(a_dim1 << 1) + 1]; } else { cr[1] = *ca * a[a_dim1 + 2]; cr[2] = *ca * a[(a_dim1 << 1) + 1]; } if (*nw == 1) { /* Real 2x2 system (w is real) Find the largest element in C */ cmax = 0.; icmax = 0; for (j = 1; j <= 4; ++j) { if ((d__1 = crv[j - 1], abs(d__1)) > cmax) { cmax = (d__1 = crv[j - 1], abs(d__1)); icmax = j; } /* L10: */ } /* If norm(C) < SMINI, use SMINI*identity. */ if (cmax < smini) { /* Computing MAX */ d__3 = (d__1 = b[b_dim1 + 1], abs(d__1)), d__4 = (d__2 = b[ b_dim1 + 2], abs(d__2)); bnorm = max(d__3,d__4); if (smini < 1. && bnorm > 1.) { if (bnorm > bignum * smini) { *scale = 1. / bnorm; } } temp = *scale / smini; x[x_dim1 + 1] = temp * b[b_dim1 + 1]; x[x_dim1 + 2] = temp * b[b_dim1 + 2]; *xnorm = temp * bnorm; *info = 1; return 0; } /* Gaussian elimination with complete pivoting. */ ur11 = crv[icmax - 1]; cr21 = crv[ipivot[(icmax << 2) - 3] - 1]; ur12 = crv[ipivot[(icmax << 2) - 2] - 1]; cr22 = crv[ipivot[(icmax << 2) - 1] - 1]; ur11r = 1. / ur11; lr21 = ur11r * cr21; ur22 = cr22 - ur12 * lr21; /* If smaller pivot < SMINI, use SMINI */ if (abs(ur22) < smini) { ur22 = smini; *info = 1; } if (rswap[icmax - 1]) { br1 = b[b_dim1 + 2]; br2 = b[b_dim1 + 1]; } else { br1 = b[b_dim1 + 1]; br2 = b[b_dim1 + 2]; } br2 -= lr21 * br1; /* Computing MAX */ d__2 = (d__1 = br1 * (ur22 * ur11r), abs(d__1)), d__3 = abs(br2); bbnd = max(d__2,d__3); if (bbnd > 1. && abs(ur22) < 1.) { if (bbnd >= bignum * abs(ur22)) { *scale = 1. / bbnd; } } xr2 = br2 * *scale / ur22; xr1 = *scale * br1 * ur11r - xr2 * (ur11r * ur12); if (zswap[icmax - 1]) { x[x_dim1 + 1] = xr2; x[x_dim1 + 2] = xr1; } else { x[x_dim1 + 1] = xr1; x[x_dim1 + 2] = xr2; } /* Computing MAX */ d__1 = abs(xr1), d__2 = abs(xr2); *xnorm = max(d__1,d__2); /* Further scaling if norm(A) norm(X) > overflow */ if (*xnorm > 1. && cmax > 1.) 
{ if (*xnorm > bignum / cmax) { temp = cmax / bignum; x[x_dim1 + 1] = temp * x[x_dim1 + 1]; x[x_dim1 + 2] = temp * x[x_dim1 + 2]; *xnorm = temp * *xnorm; *scale = temp * *scale; } } } else { /* Complex 2x2 system (w is complex) Find the largest element in C */ ci[0] = -(*wi) * *d1; ci[1] = 0.; ci[2] = 0.; ci[3] = -(*wi) * *d2; cmax = 0.; icmax = 0; for (j = 1; j <= 4; ++j) { if ((d__1 = crv[j - 1], abs(d__1)) + (d__2 = civ[j - 1], abs( d__2)) > cmax) { cmax = (d__1 = crv[j - 1], abs(d__1)) + (d__2 = civ[j - 1] , abs(d__2)); icmax = j; } /* L20: */ } /* If norm(C) < SMINI, use SMINI*identity. */ if (cmax < smini) { /* Computing MAX */ d__5 = (d__1 = b[b_dim1 + 1], abs(d__1)) + (d__2 = b[(b_dim1 << 1) + 1], abs(d__2)), d__6 = (d__3 = b[b_dim1 + 2], abs(d__3)) + (d__4 = b[(b_dim1 << 1) + 2], abs(d__4)); bnorm = max(d__5,d__6); if (smini < 1. && bnorm > 1.) { if (bnorm > bignum * smini) { *scale = 1. / bnorm; } } temp = *scale / smini; x[x_dim1 + 1] = temp * b[b_dim1 + 1]; x[x_dim1 + 2] = temp * b[b_dim1 + 2]; x[(x_dim1 << 1) + 1] = temp * b[(b_dim1 << 1) + 1]; x[(x_dim1 << 1) + 2] = temp * b[(b_dim1 << 1) + 2]; *xnorm = temp * bnorm; *info = 1; return 0; } /* Gaussian elimination with complete pivoting. */ ur11 = crv[icmax - 1]; ui11 = civ[icmax - 1]; cr21 = crv[ipivot[(icmax << 2) - 3] - 1]; ci21 = civ[ipivot[(icmax << 2) - 3] - 1]; ur12 = crv[ipivot[(icmax << 2) - 2] - 1]; ui12 = civ[ipivot[(icmax << 2) - 2] - 1]; cr22 = crv[ipivot[(icmax << 2) - 1] - 1]; ci22 = civ[ipivot[(icmax << 2) - 1] - 1]; if (icmax == 1 || icmax == 4) { /* Code when off-diagonals of pivoted C are real */ if (abs(ur11) > abs(ui11)) { temp = ui11 / ur11; /* Computing 2nd power */ d__1 = temp; ur11r = 1. / (ur11 * (d__1 * d__1 + 1.)); ui11r = -temp * ur11r; } else { temp = ur11 / ui11; /* Computing 2nd power */ d__1 = temp; ui11r = -1. / (ui11 * (d__1 * d__1 + 1.)); ur11r = -temp * ui11r; } lr21 = cr21 * ur11r; li21 = cr21 * ui11r; ur12s = ur12 * ur11r; ui12s = ur12 * ui11r; ur22 = cr22 - ur12 * lr21; ui22 = ci22 - ur12 * li21; } else { /* Code when diagonals of pivoted C are real */ ur11r = 1. / ur11; ui11r = 0.; lr21 = cr21 * ur11r; li21 = ci21 * ur11r; ur12s = ur12 * ur11r; ui12s = ui12 * ur11r; ur22 = cr22 - ur12 * lr21 + ui12 * li21; ui22 = -ur12 * li21 - ui12 * lr21; } u22abs = abs(ur22) + abs(ui22); /* If smaller pivot < SMINI, use SMINI */ if (u22abs < smini) { ur22 = smini; ui22 = 0.; *info = 1; } if (rswap[icmax - 1]) { br2 = b[b_dim1 + 1]; br1 = b[b_dim1 + 2]; bi2 = b[(b_dim1 << 1) + 1]; bi1 = b[(b_dim1 << 1) + 2]; } else { br1 = b[b_dim1 + 1]; br2 = b[b_dim1 + 2]; bi1 = b[(b_dim1 << 1) + 1]; bi2 = b[(b_dim1 << 1) + 2]; } br2 = br2 - lr21 * br1 + li21 * bi1; bi2 = bi2 - li21 * br1 - lr21 * bi1; /* Computing MAX */ d__1 = (abs(br1) + abs(bi1)) * (u22abs * (abs(ur11r) + abs(ui11r)) ), d__2 = abs(br2) + abs(bi2); bbnd = max(d__1,d__2); if (bbnd > 1. && u22abs < 1.) { if (bbnd >= bignum * u22abs) { *scale = 1. 
/ bbnd; br1 = *scale * br1; bi1 = *scale * bi1; br2 = *scale * br2; bi2 = *scale * bi2; } } dladiv_(&br2, &bi2, &ur22, &ui22, &xr2, &xi2); xr1 = ur11r * br1 - ui11r * bi1 - ur12s * xr2 + ui12s * xi2; xi1 = ui11r * br1 + ur11r * bi1 - ui12s * xr2 - ur12s * xi2; if (zswap[icmax - 1]) { x[x_dim1 + 1] = xr2; x[x_dim1 + 2] = xr1; x[(x_dim1 << 1) + 1] = xi2; x[(x_dim1 << 1) + 2] = xi1; } else { x[x_dim1 + 1] = xr1; x[x_dim1 + 2] = xr2; x[(x_dim1 << 1) + 1] = xi1; x[(x_dim1 << 1) + 2] = xi2; } /* Computing MAX */ d__1 = abs(xr1) + abs(xi1), d__2 = abs(xr2) + abs(xi2); *xnorm = max(d__1,d__2); /* Further scaling if norm(A) norm(X) > overflow */ if (*xnorm > 1. && cmax > 1.) { if (*xnorm > bignum / cmax) { temp = cmax / bignum; x[x_dim1 + 1] = temp * x[x_dim1 + 1]; x[x_dim1 + 2] = temp * x[x_dim1 + 2]; x[(x_dim1 << 1) + 1] = temp * x[(x_dim1 << 1) + 1]; x[(x_dim1 << 1) + 2] = temp * x[(x_dim1 << 1) + 2]; *xnorm = temp * *xnorm; *scale = temp * *scale; } } } } return 0; /* End of DLALN2 */ } /* dlaln2_ */ #undef crv #undef civ #undef cr #undef ci /* Subroutine */ int dlals0_(integer *icompq, integer *nl, integer *nr, integer *sqre, integer *nrhs, doublereal *b, integer *ldb, doublereal *bx, integer *ldbx, integer *perm, integer *givptr, integer *givcol, integer *ldgcol, doublereal *givnum, integer *ldgnum, doublereal * poles, doublereal *difl, doublereal *difr, doublereal *z__, integer * k, doublereal *c__, doublereal *s, doublereal *work, integer *info) { /* System generated locals */ integer givcol_dim1, givcol_offset, b_dim1, b_offset, bx_dim1, bx_offset, difr_dim1, difr_offset, givnum_dim1, givnum_offset, poles_dim1, poles_offset, i__1, i__2; doublereal d__1; /* Local variables */ static doublereal temp; extern /* Subroutine */ int drot_(integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *); extern doublereal dnrm2_(integer *, doublereal *, integer *); static integer i__, j, m, n; extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, integer *); static doublereal diflj, difrj, dsigj; extern /* Subroutine */ int dgemv_(char *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *), dcopy_(integer *, doublereal *, integer *, doublereal *, integer *); extern doublereal dlamc3_(doublereal *, doublereal *); static doublereal dj; extern /* Subroutine */ int dlascl_(char *, integer *, integer *, doublereal *, doublereal *, integer *, integer *, doublereal *, integer *, integer *), dlacpy_(char *, integer *, integer *, doublereal *, integer *, doublereal *, integer *), xerbla_(char *, integer *); static doublereal dsigjp; static integer nlp1; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLALS0 applies back the multiplying factors of either the left or the right singular vector matrix of a diagonal matrix appended by a row to the right hand side matrix B in solving the least squares problem using the divide-and-conquer SVD approach. For the left singular vector matrix, three types of orthogonal matrices are involved: (1L) Givens rotations: the number of such rotations is GIVPTR; the pairs of columns/rows they were applied to are stored in GIVCOL; and the C- and S-values of these rotations are stored in GIVNUM. (2L) Permutation. The (NL+1)-st row of B is to be moved to the first row, and for J=2:N, PERM(J)-th row of B is to be moved to the J-th row. 
(3L) The left singular vector matrix of the remaining matrix. For the right singular vector matrix, four types of orthogonal matrices are involved: (1R) The right singular vector matrix of the remaining matrix. (2R) If SQRE = 1, one extra Givens rotation to generate the right null space. (3R) The inverse transformation of (2L). (4R) The inverse transformation of (1L). Arguments ========= ICOMPQ (input) INTEGER Specifies whether singular vectors are to be computed in factored form: = 0: Left singular vector matrix. = 1: Right singular vector matrix. NL (input) INTEGER The row dimension of the upper block. NL >= 1. NR (input) INTEGER The row dimension of the lower block. NR >= 1. SQRE (input) INTEGER = 0: the lower block is an NR-by-NR square matrix. = 1: the lower block is an NR-by-(NR+1) rectangular matrix. The bidiagonal matrix has row dimension N = NL + NR + 1, and column dimension M = N + SQRE. NRHS (input) INTEGER The number of columns of B and BX. NRHS must be at least 1. B (input/output) DOUBLE PRECISION array, dimension ( LDB, NRHS ) On input, B contains the right hand sides of the least squares problem in rows 1 through M. On output, B contains the solution X in rows 1 through N. LDB (input) INTEGER The leading dimension of B. LDB must be at least max(1,MAX( M, N ) ). BX (workspace) DOUBLE PRECISION array, dimension ( LDBX, NRHS ) LDBX (input) INTEGER The leading dimension of BX. PERM (input) INTEGER array, dimension ( N ) The permutations (from deflation and sorting) applied to the two blocks. GIVPTR (input) INTEGER The number of Givens rotations which took place in this subproblem. GIVCOL (input) INTEGER array, dimension ( LDGCOL, 2 ) Each pair of numbers indicates a pair of rows/columns involved in a Givens rotation. LDGCOL (input) INTEGER The leading dimension of GIVCOL, must be at least N. GIVNUM (input) DOUBLE PRECISION array, dimension ( LDGNUM, 2 ) Each number indicates the C or S value used in the corresponding Givens rotation. LDGNUM (input) INTEGER The leading dimension of arrays DIFR, POLES and GIVNUM, must be at least K. POLES (input) DOUBLE PRECISION array, dimension ( LDGNUM, 2 ) On entry, POLES(1:K, 1) contains the new singular values obtained from solving the secular equation, and POLES(1:K, 2) is an array containing the poles in the secular equation. DIFL (input) DOUBLE PRECISION array, dimension ( K ). On entry, DIFL(I) is the distance between I-th updated (undeflated) singular value and the I-th (undeflated) old singular value. DIFR (input) DOUBLE PRECISION array, dimension ( LDGNUM, 2 ). On entry, DIFR(I, 1) contains the distances between I-th updated (undeflated) singular value and the I+1-th (undeflated) old singular value. And DIFR(I, 2) is the normalizing factor for the I-th right singular vector. Z (input) DOUBLE PRECISION array, dimension ( K ) Contain the components of the deflation-adjusted updating row vector. K (input) INTEGER Contains the dimension of the non-deflated matrix, This is the order of the related secular equation. 1 <= K <=N. C (input) DOUBLE PRECISION C contains garbage if SQRE =0 and the C-value of a Givens rotation related to the right null space if SQRE = 1. S (input) DOUBLE PRECISION S contains garbage if SQRE =0 and the S-value of a Givens rotation related to the right null space if SQRE = 1. WORK (workspace) DOUBLE PRECISION array, dimension ( K ) INFO (output) INTEGER = 0: successful exit. < 0: if INFO = -i, the i-th argument had an illegal value. 
Further Details =============== Based on contributions by Ming Gu and Ren-Cang Li, Computer Science Division, University of California at Berkeley, USA Osni Marques, LBNL/NERSC, USA ===================================================================== Test the input parameters. */ /* Parameter adjustments */ b_dim1 = *ldb; b_offset = 1 + b_dim1 * 1; b -= b_offset; bx_dim1 = *ldbx; bx_offset = 1 + bx_dim1 * 1; bx -= bx_offset; --perm; givcol_dim1 = *ldgcol; givcol_offset = 1 + givcol_dim1 * 1; givcol -= givcol_offset; difr_dim1 = *ldgnum; difr_offset = 1 + difr_dim1 * 1; difr -= difr_offset; poles_dim1 = *ldgnum; poles_offset = 1 + poles_dim1 * 1; poles -= poles_offset; givnum_dim1 = *ldgnum; givnum_offset = 1 + givnum_dim1 * 1; givnum -= givnum_offset; --difl; --z__; --work; /* Function Body */ *info = 0; if (*icompq < 0 || *icompq > 1) { *info = -1; } else if (*nl < 1) { *info = -2; } else if (*nr < 1) { *info = -3; } else if (*sqre < 0 || *sqre > 1) { *info = -4; } n = *nl + *nr + 1; if (*nrhs < 1) { *info = -5; } else if (*ldb < n) { *info = -7; } else if (*ldbx < n) { *info = -9; } else if (*givptr < 0) { *info = -11; } else if (*ldgcol < n) { *info = -13; } else if (*ldgnum < n) { *info = -15; } else if (*k < 1) { *info = -20; } if (*info != 0) { i__1 = -(*info); xerbla_("DLALS0", &i__1); return 0; } m = n + *sqre; nlp1 = *nl + 1; if (*icompq == 0) { /* Apply back orthogonal transformations from the left. Step (1L): apply back the Givens rotations performed. */ i__1 = *givptr; for (i__ = 1; i__ <= i__1; ++i__) { drot_(nrhs, &b[givcol[i__ + (givcol_dim1 << 1)] + b_dim1], ldb, & b[givcol[i__ + givcol_dim1] + b_dim1], ldb, &givnum[i__ + (givnum_dim1 << 1)], &givnum[i__ + givnum_dim1]); /* L10: */ } /* Step (2L): permute rows of B. */ dcopy_(nrhs, &b[nlp1 + b_dim1], ldb, &bx[bx_dim1 + 1], ldbx); i__1 = n; for (i__ = 2; i__ <= i__1; ++i__) { dcopy_(nrhs, &b[perm[i__] + b_dim1], ldb, &bx[i__ + bx_dim1], ldbx); /* L20: */ } /* Step (3L): apply the inverse of the left singular vector matrix to BX. */ if (*k == 1) { dcopy_(nrhs, &bx[bx_offset], ldbx, &b[b_offset], ldb); if (z__[1] < 0.) { dscal_(nrhs, &c_b151, &b[b_offset], ldb); } } else { i__1 = *k; for (j = 1; j <= i__1; ++j) { diflj = difl[j]; dj = poles[j + poles_dim1]; dsigj = -poles[j + (poles_dim1 << 1)]; if (j < *k) { difrj = -difr[j + difr_dim1]; dsigjp = -poles[j + 1 + (poles_dim1 << 1)]; } if (z__[j] == 0. || poles[j + (poles_dim1 << 1)] == 0.) { work[j] = 0.; } else { work[j] = -poles[j + (poles_dim1 << 1)] * z__[j] / diflj / (poles[j + (poles_dim1 << 1)] + dj); } i__2 = j - 1; for (i__ = 1; i__ <= i__2; ++i__) { if (z__[i__] == 0. || poles[i__ + (poles_dim1 << 1)] == 0.) { work[i__] = 0.; } else { work[i__] = poles[i__ + (poles_dim1 << 1)] * z__[i__] / (dlamc3_(&poles[i__ + (poles_dim1 << 1)], & dsigj) - diflj) / (poles[i__ + (poles_dim1 << 1)] + dj); } /* L30: */ } i__2 = *k; for (i__ = j + 1; i__ <= i__2; ++i__) { if (z__[i__] == 0. || poles[i__ + (poles_dim1 << 1)] == 0.) { work[i__] = 0.; } else { work[i__] = poles[i__ + (poles_dim1 << 1)] * z__[i__] / (dlamc3_(&poles[i__ + (poles_dim1 << 1)], & dsigjp) + difrj) / (poles[i__ + (poles_dim1 << 1)] + dj); } /* L40: */ } work[1] = -1.; temp = dnrm2_(k, &work[1], &c__1); dgemv_("T", k, nrhs, &c_b15, &bx[bx_offset], ldbx, &work[1], & c__1, &c_b29, &b[j + b_dim1], ldb); dlascl_("G", &c__0, &c__0, &temp, &c_b15, &c__1, nrhs, &b[j + b_dim1], ldb, info); /* L50: */ } } /* Move the deflated rows of BX to B also. 
*/ if (*k < max(m,n)) { i__1 = n - *k; dlacpy_("A", &i__1, nrhs, &bx[*k + 1 + bx_dim1], ldbx, &b[*k + 1 + b_dim1], ldb); } } else { /* Apply back the right orthogonal transformations. Step (1R): apply back the new right singular vector matrix to B. */ if (*k == 1) { dcopy_(nrhs, &b[b_offset], ldb, &bx[bx_offset], ldbx); } else { i__1 = *k; for (j = 1; j <= i__1; ++j) { dsigj = poles[j + (poles_dim1 << 1)]; if (z__[j] == 0.) { work[j] = 0.; } else { work[j] = -z__[j] / difl[j] / (dsigj + poles[j + poles_dim1]) / difr[j + (difr_dim1 << 1)]; } i__2 = j - 1; for (i__ = 1; i__ <= i__2; ++i__) { if (z__[j] == 0.) { work[i__] = 0.; } else { d__1 = -poles[i__ + 1 + (poles_dim1 << 1)]; work[i__] = z__[j] / (dlamc3_(&dsigj, &d__1) - difr[ i__ + difr_dim1]) / (dsigj + poles[i__ + poles_dim1]) / difr[i__ + (difr_dim1 << 1)]; } /* L60: */ } i__2 = *k; for (i__ = j + 1; i__ <= i__2; ++i__) { if (z__[j] == 0.) { work[i__] = 0.; } else { d__1 = -poles[i__ + (poles_dim1 << 1)]; work[i__] = z__[j] / (dlamc3_(&dsigj, &d__1) - difl[ i__]) / (dsigj + poles[i__ + poles_dim1]) / difr[i__ + (difr_dim1 << 1)]; } /* L70: */ } dgemv_("T", k, nrhs, &c_b15, &b[b_offset], ldb, &work[1], & c__1, &c_b29, &bx[j + bx_dim1], ldbx); /* L80: */ } } /* Step (2R): if SQRE = 1, apply back the rotation that is related to the right null space of the subproblem. */ if (*sqre == 1) { dcopy_(nrhs, &b[m + b_dim1], ldb, &bx[m + bx_dim1], ldbx); drot_(nrhs, &bx[bx_dim1 + 1], ldbx, &bx[m + bx_dim1], ldbx, c__, s); } if (*k < max(m,n)) { i__1 = n - *k; dlacpy_("A", &i__1, nrhs, &b[*k + 1 + b_dim1], ldb, &bx[*k + 1 + bx_dim1], ldbx); } /* Step (3R): permute rows of B. */ dcopy_(nrhs, &bx[bx_dim1 + 1], ldbx, &b[nlp1 + b_dim1], ldb); if (*sqre == 1) { dcopy_(nrhs, &bx[m + bx_dim1], ldbx, &b[m + b_dim1], ldb); } i__1 = n; for (i__ = 2; i__ <= i__1; ++i__) { dcopy_(nrhs, &bx[i__ + bx_dim1], ldbx, &b[perm[i__] + b_dim1], ldb); /* L90: */ } /* Step (4R): apply back the Givens rotations performed. 
*/ for (i__ = *givptr; i__ >= 1; --i__) { d__1 = -givnum[i__ + givnum_dim1]; drot_(nrhs, &b[givcol[i__ + (givcol_dim1 << 1)] + b_dim1], ldb, & b[givcol[i__ + givcol_dim1] + b_dim1], ldb, &givnum[i__ + (givnum_dim1 << 1)], &d__1); /* L100: */ } } return 0; /* End of DLALS0 */ } /* dlals0_ */ /* Subroutine */ int dlalsa_(integer *icompq, integer *smlsiz, integer *n, integer *nrhs, doublereal *b, integer *ldb, doublereal *bx, integer * ldbx, doublereal *u, integer *ldu, doublereal *vt, integer *k, doublereal *difl, doublereal *difr, doublereal *z__, doublereal * poles, integer *givptr, integer *givcol, integer *ldgcol, integer * perm, doublereal *givnum, doublereal *c__, doublereal *s, doublereal * work, integer *iwork, integer *info) { /* System generated locals */ integer givcol_dim1, givcol_offset, perm_dim1, perm_offset, b_dim1, b_offset, bx_dim1, bx_offset, difl_dim1, difl_offset, difr_dim1, difr_offset, givnum_dim1, givnum_offset, poles_dim1, poles_offset, u_dim1, u_offset, vt_dim1, vt_offset, z_dim1, z_offset, i__1, i__2; /* Builtin functions */ integer pow_ii(integer *, integer *); /* Local variables */ static integer nlvl, sqre, i__, j; extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *); static integer inode, ndiml, ndimr; extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *, doublereal *, integer *); static integer i1; extern /* Subroutine */ int dlals0_(integer *, integer *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, integer *, integer *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, doublereal *, doublereal *, integer *, doublereal *, doublereal *, doublereal *, integer *); static integer ic, lf, nd, ll, nl, nr; extern /* Subroutine */ int dlasdt_(integer *, integer *, integer *, integer *, integer *, integer *, integer *), xerbla_(char *, integer *); static integer im1, nlf, nrf, lvl, ndb1, nlp1, lvl2, nrp1; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLALSA is an itermediate step in solving the least squares problem by computing the SVD of the coefficient matrix in compact form (The singular vectors are computed as products of simple orthorgonal matrices.). If ICOMPQ = 0, DLALSA applies the inverse of the left singular vector matrix of an upper bidiagonal matrix to the right hand side; and if ICOMPQ = 1, DLALSA applies the right singular vector matrix to the right hand side. The singular vector matrices were generated in compact form by DLALSA. Arguments ========= ICOMPQ (input) INTEGER Specifies whether the left or the right singular vector matrix is involved. = 0: Left singular vector matrix = 1: Right singular vector matrix SMLSIZ (input) INTEGER The maximum size of the subproblems at the bottom of the computation tree. N (input) INTEGER The row and column dimensions of the upper bidiagonal matrix. NRHS (input) INTEGER The number of columns of B and BX. NRHS must be at least 1. B (input/output) DOUBLE PRECISION array, dimension ( LDB, NRHS ) On input, B contains the right hand sides of the least squares problem in rows 1 through M. On output, B contains the solution X in rows 1 through N. LDB (input) INTEGER The leading dimension of B in the calling subprogram. LDB must be at least max(1,MAX( M, N ) ). 
BX (output) DOUBLE PRECISION array, dimension ( LDBX, NRHS ) On exit, the result of applying the left or right singular vector matrix to B. LDBX (input) INTEGER The leading dimension of BX. U (input) DOUBLE PRECISION array, dimension ( LDU, SMLSIZ ). On entry, U contains the left singular vector matrices of all subproblems at the bottom level. LDU (input) INTEGER, LDU = > N. The leading dimension of arrays U, VT, DIFL, DIFR, POLES, GIVNUM, and Z. VT (input) DOUBLE PRECISION array, dimension ( LDU, SMLSIZ+1 ). On entry, VT' contains the right singular vector matrices of all subproblems at the bottom level. K (input) INTEGER array, dimension ( N ). DIFL (input) DOUBLE PRECISION array, dimension ( LDU, NLVL ). where NLVL = INT(log_2 (N/(SMLSIZ+1))) + 1. DIFR (input) DOUBLE PRECISION array, dimension ( LDU, 2 * NLVL ). On entry, DIFL(*, I) and DIFR(*, 2 * I -1) record distances between singular values on the I-th level and singular values on the (I -1)-th level, and DIFR(*, 2 * I) record the normalizing factors of the right singular vectors matrices of subproblems on I-th level. Z (input) DOUBLE PRECISION array, dimension ( LDU, NLVL ). On entry, Z(1, I) contains the components of the deflation- adjusted updating row vector for subproblems on the I-th level. POLES (input) DOUBLE PRECISION array, dimension ( LDU, 2 * NLVL ). On entry, POLES(*, 2 * I -1: 2 * I) contains the new and old singular values involved in the secular equations on the I-th level. GIVPTR (input) INTEGER array, dimension ( N ). On entry, GIVPTR( I ) records the number of Givens rotations performed on the I-th problem on the computation tree. GIVCOL (input) INTEGER array, dimension ( LDGCOL, 2 * NLVL ). On entry, for each I, GIVCOL(*, 2 * I - 1: 2 * I) records the locations of Givens rotations performed on the I-th level on the computation tree. LDGCOL (input) INTEGER, LDGCOL = > N. The leading dimension of arrays GIVCOL and PERM. PERM (input) INTEGER array, dimension ( LDGCOL, NLVL ). On entry, PERM(*, I) records permutations done on the I-th level of the computation tree. GIVNUM (input) DOUBLE PRECISION array, dimension ( LDU, 2 * NLVL ). On entry, GIVNUM(*, 2 *I -1 : 2 * I) records the C- and S- values of Givens rotations performed on the I-th level on the computation tree. C (input) DOUBLE PRECISION array, dimension ( N ). On entry, if the I-th subproblem is not square, C( I ) contains the C-value of a Givens rotation related to the right null space of the I-th subproblem. S (input) DOUBLE PRECISION array, dimension ( N ). On entry, if the I-th subproblem is not square, S( I ) contains the S-value of a Givens rotation related to the right null space of the I-th subproblem. WORK (workspace) DOUBLE PRECISION array. The dimension must be at least N. IWORK (workspace) INTEGER array. The dimension must be at least 3 * N INFO (output) INTEGER = 0: successful exit. < 0: if INFO = -i, the i-th argument had an illegal value. Further Details =============== Based on contributions by Ming Gu and Ren-Cang Li, Computer Science Division, University of California at Berkeley, USA Osni Marques, LBNL/NERSC, USA ===================================================================== Test the input parameters. 
*/ /* Parameter adjustments */ b_dim1 = *ldb; b_offset = 1 + b_dim1 * 1; b -= b_offset; bx_dim1 = *ldbx; bx_offset = 1 + bx_dim1 * 1; bx -= bx_offset; givnum_dim1 = *ldu; givnum_offset = 1 + givnum_dim1 * 1; givnum -= givnum_offset; poles_dim1 = *ldu; poles_offset = 1 + poles_dim1 * 1; poles -= poles_offset; z_dim1 = *ldu; z_offset = 1 + z_dim1 * 1; z__ -= z_offset; difr_dim1 = *ldu; difr_offset = 1 + difr_dim1 * 1; difr -= difr_offset; difl_dim1 = *ldu; difl_offset = 1 + difl_dim1 * 1; difl -= difl_offset; vt_dim1 = *ldu; vt_offset = 1 + vt_dim1 * 1; vt -= vt_offset; u_dim1 = *ldu; u_offset = 1 + u_dim1 * 1; u -= u_offset; --k; --givptr; perm_dim1 = *ldgcol; perm_offset = 1 + perm_dim1 * 1; perm -= perm_offset; givcol_dim1 = *ldgcol; givcol_offset = 1 + givcol_dim1 * 1; givcol -= givcol_offset; --c__; --s; --work; --iwork; /* Function Body */ *info = 0; if (*icompq < 0 || *icompq > 1) { *info = -1; } else if (*smlsiz < 3) { *info = -2; } else if (*n < *smlsiz) { *info = -3; } else if (*nrhs < 1) { *info = -4; } else if (*ldb < *n) { *info = -6; } else if (*ldbx < *n) { *info = -8; } else if (*ldu < *n) { *info = -10; } else if (*ldgcol < *n) { *info = -19; } if (*info != 0) { i__1 = -(*info); xerbla_("DLALSA", &i__1); return 0; } /* Book-keeping and setting up the computation tree. */ inode = 1; ndiml = inode + *n; ndimr = ndiml + *n; dlasdt_(n, &nlvl, &nd, &iwork[inode], &iwork[ndiml], &iwork[ndimr], smlsiz); /* The following code applies back the left singular vector factors. For applying back the right singular vector factors, go to 50. */ if (*icompq == 1) { goto L50; } /* The nodes on the bottom level of the tree were solved by DLASDQ. The corresponding left and right singular vector matrices are in explicit form. First apply back the left singular vector matrices. */ ndb1 = (nd + 1) / 2; i__1 = nd; for (i__ = ndb1; i__ <= i__1; ++i__) { /* IC : center row of each node NL : number of rows of left subproblem NR : number of rows of right subproblem NLF: starting row of the left subproblem NRF: starting row of the right subproblem */ i1 = i__ - 1; ic = iwork[inode + i1]; nl = iwork[ndiml + i1]; nr = iwork[ndimr + i1]; nlf = ic - nl; nrf = ic + 1; dgemm_("T", "N", &nl, nrhs, &nl, &c_b15, &u[nlf + u_dim1], ldu, &b[ nlf + b_dim1], ldb, &c_b29, &bx[nlf + bx_dim1], ldbx); dgemm_("T", "N", &nr, nrhs, &nr, &c_b15, &u[nrf + u_dim1], ldu, &b[ nrf + b_dim1], ldb, &c_b29, &bx[nrf + bx_dim1], ldbx); /* L10: */ } /* Next copy the rows of B that correspond to unchanged rows in the bidiagonal matrix to BX. */ i__1 = nd; for (i__ = 1; i__ <= i__1; ++i__) { ic = iwork[inode + i__ - 1]; dcopy_(nrhs, &b[ic + b_dim1], ldb, &bx[ic + bx_dim1], ldbx); /* L20: */ } /* Finally go through the left singular vector matrices of all the other subproblems bottom-up on the tree. 
*/ j = pow_ii(&c__2, &nlvl); sqre = 0; for (lvl = nlvl; lvl >= 1; --lvl) { lvl2 = (lvl << 1) - 1; /* find the first node LF and last node LL on the current level LVL */ if (lvl == 1) { lf = 1; ll = 1; } else { i__1 = lvl - 1; lf = pow_ii(&c__2, &i__1); ll = (lf << 1) - 1; } i__1 = ll; for (i__ = lf; i__ <= i__1; ++i__) { im1 = i__ - 1; ic = iwork[inode + im1]; nl = iwork[ndiml + im1]; nr = iwork[ndimr + im1]; nlf = ic - nl; nrf = ic + 1; --j; dlals0_(icompq, &nl, &nr, &sqre, nrhs, &bx[nlf + bx_dim1], ldbx, & b[nlf + b_dim1], ldb, &perm[nlf + lvl * perm_dim1], & givptr[j], &givcol[nlf + lvl2 * givcol_dim1], ldgcol, & givnum[nlf + lvl2 * givnum_dim1], ldu, &poles[nlf + lvl2 * poles_dim1], &difl[nlf + lvl * difl_dim1], &difr[nlf + lvl2 * difr_dim1], &z__[nlf + lvl * z_dim1], &k[j], &c__[ j], &s[j], &work[1], info); /* L30: */ } /* L40: */ } goto L90; /* ICOMPQ = 1: applying back the right singular vector factors. */ L50: /* First now go through the right singular vector matrices of all the tree nodes top-down. */ j = 0; i__1 = nlvl; for (lvl = 1; lvl <= i__1; ++lvl) { lvl2 = (lvl << 1) - 1; /* Find the first node LF and last node LL on the current level LVL. */ if (lvl == 1) { lf = 1; ll = 1; } else { i__2 = lvl - 1; lf = pow_ii(&c__2, &i__2); ll = (lf << 1) - 1; } i__2 = lf; for (i__ = ll; i__ >= i__2; --i__) { im1 = i__ - 1; ic = iwork[inode + im1]; nl = iwork[ndiml + im1]; nr = iwork[ndimr + im1]; nlf = ic - nl; nrf = ic + 1; if (i__ == ll) { sqre = 0; } else { sqre = 1; } ++j; dlals0_(icompq, &nl, &nr, &sqre, nrhs, &b[nlf + b_dim1], ldb, &bx[ nlf + bx_dim1], ldbx, &perm[nlf + lvl * perm_dim1], & givptr[j], &givcol[nlf + lvl2 * givcol_dim1], ldgcol, & givnum[nlf + lvl2 * givnum_dim1], ldu, &poles[nlf + lvl2 * poles_dim1], &difl[nlf + lvl * difl_dim1], &difr[nlf + lvl2 * difr_dim1], &z__[nlf + lvl * z_dim1], &k[j], &c__[ j], &s[j], &work[1], info); /* L60: */ } /* L70: */ } /* The nodes on the bottom level of the tree were solved by DLASDQ. The corresponding right singular vector matrices are in explicit form. Apply them back. 
*/ ndb1 = (nd + 1) / 2; i__1 = nd; for (i__ = ndb1; i__ <= i__1; ++i__) { i1 = i__ - 1; ic = iwork[inode + i1]; nl = iwork[ndiml + i1]; nr = iwork[ndimr + i1]; nlp1 = nl + 1; if (i__ == nd) { nrp1 = nr; } else { nrp1 = nr + 1; } nlf = ic - nl; nrf = ic + 1; dgemm_("T", "N", &nlp1, nrhs, &nlp1, &c_b15, &vt[nlf + vt_dim1], ldu, &b[nlf + b_dim1], ldb, &c_b29, &bx[nlf + bx_dim1], ldbx); dgemm_("T", "N", &nrp1, nrhs, &nrp1, &c_b15, &vt[nrf + vt_dim1], ldu, &b[nrf + b_dim1], ldb, &c_b29, &bx[nrf + bx_dim1], ldbx); /* L80: */ } L90: return 0; /* End of DLALSA */ } /* dlalsa_ */ /* Subroutine */ int dlalsd_(char *uplo, integer *smlsiz, integer *n, integer *nrhs, doublereal *d__, doublereal *e, doublereal *b, integer *ldb, doublereal *rcond, integer *rank, doublereal *work, integer *iwork, integer *info) { /* System generated locals */ integer b_dim1, b_offset, i__1, i__2; doublereal d__1; /* Builtin functions */ double log(doublereal), d_sign(doublereal *, doublereal *); /* Local variables */ static integer difl, difr; static doublereal rcnd; static integer perm, nsub; extern /* Subroutine */ int drot_(integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *); static integer nlvl, sqre, bxst, c__, i__, j, k; static doublereal r__; static integer s, u; extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *); static integer z__; extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *, doublereal *, integer *); static integer poles, sizei, nsize, nwork, icmpq1, icmpq2; static doublereal cs; extern /* Subroutine */ int dlasda_(integer *, integer *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, doublereal *, doublereal *, integer *, integer *, integer *, integer *, doublereal *, doublereal *, doublereal *, doublereal *, integer *, integer *); static integer bx; extern /* Subroutine */ int dlalsa_(integer *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, doublereal *, doublereal *, integer *, integer *, integer *, integer *, doublereal *, doublereal *, doublereal *, doublereal *, integer *, integer *); static doublereal sn; extern /* Subroutine */ int dlascl_(char *, integer *, integer *, doublereal *, doublereal *, integer *, integer *, doublereal *, integer *, integer *); extern integer idamax_(integer *, doublereal *, integer *); static integer st; extern /* Subroutine */ int dlasdq_(char *, integer *, integer *, integer *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *); static integer vt; extern /* Subroutine */ int dlacpy_(char *, integer *, integer *, doublereal *, integer *, doublereal *, integer *), dlartg_(doublereal *, doublereal *, doublereal *, doublereal *, doublereal *), dlaset_(char *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *); static integer givcol; extern doublereal dlanst_(char *, integer *, doublereal *, doublereal *); extern /* Subroutine */ int dlasrt_(char *, integer *, doublereal *, integer *); static doublereal orgnrm; static integer givnum, givptr, nm1, smlszp, st1; static doublereal eps; static integer iwk; static doublereal tol; /* -- LAPACK routine 
(version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLALSD uses the singular value decomposition of A to solve the least squares problem of finding X to minimize the Euclidean norm of each column of A*X-B, where A is N-by-N upper bidiagonal, and X and B are N-by-NRHS. The solution X overwrites B. The singular values of A smaller than RCOND times the largest singular value are treated as zero in solving the least squares problem; in this case a minimum norm solution is returned. The actual singular values are returned in D in ascending order. This code makes very mild assumptions about floating point arithmetic. It will work on machines with a guard digit in add/subtract, or on those binary machines without guard digits which subtract like the Cray XMP, Cray YMP, Cray C 90, or Cray 2. It could conceivably fail on hexadecimal or decimal machines without guard digits, but we know of none. Arguments ========= UPLO (input) CHARACTER*1 = 'U': D and E define an upper bidiagonal matrix. = 'L': D and E define a lower bidiagonal matrix. SMLSIZ (input) INTEGER The maximum size of the subproblems at the bottom of the computation tree. N (input) INTEGER The dimension of the bidiagonal matrix. N >= 0. NRHS (input) INTEGER The number of columns of B. NRHS must be at least 1. D (input/output) DOUBLE PRECISION array, dimension (N) On entry D contains the main diagonal of the bidiagonal matrix. On exit, if INFO = 0, D contains its singular values. E (input/output) DOUBLE PRECISION array, dimension (N-1) Contains the super-diagonal entries of the bidiagonal matrix. On exit, E has been destroyed. B (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS) On input, B contains the right hand sides of the least squares problem. On output, B contains the solution X. LDB (input) INTEGER The leading dimension of B in the calling subprogram. LDB must be at least max(1,N). RCOND (input) DOUBLE PRECISION The singular values of A less than or equal to RCOND times the largest singular value are treated as zero in solving the least squares problem. If RCOND is negative, machine precision is used instead. For example, if diag(S)*X=B were the least squares problem, where diag(S) is a diagonal matrix of singular values, the solution would be X(i) = B(i) / S(i) if S(i) is greater than RCOND*max(S), and X(i) = 0 if S(i) is less than or equal to RCOND*max(S). RANK (output) INTEGER The number of singular values of A greater than RCOND times the largest singular value. WORK (workspace) DOUBLE PRECISION array, dimension at least (9*N + 2*N*SMLSIZ + 8*N*NLVL + N*NRHS + (SMLSIZ+1)**2), where NLVL = max(0, INT(log_2 (N/(SMLSIZ+1))) + 1). IWORK (workspace) INTEGER array, dimension at least (3*N*NLVL + 11*N) INFO (output) INTEGER = 0: successful exit. < 0: if INFO = -i, the i-th argument had an illegal value. > 0: The algorithm failed to compute an singular value while working on the submatrix lying in rows and columns INFO/(N+1) through MOD(INFO,N+1). Further Details =============== Based on contributions by Ming Gu and Ren-Cang Li, Computer Science Division, University of California at Berkeley, USA Osni Marques, LBNL/NERSC, USA ===================================================================== Test the input parameters. 
*/ /* Parameter adjustments */ --d__; --e; b_dim1 = *ldb; b_offset = 1 + b_dim1 * 1; b -= b_offset; --work; --iwork; /* Function Body */ *info = 0; if (*n < 0) { *info = -3; } else if (*nrhs < 1) { *info = -4; } else if (*ldb < 1 || *ldb < *n) { *info = -8; } if (*info != 0) { i__1 = -(*info); xerbla_("DLALSD", &i__1); return 0; } eps = EPSILON; /* Set up the tolerance. */ if (*rcond <= 0. || *rcond >= 1.) { rcnd = eps; } else { rcnd = *rcond; } *rank = 0; /* Quick return if possible. */ if (*n == 0) { return 0; } else if (*n == 1) { if (d__[1] == 0.) { dlaset_("A", &c__1, nrhs, &c_b29, &c_b29, &b[b_offset], ldb); } else { *rank = 1; dlascl_("G", &c__0, &c__0, &d__[1], &c_b15, &c__1, nrhs, &b[ b_offset], ldb, info); d__[1] = abs(d__[1]); } return 0; } /* Rotate the matrix if it is lower bidiagonal. */ if (*(unsigned char *)uplo == 'L') { i__1 = *n - 1; for (i__ = 1; i__ <= i__1; ++i__) { dlartg_(&d__[i__], &e[i__], &cs, &sn, &r__); d__[i__] = r__; e[i__] = sn * d__[i__ + 1]; d__[i__ + 1] = cs * d__[i__ + 1]; if (*nrhs == 1) { drot_(&c__1, &b[i__ + b_dim1], &c__1, &b[i__ + 1 + b_dim1], & c__1, &cs, &sn); } else { work[(i__ << 1) - 1] = cs; work[i__ * 2] = sn; } /* L10: */ } if (*nrhs > 1) { i__1 = *nrhs; for (i__ = 1; i__ <= i__1; ++i__) { i__2 = *n - 1; for (j = 1; j <= i__2; ++j) { cs = work[(j << 1) - 1]; sn = work[j * 2]; drot_(&c__1, &b[j + i__ * b_dim1], &c__1, &b[j + 1 + i__ * b_dim1], &c__1, &cs, &sn); /* L20: */ } /* L30: */ } } } /* Scale. */ nm1 = *n - 1; orgnrm = dlanst_("M", n, &d__[1], &e[1]); if (orgnrm == 0.) { dlaset_("A", n, nrhs, &c_b29, &c_b29, &b[b_offset], ldb); return 0; } dlascl_("G", &c__0, &c__0, &orgnrm, &c_b15, n, &c__1, &d__[1], n, info); dlascl_("G", &c__0, &c__0, &orgnrm, &c_b15, &nm1, &c__1, &e[1], &nm1, info); /* If N is smaller than the minimum divide size SMLSIZ, then solve the problem with another solver. */ if (*n <= *smlsiz) { nwork = *n * *n + 1; dlaset_("A", n, n, &c_b29, &c_b15, &work[1], n); dlasdq_("U", &c__0, n, n, &c__0, nrhs, &d__[1], &e[1], &work[1], n, & work[1], n, &b[b_offset], ldb, &work[nwork], info); if (*info != 0) { return 0; } tol = rcnd * (d__1 = d__[idamax_(n, &d__[1], &c__1)], abs(d__1)); i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { if (d__[i__] <= tol) { dlaset_("A", &c__1, nrhs, &c_b29, &c_b29, &b[i__ + b_dim1], ldb); } else { dlascl_("G", &c__0, &c__0, &d__[i__], &c_b15, &c__1, nrhs, &b[ i__ + b_dim1], ldb, info); ++(*rank); } /* L40: */ } dgemm_("T", "N", n, nrhs, n, &c_b15, &work[1], n, &b[b_offset], ldb, & c_b29, &work[nwork], n); dlacpy_("A", n, nrhs, &work[nwork], n, &b[b_offset], ldb); /* Unscale. */ dlascl_("G", &c__0, &c__0, &c_b15, &orgnrm, n, &c__1, &d__[1], n, info); dlasrt_("D", n, &d__[1], info); dlascl_("G", &c__0, &c__0, &orgnrm, &c_b15, n, nrhs, &b[b_offset], ldb, info); return 0; } /* Book-keeping and setting up some constants. 
*/ nlvl = (integer) (log((doublereal) (*n) / (doublereal) (*smlsiz + 1)) / log(2.)) + 1; smlszp = *smlsiz + 1; u = 1; vt = *smlsiz * *n + 1; difl = vt + smlszp * *n; difr = difl + nlvl * *n; z__ = difr + (nlvl * *n << 1); c__ = z__ + nlvl * *n; s = c__ + *n; poles = s + *n; givnum = poles + (nlvl << 1) * *n; bx = givnum + (nlvl << 1) * *n; nwork = bx + *n * *nrhs; sizei = *n + 1; k = sizei + *n; givptr = k + *n; perm = givptr + *n; givcol = perm + nlvl * *n; iwk = givcol + (nlvl * *n << 1); st = 1; sqre = 0; icmpq1 = 1; icmpq2 = 0; nsub = 0; i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { if ((d__1 = d__[i__], abs(d__1)) < eps) { d__[i__] = d_sign(&eps, &d__[i__]); } /* L50: */ } i__1 = nm1; for (i__ = 1; i__ <= i__1; ++i__) { if ((d__1 = e[i__], abs(d__1)) < eps || i__ == nm1) { ++nsub; iwork[nsub] = st; /* Subproblem found. First determine its size and then apply divide and conquer on it. */ if (i__ < nm1) { /* A subproblem with E(I) small for I < NM1. */ nsize = i__ - st + 1; iwork[sizei + nsub - 1] = nsize; } else if ((d__1 = e[i__], abs(d__1)) >= eps) { /* A subproblem with E(NM1) not too small but I = NM1. */ nsize = *n - st + 1; iwork[sizei + nsub - 1] = nsize; } else { /* A subproblem with E(NM1) small. This implies an 1-by-1 subproblem at D(N), which is not solved explicitly. */ nsize = i__ - st + 1; iwork[sizei + nsub - 1] = nsize; ++nsub; iwork[nsub] = *n; iwork[sizei + nsub - 1] = 1; dcopy_(nrhs, &b[*n + b_dim1], ldb, &work[bx + nm1], n); } st1 = st - 1; if (nsize == 1) { /* This is a 1-by-1 subproblem and is not solved explicitly. */ dcopy_(nrhs, &b[st + b_dim1], ldb, &work[bx + st1], n); } else if (nsize <= *smlsiz) { /* This is a small subproblem and is solved by DLASDQ. */ dlaset_("A", &nsize, &nsize, &c_b29, &c_b15, &work[vt + st1], n); dlasdq_("U", &c__0, &nsize, &nsize, &c__0, nrhs, &d__[st], &e[ st], &work[vt + st1], n, &work[nwork], n, &b[st + b_dim1], ldb, &work[nwork], info); if (*info != 0) { return 0; } dlacpy_("A", &nsize, nrhs, &b[st + b_dim1], ldb, &work[bx + st1], n); } else { /* A large problem. Solve it using divide and conquer. */ dlasda_(&icmpq1, smlsiz, &nsize, &sqre, &d__[st], &e[st], & work[u + st1], n, &work[vt + st1], &iwork[k + st1], & work[difl + st1], &work[difr + st1], &work[z__ + st1], &work[poles + st1], &iwork[givptr + st1], &iwork[ givcol + st1], n, &iwork[perm + st1], &work[givnum + st1], &work[c__ + st1], &work[s + st1], &work[nwork], &iwork[iwk], info); if (*info != 0) { return 0; } bxst = bx + st1; dlalsa_(&icmpq2, smlsiz, &nsize, nrhs, &b[st + b_dim1], ldb, & work[bxst], n, &work[u + st1], n, &work[vt + st1], & iwork[k + st1], &work[difl + st1], &work[difr + st1], &work[z__ + st1], &work[poles + st1], &iwork[givptr + st1], &iwork[givcol + st1], n, &iwork[perm + st1], & work[givnum + st1], &work[c__ + st1], &work[s + st1], &work[nwork], &iwork[iwk], info); if (*info != 0) { return 0; } } st = i__ + 1; } /* L60: */ } /* Apply the singular values and treat the tiny ones as zero. */ tol = rcnd * (d__1 = d__[idamax_(n, &d__[1], &c__1)], abs(d__1)); i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { /* Some of the elements in D can be negative because 1-by-1 subproblems were not solved explicitly. */ if ((d__1 = d__[i__], abs(d__1)) <= tol) { dlaset_("A", &c__1, nrhs, &c_b29, &c_b29, &work[bx + i__ - 1], n); } else { ++(*rank); dlascl_("G", &c__0, &c__0, &d__[i__], &c_b15, &c__1, nrhs, &work[ bx + i__ - 1], n, info); } d__[i__] = (d__1 = d__[i__], abs(d__1)); /* L70: */ } /* Now apply back the right singular vectors. 
*/ icmpq2 = 1; i__1 = nsub; for (i__ = 1; i__ <= i__1; ++i__) { st = iwork[i__]; st1 = st - 1; nsize = iwork[sizei + i__ - 1]; bxst = bx + st1; if (nsize == 1) { dcopy_(nrhs, &work[bxst], n, &b[st + b_dim1], ldb); } else if (nsize <= *smlsiz) { dgemm_("T", "N", &nsize, nrhs, &nsize, &c_b15, &work[vt + st1], n, &work[bxst], n, &c_b29, &b[st + b_dim1], ldb); } else { dlalsa_(&icmpq2, smlsiz, &nsize, nrhs, &work[bxst], n, &b[st + b_dim1], ldb, &work[u + st1], n, &work[vt + st1], &iwork[ k + st1], &work[difl + st1], &work[difr + st1], &work[z__ + st1], &work[poles + st1], &iwork[givptr + st1], &iwork[ givcol + st1], n, &iwork[perm + st1], &work[givnum + st1], &work[c__ + st1], &work[s + st1], &work[nwork], &iwork[ iwk], info); if (*info != 0) { return 0; } } /* L80: */ } /* Unscale and sort the singular values. */ dlascl_("G", &c__0, &c__0, &c_b15, &orgnrm, n, &c__1, &d__[1], n, info); dlasrt_("D", n, &d__[1], info); dlascl_("G", &c__0, &c__0, &orgnrm, &c_b15, n, nrhs, &b[b_offset], ldb, info); return 0; /* End of DLALSD */ } /* dlalsd_ */ /* Subroutine */ int dlamrg_(integer *n1, integer *n2, doublereal *a, integer *dtrd1, integer *dtrd2, integer *index) { /* System generated locals */ integer i__1; /* Local variables */ static integer i__, ind1, ind2, n1sv, n2sv; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLAMRG will create a permutation list which will merge the elements of A (which is composed of two independently sorted sets) into a single set which is sorted in ascending order. Arguments ========= N1 (input) INTEGER N2 (input) INTEGER These arguements contain the respective lengths of the two sorted lists to be merged. A (input) DOUBLE PRECISION array, dimension (N1+N2) The first N1 elements of A contain a list of numbers which are sorted in either ascending or descending order. Likewise for the final N2 elements. DTRD1 (input) INTEGER DTRD2 (input) INTEGER These are the strides to be taken through the array A. Allowable strides are 1 and -1. They indicate whether a subset of A is sorted in ascending (DTRDx = 1) or descending (DTRDx = -1) order. INDEX (output) INTEGER array, dimension (N1+N2) On exit this array will contain a permutation such that if B( I ) = A( INDEX( I ) ) for I=1,N1+N2, then B will be sorted in ascending order. ===================================================================== */ /* Parameter adjustments */ --index; --a; /* Function Body */ n1sv = *n1; n2sv = *n2; if (*dtrd1 > 0) { ind1 = 1; } else { ind1 = *n1; } if (*dtrd2 > 0) { ind2 = *n1 + 1; } else { ind2 = *n1 + *n2; } i__ = 1; /* while ( (N1SV > 0) & (N2SV > 0) ) */ L10: if (n1sv > 0 && n2sv > 0) { if (a[ind1] <= a[ind2]) { index[i__] = ind1; ++i__; ind1 += *dtrd1; --n1sv; } else { index[i__] = ind2; ++i__; ind2 += *dtrd2; --n2sv; } goto L10; } /* end while */ if (n1sv == 0) { i__1 = n2sv; for (n1sv = 1; n1sv <= i__1; ++n1sv) { index[i__] = ind2; ++i__; ind2 += *dtrd2; /* L20: */ } } else { /* N2SV .EQ. 
0 */ i__1 = n1sv; for (n2sv = 1; n2sv <= i__1; ++n2sv) { index[i__] = ind1; ++i__; ind1 += *dtrd1; /* L30: */ } } return 0; /* End of DLAMRG */ } /* dlamrg_ */ doublereal dlange_(char *norm, integer *m, integer *n, doublereal *a, integer *lda, doublereal *work) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2; doublereal ret_val, d__1, d__2, d__3; /* Builtin functions */ double sqrt(doublereal); /* Local variables */ static integer i__, j; static doublereal scale; extern logical lsame_(char *, char *); static doublereal value; extern /* Subroutine */ int dlassq_(integer *, doublereal *, integer *, doublereal *, doublereal *); static doublereal sum; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLANGE returns the value of the one norm, or the Frobenius norm, or the infinity norm, or the element of largest absolute value of a real matrix A. Description =========== DLANGE returns the value DLANGE = ( max(abs(A(i,j))), NORM = 'M' or 'm' ( ( norm1(A), NORM = '1', 'O' or 'o' ( ( normI(A), NORM = 'I' or 'i' ( ( normF(A), NORM = 'F', 'f', 'E' or 'e' where norm1 denotes the one norm of a matrix (maximum column sum), normI denotes the infinity norm of a matrix (maximum row sum) and normF denotes the Frobenius norm of a matrix (square root of sum of squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. Arguments ========= NORM (input) CHARACTER*1 Specifies the value to be returned in DLANGE as described above. M (input) INTEGER The number of rows of the matrix A. M >= 0. When M = 0, DLANGE is set to zero. N (input) INTEGER The number of columns of the matrix A. N >= 0. When N = 0, DLANGE is set to zero. A (input) DOUBLE PRECISION array, dimension (LDA,N) The m by n matrix A. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(M,1). WORK (workspace) DOUBLE PRECISION array, dimension (MAX(1,LWORK)), where LWORK >= M when NORM = 'I'; otherwise, WORK is not referenced. ===================================================================== */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --work; /* Function Body */ if (min(*m,*n) == 0) { value = 0.; } else if (lsame_(norm, "M")) { /* Find max(abs(A(i,j))). */ value = 0.; i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { /* Computing MAX */ d__2 = value, d__3 = (d__1 = a[i__ + j * a_dim1], abs(d__1)); value = max(d__2,d__3); /* L10: */ } /* L20: */ } } else if (lsame_(norm, "O") || *(unsigned char *) norm == '1') { /* Find norm1(A). */ value = 0.; i__1 = *n; for (j = 1; j <= i__1; ++j) { sum = 0.; i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { sum += (d__1 = a[i__ + j * a_dim1], abs(d__1)); /* L30: */ } value = max(value,sum); /* L40: */ } } else if (lsame_(norm, "I")) { /* Find normI(A). */ i__1 = *m; for (i__ = 1; i__ <= i__1; ++i__) { work[i__] = 0.; /* L50: */ } i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { work[i__] += (d__1 = a[i__ + j * a_dim1], abs(d__1)); /* L60: */ } /* L70: */ } value = 0.; i__1 = *m; for (i__ = 1; i__ <= i__1; ++i__) { /* Computing MAX */ d__1 = value, d__2 = work[i__]; value = max(d__1,d__2); /* L80: */ } } else if (lsame_(norm, "F") || lsame_(norm, "E")) { /* Find normF(A). 
*/ scale = 0.; sum = 1.; i__1 = *n; for (j = 1; j <= i__1; ++j) { dlassq_(m, &a[j * a_dim1 + 1], &c__1, &scale, &sum); /* L90: */ } value = scale * sqrt(sum); } ret_val = value; return ret_val; /* End of DLANGE */ } /* dlange_ */ doublereal dlanst_(char *norm, integer *n, doublereal *d__, doublereal *e) { /* System generated locals */ integer i__1; doublereal ret_val, d__1, d__2, d__3, d__4, d__5; /* Builtin functions */ double sqrt(doublereal); /* Local variables */ static integer i__; static doublereal scale; extern logical lsame_(char *, char *); static doublereal anorm; extern /* Subroutine */ int dlassq_(integer *, doublereal *, integer *, doublereal *, doublereal *); static doublereal sum; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLANST returns the value of the one norm, or the Frobenius norm, or the infinity norm, or the element of largest absolute value of a real symmetric tridiagonal matrix A. Description =========== DLANST returns the value DLANST = ( max(abs(A(i,j))), NORM = 'M' or 'm' ( ( norm1(A), NORM = '1', 'O' or 'o' ( ( normI(A), NORM = 'I' or 'i' ( ( normF(A), NORM = 'F', 'f', 'E' or 'e' where norm1 denotes the one norm of a matrix (maximum column sum), normI denotes the infinity norm of a matrix (maximum row sum) and normF denotes the Frobenius norm of a matrix (square root of sum of squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. Arguments ========= NORM (input) CHARACTER*1 Specifies the value to be returned in DLANST as described above. N (input) INTEGER The order of the matrix A. N >= 0. When N = 0, DLANST is set to zero. D (input) DOUBLE PRECISION array, dimension (N) The diagonal elements of A. E (input) DOUBLE PRECISION array, dimension (N-1) The (n-1) sub-diagonal or super-diagonal elements of A. ===================================================================== */ /* Parameter adjustments */ --e; --d__; /* Function Body */ if (*n <= 0) { anorm = 0.; } else if (lsame_(norm, "M")) { /* Find max(abs(A(i,j))). */ anorm = (d__1 = d__[*n], abs(d__1)); i__1 = *n - 1; for (i__ = 1; i__ <= i__1; ++i__) { /* Computing MAX */ d__2 = anorm, d__3 = (d__1 = d__[i__], abs(d__1)); anorm = max(d__2,d__3); /* Computing MAX */ d__2 = anorm, d__3 = (d__1 = e[i__], abs(d__1)); anorm = max(d__2,d__3); /* L10: */ } } else if (lsame_(norm, "O") || *(unsigned char *) norm == '1' || lsame_(norm, "I")) { /* Find norm1(A). */ if (*n == 1) { anorm = abs(d__[1]); } else { /* Computing MAX */ d__3 = abs(d__[1]) + abs(e[1]), d__4 = (d__1 = e[*n - 1], abs( d__1)) + (d__2 = d__[*n], abs(d__2)); anorm = max(d__3,d__4); i__1 = *n - 1; for (i__ = 2; i__ <= i__1; ++i__) { /* Computing MAX */ d__4 = anorm, d__5 = (d__1 = d__[i__], abs(d__1)) + (d__2 = e[ i__], abs(d__2)) + (d__3 = e[i__ - 1], abs(d__3)); anorm = max(d__4,d__5); /* L20: */ } } } else if (lsame_(norm, "F") || lsame_(norm, "E")) { /* Find normF(A). 
*/ scale = 0.; sum = 1.; if (*n > 1) { i__1 = *n - 1; dlassq_(&i__1, &e[1], &c__1, &scale, &sum); sum *= 2; } dlassq_(n, &d__[1], &c__1, &scale, &sum); anorm = scale * sqrt(sum); } ret_val = anorm; return ret_val; /* End of DLANST */ } /* dlanst_ */ doublereal dlansy_(char *norm, char *uplo, integer *n, doublereal *a, integer *lda, doublereal *work) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2; doublereal ret_val, d__1, d__2, d__3; /* Builtin functions */ double sqrt(doublereal); /* Local variables */ static doublereal absa; static integer i__, j; static doublereal scale; extern logical lsame_(char *, char *); static doublereal value; extern /* Subroutine */ int dlassq_(integer *, doublereal *, integer *, doublereal *, doublereal *); static doublereal sum; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLANSY returns the value of the one norm, or the Frobenius norm, or the infinity norm, or the element of largest absolute value of a real symmetric matrix A. Description =========== DLANSY returns the value DLANSY = ( max(abs(A(i,j))), NORM = 'M' or 'm' ( ( norm1(A), NORM = '1', 'O' or 'o' ( ( normI(A), NORM = 'I' or 'i' ( ( normF(A), NORM = 'F', 'f', 'E' or 'e' where norm1 denotes the one norm of a matrix (maximum column sum), normI denotes the infinity norm of a matrix (maximum row sum) and normF denotes the Frobenius norm of a matrix (square root of sum of squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. Arguments ========= NORM (input) CHARACTER*1 Specifies the value to be returned in DLANSY as described above. UPLO (input) CHARACTER*1 Specifies whether the upper or lower triangular part of the symmetric matrix A is to be referenced. = 'U': Upper triangular part of A is referenced = 'L': Lower triangular part of A is referenced N (input) INTEGER The order of the matrix A. N >= 0. When N = 0, DLANSY is set to zero. A (input) DOUBLE PRECISION array, dimension (LDA,N) The symmetric matrix A. If UPLO = 'U', the leading n by n upper triangular part of A contains the upper triangular part of the matrix A, and the strictly lower triangular part of A is not referenced. If UPLO = 'L', the leading n by n lower triangular part of A contains the lower triangular part of the matrix A, and the strictly upper triangular part of A is not referenced. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(N,1). WORK (workspace) DOUBLE PRECISION array, dimension (MAX(1,LWORK)), where LWORK >= N when NORM = 'I' or '1' or 'O'; otherwise, WORK is not referenced. ===================================================================== */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --work; /* Function Body */ if (*n == 0) { value = 0.; } else if (lsame_(norm, "M")) { /* Find max(abs(A(i,j))). */ value = 0.; if (lsame_(uplo, "U")) { i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = j; for (i__ = 1; i__ <= i__2; ++i__) { /* Computing MAX */ d__2 = value, d__3 = (d__1 = a[i__ + j * a_dim1], abs( d__1)); value = max(d__2,d__3); /* L10: */ } /* L20: */ } } else { i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = *n; for (i__ = j; i__ <= i__2; ++i__) { /* Computing MAX */ d__2 = value, d__3 = (d__1 = a[i__ + j * a_dim1], abs( d__1)); value = max(d__2,d__3); /* L30: */ } /* L40: */ } } } else if (lsame_(norm, "I") || lsame_(norm, "O") || *(unsigned char *)norm == '1') { /* Find normI(A) ( = norm1(A), since A is symmetric). 
*/ value = 0.; if (lsame_(uplo, "U")) { i__1 = *n; for (j = 1; j <= i__1; ++j) { sum = 0.; i__2 = j - 1; for (i__ = 1; i__ <= i__2; ++i__) { absa = (d__1 = a[i__ + j * a_dim1], abs(d__1)); sum += absa; work[i__] += absa; /* L50: */ } work[j] = sum + (d__1 = a[j + j * a_dim1], abs(d__1)); /* L60: */ } i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { /* Computing MAX */ d__1 = value, d__2 = work[i__]; value = max(d__1,d__2); /* L70: */ } } else { i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { work[i__] = 0.; /* L80: */ } i__1 = *n; for (j = 1; j <= i__1; ++j) { sum = work[j] + (d__1 = a[j + j * a_dim1], abs(d__1)); i__2 = *n; for (i__ = j + 1; i__ <= i__2; ++i__) { absa = (d__1 = a[i__ + j * a_dim1], abs(d__1)); sum += absa; work[i__] += absa; /* L90: */ } value = max(value,sum); /* L100: */ } } } else if (lsame_(norm, "F") || lsame_(norm, "E")) { /* Find normF(A). */ scale = 0.; sum = 1.; if (lsame_(uplo, "U")) { i__1 = *n; for (j = 2; j <= i__1; ++j) { i__2 = j - 1; dlassq_(&i__2, &a[j * a_dim1 + 1], &c__1, &scale, &sum); /* L110: */ } } else { i__1 = *n - 1; for (j = 1; j <= i__1; ++j) { i__2 = *n - j; dlassq_(&i__2, &a[j + 1 + j * a_dim1], &c__1, &scale, &sum); /* L120: */ } } sum *= 2; i__1 = *lda + 1; dlassq_(n, &a[a_offset], &i__1, &scale, &sum); value = scale * sqrt(sum); } ret_val = value; return ret_val; /* End of DLANSY */ } /* dlansy_ */ /* Subroutine */ int dlanv2_(doublereal *a, doublereal *b, doublereal *c__, doublereal *d__, doublereal *rt1r, doublereal *rt1i, doublereal *rt2r, doublereal *rt2i, doublereal *cs, doublereal *sn) { /* System generated locals */ doublereal d__1, d__2; /* Builtin functions */ double d_sign(doublereal *, doublereal *), sqrt(doublereal); /* Local variables */ static doublereal temp, p, scale, bcmax, z__, bcmis, sigma; extern doublereal dlapy2_(doublereal *, doublereal *); static doublereal aa, bb, cc, dd; static doublereal cs1, sn1, sab, sac, eps, tau; /* -- LAPACK driver routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLANV2 computes the Schur factorization of a real 2-by-2 nonsymmetric matrix in standard form: [ A B ] = [ CS -SN ] [ AA BB ] [ CS SN ] [ C D ] [ SN CS ] [ CC DD ] [-SN CS ] where either 1) CC = 0 so that AA and DD are real eigenvalues of the matrix, or 2) AA = DD and BB*CC < 0, so that AA + or - sqrt(BB*CC) are complex conjugate eigenvalues. Arguments ========= A (input/output) DOUBLE PRECISION B (input/output) DOUBLE PRECISION C (input/output) DOUBLE PRECISION D (input/output) DOUBLE PRECISION On entry, the elements of the input matrix. On exit, they are overwritten by the elements of the standardised Schur form. RT1R (output) DOUBLE PRECISION RT1I (output) DOUBLE PRECISION RT2R (output) DOUBLE PRECISION RT2I (output) DOUBLE PRECISION The real and imaginary parts of the eigenvalues. If the eigenvalues are a complex conjugate pair, RT1I > 0. CS (output) DOUBLE PRECISION SN (output) DOUBLE PRECISION Parameters of the rotation matrix. Further Details =============== Modified by V. Sima, Research Institute for Informatics, Bucharest, Romania, to reduce the risk of cancellation errors, when computing real eigenvalues, and to ensure, if possible, that abs(RT1R) >= abs(RT2R). ===================================================================== */ eps = PRECISION; if (*c__ == 0.) { *cs = 1.; *sn = 0.; goto L10; } else if (*b == 0.) 
{ /* Swap rows and columns */ *cs = 0.; *sn = 1.; temp = *d__; *d__ = *a; *a = temp; *b = -(*c__); *c__ = 0.; goto L10; } else if (*a - *d__ == 0. && d_sign(&c_b15, b) != d_sign(&c_b15, c__)) { *cs = 1.; *sn = 0.; goto L10; } else { temp = *a - *d__; p = temp * .5; /* Computing MAX */ d__1 = abs(*b), d__2 = abs(*c__); bcmax = max(d__1,d__2); /* Computing MIN */ d__1 = abs(*b), d__2 = abs(*c__); bcmis = min(d__1,d__2) * d_sign(&c_b15, b) * d_sign(&c_b15, c__); /* Computing MAX */ d__1 = abs(p); scale = max(d__1,bcmax); z__ = p / scale * p + bcmax / scale * bcmis; /* If Z is of the order of the machine accuracy, postpone the decision on the nature of eigenvalues */ if (z__ >= eps * 4.) { /* Real eigenvalues. Compute A and D. */ d__1 = sqrt(scale) * sqrt(z__); z__ = p + d_sign(&d__1, &p); *a = *d__ + z__; *d__ -= bcmax / z__ * bcmis; /* Compute B and the rotation matrix */ tau = dlapy2_(c__, &z__); *cs = z__ / tau; *sn = *c__ / tau; *b -= *c__; *c__ = 0.; } else { /* Complex eigenvalues, or real (almost) equal eigenvalues. Make diagonal elements equal. */ sigma = *b + *c__; tau = dlapy2_(&sigma, &temp); *cs = sqrt((abs(sigma) / tau + 1.) * .5); *sn = -(p / (tau * *cs)) * d_sign(&c_b15, &sigma); /* Compute [ AA BB ] = [ A B ] [ CS -SN ] [ CC DD ] [ C D ] [ SN CS ] */ aa = *a * *cs + *b * *sn; bb = -(*a) * *sn + *b * *cs; cc = *c__ * *cs + *d__ * *sn; dd = -(*c__) * *sn + *d__ * *cs; /* Compute [ A B ] = [ CS SN ] [ AA BB ] [ C D ] [-SN CS ] [ CC DD ] */ *a = aa * *cs + cc * *sn; *b = bb * *cs + dd * *sn; *c__ = -aa * *sn + cc * *cs; *d__ = -bb * *sn + dd * *cs; temp = (*a + *d__) * .5; *a = temp; *d__ = temp; if (*c__ != 0.) { if (*b != 0.) { if (d_sign(&c_b15, b) == d_sign(&c_b15, c__)) { /* Real eigenvalues: reduce to upper triangular form */ sab = sqrt((abs(*b))); sac = sqrt((abs(*c__))); d__1 = sab * sac; p = d_sign(&d__1, c__); tau = 1. / sqrt((d__1 = *b + *c__, abs(d__1))); *a = temp + p; *d__ = temp - p; *b -= *c__; *c__ = 0.; cs1 = sab * tau; sn1 = sac * tau; temp = *cs * cs1 - *sn * sn1; *sn = *cs * sn1 + *sn * cs1; *cs = temp; } } else { *b = -(*c__); *c__ = 0.; temp = *cs; *cs = -(*sn); *sn = temp; } } } } L10: /* Store eigenvalues in (RT1R,RT1I) and (RT2R,RT2I). */ *rt1r = *a; *rt2r = *d__; if (*c__ == 0.) { *rt1i = 0.; *rt2i = 0.; } else { *rt1i = sqrt((abs(*b))) * sqrt((abs(*c__))); *rt2i = -(*rt1i); } return 0; /* End of DLANV2 */ } /* dlanv2_ */ doublereal dlapy2_(doublereal *x, doublereal *y) { /* System generated locals */ doublereal ret_val, d__1; /* Builtin functions */ double sqrt(doublereal); /* Local variables */ static doublereal xabs, yabs, w, z__; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLAPY2 returns sqrt(x**2+y**2), taking care not to cause unnecessary overflow. Arguments ========= X (input) DOUBLE PRECISION Y (input) DOUBLE PRECISION X and Y specify the values x and y. ===================================================================== */ xabs = abs(*x); yabs = abs(*y); w = max(xabs,yabs); z__ = min(xabs,yabs); if (z__ == 0.) 
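    /* If the smaller magnitude Z is zero the result is simply W;
       otherwise W is factored out so that (Z/W)**2 <= 1 cannot
       overflow, and W*sqrt(1 + (Z/W)**2) = sqrt(X**2 + Y**2) is
       returned. */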
{ ret_val = w; } else { /* Computing 2nd power */ d__1 = z__ / w; ret_val = w * sqrt(d__1 * d__1 + 1.); } return ret_val; /* End of DLAPY2 */ } /* dlapy2_ */ /* Subroutine */ int dlaqr0_(logical *wantt, logical *wantz, integer *n, integer *ilo, integer *ihi, doublereal *h__, integer *ldh, doublereal *wr, doublereal *wi, integer *iloz, integer *ihiz, doublereal *z__, integer *ldz, doublereal *work, integer *lwork, integer *info) { /* System generated locals */ integer h_dim1, h_offset, z_dim1, z_offset, i__1, i__2, i__3, i__4, i__5; doublereal d__1, d__2, d__3, d__4; /* Local variables */ static integer ndfl, kbot, nmin; static doublereal swap; static integer ktop; static doublereal zdum[1] /* was [1][1] */; static integer kacc22, i__, k; static logical nwinc; static integer itmax, nsmax, nwmax, kwtop; extern /* Subroutine */ int dlanv2_(doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *), dlaqr3_( logical *, logical *, integer *, integer *, integer *, integer *, doublereal *, integer *, integer *, integer *, doublereal *, integer *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *, integer *, doublereal *, integer *, integer *, doublereal *, integer *, doublereal *, integer *), dlaqr4_(logical *, logical *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, integer *, doublereal *, integer *, doublereal *, integer *, integer *), dlaqr5_(logical *, logical *, integer *, integer *, integer *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *, integer *, doublereal *, integer *, integer *, doublereal *, integer *); static doublereal aa, bb, cc, dd; static integer ld; static doublereal cs; static integer nh, nibble, it, ks, kt; static doublereal sn; static integer ku, kv, ls, ns; static doublereal ss; static integer nw; extern /* Subroutine */ int dlahqr_(logical *, logical *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, integer *, doublereal *, integer *, integer *), dlacpy_(char *, integer *, integer *, doublereal *, integer *, doublereal *, integer *); extern integer ilaenv_(integer *, char *, char *, integer *, integer *, integer *, integer *, ftnlen, ftnlen); static char jbcmpz[2]; static logical sorted; static integer lwkopt, inf, kdu, nho, nve, kwh, nsr, nwr, kwv; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLAQR0 computes the eigenvalues of a Hessenberg matrix H and, optionally, the matrices T and Z from the Schur decomposition H = Z T Z**T, where T is an upper quasi-triangular matrix (the Schur form), and Z is the orthogonal matrix of Schur vectors. Optionally Z may be postmultiplied into an input orthogonal matrix Q so that this routine can give the Schur factorization of a matrix A which has been reduced to the Hessenberg form H by the orthogonal matrix Q: A = Q*H*Q**T = (QZ)*T*(QZ)**T. Arguments ========= WANTT (input) LOGICAL = .TRUE. : the full Schur form T is required; = .FALSE.: only eigenvalues are required. WANTZ (input) LOGICAL = .TRUE. : the matrix of Schur vectors Z is required; = .FALSE.: Schur vectors are not required. N (input) INTEGER The order of the matrix H. N .GE. 0. 
ILO (input) INTEGER IHI (input) INTEGER It is assumed that H is already upper triangular in rows and columns 1:ILO-1 and IHI+1:N and, if ILO.GT.1, H(ILO,ILO-1) is zero. ILO and IHI are normally set by a previous call to DGEBAL, and then passed to DGEHRD when the matrix output by DGEBAL is reduced to Hessenberg form. Otherwise, ILO and IHI should be set to 1 and N, respectively. If N.GT.0, then 1.LE.ILO.LE.IHI.LE.N. If N = 0, then ILO = 1 and IHI = 0. H (input/output) DOUBLE PRECISION array, dimension (LDH,N) On entry, the upper Hessenberg matrix H. On exit, if INFO = 0 and WANTT is .TRUE., then H contains the upper quasi-triangular matrix T from the Schur decomposition (the Schur form); 2-by-2 diagonal blocks (corresponding to complex conjugate pairs of eigenvalues) are returned in standard form, with H(i,i) = H(i+1,i+1) and H(i+1,i)*H(i,i+1).LT.0. If INFO = 0 and WANTT is .FALSE., then the contents of H are unspecified on exit. (The output value of H when INFO.GT.0 is given under the description of INFO below.) This subroutine may explicitly set H(i,j) = 0 for i.GT.j and j = 1, 2, ... ILO-1 or j = IHI+1, IHI+2, ... N. LDH (input) INTEGER The leading dimension of the array H. LDH .GE. max(1,N). WR (output) DOUBLE PRECISION array, dimension (IHI) WI (output) DOUBLE PRECISION array, dimension (IHI) The real and imaginary parts, respectively, of the computed eigenvalues of H(ILO:IHI,ILO:IHI) are stored WR(ILO:IHI) and WI(ILO:IHI). If two eigenvalues are computed as a complex conjugate pair, they are stored in consecutive elements of WR and WI, say the i-th and (i+1)th, with WI(i) .GT. 0 and WI(i+1) .LT. 0. If WANTT is .TRUE., then the eigenvalues are stored in the same order as on the diagonal of the Schur form returned in H, with WR(i) = H(i,i) and, if H(i:i+1,i:i+1) is a 2-by-2 diagonal block, WI(i) = sqrt(-H(i+1,i)*H(i,i+1)) and WI(i+1) = -WI(i). ILOZ (input) INTEGER IHIZ (input) INTEGER Specify the rows of Z to which transformations must be applied if WANTZ is .TRUE.. 1 .LE. ILOZ .LE. ILO; IHI .LE. IHIZ .LE. N. Z (input/output) DOUBLE PRECISION array, dimension (LDZ,IHI) If WANTZ is .FALSE., then Z is not referenced. If WANTZ is .TRUE., then Z(ILO:IHI,ILOZ:IHIZ) is replaced by Z(ILO:IHI,ILOZ:IHIZ)*U where U is the orthogonal Schur factor of H(ILO:IHI,ILO:IHI). (The output value of Z when INFO.GT.0 is given under the description of INFO below.) LDZ (input) INTEGER The leading dimension of the array Z. if WANTZ is .TRUE. then LDZ.GE.MAX(1,IHIZ). Otherwize, LDZ.GE.1. WORK (workspace/output) DOUBLE PRECISION array, dimension LWORK On exit, if LWORK = -1, WORK(1) returns an estimate of the optimal value for LWORK. LWORK (input) INTEGER The dimension of the array WORK. LWORK .GE. max(1,N) is sufficient, but LWORK typically as large as 6*N may be required for optimal performance. A workspace query to determine the optimal workspace size is recommended. If LWORK = -1, then DLAQR0 does a workspace query. In this case, DLAQR0 checks the input parameters and estimates the optimal workspace size for the given values of N, ILO and IHI. The estimate is returned in WORK(1). No error message related to LWORK is issued by XERBLA. Neither H nor Z are accessed. INFO (output) INTEGER = 0: successful exit .GT. 0: if INFO = i, DLAQR0 failed to compute all of the eigenvalues. Elements 1:ilo-1 and i+1:n of WR and WI contain those eigenvalues which have been successfully computed. (Failures are rare.) If INFO .GT. 
0 and WANT is .FALSE., then on exit, the remaining unconverged eigenvalues are the eigen- values of the upper Hessenberg matrix rows and columns ILO through INFO of the final, output value of H. If INFO .GT. 0 and WANTT is .TRUE., then on exit (*) (initial value of H)*U = U*(final value of H) where U is an orthogonal matrix. The final value of H is upper Hessenberg and quasi-triangular in rows and columns INFO+1 through IHI. If INFO .GT. 0 and WANTZ is .TRUE., then on exit (final value of Z(ILO:IHI,ILOZ:IHIZ) = (initial value of Z(ILO:IHI,ILOZ:IHIZ)*U where U is the orthogonal matrix in (*) (regard- less of the value of WANTT.) If INFO .GT. 0 and WANTZ is .FALSE., then Z is not accessed. ================================================================ Based on contributions by Karen Braman and Ralph Byers, Department of Mathematics, University of Kansas, USA ================================================================ References: K. Braman, R. Byers and R. Mathias, The Multi-Shift QR Algorithm Part I: Maintaining Well Focused Shifts, and Level 3 Performance, SIAM Journal of Matrix Analysis, volume 23, pages 929--947, 2002. K. Braman, R. Byers and R. Mathias, The Multi-Shift QR Algorithm Part II: Aggressive Early Deflation, SIAM Journal of Matrix Analysis, volume 23, pages 948--973, 2002. ================================================================ ==== Matrices of order NTINY or smaller must be processed by . DLAHQR because of insufficient subdiagonal scratch space. . (This is a hard limit.) ==== ==== Exceptional deflation windows: try to cure rare . slow convergence by increasing the size of the . deflation window after KEXNW iterations. ===== ==== Exceptional shifts: try to cure rare slow convergence . with ad-hoc exceptional shifts every KEXSH iterations. . The constants WILK1 and WILK2 are used to form the . exceptional shifts. ==== */ /* Parameter adjustments */ h_dim1 = *ldh; h_offset = 1 + h_dim1 * 1; h__ -= h_offset; --wr; --wi; z_dim1 = *ldz; z_offset = 1 + z_dim1 * 1; z__ -= z_offset; --work; /* Function Body */ *info = 0; /* ==== Quick return for N = 0: nothing to do. ==== */ if (*n == 0) { work[1] = 1.; return 0; } /* ==== Set up job flags for ILAENV. ==== */ if (*wantt) { *(unsigned char *)jbcmpz = 'S'; } else { *(unsigned char *)jbcmpz = 'E'; } if (*wantz) { *(unsigned char *)&jbcmpz[1] = 'V'; } else { *(unsigned char *)&jbcmpz[1] = 'N'; } /* ==== Tiny matrices must use DLAHQR. ==== */ if (*n <= 11) { /* ==== Estimate optimal workspace. ==== */ lwkopt = 1; if (*lwork != -1) { dlahqr_(wantt, wantz, n, ilo, ihi, &h__[h_offset], ldh, &wr[1], & wi[1], iloz, ihiz, &z__[z_offset], ldz, info); } } else { /* ==== Use small bulge multi-shift QR with aggressive early . deflation on larger-than-tiny matrices. ==== ==== Hope for the best. ==== */ *info = 0; /* ==== NWR = recommended deflation window size. At this . point, N .GT. NTINY = 11, so there is enough . subdiagonal workspace for NWR.GE.2 as required. . (In fact, there is enough subdiagonal space for . NWR.GE.3.) ==== */ nwr = ilaenv_(&c__13, "DLAQR0", jbcmpz, n, ilo, ihi, lwork, (ftnlen)6, (ftnlen)2); nwr = max(2,nwr); /* Computing MIN */ i__1 = *ihi - *ilo + 1, i__2 = (*n - 1) / 3, i__1 = min(i__1,i__2); nwr = min(i__1,nwr); nw = nwr; /* ==== NSR = recommended number of simultaneous shifts. . At this point N .GT. NTINY = 11, so there is at . enough subdiagonal workspace for NSR to be even . and greater than or equal to two as required. 
==== */ nsr = ilaenv_(&c__15, "DLAQR0", jbcmpz, n, ilo, ihi, lwork, (ftnlen)6, (ftnlen)2); /* Computing MIN */ i__1 = nsr, i__2 = (*n + 6) / 9, i__1 = min(i__1,i__2), i__2 = *ihi - *ilo; nsr = min(i__1,i__2); /* Computing MAX */ i__1 = 2, i__2 = nsr - nsr % 2; nsr = max(i__1,i__2); /* ==== Estimate optimal workspace ==== ==== Workspace query call to DLAQR3 ==== */ i__1 = nwr + 1; dlaqr3_(wantt, wantz, n, ilo, ihi, &i__1, &h__[h_offset], ldh, iloz, ihiz, &z__[z_offset], ldz, &ls, &ld, &wr[1], &wi[1], &h__[ h_offset], ldh, n, &h__[h_offset], ldh, n, &h__[h_offset], ldh, &work[1], &c_n1); /* ==== Optimal workspace = MAX(DLAQR5, DLAQR3) ==== Computing MAX */ i__1 = nsr * 3 / 2, i__2 = (integer) work[1]; lwkopt = max(i__1,i__2); /* ==== Quick return in case of workspace query. ==== */ if (*lwork == -1) { work[1] = (doublereal) lwkopt; return 0; } /* ==== DLAHQR/DLAQR0 crossover point ==== */ nmin = ilaenv_(&c__12, "DLAQR0", jbcmpz, n, ilo, ihi, lwork, (ftnlen) 6, (ftnlen)2); nmin = max(11,nmin); /* ==== Nibble crossover point ==== */ nibble = ilaenv_(&c__14, "DLAQR0", jbcmpz, n, ilo, ihi, lwork, ( ftnlen)6, (ftnlen)2); nibble = max(0,nibble); /* ==== Accumulate reflections during ttswp? Use block . 2-by-2 structure during matrix-matrix multiply? ==== */ kacc22 = ilaenv_(&c__16, "DLAQR0", jbcmpz, n, ilo, ihi, lwork, ( ftnlen)6, (ftnlen)2); kacc22 = max(0,kacc22); kacc22 = min(2,kacc22); /* ==== NWMAX = the largest possible deflation window for . which there is sufficient workspace. ==== Computing MIN */ i__1 = (*n - 1) / 3, i__2 = *lwork / 2; nwmax = min(i__1,i__2); /* ==== NSMAX = the Largest number of simultaneous shifts . for which there is sufficient workspace. ==== Computing MIN */ i__1 = (*n + 6) / 9, i__2 = (*lwork << 1) / 3; nsmax = min(i__1,i__2); nsmax -= nsmax % 2; /* ==== NDFL: an iteration count restarted at deflation. ==== */ ndfl = 1; /* ==== ITMAX = iteration limit ==== Computing MAX */ i__1 = 10, i__2 = *ihi - *ilo + 1; itmax = 30 * max(i__1,i__2); /* ==== Last row and column in the active block ==== */ kbot = *ihi; /* ==== Main Loop ==== */ i__1 = itmax; for (it = 1; it <= i__1; ++it) { /* ==== Done when KBOT falls below ILO ==== */ if (kbot < *ilo) { goto L90; } /* ==== Locate active block ==== */ i__2 = *ilo + 1; for (k = kbot; k >= i__2; --k) { if (h__[k + (k - 1) * h_dim1] == 0.) { goto L20; } /* L10: */ } k = *ilo; L20: ktop = k; /* ==== Select deflation window size ==== */ nh = kbot - ktop + 1; if (ndfl < 5 || nh < nw) { /* ==== Typical deflation window. If possible and . advisable, nibble the entire active block. . If not, use size NWR or NWR+1 depending upon . which has the smaller corresponding subdiagonal . entry (a heuristic). ==== */ nwinc = TRUE_; if (nh <= min(nmin,nwmax)) { nw = nh; } else { /* Computing MIN */ i__2 = min(nwr,nh); nw = min(i__2,nwmax); if (nw < nwmax) { if (nw >= nh - 1) { nw = nh; } else { kwtop = kbot - nw + 1; if ((d__1 = h__[kwtop + (kwtop - 1) * h_dim1], abs(d__1)) > (d__2 = h__[kwtop - 1 + ( kwtop - 2) * h_dim1], abs(d__2))) { ++nw; } } } } } else { /* ==== Exceptional deflation window. If there have . been no deflations in KEXNW or more iterations, . then vary the deflation window size. At first, . because, larger windows are, in general, more . powerful than smaller ones, rapidly increase the . window up to the maximum reasonable and possible. . Then maybe try a slightly smaller window. 
==== */ if (nwinc && nw < min(nwmax,nh)) { /* Computing MIN */ i__2 = min(nwmax,nh), i__3 = nw << 1; nw = min(i__2,i__3); } else { nwinc = FALSE_; if (nw == nh && nh > 2) { nw = nh - 1; } } } /* ==== Aggressive early deflation: . split workspace under the subdiagonal into . - an nw-by-nw work array V in the lower . left-hand-corner, . - an NW-by-at-least-NW-but-more-is-better . (NW-by-NHO) horizontal work array along . the bottom edge, . - an at-least-NW-but-more-is-better (NHV-by-NW) . vertical work array along the left-hand-edge. . ==== */ kv = *n - nw + 1; kt = nw + 1; nho = *n - nw - 1 - kt + 1; kwv = nw + 2; nve = *n - nw - kwv + 1; /* ==== Aggressive early deflation ==== */ dlaqr3_(wantt, wantz, n, &ktop, &kbot, &nw, &h__[h_offset], ldh, iloz, ihiz, &z__[z_offset], ldz, &ls, &ld, &wr[1], &wi[1], &h__[kv + h_dim1], ldh, &nho, &h__[kv + kt * h_dim1], ldh, &nve, &h__[kwv + h_dim1], ldh, &work[1], lwork); /* ==== Adjust KBOT accounting for new deflations. ==== */ kbot -= ld; /* ==== KS points to the shifts. ==== */ ks = kbot - ls + 1; /* ==== Skip an expensive QR sweep if there is a (partly . heuristic) reason to expect that many eigenvalues . will deflate without it. Here, the QR sweep is . skipped if many eigenvalues have just been deflated . or if the remaining active block is small. */ if (ld == 0 || ld * 100 <= nw * nibble && kbot - ktop + 1 > min( nmin,nwmax)) { /* ==== NS = nominal number of simultaneous shifts. . This may be lowered (slightly) if DLAQR3 . did not provide that many shifts. ==== Computing MIN Computing MAX */ i__4 = 2, i__5 = kbot - ktop; i__2 = min(nsmax,nsr), i__3 = max(i__4,i__5); ns = min(i__2,i__3); ns -= ns % 2; /* ==== If there have been no deflations . in a multiple of KEXSH iterations, . then try exceptional shifts. . Otherwise use shifts provided by . DLAQR3 above or from the eigenvalues . of a trailing principal submatrix. ==== */ if (ndfl % 6 == 0) { ks = kbot - ns + 1; /* Computing MAX */ i__3 = ks + 1, i__4 = ktop + 2; i__2 = max(i__3,i__4); for (i__ = kbot; i__ >= i__2; i__ += -2) { ss = (d__1 = h__[i__ + (i__ - 1) * h_dim1], abs(d__1)) + (d__2 = h__[i__ - 1 + (i__ - 2) * h_dim1], abs(d__2)); aa = ss * .75 + h__[i__ + i__ * h_dim1]; bb = ss; cc = ss * -.4375; dd = aa; dlanv2_(&aa, &bb, &cc, &dd, &wr[i__ - 1], &wi[i__ - 1] , &wr[i__], &wi[i__], &cs, &sn); /* L30: */ } if (ks == ktop) { wr[ks + 1] = h__[ks + 1 + (ks + 1) * h_dim1]; wi[ks + 1] = 0.; wr[ks] = wr[ks + 1]; wi[ks] = wi[ks + 1]; } } else { /* ==== Got NS/2 or fewer shifts? Use DLAQR4 or . DLAHQR on a trailing principal submatrix to . get more. (Since NS.LE.NSMAX.LE.(N+6)/9, . there is enough space below the subdiagonal . to fit an NS-by-NS scratch array.) ==== */ if (kbot - ks + 1 <= ns / 2) { ks = kbot - ns + 1; kt = *n - ns + 1; dlacpy_("A", &ns, &ns, &h__[ks + ks * h_dim1], ldh, & h__[kt + h_dim1], ldh); if (ns > nmin) { dlaqr4_(&c_false, &c_false, &ns, &c__1, &ns, &h__[ kt + h_dim1], ldh, &wr[ks], &wi[ks], & c__1, &c__1, zdum, &c__1, &work[1], lwork, &inf); } else { dlahqr_(&c_false, &c_false, &ns, &c__1, &ns, &h__[ kt + h_dim1], ldh, &wr[ks], &wi[ks], & c__1, &c__1, zdum, &c__1, &inf); } ks += inf; /* ==== In case of a rare QR failure use . eigenvalues of the trailing 2-by-2 . principal submatrix. 
==== */ if (ks >= kbot) { aa = h__[kbot - 1 + (kbot - 1) * h_dim1]; cc = h__[kbot + (kbot - 1) * h_dim1]; bb = h__[kbot - 1 + kbot * h_dim1]; dd = h__[kbot + kbot * h_dim1]; dlanv2_(&aa, &bb, &cc, &dd, &wr[kbot - 1], &wi[ kbot - 1], &wr[kbot], &wi[kbot], &cs, &sn) ; ks = kbot - 1; } } if (kbot - ks + 1 > ns) { /* ==== Sort the shifts (Helps a little) . Bubble sort keeps complex conjugate . pairs together. ==== */ sorted = FALSE_; i__2 = ks + 1; for (k = kbot; k >= i__2; --k) { if (sorted) { goto L60; } sorted = TRUE_; i__3 = k - 1; for (i__ = ks; i__ <= i__3; ++i__) { if ((d__1 = wr[i__], abs(d__1)) + (d__2 = wi[ i__], abs(d__2)) < (d__3 = wr[i__ + 1] , abs(d__3)) + (d__4 = wi[i__ + 1], abs(d__4))) { sorted = FALSE_; swap = wr[i__]; wr[i__] = wr[i__ + 1]; wr[i__ + 1] = swap; swap = wi[i__]; wi[i__] = wi[i__ + 1]; wi[i__ + 1] = swap; } /* L40: */ } /* L50: */ } L60: ; } /* ==== Shuffle shifts into pairs of real shifts . and pairs of complex conjugate shifts . assuming complex conjugate shifts are . already adjacent to one another. (Yes, . they are.) ==== */ i__2 = ks + 2; for (i__ = kbot; i__ >= i__2; i__ += -2) { if (wi[i__] != -wi[i__ - 1]) { swap = wr[i__]; wr[i__] = wr[i__ - 1]; wr[i__ - 1] = wr[i__ - 2]; wr[i__ - 2] = swap; swap = wi[i__]; wi[i__] = wi[i__ - 1]; wi[i__ - 1] = wi[i__ - 2]; wi[i__ - 2] = swap; } /* L70: */ } } /* ==== If there are only two shifts and both are . real, then use only one. ==== */ if (kbot - ks + 1 == 2) { if (wi[kbot] == 0.) { if ((d__1 = wr[kbot] - h__[kbot + kbot * h_dim1], abs( d__1)) < (d__2 = wr[kbot - 1] - h__[kbot + kbot * h_dim1], abs(d__2))) { wr[kbot - 1] = wr[kbot]; } else { wr[kbot] = wr[kbot - 1]; } } } /* ==== Use up to NS of the the smallest magnatiude . shifts. If there aren't NS shifts available, . then use them all, possibly dropping one to . make the number of shifts even. ==== Computing MIN */ i__2 = ns, i__3 = kbot - ks + 1; ns = min(i__2,i__3); ns -= ns % 2; ks = kbot - ns + 1; /* ==== Small-bulge multi-shift QR sweep: . split workspace under the subdiagonal into . - a KDU-by-KDU work array U in the lower . left-hand-corner, . - a KDU-by-at-least-KDU-but-more-is-better . (KDU-by-NHo) horizontal work array WH along . the bottom edge, . - and an at-least-KDU-but-more-is-better-by-KDU . (NVE-by-KDU) vertical work WV arrow along . the left-hand-edge. ==== */ kdu = ns * 3 - 3; ku = *n - kdu + 1; kwh = kdu + 1; nho = *n - kdu - 3 - (kdu + 1) + 1; kwv = kdu + 4; nve = *n - kdu - kwv + 1; /* ==== Small-bulge multi-shift QR sweep ==== */ dlaqr5_(wantt, wantz, &kacc22, n, &ktop, &kbot, &ns, &wr[ks], &wi[ks], &h__[h_offset], ldh, iloz, ihiz, &z__[ z_offset], ldz, &work[1], &c__3, &h__[ku + h_dim1], ldh, &nve, &h__[kwv + h_dim1], ldh, &nho, &h__[ku + kwh * h_dim1], ldh); } /* ==== Note progress (or the lack of it). ==== */ if (ld > 0) { ndfl = 1; } else { ++ndfl; } /* ==== End of main loop ==== L80: */ } /* ==== Iteration limit exceeded. Set INFO to show where . the problem occurred and exit. ==== */ *info = kbot; L90: ; } /* ==== Return the optimal value of LWORK. ==== */ work[1] = (doublereal) lwkopt; /* ==== End of DLAQR0 ==== */ return 0; } /* dlaqr0_ */ /* Subroutine */ int dlaqr1_(integer *n, doublereal *h__, integer *ldh, doublereal *sr1, doublereal *si1, doublereal *sr2, doublereal *si2, doublereal *v) { /* System generated locals */ integer h_dim1, h_offset; doublereal d__1, d__2, d__3; /* Local variables */ static doublereal s, h21s, h31s; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. 
of California Berkeley and NAG Ltd.. November 2006 Given a 2-by-2 or 3-by-3 matrix H, DLAQR1 sets v to a scalar multiple of the first column of the product (*) K = (H - (sr1 + i*si1)*I)*(H - (sr2 + i*si2)*I) scaling to avoid overflows and most underflows. It is assumed that either 1) sr1 = sr2 and si1 = -si2 or 2) si1 = si2 = 0. This is useful for starting double implicit shift bulges in the QR algorithm. N (input) integer Order of the matrix H. N must be either 2 or 3. H (input) DOUBLE PRECISION array of dimension (LDH,N) The 2-by-2 or 3-by-3 matrix H in (*). LDH (input) integer The leading dimension of H as declared in the calling procedure. LDH.GE.N SR1 (input) DOUBLE PRECISION SI1 The shifts in (*). SR2 SI2 V (output) DOUBLE PRECISION array of dimension N A scalar multiple of the first column of the matrix K in (*). ================================================================ Based on contributions by Karen Braman and Ralph Byers, Department of Mathematics, University of Kansas, USA ================================================================ */ /* Parameter adjustments */ h_dim1 = *ldh; h_offset = 1 + h_dim1 * 1; h__ -= h_offset; --v; /* Function Body */ if (*n == 2) { s = (d__1 = h__[h_dim1 + 1] - *sr2, abs(d__1)) + abs(*si2) + (d__2 = h__[h_dim1 + 2], abs(d__2)); if (s == 0.) { v[1] = 0.; v[2] = 0.; } else { h21s = h__[h_dim1 + 2] / s; v[1] = h21s * h__[(h_dim1 << 1) + 1] + (h__[h_dim1 + 1] - *sr1) * ((h__[h_dim1 + 1] - *sr2) / s) - *si1 * (*si2 / s); v[2] = h21s * (h__[h_dim1 + 1] + h__[(h_dim1 << 1) + 2] - *sr1 - * sr2); } } else { s = (d__1 = h__[h_dim1 + 1] - *sr2, abs(d__1)) + abs(*si2) + (d__2 = h__[h_dim1 + 2], abs(d__2)) + (d__3 = h__[h_dim1 + 3], abs( d__3)); if (s == 0.) { v[1] = 0.; v[2] = 0.; v[3] = 0.; } else { h21s = h__[h_dim1 + 2] / s; h31s = h__[h_dim1 + 3] / s; v[1] = (h__[h_dim1 + 1] - *sr1) * ((h__[h_dim1 + 1] - *sr2) / s) - *si1 * (*si2 / s) + h__[(h_dim1 << 1) + 1] * h21s + h__[ h_dim1 * 3 + 1] * h31s; v[2] = h21s * (h__[h_dim1 + 1] + h__[(h_dim1 << 1) + 2] - *sr1 - * sr2) + h__[h_dim1 * 3 + 2] * h31s; v[3] = h31s * (h__[h_dim1 + 1] + h__[h_dim1 * 3 + 3] - *sr1 - * sr2) + h21s * h__[(h_dim1 << 1) + 3]; } } return 0; } /* dlaqr1_ */ /* Subroutine */ int dlaqr2_(logical *wantt, logical *wantz, integer *n, integer *ktop, integer *kbot, integer *nw, doublereal *h__, integer * ldh, integer *iloz, integer *ihiz, doublereal *z__, integer *ldz, integer *ns, integer *nd, doublereal *sr, doublereal *si, doublereal * v, integer *ldv, integer *nh, doublereal *t, integer *ldt, integer * nv, doublereal *wv, integer *ldwv, doublereal *work, integer *lwork) { /* System generated locals */ integer h_dim1, h_offset, t_dim1, t_offset, v_dim1, v_offset, wv_dim1, wv_offset, z_dim1, z_offset, i__1, i__2, i__3, i__4; doublereal d__1, d__2, d__3, d__4, d__5, d__6; /* Builtin functions */ double sqrt(doublereal); /* Local variables */ static doublereal beta; static integer kend, kcol, info, ifst, ilst, ltop, krow, i__, j, k; static doublereal s; extern /* Subroutine */ int dlarf_(char *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, doublereal *), dgemm_(char *, char *, integer *, integer * , integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *); static logical bulge; extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *, doublereal *, integer *); static integer infqr, kwtop; extern /* Subroutine */ int dlanv2_(doublereal *, doublereal *, doublereal *, doublereal 
*, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *); static doublereal aa, bb, cc; extern /* Subroutine */ int dlabad_(doublereal *, doublereal *); static doublereal dd, cs; extern /* Subroutine */ int dgehrd_(integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, integer *), dlarfg_(integer *, doublereal *, doublereal *, integer *, doublereal *); static doublereal sn; static integer jw; extern /* Subroutine */ int dlahqr_(logical *, logical *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, integer *, doublereal *, integer *, integer *), dlacpy_(char *, integer *, integer *, doublereal *, integer *, doublereal *, integer *); static doublereal safmin, safmax; extern /* Subroutine */ int dlaset_(char *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *), dorghr_(integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, integer *), dtrexc_(char *, integer *, doublereal *, integer *, doublereal *, integer *, integer *, integer *, doublereal *, integer *); static logical sorted; static doublereal smlnum; static integer lwkopt; static doublereal evi, evk, foo; static integer kln; static doublereal tau, ulp; static integer lwk1, lwk2; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 This subroutine is identical to DLAQR3 except that it avoids recursion by calling DLAHQR instead of DLAQR4. ****************************************************************** Aggressive early deflation: This subroutine accepts as input an upper Hessenberg matrix H and performs an orthogonal similarity transformation designed to detect and deflate fully converged eigenvalues from a trailing principal submatrix. On output H has been over- written by a new Hessenberg matrix that is a perturbation of an orthogonal similarity transformation of H. It is to be hoped that the final version of H has many zero subdiagonal entries. ****************************************************************** WANTT (input) LOGICAL If .TRUE., then the Hessenberg matrix H is fully updated so that the quasi-triangular Schur factor may be computed (in cooperation with the calling subroutine). If .FALSE., then only enough of H is updated to preserve the eigenvalues. WANTZ (input) LOGICAL If .TRUE., then the orthogonal matrix Z is updated so so that the orthogonal Schur factor may be computed (in cooperation with the calling subroutine). If .FALSE., then Z is not referenced. N (input) INTEGER The order of the matrix H and (if WANTZ is .TRUE.) the order of the orthogonal matrix Z. KTOP (input) INTEGER It is assumed that either KTOP = 1 or H(KTOP,KTOP-1)=0. KBOT and KTOP together determine an isolated block along the diagonal of the Hessenberg matrix. KBOT (input) INTEGER It is assumed without a check that either KBOT = N or H(KBOT+1,KBOT)=0. KBOT and KTOP together determine an isolated block along the diagonal of the Hessenberg matrix. NW (input) INTEGER Deflation window size. 1 .LE. NW .LE. (KBOT-KTOP+1). H (input/output) DOUBLE PRECISION array, dimension (LDH,N) On input the initial N-by-N section of H stores the Hessenberg matrix undergoing aggressive early deflation. On output H has been transformed by an orthogonal similarity transformation, perturbed, and the returned to Hessenberg form that (it is to be hoped) has some zero subdiagonal entries. 
LDH (input) integer Leading dimension of H just as declared in the calling subroutine. N .LE. LDH ILOZ (input) INTEGER IHIZ (input) INTEGER Specify the rows of Z to which transformations must be applied if WANTZ is .TRUE.. 1 .LE. ILOZ .LE. IHIZ .LE. N. Z (input/output) DOUBLE PRECISION array, dimension (LDZ,IHI) IF WANTZ is .TRUE., then on output, the orthogonal similarity transformation mentioned above has been accumulated into Z(ILOZ:IHIZ,ILO:IHI) from the right. If WANTZ is .FALSE., then Z is unreferenced. LDZ (input) integer The leading dimension of Z just as declared in the calling subroutine. 1 .LE. LDZ. NS (output) integer The number of unconverged (ie approximate) eigenvalues returned in SR and SI that may be used as shifts by the calling subroutine. ND (output) integer The number of converged eigenvalues uncovered by this subroutine. SR (output) DOUBLE PRECISION array, dimension KBOT SI (output) DOUBLE PRECISION array, dimension KBOT On output, the real and imaginary parts of approximate eigenvalues that may be used for shifts are stored in SR(KBOT-ND-NS+1) through SR(KBOT-ND) and SI(KBOT-ND-NS+1) through SI(KBOT-ND), respectively. The real and imaginary parts of converged eigenvalues are stored in SR(KBOT-ND+1) through SR(KBOT) and SI(KBOT-ND+1) through SI(KBOT), respectively. V (workspace) DOUBLE PRECISION array, dimension (LDV,NW) An NW-by-NW work array. LDV (input) integer scalar The leading dimension of V just as declared in the calling subroutine. NW .LE. LDV NH (input) integer scalar The number of columns of T. NH.GE.NW. T (workspace) DOUBLE PRECISION array, dimension (LDT,NW) LDT (input) integer The leading dimension of T just as declared in the calling subroutine. NW .LE. LDT NV (input) integer The number of rows of work array WV available for workspace. NV.GE.NW. WV (workspace) DOUBLE PRECISION array, dimension (LDWV,NW) LDWV (input) integer The leading dimension of W just as declared in the calling subroutine. NW .LE. LDV WORK (workspace) DOUBLE PRECISION array, dimension LWORK. On exit, WORK(1) is set to an estimate of the optimal value of LWORK for the given values of N, NW, KTOP and KBOT. LWORK (input) integer The dimension of the work array WORK. LWORK = 2*NW suffices, but greater efficiency may result from larger values of LWORK. If LWORK = -1, then a workspace query is assumed; DLAQR2 only estimates the optimal workspace size for the given values of N, NW, KTOP and KBOT. The estimate is returned in WORK(1). No error message related to LWORK is issued by XERBLA. Neither H nor Z are accessed. ================================================================ Based on contributions by Karen Braman and Ralph Byers, Department of Mathematics, University of Kansas, USA ================================================================ ==== Estimate optimal workspace. 
==== */ /* Parameter adjustments */ h_dim1 = *ldh; h_offset = 1 + h_dim1 * 1; h__ -= h_offset; z_dim1 = *ldz; z_offset = 1 + z_dim1 * 1; z__ -= z_offset; --sr; --si; v_dim1 = *ldv; v_offset = 1 + v_dim1 * 1; v -= v_offset; t_dim1 = *ldt; t_offset = 1 + t_dim1 * 1; t -= t_offset; wv_dim1 = *ldwv; wv_offset = 1 + wv_dim1 * 1; wv -= wv_offset; --work; /* Function Body */ /* Computing MIN */ i__1 = *nw, i__2 = *kbot - *ktop + 1; jw = min(i__1,i__2); if (jw <= 2) { lwkopt = 1; } else { /* ==== Workspace query call to DGEHRD ==== */ i__1 = jw - 1; dgehrd_(&jw, &c__1, &i__1, &t[t_offset], ldt, &work[1], &work[1], & c_n1, &info); lwk1 = (integer) work[1]; /* ==== Workspace query call to DORGHR ==== */ i__1 = jw - 1; dorghr_(&jw, &c__1, &i__1, &t[t_offset], ldt, &work[1], &work[1], & c_n1, &info); lwk2 = (integer) work[1]; /* ==== Optimal workspace ==== */ lwkopt = jw + max(lwk1,lwk2); } /* ==== Quick return in case of workspace query. ==== */ if (*lwork == -1) { work[1] = (doublereal) lwkopt; return 0; } /* ==== Nothing to do ... ... for an empty active block ... ==== */ *ns = 0; *nd = 0; if (*ktop > *kbot) { return 0; } /* ... nor for an empty deflation window. ==== */ if (*nw < 1) { return 0; } /* ==== Machine constants ==== */ safmin = SAFEMINIMUM; safmax = 1. / safmin; dlabad_(&safmin, &safmax); ulp = PRECISION; smlnum = safmin * ((doublereal) (*n) / ulp); /* ==== Setup deflation window ==== Computing MIN */ i__1 = *nw, i__2 = *kbot - *ktop + 1; jw = min(i__1,i__2); kwtop = *kbot - jw + 1; if (kwtop == *ktop) { s = 0.; } else { s = h__[kwtop + (kwtop - 1) * h_dim1]; } if (*kbot == kwtop) { /* ==== 1-by-1 deflation window: not much to do ==== */ sr[kwtop] = h__[kwtop + kwtop * h_dim1]; si[kwtop] = 0.; *ns = 1; *nd = 0; /* Computing MAX */ d__2 = smlnum, d__3 = ulp * (d__1 = h__[kwtop + kwtop * h_dim1], abs( d__1)); if (abs(s) <= max(d__2,d__3)) { *ns = 0; *nd = 1; if (kwtop > *ktop) { h__[kwtop + (kwtop - 1) * h_dim1] = 0.; } } return 0; } /* ==== Convert to spike-triangular form. (In case of a . rare QR failure, this routine continues to do . aggressive early deflation using that part of . the deflation window that converged using INFQR . here and there to keep track.) ==== */ dlacpy_("U", &jw, &jw, &h__[kwtop + kwtop * h_dim1], ldh, &t[t_offset], ldt); i__1 = jw - 1; i__2 = *ldh + 1; i__3 = *ldt + 1; dcopy_(&i__1, &h__[kwtop + 1 + kwtop * h_dim1], &i__2, &t[t_dim1 + 2], & i__3); dlaset_("A", &jw, &jw, &c_b29, &c_b15, &v[v_offset], ldv); dlahqr_(&c_true, &c_true, &jw, &c__1, &jw, &t[t_offset], ldt, &sr[kwtop], &si[kwtop], &c__1, &jw, &v[v_offset], ldv, &infqr); /* ==== DTREXC needs a clean margin near the diagonal ==== */ i__1 = jw - 3; for (j = 1; j <= i__1; ++j) { t[j + 2 + j * t_dim1] = 0.; t[j + 3 + j * t_dim1] = 0.; /* L10: */ } if (jw > 2) { t[jw + (jw - 2) * t_dim1] = 0.; } /* ==== Deflation detection loop ==== */ *ns = jw; ilst = infqr + 1; L20: if (ilst <= *ns) { if (*ns == 1) { bulge = FALSE_; } else { bulge = t[*ns + (*ns - 1) * t_dim1] != 0.; } /* ==== Small spike tip test for deflation ==== */ if (! bulge) { /* ==== Real eigenvalue ==== */ foo = (d__1 = t[*ns + *ns * t_dim1], abs(d__1)); if (foo == 0.) { foo = abs(s); } /* Computing MAX */ d__2 = smlnum, d__3 = ulp * foo; if ((d__1 = s * v[*ns * v_dim1 + 1], abs(d__1)) <= max(d__2,d__3)) { /* ==== Deflatable ==== */ --(*ns); } else { /* ==== Undeflatable. Move it up out of the way. . (DTREXC can not fail in this case.) 
==== */ ifst = *ns; dtrexc_("V", &jw, &t[t_offset], ldt, &v[v_offset], ldv, &ifst, &ilst, &work[1], &info); ++ilst; } } else { /* ==== Complex conjugate pair ==== */ foo = (d__3 = t[*ns + *ns * t_dim1], abs(d__3)) + sqrt((d__1 = t[* ns + (*ns - 1) * t_dim1], abs(d__1))) * sqrt((d__2 = t[* ns - 1 + *ns * t_dim1], abs(d__2))); if (foo == 0.) { foo = abs(s); } /* Computing MAX */ d__3 = (d__1 = s * v[*ns * v_dim1 + 1], abs(d__1)), d__4 = (d__2 = s * v[(*ns - 1) * v_dim1 + 1], abs(d__2)); /* Computing MAX */ d__5 = smlnum, d__6 = ulp * foo; if (max(d__3,d__4) <= max(d__5,d__6)) { /* ==== Deflatable ==== */ *ns += -2; } else { /* ==== Undflatable. Move them up out of the way. . Fortunately, DTREXC does the right thing with . ILST in case of a rare exchange failure. ==== */ ifst = *ns; dtrexc_("V", &jw, &t[t_offset], ldt, &v[v_offset], ldv, &ifst, &ilst, &work[1], &info); ilst += 2; } } /* ==== End deflation detection loop ==== */ goto L20; } /* ==== Return to Hessenberg form ==== */ if (*ns == 0) { s = 0.; } if (*ns < jw) { /* ==== sorting diagonal blocks of T improves accuracy for . graded matrices. Bubble sort deals well with . exchange failures. ==== */ sorted = FALSE_; i__ = *ns + 1; L30: if (sorted) { goto L50; } sorted = TRUE_; kend = i__ - 1; i__ = infqr + 1; if (i__ == *ns) { k = i__ + 1; } else if (t[i__ + 1 + i__ * t_dim1] == 0.) { k = i__ + 1; } else { k = i__ + 2; } L40: if (k <= kend) { if (k == i__ + 1) { evi = (d__1 = t[i__ + i__ * t_dim1], abs(d__1)); } else { evi = (d__3 = t[i__ + i__ * t_dim1], abs(d__3)) + sqrt((d__1 = t[i__ + 1 + i__ * t_dim1], abs(d__1))) * sqrt((d__2 = t[i__ + (i__ + 1) * t_dim1], abs(d__2))); } if (k == kend) { evk = (d__1 = t[k + k * t_dim1], abs(d__1)); } else if (t[k + 1 + k * t_dim1] == 0.) { evk = (d__1 = t[k + k * t_dim1], abs(d__1)); } else { evk = (d__3 = t[k + k * t_dim1], abs(d__3)) + sqrt((d__1 = t[ k + 1 + k * t_dim1], abs(d__1))) * sqrt((d__2 = t[k + (k + 1) * t_dim1], abs(d__2))); } if (evi >= evk) { i__ = k; } else { sorted = FALSE_; ifst = i__; ilst = k; dtrexc_("V", &jw, &t[t_offset], ldt, &v[v_offset], ldv, &ifst, &ilst, &work[1], &info); if (info == 0) { i__ = ilst; } else { i__ = k; } } if (i__ == kend) { k = i__ + 1; } else if (t[i__ + 1 + i__ * t_dim1] == 0.) { k = i__ + 1; } else { k = i__ + 2; } goto L40; } goto L30; L50: ; } /* ==== Restore shift/eigenvalue array from T ==== */ i__ = jw; L60: if (i__ >= infqr + 1) { if (i__ == infqr + 1) { sr[kwtop + i__ - 1] = t[i__ + i__ * t_dim1]; si[kwtop + i__ - 1] = 0.; --i__; } else if (t[i__ + (i__ - 1) * t_dim1] == 0.) { sr[kwtop + i__ - 1] = t[i__ + i__ * t_dim1]; si[kwtop + i__ - 1] = 0.; --i__; } else { aa = t[i__ - 1 + (i__ - 1) * t_dim1]; cc = t[i__ + (i__ - 1) * t_dim1]; bb = t[i__ - 1 + i__ * t_dim1]; dd = t[i__ + i__ * t_dim1]; dlanv2_(&aa, &bb, &cc, &dd, &sr[kwtop + i__ - 2], &si[kwtop + i__ - 2], &sr[kwtop + i__ - 1], &si[kwtop + i__ - 1], &cs, & sn); i__ += -2; } goto L60; } if (*ns < jw || s == 0.) { if (*ns > 1 && s != 0.) 
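    /* More than one unconverged shift and a nonzero spike: a Householder
       reflector built from the first row of V compresses the spike, is
       applied to T and V, and DGEHRD then returns the window to
       Hessenberg form before it is copied back into H below. */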
{ /* ==== Reflect spike back into lower triangle ==== */ dcopy_(ns, &v[v_offset], ldv, &work[1], &c__1); beta = work[1]; dlarfg_(ns, &beta, &work[2], &c__1, &tau); work[1] = 1.; i__1 = jw - 2; i__2 = jw - 2; dlaset_("L", &i__1, &i__2, &c_b29, &c_b29, &t[t_dim1 + 3], ldt); dlarf_("L", ns, &jw, &work[1], &c__1, &tau, &t[t_offset], ldt, & work[jw + 1]); dlarf_("R", ns, ns, &work[1], &c__1, &tau, &t[t_offset], ldt, & work[jw + 1]); dlarf_("R", &jw, ns, &work[1], &c__1, &tau, &v[v_offset], ldv, & work[jw + 1]); i__1 = *lwork - jw; dgehrd_(&jw, &c__1, ns, &t[t_offset], ldt, &work[1], &work[jw + 1] , &i__1, &info); } /* ==== Copy updated reduced window into place ==== */ if (kwtop > 1) { h__[kwtop + (kwtop - 1) * h_dim1] = s * v[v_dim1 + 1]; } dlacpy_("U", &jw, &jw, &t[t_offset], ldt, &h__[kwtop + kwtop * h_dim1] , ldh); i__1 = jw - 1; i__2 = *ldt + 1; i__3 = *ldh + 1; dcopy_(&i__1, &t[t_dim1 + 2], &i__2, &h__[kwtop + 1 + kwtop * h_dim1], &i__3); /* ==== Accumulate orthogonal matrix in order update . H and Z, if requested. (A modified version . of DORGHR that accumulates block Householder . transformations into V directly might be . marginally more efficient than the following.) ==== */ if (*ns > 1 && s != 0.) { i__1 = *lwork - jw; dorghr_(&jw, &c__1, ns, &t[t_offset], ldt, &work[1], &work[jw + 1] , &i__1, &info); dgemm_("N", "N", &jw, ns, ns, &c_b15, &v[v_offset], ldv, &t[ t_offset], ldt, &c_b29, &wv[wv_offset], ldwv); dlacpy_("A", &jw, ns, &wv[wv_offset], ldwv, &v[v_offset], ldv); } /* ==== Update vertical slab in H ==== */ if (*wantt) { ltop = 1; } else { ltop = *ktop; } i__1 = kwtop - 1; i__2 = *nv; for (krow = ltop; i__2 < 0 ? krow >= i__1 : krow <= i__1; krow += i__2) { /* Computing MIN */ i__3 = *nv, i__4 = kwtop - krow; kln = min(i__3,i__4); dgemm_("N", "N", &kln, &jw, &jw, &c_b15, &h__[krow + kwtop * h_dim1], ldh, &v[v_offset], ldv, &c_b29, &wv[wv_offset], ldwv); dlacpy_("A", &kln, &jw, &wv[wv_offset], ldwv, &h__[krow + kwtop * h_dim1], ldh); /* L70: */ } /* ==== Update horizontal slab in H ==== */ if (*wantt) { i__2 = *n; i__1 = *nh; for (kcol = *kbot + 1; i__1 < 0 ? kcol >= i__2 : kcol <= i__2; kcol += i__1) { /* Computing MIN */ i__3 = *nh, i__4 = *n - kcol + 1; kln = min(i__3,i__4); dgemm_("C", "N", &jw, &kln, &jw, &c_b15, &v[v_offset], ldv, & h__[kwtop + kcol * h_dim1], ldh, &c_b29, &t[t_offset], ldt); dlacpy_("A", &jw, &kln, &t[t_offset], ldt, &h__[kwtop + kcol * h_dim1], ldh); /* L80: */ } } /* ==== Update vertical slab in Z ==== */ if (*wantz) { i__1 = *ihiz; i__2 = *nv; for (krow = *iloz; i__2 < 0 ? krow >= i__1 : krow <= i__1; krow += i__2) { /* Computing MIN */ i__3 = *nv, i__4 = *ihiz - krow + 1; kln = min(i__3,i__4); dgemm_("N", "N", &kln, &jw, &jw, &c_b15, &z__[krow + kwtop * z_dim1], ldz, &v[v_offset], ldv, &c_b29, &wv[ wv_offset], ldwv); dlacpy_("A", &kln, &jw, &wv[wv_offset], ldwv, &z__[krow + kwtop * z_dim1], ldz); /* L90: */ } } } /* ==== Return the number of deflations ... ==== */ *nd = jw - *ns; /* ==== ... and the number of shifts. (Subtracting . INFQR from the spike length takes care . of the case of a rare QR failure while . calculating eigenvalues of the deflation . window.) ==== */ *ns -= infqr; /* ==== Return optimal workspace. 
==== */ work[1] = (doublereal) lwkopt; /* ==== End of DLAQR2 ==== */ return 0; } /* dlaqr2_ */ /* Subroutine */ int dlaqr3_(logical *wantt, logical *wantz, integer *n, integer *ktop, integer *kbot, integer *nw, doublereal *h__, integer * ldh, integer *iloz, integer *ihiz, doublereal *z__, integer *ldz, integer *ns, integer *nd, doublereal *sr, doublereal *si, doublereal * v, integer *ldv, integer *nh, doublereal *t, integer *ldt, integer * nv, doublereal *wv, integer *ldwv, doublereal *work, integer *lwork) { /* System generated locals */ integer h_dim1, h_offset, t_dim1, t_offset, v_dim1, v_offset, wv_dim1, wv_offset, z_dim1, z_offset, i__1, i__2, i__3, i__4; doublereal d__1, d__2, d__3, d__4, d__5, d__6; /* Builtin functions */ double sqrt(doublereal); /* Local variables */ static doublereal beta; static integer kend, kcol, info, nmin, ifst, ilst, ltop, krow, i__, j, k; static doublereal s; extern /* Subroutine */ int dlarf_(char *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, doublereal *), dgemm_(char *, char *, integer *, integer * , integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *); static logical bulge; extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *, doublereal *, integer *); static integer infqr, kwtop; extern /* Subroutine */ int dlanv2_(doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *), dlaqr4_( logical *, logical *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, integer *, doublereal *, integer *, doublereal *, integer *, integer *); static doublereal aa, bb, cc; extern /* Subroutine */ int dlabad_(doublereal *, doublereal *); static doublereal dd, cs; extern /* Subroutine */ int dgehrd_(integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, integer *), dlarfg_(integer *, doublereal *, doublereal *, integer *, doublereal *); static doublereal sn; static integer jw; extern /* Subroutine */ int dlahqr_(logical *, logical *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, integer *, doublereal *, integer *, integer *), dlacpy_(char *, integer *, integer *, doublereal *, integer *, doublereal *, integer *); static doublereal safmin, safmax; extern integer ilaenv_(integer *, char *, char *, integer *, integer *, integer *, integer *, ftnlen, ftnlen); extern /* Subroutine */ int dlaset_(char *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *), dorghr_(integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, integer *), dtrexc_(char *, integer *, doublereal *, integer *, doublereal *, integer *, integer *, integer *, doublereal *, integer *); static logical sorted; static doublereal smlnum; static integer lwkopt; static doublereal evi, evk, foo; static integer kln; static doublereal tau, ulp; static integer lwk1, lwk2, lwk3; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 ****************************************************************** Aggressive early deflation: This subroutine accepts as input an upper Hessenberg matrix H and performs an orthogonal similarity transformation designed to detect and deflate fully converged eigenvalues from a trailing principal submatrix. 
On output H has been over- written by a new Hessenberg matrix that is a perturbation of an orthogonal similarity transformation of H. It is to be hoped that the final version of H has many zero subdiagonal entries. ****************************************************************** WANTT (input) LOGICAL If .TRUE., then the Hessenberg matrix H is fully updated so that the quasi-triangular Schur factor may be computed (in cooperation with the calling subroutine). If .FALSE., then only enough of H is updated to preserve the eigenvalues. WANTZ (input) LOGICAL If .TRUE., then the orthogonal matrix Z is updated so so that the orthogonal Schur factor may be computed (in cooperation with the calling subroutine). If .FALSE., then Z is not referenced. N (input) INTEGER The order of the matrix H and (if WANTZ is .TRUE.) the order of the orthogonal matrix Z. KTOP (input) INTEGER It is assumed that either KTOP = 1 or H(KTOP,KTOP-1)=0. KBOT and KTOP together determine an isolated block along the diagonal of the Hessenberg matrix. KBOT (input) INTEGER It is assumed without a check that either KBOT = N or H(KBOT+1,KBOT)=0. KBOT and KTOP together determine an isolated block along the diagonal of the Hessenberg matrix. NW (input) INTEGER Deflation window size. 1 .LE. NW .LE. (KBOT-KTOP+1). H (input/output) DOUBLE PRECISION array, dimension (LDH,N) On input the initial N-by-N section of H stores the Hessenberg matrix undergoing aggressive early deflation. On output H has been transformed by an orthogonal similarity transformation, perturbed, and the returned to Hessenberg form that (it is to be hoped) has some zero subdiagonal entries. LDH (input) integer Leading dimension of H just as declared in the calling subroutine. N .LE. LDH ILOZ (input) INTEGER IHIZ (input) INTEGER Specify the rows of Z to which transformations must be applied if WANTZ is .TRUE.. 1 .LE. ILOZ .LE. IHIZ .LE. N. Z (input/output) DOUBLE PRECISION array, dimension (LDZ,IHI) IF WANTZ is .TRUE., then on output, the orthogonal similarity transformation mentioned above has been accumulated into Z(ILOZ:IHIZ,ILO:IHI) from the right. If WANTZ is .FALSE., then Z is unreferenced. LDZ (input) integer The leading dimension of Z just as declared in the calling subroutine. 1 .LE. LDZ. NS (output) integer The number of unconverged (ie approximate) eigenvalues returned in SR and SI that may be used as shifts by the calling subroutine. ND (output) integer The number of converged eigenvalues uncovered by this subroutine. SR (output) DOUBLE PRECISION array, dimension KBOT SI (output) DOUBLE PRECISION array, dimension KBOT On output, the real and imaginary parts of approximate eigenvalues that may be used for shifts are stored in SR(KBOT-ND-NS+1) through SR(KBOT-ND) and SI(KBOT-ND-NS+1) through SI(KBOT-ND), respectively. The real and imaginary parts of converged eigenvalues are stored in SR(KBOT-ND+1) through SR(KBOT) and SI(KBOT-ND+1) through SI(KBOT), respectively. V (workspace) DOUBLE PRECISION array, dimension (LDV,NW) An NW-by-NW work array. LDV (input) integer scalar The leading dimension of V just as declared in the calling subroutine. NW .LE. LDV NH (input) integer scalar The number of columns of T. NH.GE.NW. T (workspace) DOUBLE PRECISION array, dimension (LDT,NW) LDT (input) integer The leading dimension of T just as declared in the calling subroutine. NW .LE. LDT NV (input) integer The number of rows of work array WV available for workspace. NV.GE.NW. 
WV (workspace) DOUBLE PRECISION array, dimension (LDWV,NW) LDWV (input) integer The leading dimension of W just as declared in the calling subroutine. NW .LE. LDV WORK (workspace) DOUBLE PRECISION array, dimension LWORK. On exit, WORK(1) is set to an estimate of the optimal value of LWORK for the given values of N, NW, KTOP and KBOT. LWORK (input) integer The dimension of the work array WORK. LWORK = 2*NW suffices, but greater efficiency may result from larger values of LWORK. If LWORK = -1, then a workspace query is assumed; DLAQR3 only estimates the optimal workspace size for the given values of N, NW, KTOP and KBOT. The estimate is returned in WORK(1). No error message related to LWORK is issued by XERBLA. Neither H nor Z are accessed. ================================================================ Based on contributions by Karen Braman and Ralph Byers, Department of Mathematics, University of Kansas, USA ================================================================== ==== Estimate optimal workspace. ==== */ /* Parameter adjustments */ h_dim1 = *ldh; h_offset = 1 + h_dim1 * 1; h__ -= h_offset; z_dim1 = *ldz; z_offset = 1 + z_dim1 * 1; z__ -= z_offset; --sr; --si; v_dim1 = *ldv; v_offset = 1 + v_dim1 * 1; v -= v_offset; t_dim1 = *ldt; t_offset = 1 + t_dim1 * 1; t -= t_offset; wv_dim1 = *ldwv; wv_offset = 1 + wv_dim1 * 1; wv -= wv_offset; --work; /* Function Body */ /* Computing MIN */ i__1 = *nw, i__2 = *kbot - *ktop + 1; jw = min(i__1,i__2); if (jw <= 2) { lwkopt = 1; } else { /* ==== Workspace query call to DGEHRD ==== */ i__1 = jw - 1; dgehrd_(&jw, &c__1, &i__1, &t[t_offset], ldt, &work[1], &work[1], & c_n1, &info); lwk1 = (integer) work[1]; /* ==== Workspace query call to DORGHR ==== */ i__1 = jw - 1; dorghr_(&jw, &c__1, &i__1, &t[t_offset], ldt, &work[1], &work[1], & c_n1, &info); lwk2 = (integer) work[1]; /* ==== Workspace query call to DLAQR4 ==== */ dlaqr4_(&c_true, &c_true, &jw, &c__1, &jw, &t[t_offset], ldt, &sr[1], &si[1], &c__1, &jw, &v[v_offset], ldv, &work[1], &c_n1, & infqr); lwk3 = (integer) work[1]; /* ==== Optimal workspace ==== Computing MAX */ i__1 = jw + max(lwk1,lwk2); lwkopt = max(i__1,lwk3); } /* ==== Quick return in case of workspace query. ==== */ if (*lwork == -1) { work[1] = (doublereal) lwkopt; return 0; } /* ==== Nothing to do ... ... for an empty active block ... ==== */ *ns = 0; *nd = 0; if (*ktop > *kbot) { return 0; } /* ... nor for an empty deflation window. ==== */ if (*nw < 1) { return 0; } /* ==== Machine constants ==== */ safmin = SAFEMINIMUM; safmax = 1. / safmin; dlabad_(&safmin, &safmax); ulp = PRECISION; smlnum = safmin * ((doublereal) (*n) / ulp); /* ==== Setup deflation window ==== Computing MIN */ i__1 = *nw, i__2 = *kbot - *ktop + 1; jw = min(i__1,i__2); kwtop = *kbot - jw + 1; if (kwtop == *ktop) { s = 0.; } else { s = h__[kwtop + (kwtop - 1) * h_dim1]; } if (*kbot == kwtop) { /* ==== 1-by-1 deflation window: not much to do ==== */ sr[kwtop] = h__[kwtop + kwtop * h_dim1]; si[kwtop] = 0.; *ns = 1; *nd = 0; /* Computing MAX */ d__2 = smlnum, d__3 = ulp * (d__1 = h__[kwtop + kwtop * h_dim1], abs( d__1)); if (abs(s) <= max(d__2,d__3)) { *ns = 0; *nd = 1; if (kwtop > *ktop) { h__[kwtop + (kwtop - 1) * h_dim1] = 0.; } } return 0; } /* ==== Convert to spike-triangular form. (In case of a . rare QR failure, this routine continues to do . aggressive early deflation using that part of . the deflation window that converged using INFQR . here and there to keep track.) 
==== */ dlacpy_("U", &jw, &jw, &h__[kwtop + kwtop * h_dim1], ldh, &t[t_offset], ldt); i__1 = jw - 1; i__2 = *ldh + 1; i__3 = *ldt + 1; dcopy_(&i__1, &h__[kwtop + 1 + kwtop * h_dim1], &i__2, &t[t_dim1 + 2], & i__3); dlaset_("A", &jw, &jw, &c_b29, &c_b15, &v[v_offset], ldv); nmin = ilaenv_(&c__12, "DLAQR3", "SV", &jw, &c__1, &jw, lwork, (ftnlen)6, (ftnlen)2); if (jw > nmin) { dlaqr4_(&c_true, &c_true, &jw, &c__1, &jw, &t[t_offset], ldt, &sr[ kwtop], &si[kwtop], &c__1, &jw, &v[v_offset], ldv, &work[1], lwork, &infqr); } else { dlahqr_(&c_true, &c_true, &jw, &c__1, &jw, &t[t_offset], ldt, &sr[ kwtop], &si[kwtop], &c__1, &jw, &v[v_offset], ldv, &infqr); } /* ==== DTREXC needs a clean margin near the diagonal ==== */ i__1 = jw - 3; for (j = 1; j <= i__1; ++j) { t[j + 2 + j * t_dim1] = 0.; t[j + 3 + j * t_dim1] = 0.; /* L10: */ } if (jw > 2) { t[jw + (jw - 2) * t_dim1] = 0.; } /* ==== Deflation detection loop ==== */ *ns = jw; ilst = infqr + 1; L20: if (ilst <= *ns) { if (*ns == 1) { bulge = FALSE_; } else { bulge = t[*ns + (*ns - 1) * t_dim1] != 0.; } /* ==== Small spike tip test for deflation ==== */ if (! bulge) { /* ==== Real eigenvalue ==== */ foo = (d__1 = t[*ns + *ns * t_dim1], abs(d__1)); if (foo == 0.) { foo = abs(s); } /* Computing MAX */ d__2 = smlnum, d__3 = ulp * foo; if ((d__1 = s * v[*ns * v_dim1 + 1], abs(d__1)) <= max(d__2,d__3)) { /* ==== Deflatable ==== */ --(*ns); } else { /* ==== Undeflatable. Move it up out of the way. . (DTREXC can not fail in this case.) ==== */ ifst = *ns; dtrexc_("V", &jw, &t[t_offset], ldt, &v[v_offset], ldv, &ifst, &ilst, &work[1], &info); ++ilst; } } else { /* ==== Complex conjugate pair ==== */ foo = (d__3 = t[*ns + *ns * t_dim1], abs(d__3)) + sqrt((d__1 = t[* ns + (*ns - 1) * t_dim1], abs(d__1))) * sqrt((d__2 = t[* ns - 1 + *ns * t_dim1], abs(d__2))); if (foo == 0.) { foo = abs(s); } /* Computing MAX */ d__3 = (d__1 = s * v[*ns * v_dim1 + 1], abs(d__1)), d__4 = (d__2 = s * v[(*ns - 1) * v_dim1 + 1], abs(d__2)); /* Computing MAX */ d__5 = smlnum, d__6 = ulp * foo; if (max(d__3,d__4) <= max(d__5,d__6)) { /* ==== Deflatable ==== */ *ns += -2; } else { /* ==== Undflatable. Move them up out of the way. . Fortunately, DTREXC does the right thing with . ILST in case of a rare exchange failure. ==== */ ifst = *ns; dtrexc_("V", &jw, &t[t_offset], ldt, &v[v_offset], ldv, &ifst, &ilst, &work[1], &info); ilst += 2; } } /* ==== End deflation detection loop ==== */ goto L20; } /* ==== Return to Hessenberg form ==== */ if (*ns == 0) { s = 0.; } if (*ns < jw) { /* ==== sorting diagonal blocks of T improves accuracy for . graded matrices. Bubble sort deals well with . exchange failures. ==== */ sorted = FALSE_; i__ = *ns + 1; L30: if (sorted) { goto L50; } sorted = TRUE_; kend = i__ - 1; i__ = infqr + 1; if (i__ == *ns) { k = i__ + 1; } else if (t[i__ + 1 + i__ * t_dim1] == 0.) { k = i__ + 1; } else { k = i__ + 2; } L40: if (k <= kend) { if (k == i__ + 1) { evi = (d__1 = t[i__ + i__ * t_dim1], abs(d__1)); } else { evi = (d__3 = t[i__ + i__ * t_dim1], abs(d__3)) + sqrt((d__1 = t[i__ + 1 + i__ * t_dim1], abs(d__1))) * sqrt((d__2 = t[i__ + (i__ + 1) * t_dim1], abs(d__2))); } if (k == kend) { evk = (d__1 = t[k + k * t_dim1], abs(d__1)); } else if (t[k + 1 + k * t_dim1] == 0.) 
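    /* T(k+1,k) == 0: the diagonal block starting at k is 1-by-1, so its
       magnitude is just |T(k,k)|; otherwise the block is 2-by-2 and the
       geometric mean of the off-diagonal pair is added in the branch
       below. */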
{ evk = (d__1 = t[k + k * t_dim1], abs(d__1)); } else { evk = (d__3 = t[k + k * t_dim1], abs(d__3)) + sqrt((d__1 = t[ k + 1 + k * t_dim1], abs(d__1))) * sqrt((d__2 = t[k + (k + 1) * t_dim1], abs(d__2))); } if (evi >= evk) { i__ = k; } else { sorted = FALSE_; ifst = i__; ilst = k; dtrexc_("V", &jw, &t[t_offset], ldt, &v[v_offset], ldv, &ifst, &ilst, &work[1], &info); if (info == 0) { i__ = ilst; } else { i__ = k; } } if (i__ == kend) { k = i__ + 1; } else if (t[i__ + 1 + i__ * t_dim1] == 0.) { k = i__ + 1; } else { k = i__ + 2; } goto L40; } goto L30; L50: ; } /* ==== Restore shift/eigenvalue array from T ==== */ i__ = jw; L60: if (i__ >= infqr + 1) { if (i__ == infqr + 1) { sr[kwtop + i__ - 1] = t[i__ + i__ * t_dim1]; si[kwtop + i__ - 1] = 0.; --i__; } else if (t[i__ + (i__ - 1) * t_dim1] == 0.) { sr[kwtop + i__ - 1] = t[i__ + i__ * t_dim1]; si[kwtop + i__ - 1] = 0.; --i__; } else { aa = t[i__ - 1 + (i__ - 1) * t_dim1]; cc = t[i__ + (i__ - 1) * t_dim1]; bb = t[i__ - 1 + i__ * t_dim1]; dd = t[i__ + i__ * t_dim1]; dlanv2_(&aa, &bb, &cc, &dd, &sr[kwtop + i__ - 2], &si[kwtop + i__ - 2], &sr[kwtop + i__ - 1], &si[kwtop + i__ - 1], &cs, & sn); i__ += -2; } goto L60; } if (*ns < jw || s == 0.) { if (*ns > 1 && s != 0.) { /* ==== Reflect spike back into lower triangle ==== */ dcopy_(ns, &v[v_offset], ldv, &work[1], &c__1); beta = work[1]; dlarfg_(ns, &beta, &work[2], &c__1, &tau); work[1] = 1.; i__1 = jw - 2; i__2 = jw - 2; dlaset_("L", &i__1, &i__2, &c_b29, &c_b29, &t[t_dim1 + 3], ldt); dlarf_("L", ns, &jw, &work[1], &c__1, &tau, &t[t_offset], ldt, & work[jw + 1]); dlarf_("R", ns, ns, &work[1], &c__1, &tau, &t[t_offset], ldt, & work[jw + 1]); dlarf_("R", &jw, ns, &work[1], &c__1, &tau, &v[v_offset], ldv, & work[jw + 1]); i__1 = *lwork - jw; dgehrd_(&jw, &c__1, ns, &t[t_offset], ldt, &work[1], &work[jw + 1] , &i__1, &info); } /* ==== Copy updated reduced window into place ==== */ if (kwtop > 1) { h__[kwtop + (kwtop - 1) * h_dim1] = s * v[v_dim1 + 1]; } dlacpy_("U", &jw, &jw, &t[t_offset], ldt, &h__[kwtop + kwtop * h_dim1] , ldh); i__1 = jw - 1; i__2 = *ldt + 1; i__3 = *ldh + 1; dcopy_(&i__1, &t[t_dim1 + 2], &i__2, &h__[kwtop + 1 + kwtop * h_dim1], &i__3); /* ==== Accumulate orthogonal matrix in order update . H and Z, if requested. (A modified version . of DORGHR that accumulates block Householder . transformations into V directly might be . marginally more efficient than the following.) ==== */ if (*ns > 1 && s != 0.) { i__1 = *lwork - jw; dorghr_(&jw, &c__1, ns, &t[t_offset], ldt, &work[1], &work[jw + 1] , &i__1, &info); dgemm_("N", "N", &jw, ns, ns, &c_b15, &v[v_offset], ldv, &t[ t_offset], ldt, &c_b29, &wv[wv_offset], ldwv); dlacpy_("A", &jw, ns, &wv[wv_offset], ldwv, &v[v_offset], ldv); } /* ==== Update vertical slab in H ==== */ if (*wantt) { ltop = 1; } else { ltop = *ktop; } i__1 = kwtop - 1; i__2 = *nv; for (krow = ltop; i__2 < 0 ? krow >= i__1 : krow <= i__1; krow += i__2) { /* Computing MIN */ i__3 = *nv, i__4 = kwtop - krow; kln = min(i__3,i__4); dgemm_("N", "N", &kln, &jw, &jw, &c_b15, &h__[krow + kwtop * h_dim1], ldh, &v[v_offset], ldv, &c_b29, &wv[wv_offset], ldwv); dlacpy_("A", &kln, &jw, &wv[wv_offset], ldwv, &h__[krow + kwtop * h_dim1], ldh); /* L70: */ } /* ==== Update horizontal slab in H ==== */ if (*wantt) { i__2 = *n; i__1 = *nh; for (kcol = *kbot + 1; i__1 < 0 ? 
kcol >= i__2 : kcol <= i__2; kcol += i__1) { /* Computing MIN */ i__3 = *nh, i__4 = *n - kcol + 1; kln = min(i__3,i__4); dgemm_("C", "N", &jw, &kln, &jw, &c_b15, &v[v_offset], ldv, & h__[kwtop + kcol * h_dim1], ldh, &c_b29, &t[t_offset], ldt); dlacpy_("A", &jw, &kln, &t[t_offset], ldt, &h__[kwtop + kcol * h_dim1], ldh); /* L80: */ } } /* ==== Update vertical slab in Z ==== */ if (*wantz) { i__1 = *ihiz; i__2 = *nv; for (krow = *iloz; i__2 < 0 ? krow >= i__1 : krow <= i__1; krow += i__2) { /* Computing MIN */ i__3 = *nv, i__4 = *ihiz - krow + 1; kln = min(i__3,i__4); dgemm_("N", "N", &kln, &jw, &jw, &c_b15, &z__[krow + kwtop * z_dim1], ldz, &v[v_offset], ldv, &c_b29, &wv[ wv_offset], ldwv); dlacpy_("A", &kln, &jw, &wv[wv_offset], ldwv, &z__[krow + kwtop * z_dim1], ldz); /* L90: */ } } } /* ==== Return the number of deflations ... ==== */ *nd = jw - *ns; /* ==== ... and the number of shifts. (Subtracting . INFQR from the spike length takes care . of the case of a rare QR failure while . calculating eigenvalues of the deflation . window.) ==== */ *ns -= infqr; /* ==== Return optimal workspace. ==== */ work[1] = (doublereal) lwkopt; /* ==== End of DLAQR3 ==== */ return 0; } /* dlaqr3_ */ /* Subroutine */ int dlaqr4_(logical *wantt, logical *wantz, integer *n, integer *ilo, integer *ihi, doublereal *h__, integer *ldh, doublereal *wr, doublereal *wi, integer *iloz, integer *ihiz, doublereal *z__, integer *ldz, doublereal *work, integer *lwork, integer *info) { /* System generated locals */ integer h_dim1, h_offset, z_dim1, z_offset, i__1, i__2, i__3, i__4, i__5; doublereal d__1, d__2, d__3, d__4; /* Local variables */ static integer ndfl, kbot, nmin; static doublereal swap; static integer ktop; static doublereal zdum[1] /* was [1][1] */; static integer kacc22, i__, k; static logical nwinc; static integer itmax, nsmax, nwmax, kwtop; extern /* Subroutine */ int dlaqr2_(logical *, logical *, integer *, integer *, integer *, integer *, doublereal *, integer *, integer *, integer *, doublereal *, integer *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *, integer *, doublereal *, integer *, integer *, doublereal *, integer *, doublereal *, integer *), dlanv2_(doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *), dlaqr5_( logical *, logical *, integer *, integer *, integer *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *, integer *, doublereal *, integer *, integer *, doublereal *, integer *); static doublereal aa, bb, cc, dd; static integer ld; static doublereal cs; static integer nh, nibble, it, ks, kt; static doublereal sn; static integer ku, kv, ls, ns; static doublereal ss; static integer nw; extern /* Subroutine */ int dlahqr_(logical *, logical *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, integer *, doublereal *, integer *, integer *), dlacpy_(char *, integer *, integer *, doublereal *, integer *, doublereal *, integer *); extern integer ilaenv_(integer *, char *, char *, integer *, integer *, integer *, integer *, ftnlen, ftnlen); static char jbcmpz[2]; static logical sorted; static integer lwkopt, inf, kdu, nho, nve, kwh, nsr, nwr, kwv; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. 
November 2006 This subroutine implements one level of recursion for DLAQR0. It is a complete implementation of the small bulge multi-shift QR algorithm. It may be called by DLAQR0 and, for large enough deflation window size, it may be called by DLAQR3. This subroutine is identical to DLAQR0 except that it calls DLAQR2 instead of DLAQR3. Purpose ======= DLAQR4 computes the eigenvalues of a Hessenberg matrix H and, optionally, the matrices T and Z from the Schur decomposition H = Z T Z**T, where T is an upper quasi-triangular matrix (the Schur form), and Z is the orthogonal matrix of Schur vectors. Optionally Z may be postmultiplied into an input orthogonal matrix Q so that this routine can give the Schur factorization of a matrix A which has been reduced to the Hessenberg form H by the orthogonal matrix Q: A = Q*H*Q**T = (QZ)*T*(QZ)**T. Arguments ========= WANTT (input) LOGICAL = .TRUE. : the full Schur form T is required; = .FALSE.: only eigenvalues are required. WANTZ (input) LOGICAL = .TRUE. : the matrix of Schur vectors Z is required; = .FALSE.: Schur vectors are not required. N (input) INTEGER The order of the matrix H. N .GE. 0. ILO (input) INTEGER IHI (input) INTEGER It is assumed that H is already upper triangular in rows and columns 1:ILO-1 and IHI+1:N and, if ILO.GT.1, H(ILO,ILO-1) is zero. ILO and IHI are normally set by a previous call to DGEBAL, and then passed to DGEHRD when the matrix output by DGEBAL is reduced to Hessenberg form. Otherwise, ILO and IHI should be set to 1 and N, respectively. If N.GT.0, then 1.LE.ILO.LE.IHI.LE.N. If N = 0, then ILO = 1 and IHI = 0. H (input/output) DOUBLE PRECISION array, dimension (LDH,N) On entry, the upper Hessenberg matrix H. On exit, if INFO = 0 and WANTT is .TRUE., then H contains the upper quasi-triangular matrix T from the Schur decomposition (the Schur form); 2-by-2 diagonal blocks (corresponding to complex conjugate pairs of eigenvalues) are returned in standard form, with H(i,i) = H(i+1,i+1) and H(i+1,i)*H(i,i+1).LT.0. If INFO = 0 and WANTT is .FALSE., then the contents of H are unspecified on exit. (The output value of H when INFO.GT.0 is given under the description of INFO below.) This subroutine may explicitly set H(i,j) = 0 for i.GT.j and j = 1, 2, ... ILO-1 or j = IHI+1, IHI+2, ... N. LDH (input) INTEGER The leading dimension of the array H. LDH .GE. max(1,N). WR (output) DOUBLE PRECISION array, dimension (IHI) WI (output) DOUBLE PRECISION array, dimension (IHI) The real and imaginary parts, respectively, of the computed eigenvalues of H(ILO:IHI,ILO:IHI) are stored WR(ILO:IHI) and WI(ILO:IHI). If two eigenvalues are computed as a complex conjugate pair, they are stored in consecutive elements of WR and WI, say the i-th and (i+1)th, with WI(i) .GT. 0 and WI(i+1) .LT. 0. If WANTT is .TRUE., then the eigenvalues are stored in the same order as on the diagonal of the Schur form returned in H, with WR(i) = H(i,i) and, if H(i:i+1,i:i+1) is a 2-by-2 diagonal block, WI(i) = sqrt(-H(i+1,i)*H(i,i+1)) and WI(i+1) = -WI(i). ILOZ (input) INTEGER IHIZ (input) INTEGER Specify the rows of Z to which transformations must be applied if WANTZ is .TRUE.. 1 .LE. ILOZ .LE. ILO; IHI .LE. IHIZ .LE. N. Z (input/output) DOUBLE PRECISION array, dimension (LDZ,IHI) If WANTZ is .FALSE., then Z is not referenced. If WANTZ is .TRUE., then Z(ILO:IHI,ILOZ:IHIZ) is replaced by Z(ILO:IHI,ILOZ:IHIZ)*U where U is the orthogonal Schur factor of H(ILO:IHI,ILO:IHI). (The output value of Z when INFO.GT.0 is given under the description of INFO below.) 
LDZ (input) INTEGER The leading dimension of the array Z. if WANTZ is .TRUE. then LDZ.GE.MAX(1,IHIZ). Otherwize, LDZ.GE.1. WORK (workspace/output) DOUBLE PRECISION array, dimension LWORK On exit, if LWORK = -1, WORK(1) returns an estimate of the optimal value for LWORK. LWORK (input) INTEGER The dimension of the array WORK. LWORK .GE. max(1,N) is sufficient, but LWORK typically as large as 6*N may be required for optimal performance. A workspace query to determine the optimal workspace size is recommended. If LWORK = -1, then DLAQR4 does a workspace query. In this case, DLAQR4 checks the input parameters and estimates the optimal workspace size for the given values of N, ILO and IHI. The estimate is returned in WORK(1). No error message related to LWORK is issued by XERBLA. Neither H nor Z are accessed. INFO (output) INTEGER = 0: successful exit .GT. 0: if INFO = i, DLAQR4 failed to compute all of the eigenvalues. Elements 1:ilo-1 and i+1:n of WR and WI contain those eigenvalues which have been successfully computed. (Failures are rare.) If INFO .GT. 0 and WANT is .FALSE., then on exit, the remaining unconverged eigenvalues are the eigen- values of the upper Hessenberg matrix rows and columns ILO through INFO of the final, output value of H. If INFO .GT. 0 and WANTT is .TRUE., then on exit (*) (initial value of H)*U = U*(final value of H) where U is an orthogonal matrix. The final value of H is upper Hessenberg and quasi-triangular in rows and columns INFO+1 through IHI. If INFO .GT. 0 and WANTZ is .TRUE., then on exit (final value of Z(ILO:IHI,ILOZ:IHIZ) = (initial value of Z(ILO:IHI,ILOZ:IHIZ)*U where U is the orthogonal matrix in (*) (regard- less of the value of WANTT.) If INFO .GT. 0 and WANTZ is .FALSE., then Z is not accessed. ================================================================ Based on contributions by Karen Braman and Ralph Byers, Department of Mathematics, University of Kansas, USA ================================================================ References: K. Braman, R. Byers and R. Mathias, The Multi-Shift QR Algorithm Part I: Maintaining Well Focused Shifts, and Level 3 Performance, SIAM Journal of Matrix Analysis, volume 23, pages 929--947, 2002. K. Braman, R. Byers and R. Mathias, The Multi-Shift QR Algorithm Part II: Aggressive Early Deflation, SIAM Journal of Matrix Analysis, volume 23, pages 948--973, 2002. ================================================================ ==== Matrices of order NTINY or smaller must be processed by . DLAHQR because of insufficient subdiagonal scratch space. . (This is a hard limit.) ==== ==== Exceptional deflation windows: try to cure rare . slow convergence by increasing the size of the . deflation window after KEXNW iterations. ===== ==== Exceptional shifts: try to cure rare slow convergence . with ad-hoc exceptional shifts every KEXSH iterations. . The constants WILK1 and WILK2 are used to form the . exceptional shifts. ==== */ /* Parameter adjustments */ h_dim1 = *ldh; h_offset = 1 + h_dim1 * 1; h__ -= h_offset; --wr; --wi; z_dim1 = *ldz; z_offset = 1 + z_dim1 * 1; z__ -= z_offset; --work; /* Function Body */ *info = 0; /* ==== Quick return for N = 0: nothing to do. ==== */ if (*n == 0) { work[1] = 1.; return 0; } /* ==== Set up job flags for ILAENV. ==== */ if (*wantt) { *(unsigned char *)jbcmpz = 'S'; } else { *(unsigned char *)jbcmpz = 'E'; } if (*wantz) { *(unsigned char *)&jbcmpz[1] = 'V'; } else { *(unsigned char *)&jbcmpz[1] = 'N'; } /* ==== Tiny matrices must use DLAHQR. 
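.    (Informal aside: NTINY = 11 here, matching the N .LE. 11 test
.    below; for such small orders the double-shift DLAHQR is used
.    directly because the multishift machinery would lack subdiagonal
.    scratch space.  A minimal caller-side sketch of the LWORK = -1
.    convention documented above, given only as an illustration:
.        LWORK = -1
.        CALL DLAQR4( WANTT, WANTZ, N, ILO, IHI, H, LDH, WR, WI,
.                     ILOZ, IHIZ, Z, LDZ, WORK, LWORK, INFO )
.        LWORK = INT( WORK( 1 ) ), then allocate WORK and call again.)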
==== */ if (*n <= 11) { /* ==== Estimate optimal workspace. ==== */ lwkopt = 1; if (*lwork != -1) { dlahqr_(wantt, wantz, n, ilo, ihi, &h__[h_offset], ldh, &wr[1], & wi[1], iloz, ihiz, &z__[z_offset], ldz, info); } } else { /* ==== Use small bulge multi-shift QR with aggressive early . deflation on larger-than-tiny matrices. ==== ==== Hope for the best. ==== */ *info = 0; /* ==== NWR = recommended deflation window size. At this . point, N .GT. NTINY = 11, so there is enough . subdiagonal workspace for NWR.GE.2 as required. . (In fact, there is enough subdiagonal space for . NWR.GE.3.) ==== */ nwr = ilaenv_(&c__13, "DLAQR4", jbcmpz, n, ilo, ihi, lwork, (ftnlen)6, (ftnlen)2); nwr = max(2,nwr); /* Computing MIN */ i__1 = *ihi - *ilo + 1, i__2 = (*n - 1) / 3, i__1 = min(i__1,i__2); nwr = min(i__1,nwr); nw = nwr; /* ==== NSR = recommended number of simultaneous shifts. . At this point N .GT. NTINY = 11, so there is at . enough subdiagonal workspace for NSR to be even . and greater than or equal to two as required. ==== */ nsr = ilaenv_(&c__15, "DLAQR4", jbcmpz, n, ilo, ihi, lwork, (ftnlen)6, (ftnlen)2); /* Computing MIN */ i__1 = nsr, i__2 = (*n + 6) / 9, i__1 = min(i__1,i__2), i__2 = *ihi - *ilo; nsr = min(i__1,i__2); /* Computing MAX */ i__1 = 2, i__2 = nsr - nsr % 2; nsr = max(i__1,i__2); /* ==== Estimate optimal workspace ==== ==== Workspace query call to DLAQR2 ==== */ i__1 = nwr + 1; dlaqr2_(wantt, wantz, n, ilo, ihi, &i__1, &h__[h_offset], ldh, iloz, ihiz, &z__[z_offset], ldz, &ls, &ld, &wr[1], &wi[1], &h__[ h_offset], ldh, n, &h__[h_offset], ldh, n, &h__[h_offset], ldh, &work[1], &c_n1); /* ==== Optimal workspace = MAX(DLAQR5, DLAQR2) ==== Computing MAX */ i__1 = nsr * 3 / 2, i__2 = (integer) work[1]; lwkopt = max(i__1,i__2); /* ==== Quick return in case of workspace query. ==== */ if (*lwork == -1) { work[1] = (doublereal) lwkopt; return 0; } /* ==== DLAHQR/DLAQR0 crossover point ==== */ nmin = ilaenv_(&c__12, "DLAQR4", jbcmpz, n, ilo, ihi, lwork, (ftnlen) 6, (ftnlen)2); nmin = max(11,nmin); /* ==== Nibble crossover point ==== */ nibble = ilaenv_(&c__14, "DLAQR4", jbcmpz, n, ilo, ihi, lwork, ( ftnlen)6, (ftnlen)2); nibble = max(0,nibble); /* ==== Accumulate reflections during ttswp? Use block . 2-by-2 structure during matrix-matrix multiply? ==== */ kacc22 = ilaenv_(&c__16, "DLAQR4", jbcmpz, n, ilo, ihi, lwork, ( ftnlen)6, (ftnlen)2); kacc22 = max(0,kacc22); kacc22 = min(2,kacc22); /* ==== NWMAX = the largest possible deflation window for . which there is sufficient workspace. ==== Computing MIN */ i__1 = (*n - 1) / 3, i__2 = *lwork / 2; nwmax = min(i__1,i__2); /* ==== NSMAX = the Largest number of simultaneous shifts . for which there is sufficient workspace. ==== Computing MIN */ i__1 = (*n + 6) / 9, i__2 = (*lwork << 1) / 3; nsmax = min(i__1,i__2); nsmax -= nsmax % 2; /* ==== NDFL: an iteration count restarted at deflation. ==== */ ndfl = 1; /* ==== ITMAX = iteration limit ==== Computing MAX */ i__1 = 10, i__2 = *ihi - *ilo + 1; itmax = 30 * max(i__1,i__2); /* ==== Last row and column in the active block ==== */ kbot = *ihi; /* ==== Main Loop ==== */ i__1 = itmax; for (it = 1; it <= i__1; ++it) { /* ==== Done when KBOT falls below ILO ==== */ if (kbot < *ilo) { goto L90; } /* ==== Locate active block ==== */ i__2 = *ilo + 1; for (k = kbot; k >= i__2; --k) { if (h__[k + (k - 1) * h_dim1] == 0.) { goto L20; } /* L10: */ } k = *ilo; L20: ktop = k; /* ==== Select deflation window size ==== */ nh = kbot - ktop + 1; if (ndfl < 5 || nh < nw) { /* ==== Typical deflation window. 
If possible and . advisable, nibble the entire active block. . If not, use size NWR or NWR+1 depending upon . which has the smaller corresponding subdiagonal . entry (a heuristic). ==== */ nwinc = TRUE_; if (nh <= min(nmin,nwmax)) { nw = nh; } else { /* Computing MIN */ i__2 = min(nwr,nh); nw = min(i__2,nwmax); if (nw < nwmax) { if (nw >= nh - 1) { nw = nh; } else { kwtop = kbot - nw + 1; if ((d__1 = h__[kwtop + (kwtop - 1) * h_dim1], abs(d__1)) > (d__2 = h__[kwtop - 1 + ( kwtop - 2) * h_dim1], abs(d__2))) { ++nw; } } } } } else { /* ==== Exceptional deflation window. If there have . been no deflations in KEXNW or more iterations, . then vary the deflation window size. At first, . because, larger windows are, in general, more . powerful than smaller ones, rapidly increase the . window up to the maximum reasonable and possible. . Then maybe try a slightly smaller window. ==== */ if (nwinc && nw < min(nwmax,nh)) { /* Computing MIN */ i__2 = min(nwmax,nh), i__3 = nw << 1; nw = min(i__2,i__3); } else { nwinc = FALSE_; if (nw == nh && nh > 2) { nw = nh - 1; } } } /* ==== Aggressive early deflation: . split workspace under the subdiagonal into . - an nw-by-nw work array V in the lower . left-hand-corner, . - an NW-by-at-least-NW-but-more-is-better . (NW-by-NHO) horizontal work array along . the bottom edge, . - an at-least-NW-but-more-is-better (NHV-by-NW) . vertical work array along the left-hand-edge. . ==== */ kv = *n - nw + 1; kt = nw + 1; nho = *n - nw - 1 - kt + 1; kwv = nw + 2; nve = *n - nw - kwv + 1; /* ==== Aggressive early deflation ==== */ dlaqr2_(wantt, wantz, n, &ktop, &kbot, &nw, &h__[h_offset], ldh, iloz, ihiz, &z__[z_offset], ldz, &ls, &ld, &wr[1], &wi[1], &h__[kv + h_dim1], ldh, &nho, &h__[kv + kt * h_dim1], ldh, &nve, &h__[kwv + h_dim1], ldh, &work[1], lwork); /* ==== Adjust KBOT accounting for new deflations. ==== */ kbot -= ld; /* ==== KS points to the shifts. ==== */ ks = kbot - ls + 1; /* ==== Skip an expensive QR sweep if there is a (partly . heuristic) reason to expect that many eigenvalues . will deflate without it. Here, the QR sweep is . skipped if many eigenvalues have just been deflated . or if the remaining active block is small. */ if (ld == 0 || ld * 100 <= nw * nibble && kbot - ktop + 1 > min( nmin,nwmax)) { /* ==== NS = nominal number of simultaneous shifts. . This may be lowered (slightly) if DLAQR2 . did not provide that many shifts. ==== Computing MIN Computing MAX */ i__4 = 2, i__5 = kbot - ktop; i__2 = min(nsmax,nsr), i__3 = max(i__4,i__5); ns = min(i__2,i__3); ns -= ns % 2; /* ==== If there have been no deflations . in a multiple of KEXSH iterations, . then try exceptional shifts. . Otherwise use shifts provided by . DLAQR2 above or from the eigenvalues . of a trailing principal submatrix. ==== */ if (ndfl % 6 == 0) { ks = kbot - ns + 1; /* Computing MAX */ i__3 = ks + 1, i__4 = ktop + 2; i__2 = max(i__3,i__4); for (i__ = kbot; i__ >= i__2; i__ += -2) { ss = (d__1 = h__[i__ + (i__ - 1) * h_dim1], abs(d__1)) + (d__2 = h__[i__ - 1 + (i__ - 2) * h_dim1], abs(d__2)); aa = ss * .75 + h__[i__ + i__ * h_dim1]; bb = ss; cc = ss * -.4375; dd = aa; dlanv2_(&aa, &bb, &cc, &dd, &wr[i__ - 1], &wi[i__ - 1] , &wr[i__], &wi[i__], &cs, &sn); /* L30: */ } if (ks == ktop) { wr[ks + 1] = h__[ks + 1 + (ks + 1) * h_dim1]; wi[ks + 1] = 0.; wr[ks] = wr[ks + 1]; wi[ks] = wi[ks + 1]; } } else { /* ==== Got NS/2 or fewer shifts? Use DLAHQR . on a trailing principal submatrix to . get more. (Since NS.LE.NSMAX.LE.(N+6)/9, . there is enough space below the subdiagonal . 
to fit an NS-by-NS scratch array.) ==== */ if (kbot - ks + 1 <= ns / 2) { ks = kbot - ns + 1; kt = *n - ns + 1; dlacpy_("A", &ns, &ns, &h__[ks + ks * h_dim1], ldh, & h__[kt + h_dim1], ldh); dlahqr_(&c_false, &c_false, &ns, &c__1, &ns, &h__[kt + h_dim1], ldh, &wr[ks], &wi[ks], &c__1, & c__1, zdum, &c__1, &inf); ks += inf; /* ==== In case of a rare QR failure use . eigenvalues of the trailing 2-by-2 . principal submatrix. ==== */ if (ks >= kbot) { aa = h__[kbot - 1 + (kbot - 1) * h_dim1]; cc = h__[kbot + (kbot - 1) * h_dim1]; bb = h__[kbot - 1 + kbot * h_dim1]; dd = h__[kbot + kbot * h_dim1]; dlanv2_(&aa, &bb, &cc, &dd, &wr[kbot - 1], &wi[ kbot - 1], &wr[kbot], &wi[kbot], &cs, &sn) ; ks = kbot - 1; } } if (kbot - ks + 1 > ns) { /* ==== Sort the shifts (Helps a little) . Bubble sort keeps complex conjugate . pairs together. ==== */ sorted = FALSE_; i__2 = ks + 1; for (k = kbot; k >= i__2; --k) { if (sorted) { goto L60; } sorted = TRUE_; i__3 = k - 1; for (i__ = ks; i__ <= i__3; ++i__) { if ((d__1 = wr[i__], abs(d__1)) + (d__2 = wi[ i__], abs(d__2)) < (d__3 = wr[i__ + 1] , abs(d__3)) + (d__4 = wi[i__ + 1], abs(d__4))) { sorted = FALSE_; swap = wr[i__]; wr[i__] = wr[i__ + 1]; wr[i__ + 1] = swap; swap = wi[i__]; wi[i__] = wi[i__ + 1]; wi[i__ + 1] = swap; } /* L40: */ } /* L50: */ } L60: ; } /* ==== Shuffle shifts into pairs of real shifts . and pairs of complex conjugate shifts . assuming complex conjugate shifts are . already adjacent to one another. (Yes, . they are.) ==== */ i__2 = ks + 2; for (i__ = kbot; i__ >= i__2; i__ += -2) { if (wi[i__] != -wi[i__ - 1]) { swap = wr[i__]; wr[i__] = wr[i__ - 1]; wr[i__ - 1] = wr[i__ - 2]; wr[i__ - 2] = swap; swap = wi[i__]; wi[i__] = wi[i__ - 1]; wi[i__ - 1] = wi[i__ - 2]; wi[i__ - 2] = swap; } /* L70: */ } } /* ==== If there are only two shifts and both are . real, then use only one. ==== */ if (kbot - ks + 1 == 2) { if (wi[kbot] == 0.) { if ((d__1 = wr[kbot] - h__[kbot + kbot * h_dim1], abs( d__1)) < (d__2 = wr[kbot - 1] - h__[kbot + kbot * h_dim1], abs(d__2))) { wr[kbot - 1] = wr[kbot]; } else { wr[kbot] = wr[kbot - 1]; } } } /* ==== Use up to NS of the the smallest magnatiude . shifts. If there aren't NS shifts available, . then use them all, possibly dropping one to . make the number of shifts even. ==== Computing MIN */ i__2 = ns, i__3 = kbot - ks + 1; ns = min(i__2,i__3); ns -= ns % 2; ks = kbot - ns + 1; /* ==== Small-bulge multi-shift QR sweep: . split workspace under the subdiagonal into . - a KDU-by-KDU work array U in the lower . left-hand-corner, . - a KDU-by-at-least-KDU-but-more-is-better . (KDU-by-NHo) horizontal work array WH along . the bottom edge, . - and an at-least-KDU-but-more-is-better-by-KDU . (NVE-by-KDU) vertical work WV arrow along . the left-hand-edge. ==== */ kdu = ns * 3 - 3; ku = *n - kdu + 1; kwh = kdu + 1; nho = *n - kdu - 3 - (kdu + 1) + 1; kwv = kdu + 4; nve = *n - kdu - kwv + 1; /* ==== Small-bulge multi-shift QR sweep ==== */ dlaqr5_(wantt, wantz, &kacc22, n, &ktop, &kbot, &ns, &wr[ks], &wi[ks], &h__[h_offset], ldh, iloz, ihiz, &z__[ z_offset], ldz, &work[1], &c__3, &h__[ku + h_dim1], ldh, &nve, &h__[kwv + h_dim1], ldh, &nho, &h__[ku + kwh * h_dim1], ldh); } /* ==== Note progress (or the lack of it). ==== */ if (ld > 0) { ndfl = 1; } else { ++ndfl; } /* ==== End of main loop ==== L80: */ } /* ==== Iteration limit exceeded. Set INFO to show where . the problem occurred and exit. ==== */ *info = kbot; L90: ; } /* ==== Return the optimal value of LWORK. 
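.    (As computed at the top of the routine, LWKOPT is the larger of
.    3*NSR/2, the scratch used by the DLAQR5 sweep, and the size
.    reported by the DLAQR2 workspace query, so a caller honoring the
.    LWORK = -1 query gets room for both the deflation window and the
.    multi-shift sweep.)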
==== */ work[1] = (doublereal) lwkopt; /* ==== End of DLAQR4 ==== */ return 0; } /* dlaqr4_ */ /* Subroutine */ int dlaqr5_(logical *wantt, logical *wantz, integer *kacc22, integer *n, integer *ktop, integer *kbot, integer *nshfts, doublereal *sr, doublereal *si, doublereal *h__, integer *ldh, integer *iloz, integer *ihiz, doublereal *z__, integer *ldz, doublereal *v, integer * ldv, doublereal *u, integer *ldu, integer *nv, doublereal *wv, integer *ldwv, integer *nh, doublereal *wh, integer *ldwh) { /* System generated locals */ integer h_dim1, h_offset, u_dim1, u_offset, v_dim1, v_offset, wh_dim1, wh_offset, wv_dim1, wv_offset, z_dim1, z_offset, i__1, i__2, i__3, i__4, i__5, i__6, i__7; doublereal d__1, d__2, d__3, d__4; /* Local variables */ static doublereal beta; static logical blk22, bmp22; static integer mend, jcol, jlen, jbot, mbot; static doublereal swap; static integer jtop, jrow, mtop, i__, j, k, m; static doublereal alpha; static logical accum; extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *); static integer ndcol, incol, krcol, nbmps; extern /* Subroutine */ int dtrmm_(char *, char *, char *, char *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *); static integer i2, j2, i4, j4, k1; extern /* Subroutine */ int dlaqr1_(integer *, doublereal *, integer *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *), dlabad_(doublereal *, doublereal *); static doublereal h11, h12, h21, h22; static integer m22; extern /* Subroutine */ int dlarfg_(integer *, doublereal *, doublereal *, integer *, doublereal *); static integer ns, nu; static doublereal vt[3]; extern /* Subroutine */ int dlacpy_(char *, integer *, integer *, doublereal *, integer *, doublereal *, integer *); static doublereal safmin, safmax; extern /* Subroutine */ int dlaset_(char *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *); static doublereal refsum; static integer mstart; static doublereal smlnum, scl; static integer kdu, kms; static doublereal ulp; static integer knz, kzs; static doublereal tst1, tst2; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 This auxiliary subroutine called by DLAQR0 performs a single small-bulge multi-shift QR sweep. WANTT (input) logical scalar WANTT = .true. if the quasi-triangular Schur factor is being computed. WANTT is set to .false. otherwise. WANTZ (input) logical scalar WANTZ = .true. if the orthogonal Schur factor is being computed. WANTZ is set to .false. otherwise. KACC22 (input) integer with value 0, 1, or 2. Specifies the computation mode of far-from-diagonal orthogonal updates. = 0: DLAQR5 does not accumulate reflections and does not use matrix-matrix multiply to update far-from-diagonal matrix entries. = 1: DLAQR5 accumulates reflections and uses matrix-matrix multiply to update the far-from-diagonal matrix entries. = 2: DLAQR5 accumulates reflections, uses matrix-matrix multiply to update the far-from-diagonal matrix entries, and takes advantage of 2-by-2 block structure during matrix multiplies. N (input) integer scalar N is the order of the Hessenberg matrix H upon which this subroutine operates. KTOP (input) integer scalar KBOT (input) integer scalar These are the first and last rows and columns of an isolated diagonal block upon which the QR sweep is to be applied. 
It is assumed without a check that either KTOP = 1 or H(KTOP,KTOP-1) = 0 and either KBOT = N or H(KBOT+1,KBOT) = 0. NSHFTS (input) integer scalar NSHFTS gives the number of simultaneous shifts. NSHFTS must be positive and even. SR (input) DOUBLE PRECISION array of size (NSHFTS) SI (input) DOUBLE PRECISION array of size (NSHFTS) SR contains the real parts and SI contains the imaginary parts of the NSHFTS shifts of origin that define the multi-shift QR sweep. H (input/output) DOUBLE PRECISION array of size (LDH,N) On input H contains a Hessenberg matrix. On output a multi-shift QR sweep with shifts SR(J)+i*SI(J) is applied to the isolated diagonal block in rows and columns KTOP through KBOT. LDH (input) integer scalar LDH is the leading dimension of H just as declared in the calling procedure. LDH.GE.MAX(1,N). ILOZ (input) INTEGER IHIZ (input) INTEGER Specify the rows of Z to which transformations must be applied if WANTZ is .TRUE.. 1 .LE. ILOZ .LE. IHIZ .LE. N Z (input/output) DOUBLE PRECISION array of size (LDZ,IHI) If WANTZ = .TRUE., then the QR Sweep orthogonal similarity transformation is accumulated into Z(ILOZ:IHIZ,ILO:IHI) from the right. If WANTZ = .FALSE., then Z is unreferenced. LDZ (input) integer scalar LDA is the leading dimension of Z just as declared in the calling procedure. LDZ.GE.N. V (workspace) DOUBLE PRECISION array of size (LDV,NSHFTS/2) LDV (input) integer scalar LDV is the leading dimension of V as declared in the calling procedure. LDV.GE.3. U (workspace) DOUBLE PRECISION array of size (LDU,3*NSHFTS-3) LDU (input) integer scalar LDU is the leading dimension of U just as declared in the in the calling subroutine. LDU.GE.3*NSHFTS-3. NH (input) integer scalar NH is the number of columns in array WH available for workspace. NH.GE.1. WH (workspace) DOUBLE PRECISION array of size (LDWH,NH) LDWH (input) integer scalar Leading dimension of WH just as declared in the calling procedure. LDWH.GE.3*NSHFTS-3. NV (input) integer scalar NV is the number of rows in WV agailable for workspace. NV.GE.1. WV (workspace) DOUBLE PRECISION array of size (LDWV,3*NSHFTS-3) LDWV (input) integer scalar LDWV is the leading dimension of WV as declared in the in the calling subroutine. LDWV.GE.NV. ================================================================ Based on contributions by Karen Braman and Ralph Byers, Department of Mathematics, University of Kansas, USA ============================================================ Reference: K. Braman, R. Byers and R. Mathias, The Multi-Shift QR Algorithm Part I: Maintaining Well Focused Shifts, and Level 3 Performance, SIAM Journal of Matrix Analysis, volume 23, pages 929--947, 2002. ============================================================ ==== If there are no shifts, then there is nothing to do. ==== */ /* Parameter adjustments */ --sr; --si; h_dim1 = *ldh; h_offset = 1 + h_dim1 * 1; h__ -= h_offset; z_dim1 = *ldz; z_offset = 1 + z_dim1 * 1; z__ -= z_offset; v_dim1 = *ldv; v_offset = 1 + v_dim1 * 1; v -= v_offset; u_dim1 = *ldu; u_offset = 1 + u_dim1 * 1; u -= u_offset; wv_dim1 = *ldwv; wv_offset = 1 + wv_dim1 * 1; wv -= wv_offset; wh_dim1 = *ldwh; wh_offset = 1 + wh_dim1 * 1; wh -= wh_offset; /* Function Body */ if (*nshfts < 2) { return 0; } /* ==== If the active block is empty or 1-by-1, then there . is nothing to do. ==== */ if (*ktop >= *kbot) { return 0; } /* ==== Shuffle shifts into pairs of real shifts and pairs . of complex conjugate shifts assuming complex . conjugate shifts are already adjacent to one . another. 
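.    (Worked example, offered informally: if positions I, I+1, I+2
.    hold a real shift R followed by a complex pair C, CONJG(C), the
.    cyclic swap below reorders them as C, CONJG(C), R, so that after
.    the pass every odd position starts either two real shifts or a
.    conjugate pair.)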
==== */ i__1 = *nshfts - 2; for (i__ = 1; i__ <= i__1; i__ += 2) { if (si[i__] != -si[i__ + 1]) { swap = sr[i__]; sr[i__] = sr[i__ + 1]; sr[i__ + 1] = sr[i__ + 2]; sr[i__ + 2] = swap; swap = si[i__]; si[i__] = si[i__ + 1]; si[i__ + 1] = si[i__ + 2]; si[i__ + 2] = swap; } /* L10: */ } /* ==== NSHFTS is supposed to be even, but if is odd, . then simply reduce it by one. The shuffle above . ensures that the dropped shift is real and that . the remaining shifts are paired. ==== */ ns = *nshfts - *nshfts % 2; /* ==== Machine constants for deflation ==== */ safmin = SAFEMINIMUM; safmax = 1. / safmin; dlabad_(&safmin, &safmax); ulp = PRECISION; smlnum = safmin * ((doublereal) (*n) / ulp); /* ==== Use accumulated reflections to update far-from-diagonal . entries ? ==== */ accum = *kacc22 == 1 || *kacc22 == 2; /* ==== If so, exploit the 2-by-2 block structure? ==== */ blk22 = ns > 2 && *kacc22 == 2; /* ==== clear trash ==== */ if (*ktop + 2 <= *kbot) { h__[*ktop + 2 + *ktop * h_dim1] = 0.; } /* ==== NBMPS = number of 2-shift bulges in the chain ==== */ nbmps = ns / 2; /* ==== KDU = width of slab ==== */ kdu = nbmps * 6 - 3; /* ==== Create and chase chains of NBMPS bulges ==== */ i__1 = *kbot - 2; i__2 = nbmps * 3 - 2; for (incol = (1 - nbmps) * 3 + *ktop - 1; i__2 < 0 ? incol >= i__1 : incol <= i__1; incol += i__2) { ndcol = incol + kdu; if (accum) { dlaset_("ALL", &kdu, &kdu, &c_b29, &c_b15, &u[u_offset], ldu); } /* ==== Near-the-diagonal bulge chase. The following loop . performs the near-the-diagonal part of a small bulge . multi-shift QR sweep. Each 6*NBMPS-2 column diagonal . chunk extends from column INCOL to column NDCOL . (including both column INCOL and column NDCOL). The . following loop chases a 3*NBMPS column long chain of . NBMPS bulges 3*NBMPS-2 columns to the right. (INCOL . may be less than KTOP and and NDCOL may be greater than . KBOT indicating phantom columns from which to chase . bulges before they are actually introduced or to which . to chase bulges beyond column KBOT.) ==== Computing MIN */ i__4 = incol + nbmps * 3 - 3, i__5 = *kbot - 2; i__3 = min(i__4,i__5); for (krcol = incol; krcol <= i__3; ++krcol) { /* ==== Bulges number MTOP to MBOT are active double implicit . shift bulges. There may or may not also be small . 2-by-2 bulge, if there is room. The inactive bulges . (if any) must wait until the active bulges have moved . down the diagonal to make room. The phantom matrix . paradigm described above helps keep track. ==== Computing MAX */ i__4 = 1, i__5 = (*ktop - 1 - krcol + 2) / 3 + 1; mtop = max(i__4,i__5); /* Computing MIN */ i__4 = nbmps, i__5 = (*kbot - krcol) / 3; mbot = min(i__4,i__5); m22 = mbot + 1; bmp22 = mbot < nbmps && krcol + (m22 - 1) * 3 == *kbot - 2; /* ==== Generate reflections to chase the chain right . one column. (The minimum value of K is KTOP-1.) ==== */ i__4 = mbot; for (m = mtop; m <= i__4; ++m) { k = krcol + (m - 1) * 3; if (k == *ktop - 1) { dlaqr1_(&c__3, &h__[*ktop + *ktop * h_dim1], ldh, &sr[(m << 1) - 1], &si[(m << 1) - 1], &sr[m * 2], &si[m * 2], &v[m * v_dim1 + 1]); alpha = v[m * v_dim1 + 1]; dlarfg_(&c__3, &alpha, &v[m * v_dim1 + 2], &c__1, &v[m * v_dim1 + 1]); } else { beta = h__[k + 1 + k * h_dim1]; v[m * v_dim1 + 2] = h__[k + 2 + k * h_dim1]; v[m * v_dim1 + 3] = h__[k + 3 + k * h_dim1]; dlarfg_(&c__3, &beta, &v[m * v_dim1 + 2], &c__1, &v[m * v_dim1 + 1]); /* ==== A Bulge may collapse because of vigilant . deflation or destructive underflow. (The . initial bulge is always collapsed.) Use . the two-small-subdiagonals trick to try . 
to get it started again. If V(2,M).NE.0 and . V(3,M) = H(K+3,K+1) = H(K+3,K+2) = 0, then . this bulge is collapsing into a zero . subdiagonal. It will be restarted next . trip through the loop.) */ if (v[m * v_dim1 + 1] != 0. && (v[m * v_dim1 + 3] != 0. || h__[k + 3 + (k + 1) * h_dim1] == 0. && h__[k + 3 + (k + 2) * h_dim1] == 0.)) { /* ==== Typical case: not collapsed (yet). ==== */ h__[k + 1 + k * h_dim1] = beta; h__[k + 2 + k * h_dim1] = 0.; h__[k + 3 + k * h_dim1] = 0.; } else { /* ==== Atypical case: collapsed. Attempt to . reintroduce ignoring H(K+1,K). If the . fill resulting from the new reflector . is too large, then abandon it. . Otherwise, use the new one. ==== */ dlaqr1_(&c__3, &h__[k + 1 + (k + 1) * h_dim1], ldh, & sr[(m << 1) - 1], &si[(m << 1) - 1], &sr[m * 2], &si[m * 2], vt); scl = abs(vt[0]) + abs(vt[1]) + abs(vt[2]); if (scl != 0.) { vt[0] /= scl; vt[1] /= scl; vt[2] /= scl; } /* ==== The following is the traditional and . conservative two-small-subdiagonals . test. ==== . */ if ((d__1 = h__[k + 1 + k * h_dim1], abs(d__1)) * ( abs(vt[1]) + abs(vt[2])) > ulp * abs(vt[0]) * ((d__2 = h__[k + k * h_dim1], abs(d__2)) + ( d__3 = h__[k + 1 + (k + 1) * h_dim1], abs( d__3)) + (d__4 = h__[k + 2 + (k + 2) * h_dim1] , abs(d__4)))) { /* ==== Starting a new bulge here would . create non-negligible fill. If . the old reflector is diagonal (only . possible with underflows), then . change it to I. Otherwise, use . it with trepidation. ==== */ if (v[m * v_dim1 + 2] == 0. && v[m * v_dim1 + 3] == 0.) { v[m * v_dim1 + 1] = 0.; } else { h__[k + 1 + k * h_dim1] = beta; h__[k + 2 + k * h_dim1] = 0.; h__[k + 3 + k * h_dim1] = 0.; } } else { /* ==== Stating a new bulge here would . create only negligible fill. . Replace the old reflector with . the new one. ==== */ alpha = vt[0]; dlarfg_(&c__3, &alpha, &vt[1], &c__1, vt); refsum = h__[k + 1 + k * h_dim1] + h__[k + 2 + k * h_dim1] * vt[1] + h__[k + 3 + k * h_dim1] * vt[2]; h__[k + 1 + k * h_dim1] -= vt[0] * refsum; h__[k + 2 + k * h_dim1] = 0.; h__[k + 3 + k * h_dim1] = 0.; v[m * v_dim1 + 1] = vt[0]; v[m * v_dim1 + 2] = vt[1]; v[m * v_dim1 + 3] = vt[2]; } } } /* L20: */ } /* ==== Generate a 2-by-2 reflection, if needed. ==== */ k = krcol + (m22 - 1) * 3; if (bmp22) { if (k == *ktop - 1) { dlaqr1_(&c__2, &h__[k + 1 + (k + 1) * h_dim1], ldh, &sr[( m22 << 1) - 1], &si[(m22 << 1) - 1], &sr[m22 * 2], &si[m22 * 2], &v[m22 * v_dim1 + 1]); beta = v[m22 * v_dim1 + 1]; dlarfg_(&c__2, &beta, &v[m22 * v_dim1 + 2], &c__1, &v[m22 * v_dim1 + 1]); } else { beta = h__[k + 1 + k * h_dim1]; v[m22 * v_dim1 + 2] = h__[k + 2 + k * h_dim1]; dlarfg_(&c__2, &beta, &v[m22 * v_dim1 + 2], &c__1, &v[m22 * v_dim1 + 1]); h__[k + 1 + k * h_dim1] = beta; h__[k + 2 + k * h_dim1] = 0.; } } else { /* ==== Initialize V(1,M22) here to avoid possible undefined . variable problems later. 
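.    (Setting V(1,M22) = 0 marks the 2-by-2 reflection as absent: the
.    updates below either test V(1,M22).NE.ZERO and skip it, or
.    compute REFSUM = 0 so that the update is a harmless no-op.)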
==== */ v[m22 * v_dim1 + 1] = 0.; } /* ==== Multiply H by reflections from the left ==== */ if (accum) { jbot = min(ndcol,*kbot); } else if (*wantt) { jbot = *n; } else { jbot = *kbot; } i__4 = jbot; for (j = max(*ktop,krcol); j <= i__4; ++j) { /* Computing MIN */ i__5 = mbot, i__6 = (j - krcol + 2) / 3; mend = min(i__5,i__6); i__5 = mend; for (m = mtop; m <= i__5; ++m) { k = krcol + (m - 1) * 3; refsum = v[m * v_dim1 + 1] * (h__[k + 1 + j * h_dim1] + v[ m * v_dim1 + 2] * h__[k + 2 + j * h_dim1] + v[m * v_dim1 + 3] * h__[k + 3 + j * h_dim1]); h__[k + 1 + j * h_dim1] -= refsum; h__[k + 2 + j * h_dim1] -= refsum * v[m * v_dim1 + 2]; h__[k + 3 + j * h_dim1] -= refsum * v[m * v_dim1 + 3]; /* L30: */ } /* L40: */ } if (bmp22) { k = krcol + (m22 - 1) * 3; /* Computing MAX */ i__4 = k + 1; i__5 = jbot; for (j = max(i__4,*ktop); j <= i__5; ++j) { refsum = v[m22 * v_dim1 + 1] * (h__[k + 1 + j * h_dim1] + v[m22 * v_dim1 + 2] * h__[k + 2 + j * h_dim1]); h__[k + 1 + j * h_dim1] -= refsum; h__[k + 2 + j * h_dim1] -= refsum * v[m22 * v_dim1 + 2]; /* L50: */ } } /* ==== Multiply H by reflections from the right. . Delay filling in the last row until the . vigilant deflation check is complete. ==== */ if (accum) { jtop = max(*ktop,incol); } else if (*wantt) { jtop = 1; } else { jtop = *ktop; } i__5 = mbot; for (m = mtop; m <= i__5; ++m) { if (v[m * v_dim1 + 1] != 0.) { k = krcol + (m - 1) * 3; /* Computing MIN */ i__6 = *kbot, i__7 = k + 3; i__4 = min(i__6,i__7); for (j = jtop; j <= i__4; ++j) { refsum = v[m * v_dim1 + 1] * (h__[j + (k + 1) * h_dim1] + v[m * v_dim1 + 2] * h__[j + (k + 2) * h_dim1] + v[m * v_dim1 + 3] * h__[j + (k + 3) * h_dim1]); h__[j + (k + 1) * h_dim1] -= refsum; h__[j + (k + 2) * h_dim1] -= refsum * v[m * v_dim1 + 2]; h__[j + (k + 3) * h_dim1] -= refsum * v[m * v_dim1 + 3]; /* L60: */ } if (accum) { /* ==== Accumulate U. (If necessary, update Z later . with with an efficient matrix-matrix . multiply.) ==== */ kms = k - incol; /* Computing MAX */ i__4 = 1, i__6 = *ktop - incol; i__7 = kdu; for (j = max(i__4,i__6); j <= i__7; ++j) { refsum = v[m * v_dim1 + 1] * (u[j + (kms + 1) * u_dim1] + v[m * v_dim1 + 2] * u[j + (kms + 2) * u_dim1] + v[m * v_dim1 + 3] * u[j + (kms + 3) * u_dim1]); u[j + (kms + 1) * u_dim1] -= refsum; u[j + (kms + 2) * u_dim1] -= refsum * v[m * v_dim1 + 2]; u[j + (kms + 3) * u_dim1] -= refsum * v[m * v_dim1 + 3]; /* L70: */ } } else if (*wantz) { /* ==== U is not accumulated, so update Z . now by multiplying by reflections . from the right. ==== */ i__7 = *ihiz; for (j = *iloz; j <= i__7; ++j) { refsum = v[m * v_dim1 + 1] * (z__[j + (k + 1) * z_dim1] + v[m * v_dim1 + 2] * z__[j + (k + 2) * z_dim1] + v[m * v_dim1 + 3] * z__[ j + (k + 3) * z_dim1]); z__[j + (k + 1) * z_dim1] -= refsum; z__[j + (k + 2) * z_dim1] -= refsum * v[m * v_dim1 + 2]; z__[j + (k + 3) * z_dim1] -= refsum * v[m * v_dim1 + 3]; /* L80: */ } } } /* L90: */ } /* ==== Special case: 2-by-2 reflection (if needed) ==== */ k = krcol + (m22 - 1) * 3; if (bmp22 && v[m22 * v_dim1 + 1] != 0.) 
{ /* Computing MIN */ i__7 = *kbot, i__4 = k + 3; i__5 = min(i__7,i__4); for (j = jtop; j <= i__5; ++j) { refsum = v[m22 * v_dim1 + 1] * (h__[j + (k + 1) * h_dim1] + v[m22 * v_dim1 + 2] * h__[j + (k + 2) * h_dim1]) ; h__[j + (k + 1) * h_dim1] -= refsum; h__[j + (k + 2) * h_dim1] -= refsum * v[m22 * v_dim1 + 2]; /* L100: */ } if (accum) { kms = k - incol; /* Computing MAX */ i__5 = 1, i__7 = *ktop - incol; i__4 = kdu; for (j = max(i__5,i__7); j <= i__4; ++j) { refsum = v[m22 * v_dim1 + 1] * (u[j + (kms + 1) * u_dim1] + v[m22 * v_dim1 + 2] * u[j + (kms + 2) * u_dim1]); u[j + (kms + 1) * u_dim1] -= refsum; u[j + (kms + 2) * u_dim1] -= refsum * v[m22 * v_dim1 + 2]; /* L110: */ } } else if (*wantz) { i__4 = *ihiz; for (j = *iloz; j <= i__4; ++j) { refsum = v[m22 * v_dim1 + 1] * (z__[j + (k + 1) * z_dim1] + v[m22 * v_dim1 + 2] * z__[j + (k + 2) * z_dim1]); z__[j + (k + 1) * z_dim1] -= refsum; z__[j + (k + 2) * z_dim1] -= refsum * v[m22 * v_dim1 + 2]; /* L120: */ } } } /* ==== Vigilant deflation check ==== */ mstart = mtop; if (krcol + (mstart - 1) * 3 < *ktop) { ++mstart; } mend = mbot; if (bmp22) { ++mend; } if (krcol == *kbot - 2) { ++mend; } i__4 = mend; for (m = mstart; m <= i__4; ++m) { /* Computing MIN */ i__5 = *kbot - 1, i__7 = krcol + (m - 1) * 3; k = min(i__5,i__7); /* ==== The following convergence test requires that . the tradition small-compared-to-nearby-diagonals . criterion and the Ahues & Tisseur (LAWN 122, 1997) . criteria both be satisfied. The latter improves . accuracy in some examples. Falling back on an . alternate convergence criterion when TST1 or TST2 . is zero (as done here) is traditional but probably . unnecessary. ==== */ if (h__[k + 1 + k * h_dim1] != 0.) { tst1 = (d__1 = h__[k + k * h_dim1], abs(d__1)) + (d__2 = h__[k + 1 + (k + 1) * h_dim1], abs(d__2)); if (tst1 == 0.) { if (k >= *ktop + 1) { tst1 += (d__1 = h__[k + (k - 1) * h_dim1], abs( d__1)); } if (k >= *ktop + 2) { tst1 += (d__1 = h__[k + (k - 2) * h_dim1], abs( d__1)); } if (k >= *ktop + 3) { tst1 += (d__1 = h__[k + (k - 3) * h_dim1], abs( d__1)); } if (k <= *kbot - 2) { tst1 += (d__1 = h__[k + 2 + (k + 1) * h_dim1], abs(d__1)); } if (k <= *kbot - 3) { tst1 += (d__1 = h__[k + 3 + (k + 1) * h_dim1], abs(d__1)); } if (k <= *kbot - 4) { tst1 += (d__1 = h__[k + 4 + (k + 1) * h_dim1], abs(d__1)); } } /* Computing MAX */ d__2 = smlnum, d__3 = ulp * tst1; if ((d__1 = h__[k + 1 + k * h_dim1], abs(d__1)) <= max( d__2,d__3)) { /* Computing MAX */ d__3 = (d__1 = h__[k + 1 + k * h_dim1], abs(d__1)), d__4 = (d__2 = h__[k + (k + 1) * h_dim1], abs( d__2)); h12 = max(d__3,d__4); /* Computing MIN */ d__3 = (d__1 = h__[k + 1 + k * h_dim1], abs(d__1)), d__4 = (d__2 = h__[k + (k + 1) * h_dim1], abs( d__2)); h21 = min(d__3,d__4); /* Computing MAX */ d__3 = (d__1 = h__[k + 1 + (k + 1) * h_dim1], abs( d__1)), d__4 = (d__2 = h__[k + k * h_dim1] - h__[k + 1 + (k + 1) * h_dim1], abs(d__2)); h11 = max(d__3,d__4); /* Computing MIN */ d__3 = (d__1 = h__[k + 1 + (k + 1) * h_dim1], abs( d__1)), d__4 = (d__2 = h__[k + k * h_dim1] - h__[k + 1 + (k + 1) * h_dim1], abs(d__2)); h22 = min(d__3,d__4); scl = h11 + h12; tst2 = h22 * (h11 / scl); /* Computing MAX */ d__1 = smlnum, d__2 = ulp * tst2; if (tst2 == 0. || h21 * (h12 / scl) <= max(d__1,d__2)) { h__[k + 1 + k * h_dim1] = 0.; } } } /* L130: */ } /* ==== Fill in the last row of each bulge. 
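.    (This writes the row K+4 fill-in from the right-hand application
.    of each 3-by-3 reflector, which was deliberately postponed above
.    until the vigilant deflation check had run; only H(K+4,K+3)
.    contributes, so the three affected entries are formed directly
.    from REFSUM.)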
==== Computing MIN */ i__4 = nbmps, i__5 = (*kbot - krcol - 1) / 3; mend = min(i__4,i__5); i__4 = mend; for (m = mtop; m <= i__4; ++m) { k = krcol + (m - 1) * 3; refsum = v[m * v_dim1 + 1] * v[m * v_dim1 + 3] * h__[k + 4 + ( k + 3) * h_dim1]; h__[k + 4 + (k + 1) * h_dim1] = -refsum; h__[k + 4 + (k + 2) * h_dim1] = -refsum * v[m * v_dim1 + 2]; h__[k + 4 + (k + 3) * h_dim1] -= refsum * v[m * v_dim1 + 3]; /* L140: */ } /* ==== End of near-the-diagonal bulge chase. ==== L150: */ } /* ==== Use U (if accumulated) to update far-from-diagonal . entries in H. If required, use U to update Z as . well. ==== */ if (accum) { if (*wantt) { jtop = 1; jbot = *n; } else { jtop = *ktop; jbot = *kbot; } if (! blk22 || incol < *ktop || ndcol > *kbot || ns <= 2) { /* ==== Updates not exploiting the 2-by-2 block . structure of U. K1 and NU keep track of . the location and size of U in the special . cases of introducing bulges and chasing . bulges off the bottom. In these special . cases and in case the number of shifts . is NS = 2, there is no 2-by-2 block . structure to exploit. ==== Computing MAX */ i__3 = 1, i__4 = *ktop - incol; k1 = max(i__3,i__4); /* Computing MAX */ i__3 = 0, i__4 = ndcol - *kbot; nu = kdu - max(i__3,i__4) - k1 + 1; /* ==== Horizontal Multiply ==== */ i__3 = jbot; i__4 = *nh; for (jcol = min(ndcol,*kbot) + 1; i__4 < 0 ? jcol >= i__3 : jcol <= i__3; jcol += i__4) { /* Computing MIN */ i__5 = *nh, i__7 = jbot - jcol + 1; jlen = min(i__5,i__7); dgemm_("C", "N", &nu, &jlen, &nu, &c_b15, &u[k1 + k1 * u_dim1], ldu, &h__[incol + k1 + jcol * h_dim1], ldh, &c_b29, &wh[wh_offset], ldwh); dlacpy_("ALL", &nu, &jlen, &wh[wh_offset], ldwh, &h__[ incol + k1 + jcol * h_dim1], ldh); /* L160: */ } /* ==== Vertical multiply ==== */ i__4 = max(*ktop,incol) - 1; i__3 = *nv; for (jrow = jtop; i__3 < 0 ? jrow >= i__4 : jrow <= i__4; jrow += i__3) { /* Computing MIN */ i__5 = *nv, i__7 = max(*ktop,incol) - jrow; jlen = min(i__5,i__7); dgemm_("N", "N", &jlen, &nu, &nu, &c_b15, &h__[jrow + ( incol + k1) * h_dim1], ldh, &u[k1 + k1 * u_dim1], ldu, &c_b29, &wv[wv_offset], ldwv); dlacpy_("ALL", &jlen, &nu, &wv[wv_offset], ldwv, &h__[ jrow + (incol + k1) * h_dim1], ldh); /* L170: */ } /* ==== Z multiply (also vertical) ==== */ if (*wantz) { i__3 = *ihiz; i__4 = *nv; for (jrow = *iloz; i__4 < 0 ? jrow >= i__3 : jrow <= i__3; jrow += i__4) { /* Computing MIN */ i__5 = *nv, i__7 = *ihiz - jrow + 1; jlen = min(i__5,i__7); dgemm_("N", "N", &jlen, &nu, &nu, &c_b15, &z__[jrow + (incol + k1) * z_dim1], ldz, &u[k1 + k1 * u_dim1], ldu, &c_b29, &wv[wv_offset], ldwv); dlacpy_("ALL", &jlen, &nu, &wv[wv_offset], ldwv, &z__[ jrow + (incol + k1) * z_dim1], ldz) ; /* L180: */ } } } else { /* ==== Updates exploiting U's 2-by-2 block structure. . (I2, I4, J2, J4 are the last rows and columns . of the blocks.) ==== */ i2 = (kdu + 1) / 2; i4 = kdu; j2 = i4 - i2; j4 = kdu; /* ==== KZS and KNZ deal with the band of zeros . along the diagonal of one of the triangular . blocks. ==== */ kzs = j4 - j2 - (ns + 1); knz = ns + 1; /* ==== Horizontal multiply ==== */ i__4 = jbot; i__3 = *nh; for (jcol = min(ndcol,*kbot) + 1; i__3 < 0 ? jcol >= i__4 : jcol <= i__4; jcol += i__3) { /* Computing MIN */ i__5 = *nh, i__7 = jbot - jcol + 1; jlen = min(i__5,i__7); /* ==== Copy bottom of H to top+KZS of scratch ==== (The first KZS rows get multiplied by zero.) 
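.    (Rough orientation only: in this BLK22 branch the accumulated U
.    is treated as a 2-by-2 block matrix whose off-diagonal corners
.    carry triangular zero structure, so the single large GEMM of the
.    plain branch is replaced by the cheaper TRMM calls on the
.    triangular pieces plus smaller GEMMs on the dense pieces that
.    follow.)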
==== */ dlacpy_("ALL", &knz, &jlen, &h__[incol + 1 + j2 + jcol * h_dim1], ldh, &wh[kzs + 1 + wh_dim1], ldwh); /* ==== Multiply by U21' ==== */ dlaset_("ALL", &kzs, &jlen, &c_b29, &c_b29, &wh[wh_offset] , ldwh); dtrmm_("L", "U", "C", "N", &knz, &jlen, &c_b15, &u[j2 + 1 + (kzs + 1) * u_dim1], ldu, &wh[kzs + 1 + wh_dim1] , ldwh); /* ==== Multiply top of H by U11' ==== */ dgemm_("C", "N", &i2, &jlen, &j2, &c_b15, &u[u_offset], ldu, &h__[incol + 1 + jcol * h_dim1], ldh, &c_b15, &wh[wh_offset], ldwh); /* ==== Copy top of H bottom of WH ==== */ dlacpy_("ALL", &j2, &jlen, &h__[incol + 1 + jcol * h_dim1] , ldh, &wh[i2 + 1 + wh_dim1], ldwh); /* ==== Multiply by U21' ==== */ dtrmm_("L", "L", "C", "N", &j2, &jlen, &c_b15, &u[(i2 + 1) * u_dim1 + 1], ldu, &wh[i2 + 1 + wh_dim1], ldwh); /* ==== Multiply by U22 ==== */ i__5 = i4 - i2; i__7 = j4 - j2; dgemm_("C", "N", &i__5, &jlen, &i__7, &c_b15, &u[j2 + 1 + (i2 + 1) * u_dim1], ldu, &h__[incol + 1 + j2 + jcol * h_dim1], ldh, &c_b15, &wh[i2 + 1 + wh_dim1] , ldwh); /* ==== Copy it back ==== */ dlacpy_("ALL", &kdu, &jlen, &wh[wh_offset], ldwh, &h__[ incol + 1 + jcol * h_dim1], ldh); /* L190: */ } /* ==== Vertical multiply ==== */ i__3 = max(incol,*ktop) - 1; i__4 = *nv; for (jrow = jtop; i__4 < 0 ? jrow >= i__3 : jrow <= i__3; jrow += i__4) { /* Computing MIN */ i__5 = *nv, i__7 = max(incol,*ktop) - jrow; jlen = min(i__5,i__7); /* ==== Copy right of H to scratch (the first KZS . columns get multiplied by zero) ==== */ dlacpy_("ALL", &jlen, &knz, &h__[jrow + (incol + 1 + j2) * h_dim1], ldh, &wv[(kzs + 1) * wv_dim1 + 1], ldwv); /* ==== Multiply by U21 ==== */ dlaset_("ALL", &jlen, &kzs, &c_b29, &c_b29, &wv[wv_offset] , ldwv); dtrmm_("R", "U", "N", "N", &jlen, &knz, &c_b15, &u[j2 + 1 + (kzs + 1) * u_dim1], ldu, &wv[(kzs + 1) * wv_dim1 + 1], ldwv); /* ==== Multiply by U11 ==== */ dgemm_("N", "N", &jlen, &i2, &j2, &c_b15, &h__[jrow + ( incol + 1) * h_dim1], ldh, &u[u_offset], ldu, & c_b15, &wv[wv_offset], ldwv) ; /* ==== Copy left of H to right of scratch ==== */ dlacpy_("ALL", &jlen, &j2, &h__[jrow + (incol + 1) * h_dim1], ldh, &wv[(i2 + 1) * wv_dim1 + 1], ldwv); /* ==== Multiply by U21 ==== */ i__5 = i4 - i2; dtrmm_("R", "L", "N", "N", &jlen, &i__5, &c_b15, &u[(i2 + 1) * u_dim1 + 1], ldu, &wv[(i2 + 1) * wv_dim1 + 1] , ldwv); /* ==== Multiply by U22 ==== */ i__5 = i4 - i2; i__7 = j4 - j2; dgemm_("N", "N", &jlen, &i__5, &i__7, &c_b15, &h__[jrow + (incol + 1 + j2) * h_dim1], ldh, &u[j2 + 1 + (i2 + 1) * u_dim1], ldu, &c_b15, &wv[(i2 + 1) * wv_dim1 + 1], ldwv); /* ==== Copy it back ==== */ dlacpy_("ALL", &jlen, &kdu, &wv[wv_offset], ldwv, &h__[ jrow + (incol + 1) * h_dim1], ldh); /* L200: */ } /* ==== Multiply Z (also vertical) ==== */ if (*wantz) { i__4 = *ihiz; i__3 = *nv; for (jrow = *iloz; i__3 < 0 ? jrow >= i__4 : jrow <= i__4; jrow += i__3) { /* Computing MIN */ i__5 = *nv, i__7 = *ihiz - jrow + 1; jlen = min(i__5,i__7); /* ==== Copy right of Z to left of scratch (first . 
KZS columns get multiplied by zero) ==== */ dlacpy_("ALL", &jlen, &knz, &z__[jrow + (incol + 1 + j2) * z_dim1], ldz, &wv[(kzs + 1) * wv_dim1 + 1], ldwv); /* ==== Multiply by U12 ==== */ dlaset_("ALL", &jlen, &kzs, &c_b29, &c_b29, &wv[ wv_offset], ldwv); dtrmm_("R", "U", "N", "N", &jlen, &knz, &c_b15, &u[j2 + 1 + (kzs + 1) * u_dim1], ldu, &wv[(kzs + 1) * wv_dim1 + 1], ldwv); /* ==== Multiply by U11 ==== */ dgemm_("N", "N", &jlen, &i2, &j2, &c_b15, &z__[jrow + (incol + 1) * z_dim1], ldz, &u[u_offset], ldu, &c_b15, &wv[wv_offset], ldwv); /* ==== Copy left of Z to right of scratch ==== */ dlacpy_("ALL", &jlen, &j2, &z__[jrow + (incol + 1) * z_dim1], ldz, &wv[(i2 + 1) * wv_dim1 + 1], ldwv); /* ==== Multiply by U21 ==== */ i__5 = i4 - i2; dtrmm_("R", "L", "N", "N", &jlen, &i__5, &c_b15, &u[( i2 + 1) * u_dim1 + 1], ldu, &wv[(i2 + 1) * wv_dim1 + 1], ldwv); /* ==== Multiply by U22 ==== */ i__5 = i4 - i2; i__7 = j4 - j2; dgemm_("N", "N", &jlen, &i__5, &i__7, &c_b15, &z__[ jrow + (incol + 1 + j2) * z_dim1], ldz, &u[j2 + 1 + (i2 + 1) * u_dim1], ldu, &c_b15, &wv[( i2 + 1) * wv_dim1 + 1], ldwv); /* ==== Copy the result back to Z ==== */ dlacpy_("ALL", &jlen, &kdu, &wv[wv_offset], ldwv, & z__[jrow + (incol + 1) * z_dim1], ldz); /* L210: */ } } } } /* L220: */ } /* ==== End of DLAQR5 ==== */ return 0; } /* dlaqr5_ */ /* Subroutine */ int dlarf_(char *side, integer *m, integer *n, doublereal *v, integer *incv, doublereal *tau, doublereal *c__, integer *ldc, doublereal *work) { /* System generated locals */ integer c_dim1, c_offset; doublereal d__1; /* Local variables */ extern /* Subroutine */ int dger_(integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *); extern logical lsame_(char *, char *); extern /* Subroutine */ int dgemv_(char *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *); /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLARF applies a real elementary reflector H to a real m by n matrix C, from either the left or the right. H is represented in the form H = I - tau * v * v' where tau is a real scalar and v is a real vector. If tau = 0, then H is taken to be the unit matrix. Arguments ========= SIDE (input) CHARACTER*1 = 'L': form H * C = 'R': form C * H M (input) INTEGER The number of rows of the matrix C. N (input) INTEGER The number of columns of the matrix C. V (input) DOUBLE PRECISION array, dimension (1 + (M-1)*abs(INCV)) if SIDE = 'L' or (1 + (N-1)*abs(INCV)) if SIDE = 'R' The vector v in the representation of H. V is not used if TAU = 0. INCV (input) INTEGER The increment between elements of v. INCV <> 0. TAU (input) DOUBLE PRECISION The value tau in the representation of H. C (input/output) DOUBLE PRECISION array, dimension (LDC,N) On entry, the m by n matrix C. On exit, C is overwritten by the matrix H * C if SIDE = 'L', or C * H if SIDE = 'R'. LDC (input) INTEGER The leading dimension of the array C. LDC >= max(1,M). WORK (workspace) DOUBLE PRECISION array, dimension (N) if SIDE = 'L' or (M) if SIDE = 'R' ===================================================================== */ /* Parameter adjustments */ --v; c_dim1 = *ldc; c_offset = 1 + c_dim1 * 1; c__ -= c_offset; --work; /* Function Body */ if (lsame_(side, "L")) { /* Form H * C */ if (*tau != 0.) 
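/*        Sketch of the arithmetic, stated informally: since
          H = I - tau*v*v', the product H*C equals C - tau*v*(v'*C);
          the DGEMV below forms w := C'*v and the DGER then applies
          the rank-one update C := C - tau*v*w'. */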
{ /* w := C' * v */ dgemv_("Transpose", m, n, &c_b15, &c__[c_offset], ldc, &v[1], incv, &c_b29, &work[1], &c__1); /* C := C - v * w' */ d__1 = -(*tau); dger_(m, n, &d__1, &v[1], incv, &work[1], &c__1, &c__[c_offset], ldc); } } else { /* Form C * H */ if (*tau != 0.) { /* w := C * v */ dgemv_("No transpose", m, n, &c_b15, &c__[c_offset], ldc, &v[1], incv, &c_b29, &work[1], &c__1); /* C := C - w * v' */ d__1 = -(*tau); dger_(m, n, &d__1, &work[1], &c__1, &v[1], incv, &c__[c_offset], ldc); } } return 0; /* End of DLARF */ } /* dlarf_ */ /* Subroutine */ int dlarfb_(char *side, char *trans, char *direct, char * storev, integer *m, integer *n, integer *k, doublereal *v, integer * ldv, doublereal *t, integer *ldt, doublereal *c__, integer *ldc, doublereal *work, integer *ldwork) { /* System generated locals */ integer c_dim1, c_offset, t_dim1, t_offset, v_dim1, v_offset, work_dim1, work_offset, i__1, i__2; /* Local variables */ static integer i__, j; extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *); extern logical lsame_(char *, char *); extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *, doublereal *, integer *), dtrmm_(char *, char *, char *, char *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *); static char transt[1]; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLARFB applies a real block reflector H or its transpose H' to a real m by n matrix C, from either the left or the right. Arguments ========= SIDE (input) CHARACTER*1 = 'L': apply H or H' from the Left = 'R': apply H or H' from the Right TRANS (input) CHARACTER*1 = 'N': apply H (No transpose) = 'T': apply H' (Transpose) DIRECT (input) CHARACTER*1 Indicates how H is formed from a product of elementary reflectors = 'F': H = H(1) H(2) . . . H(k) (Forward) = 'B': H = H(k) . . . H(2) H(1) (Backward) STOREV (input) CHARACTER*1 Indicates how the vectors which define the elementary reflectors are stored: = 'C': Columnwise = 'R': Rowwise M (input) INTEGER The number of rows of the matrix C. N (input) INTEGER The number of columns of the matrix C. K (input) INTEGER The order of the matrix T (= the number of elementary reflectors whose product defines the block reflector). V (input) DOUBLE PRECISION array, dimension (LDV,K) if STOREV = 'C' (LDV,M) if STOREV = 'R' and SIDE = 'L' (LDV,N) if STOREV = 'R' and SIDE = 'R' The matrix V. See further details. LDV (input) INTEGER The leading dimension of the array V. If STOREV = 'C' and SIDE = 'L', LDV >= max(1,M); if STOREV = 'C' and SIDE = 'R', LDV >= max(1,N); if STOREV = 'R', LDV >= K. T (input) DOUBLE PRECISION array, dimension (LDT,K) The triangular k by k matrix T in the representation of the block reflector. LDT (input) INTEGER The leading dimension of the array T. LDT >= K. C (input/output) DOUBLE PRECISION array, dimension (LDC,N) On entry, the m by n matrix C. On exit, C is overwritten by H*C or H'*C or C*H or C*H'. LDC (input) INTEGER The leading dimension of the array C. LDA >= max(1,M). WORK (workspace) DOUBLE PRECISION array, dimension (LDWORK,K) LDWORK (input) INTEGER The leading dimension of the array WORK. If SIDE = 'L', LDWORK >= max(1,N); if SIDE = 'R', LDWORK >= max(1,M). 
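    Note (informal, for orientation only): the block reflector is held
    in the compact WY form H = I - V*T*V', so applying H or H' to C
    reduces to W := C'*V (or C*V), then W := W*T' (or W*T), and a
    rank-K update C := C - V*W' (or C - W*V'); the SIDE/TRANS/DIRECT/
    STOREV branches below only rearrange this recipe for the different
    storage layouts of V.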
===================================================================== Quick return if possible */ /* Parameter adjustments */ v_dim1 = *ldv; v_offset = 1 + v_dim1 * 1; v -= v_offset; t_dim1 = *ldt; t_offset = 1 + t_dim1 * 1; t -= t_offset; c_dim1 = *ldc; c_offset = 1 + c_dim1 * 1; c__ -= c_offset; work_dim1 = *ldwork; work_offset = 1 + work_dim1 * 1; work -= work_offset; /* Function Body */ if (*m <= 0 || *n <= 0) { return 0; } if (lsame_(trans, "N")) { *(unsigned char *)transt = 'T'; } else { *(unsigned char *)transt = 'N'; } if (lsame_(storev, "C")) { if (lsame_(direct, "F")) { /* Let V = ( V1 ) (first K rows) ( V2 ) where V1 is unit lower triangular. */ if (lsame_(side, "L")) { /* Form H * C or H' * C where C = ( C1 ) ( C2 ) W := C' * V = (C1'*V1 + C2'*V2) (stored in WORK) W := C1' */ i__1 = *k; for (j = 1; j <= i__1; ++j) { dcopy_(n, &c__[j + c_dim1], ldc, &work[j * work_dim1 + 1], &c__1); /* L10: */ } /* W := W * V1 */ dtrmm_("Right", "Lower", "No transpose", "Unit", n, k, &c_b15, &v[v_offset], ldv, &work[work_offset], ldwork); if (*m > *k) { /* W := W + C2'*V2 */ i__1 = *m - *k; dgemm_("Transpose", "No transpose", n, k, &i__1, &c_b15, & c__[*k + 1 + c_dim1], ldc, &v[*k + 1 + v_dim1], ldv, &c_b15, &work[work_offset], ldwork); } /* W := W * T' or W * T */ dtrmm_("Right", "Upper", transt, "Non-unit", n, k, &c_b15, &t[ t_offset], ldt, &work[work_offset], ldwork); /* C := C - V * W' */ if (*m > *k) { /* C2 := C2 - V2 * W' */ i__1 = *m - *k; dgemm_("No transpose", "Transpose", &i__1, n, k, &c_b151, &v[*k + 1 + v_dim1], ldv, &work[work_offset], ldwork, &c_b15, &c__[*k + 1 + c_dim1], ldc); } /* W := W * V1' */ dtrmm_("Right", "Lower", "Transpose", "Unit", n, k, &c_b15, & v[v_offset], ldv, &work[work_offset], ldwork); /* C1 := C1 - W' */ i__1 = *k; for (j = 1; j <= i__1; ++j) { i__2 = *n; for (i__ = 1; i__ <= i__2; ++i__) { c__[j + i__ * c_dim1] -= work[i__ + j * work_dim1]; /* L20: */ } /* L30: */ } } else if (lsame_(side, "R")) { /* Form C * H or C * H' where C = ( C1 C2 ) W := C * V = (C1*V1 + C2*V2) (stored in WORK) W := C1 */ i__1 = *k; for (j = 1; j <= i__1; ++j) { dcopy_(m, &c__[j * c_dim1 + 1], &c__1, &work[j * work_dim1 + 1], &c__1); /* L40: */ } /* W := W * V1 */ dtrmm_("Right", "Lower", "No transpose", "Unit", m, k, &c_b15, &v[v_offset], ldv, &work[work_offset], ldwork); if (*n > *k) { /* W := W + C2 * V2 */ i__1 = *n - *k; dgemm_("No transpose", "No transpose", m, k, &i__1, & c_b15, &c__[(*k + 1) * c_dim1 + 1], ldc, &v[*k + 1 + v_dim1], ldv, &c_b15, &work[work_offset], ldwork); } /* W := W * T or W * T' */ dtrmm_("Right", "Upper", trans, "Non-unit", m, k, &c_b15, &t[ t_offset], ldt, &work[work_offset], ldwork); /* C := C - W * V' */ if (*n > *k) { /* C2 := C2 - W * V2' */ i__1 = *n - *k; dgemm_("No transpose", "Transpose", m, &i__1, k, &c_b151, &work[work_offset], ldwork, &v[*k + 1 + v_dim1], ldv, &c_b15, &c__[(*k + 1) * c_dim1 + 1], ldc); } /* W := W * V1' */ dtrmm_("Right", "Lower", "Transpose", "Unit", m, k, &c_b15, & v[v_offset], ldv, &work[work_offset], ldwork); /* C1 := C1 - W */ i__1 = *k; for (j = 1; j <= i__1; ++j) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { c__[i__ + j * c_dim1] -= work[i__ + j * work_dim1]; /* L50: */ } /* L60: */ } } } else { /* Let V = ( V1 ) ( V2 ) (last K rows) where V2 is unit upper triangular. 
*/ if (lsame_(side, "L")) { /* Form H * C or H' * C where C = ( C1 ) ( C2 ) W := C' * V = (C1'*V1 + C2'*V2) (stored in WORK) W := C2' */ i__1 = *k; for (j = 1; j <= i__1; ++j) { dcopy_(n, &c__[*m - *k + j + c_dim1], ldc, &work[j * work_dim1 + 1], &c__1); /* L70: */ } /* W := W * V2 */ dtrmm_("Right", "Upper", "No transpose", "Unit", n, k, &c_b15, &v[*m - *k + 1 + v_dim1], ldv, &work[work_offset], ldwork); if (*m > *k) { /* W := W + C1'*V1 */ i__1 = *m - *k; dgemm_("Transpose", "No transpose", n, k, &i__1, &c_b15, & c__[c_offset], ldc, &v[v_offset], ldv, &c_b15, & work[work_offset], ldwork); } /* W := W * T' or W * T */ dtrmm_("Right", "Lower", transt, "Non-unit", n, k, &c_b15, &t[ t_offset], ldt, &work[work_offset], ldwork); /* C := C - V * W' */ if (*m > *k) { /* C1 := C1 - V1 * W' */ i__1 = *m - *k; dgemm_("No transpose", "Transpose", &i__1, n, k, &c_b151, &v[v_offset], ldv, &work[work_offset], ldwork, & c_b15, &c__[c_offset], ldc) ; } /* W := W * V2' */ dtrmm_("Right", "Upper", "Transpose", "Unit", n, k, &c_b15, & v[*m - *k + 1 + v_dim1], ldv, &work[work_offset], ldwork); /* C2 := C2 - W' */ i__1 = *k; for (j = 1; j <= i__1; ++j) { i__2 = *n; for (i__ = 1; i__ <= i__2; ++i__) { c__[*m - *k + j + i__ * c_dim1] -= work[i__ + j * work_dim1]; /* L80: */ } /* L90: */ } } else if (lsame_(side, "R")) { /* Form C * H or C * H' where C = ( C1 C2 ) W := C * V = (C1*V1 + C2*V2) (stored in WORK) W := C2 */ i__1 = *k; for (j = 1; j <= i__1; ++j) { dcopy_(m, &c__[(*n - *k + j) * c_dim1 + 1], &c__1, &work[ j * work_dim1 + 1], &c__1); /* L100: */ } /* W := W * V2 */ dtrmm_("Right", "Upper", "No transpose", "Unit", m, k, &c_b15, &v[*n - *k + 1 + v_dim1], ldv, &work[work_offset], ldwork); if (*n > *k) { /* W := W + C1 * V1 */ i__1 = *n - *k; dgemm_("No transpose", "No transpose", m, k, &i__1, & c_b15, &c__[c_offset], ldc, &v[v_offset], ldv, & c_b15, &work[work_offset], ldwork); } /* W := W * T or W * T' */ dtrmm_("Right", "Lower", trans, "Non-unit", m, k, &c_b15, &t[ t_offset], ldt, &work[work_offset], ldwork); /* C := C - W * V' */ if (*n > *k) { /* C1 := C1 - W * V1' */ i__1 = *n - *k; dgemm_("No transpose", "Transpose", m, &i__1, k, &c_b151, &work[work_offset], ldwork, &v[v_offset], ldv, & c_b15, &c__[c_offset], ldc) ; } /* W := W * V2' */ dtrmm_("Right", "Upper", "Transpose", "Unit", m, k, &c_b15, & v[*n - *k + 1 + v_dim1], ldv, &work[work_offset], ldwork); /* C2 := C2 - W */ i__1 = *k; for (j = 1; j <= i__1; ++j) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { c__[i__ + (*n - *k + j) * c_dim1] -= work[i__ + j * work_dim1]; /* L110: */ } /* L120: */ } } } } else if (lsame_(storev, "R")) { if (lsame_(direct, "F")) { /* Let V = ( V1 V2 ) (V1: first K columns) where V1 is unit upper triangular. 
*/ if (lsame_(side, "L")) { /* Form H * C or H' * C where C = ( C1 ) ( C2 ) W := C' * V' = (C1'*V1' + C2'*V2') (stored in WORK) W := C1' */ i__1 = *k; for (j = 1; j <= i__1; ++j) { dcopy_(n, &c__[j + c_dim1], ldc, &work[j * work_dim1 + 1], &c__1); /* L130: */ } /* W := W * V1' */ dtrmm_("Right", "Upper", "Transpose", "Unit", n, k, &c_b15, & v[v_offset], ldv, &work[work_offset], ldwork); if (*m > *k) { /* W := W + C2'*V2' */ i__1 = *m - *k; dgemm_("Transpose", "Transpose", n, k, &i__1, &c_b15, & c__[*k + 1 + c_dim1], ldc, &v[(*k + 1) * v_dim1 + 1], ldv, &c_b15, &work[work_offset], ldwork); } /* W := W * T' or W * T */ dtrmm_("Right", "Upper", transt, "Non-unit", n, k, &c_b15, &t[ t_offset], ldt, &work[work_offset], ldwork); /* C := C - V' * W' */ if (*m > *k) { /* C2 := C2 - V2' * W' */ i__1 = *m - *k; dgemm_("Transpose", "Transpose", &i__1, n, k, &c_b151, &v[ (*k + 1) * v_dim1 + 1], ldv, &work[work_offset], ldwork, &c_b15, &c__[*k + 1 + c_dim1], ldc); } /* W := W * V1 */ dtrmm_("Right", "Upper", "No transpose", "Unit", n, k, &c_b15, &v[v_offset], ldv, &work[work_offset], ldwork); /* C1 := C1 - W' */ i__1 = *k; for (j = 1; j <= i__1; ++j) { i__2 = *n; for (i__ = 1; i__ <= i__2; ++i__) { c__[j + i__ * c_dim1] -= work[i__ + j * work_dim1]; /* L140: */ } /* L150: */ } } else if (lsame_(side, "R")) { /* Form C * H or C * H' where C = ( C1 C2 ) W := C * V' = (C1*V1' + C2*V2') (stored in WORK) W := C1 */ i__1 = *k; for (j = 1; j <= i__1; ++j) { dcopy_(m, &c__[j * c_dim1 + 1], &c__1, &work[j * work_dim1 + 1], &c__1); /* L160: */ } /* W := W * V1' */ dtrmm_("Right", "Upper", "Transpose", "Unit", m, k, &c_b15, & v[v_offset], ldv, &work[work_offset], ldwork); if (*n > *k) { /* W := W + C2 * V2' */ i__1 = *n - *k; dgemm_("No transpose", "Transpose", m, k, &i__1, &c_b15, & c__[(*k + 1) * c_dim1 + 1], ldc, &v[(*k + 1) * v_dim1 + 1], ldv, &c_b15, &work[work_offset], ldwork); } /* W := W * T or W * T' */ dtrmm_("Right", "Upper", trans, "Non-unit", m, k, &c_b15, &t[ t_offset], ldt, &work[work_offset], ldwork); /* C := C - W * V */ if (*n > *k) { /* C2 := C2 - W * V2 */ i__1 = *n - *k; dgemm_("No transpose", "No transpose", m, &i__1, k, & c_b151, &work[work_offset], ldwork, &v[(*k + 1) * v_dim1 + 1], ldv, &c_b15, &c__[(*k + 1) * c_dim1 + 1], ldc); } /* W := W * V1 */ dtrmm_("Right", "Upper", "No transpose", "Unit", m, k, &c_b15, &v[v_offset], ldv, &work[work_offset], ldwork); /* C1 := C1 - W */ i__1 = *k; for (j = 1; j <= i__1; ++j) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { c__[i__ + j * c_dim1] -= work[i__ + j * work_dim1]; /* L170: */ } /* L180: */ } } } else { /* Let V = ( V1 V2 ) (V2: last K columns) where V2 is unit lower triangular. 
*/ if (lsame_(side, "L")) { /* Form H * C or H' * C where C = ( C1 ) ( C2 ) W := C' * V' = (C1'*V1' + C2'*V2') (stored in WORK) W := C2' */ i__1 = *k; for (j = 1; j <= i__1; ++j) { dcopy_(n, &c__[*m - *k + j + c_dim1], ldc, &work[j * work_dim1 + 1], &c__1); /* L190: */ } /* W := W * V2' */ dtrmm_("Right", "Lower", "Transpose", "Unit", n, k, &c_b15, & v[(*m - *k + 1) * v_dim1 + 1], ldv, &work[work_offset] , ldwork); if (*m > *k) { /* W := W + C1'*V1' */ i__1 = *m - *k; dgemm_("Transpose", "Transpose", n, k, &i__1, &c_b15, & c__[c_offset], ldc, &v[v_offset], ldv, &c_b15, & work[work_offset], ldwork); } /* W := W * T' or W * T */ dtrmm_("Right", "Lower", transt, "Non-unit", n, k, &c_b15, &t[ t_offset], ldt, &work[work_offset], ldwork); /* C := C - V' * W' */ if (*m > *k) { /* C1 := C1 - V1' * W' */ i__1 = *m - *k; dgemm_("Transpose", "Transpose", &i__1, n, k, &c_b151, &v[ v_offset], ldv, &work[work_offset], ldwork, & c_b15, &c__[c_offset], ldc); } /* W := W * V2 */ dtrmm_("Right", "Lower", "No transpose", "Unit", n, k, &c_b15, &v[(*m - *k + 1) * v_dim1 + 1], ldv, &work[ work_offset], ldwork); /* C2 := C2 - W' */ i__1 = *k; for (j = 1; j <= i__1; ++j) { i__2 = *n; for (i__ = 1; i__ <= i__2; ++i__) { c__[*m - *k + j + i__ * c_dim1] -= work[i__ + j * work_dim1]; /* L200: */ } /* L210: */ } } else if (lsame_(side, "R")) { /* Form C * H or C * H' where C = ( C1 C2 ) W := C * V' = (C1*V1' + C2*V2') (stored in WORK) W := C2 */ i__1 = *k; for (j = 1; j <= i__1; ++j) { dcopy_(m, &c__[(*n - *k + j) * c_dim1 + 1], &c__1, &work[ j * work_dim1 + 1], &c__1); /* L220: */ } /* W := W * V2' */ dtrmm_("Right", "Lower", "Transpose", "Unit", m, k, &c_b15, & v[(*n - *k + 1) * v_dim1 + 1], ldv, &work[work_offset] , ldwork); if (*n > *k) { /* W := W + C1 * V1' */ i__1 = *n - *k; dgemm_("No transpose", "Transpose", m, k, &i__1, &c_b15, & c__[c_offset], ldc, &v[v_offset], ldv, &c_b15, & work[work_offset], ldwork); } /* W := W * T or W * T' */ dtrmm_("Right", "Lower", trans, "Non-unit", m, k, &c_b15, &t[ t_offset], ldt, &work[work_offset], ldwork); /* C := C - W * V */ if (*n > *k) { /* C1 := C1 - W * V1 */ i__1 = *n - *k; dgemm_("No transpose", "No transpose", m, &i__1, k, & c_b151, &work[work_offset], ldwork, &v[v_offset], ldv, &c_b15, &c__[c_offset], ldc); } /* W := W * V2 */ dtrmm_("Right", "Lower", "No transpose", "Unit", m, k, &c_b15, &v[(*n - *k + 1) * v_dim1 + 1], ldv, &work[ work_offset], ldwork); /* C1 := C1 - W */ i__1 = *k; for (j = 1; j <= i__1; ++j) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { c__[i__ + (*n - *k + j) * c_dim1] -= work[i__ + j * work_dim1]; /* L230: */ } /* L240: */ } } } } return 0; /* End of DLARFB */ } /* dlarfb_ */ /* Subroutine */ int dlarfg_(integer *n, doublereal *alpha, doublereal *x, integer *incx, doublereal *tau) { /* System generated locals */ integer i__1; doublereal d__1; /* Builtin functions */ double d_sign(doublereal *, doublereal *); /* Local variables */ static doublereal beta; extern doublereal dnrm2_(integer *, doublereal *, integer *); static integer j; extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, integer *); static doublereal xnorm; static doublereal safmin, rsafmn; static integer knt; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLARFG generates a real elementary reflector H of order n, such that H * ( alpha ) = ( beta ), H' * H = I. ( x ) ( 0 ) where alpha and beta are scalars, and x is an (n-1)-element real vector. 
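    Illustrative aside (added text, not part of the original LAPACK comment):
    with n = 2, ALPHA = 3 and X = (4), the routine returns beta = -5,
    tau = 1.6 and v = 0.5, and indeed H applied to (3,4)' gives (-5,0)';
    on exit ALPHA holds beta and X holds v.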
H is represented in the form H = I - tau * ( 1 ) * ( 1 v' ) , ( v ) where tau is a real scalar and v is a real (n-1)-element vector. If the elements of x are all zero, then tau = 0 and H is taken to be the unit matrix. Otherwise 1 <= tau <= 2. Arguments ========= N (input) INTEGER The order of the elementary reflector. ALPHA (input/output) DOUBLE PRECISION On entry, the value alpha. On exit, it is overwritten with the value beta. X (input/output) DOUBLE PRECISION array, dimension (1+(N-2)*abs(INCX)) On entry, the vector x. On exit, it is overwritten with the vector v. INCX (input) INTEGER The increment between elements of X. INCX > 0. TAU (output) DOUBLE PRECISION The value tau. ===================================================================== */ /* Parameter adjustments */ --x; /* Function Body */ if (*n <= 1) { *tau = 0.; return 0; } i__1 = *n - 1; xnorm = dnrm2_(&i__1, &x[1], incx); if (xnorm == 0.) { /* H = I */ *tau = 0.; } else { /* general case */ d__1 = dlapy2_(alpha, &xnorm); beta = -d_sign(&d__1, alpha); safmin = SAFEMINIMUM / EPSILON; if (abs(beta) < safmin) { /* XNORM, BETA may be inaccurate; scale X and recompute them */ rsafmn = 1. / safmin; knt = 0; L10: ++knt; i__1 = *n - 1; dscal_(&i__1, &rsafmn, &x[1], incx); beta *= rsafmn; *alpha *= rsafmn; if (abs(beta) < safmin) { goto L10; } /* New BETA is at most 1, at least SAFMIN */ i__1 = *n - 1; xnorm = dnrm2_(&i__1, &x[1], incx); d__1 = dlapy2_(alpha, &xnorm); beta = -d_sign(&d__1, alpha); *tau = (beta - *alpha) / beta; i__1 = *n - 1; d__1 = 1. / (*alpha - beta); dscal_(&i__1, &d__1, &x[1], incx); /* If ALPHA is subnormal, it may lose relative accuracy */ *alpha = beta; i__1 = knt; for (j = 1; j <= i__1; ++j) { *alpha *= safmin; /* L20: */ } } else { *tau = (beta - *alpha) / beta; i__1 = *n - 1; d__1 = 1. / (*alpha - beta); dscal_(&i__1, &d__1, &x[1], incx); *alpha = beta; } } return 0; /* End of DLARFG */ } /* dlarfg_ */ /* Subroutine */ int dlarft_(char *direct, char *storev, integer *n, integer * k, doublereal *v, integer *ldv, doublereal *tau, doublereal *t, integer *ldt) { /* System generated locals */ integer t_dim1, t_offset, v_dim1, v_offset, i__1, i__2, i__3; doublereal d__1; /* Local variables */ static integer i__, j; extern logical lsame_(char *, char *); extern /* Subroutine */ int dgemv_(char *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *), dtrmv_(char *, char *, char *, integer *, doublereal *, integer *, doublereal *, integer *); static doublereal vii; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLARFT forms the triangular factor T of a real block reflector H of order n, which is defined as a product of k elementary reflectors. If DIRECT = 'F', H = H(1) H(2) . . . H(k) and T is upper triangular; If DIRECT = 'B', H = H(k) . . . H(2) H(1) and T is lower triangular. If STOREV = 'C', the vector which defines the elementary reflector H(i) is stored in the i-th column of the array V, and H = I - V * T * V' If STOREV = 'R', the vector which defines the elementary reflector H(i) is stored in the i-th row of the array V, and H = I - V' * T * V Arguments ========= DIRECT (input) CHARACTER*1 Specifies the order in which the elementary reflectors are multiplied to form the block reflector: = 'F': H = H(1) H(2) . . . H(k) (Forward) = 'B': H = H(k) . . . 
H(2) H(1) (Backward) STOREV (input) CHARACTER*1 Specifies how the vectors which define the elementary reflectors are stored (see also Further Details): = 'C': columnwise = 'R': rowwise N (input) INTEGER The order of the block reflector H. N >= 0. K (input) INTEGER The order of the triangular factor T (= the number of elementary reflectors). K >= 1. V (input/output) DOUBLE PRECISION array, dimension (LDV,K) if STOREV = 'C' (LDV,N) if STOREV = 'R' The matrix V. See further details. LDV (input) INTEGER The leading dimension of the array V. If STOREV = 'C', LDV >= max(1,N); if STOREV = 'R', LDV >= K. TAU (input) DOUBLE PRECISION array, dimension (K) TAU(i) must contain the scalar factor of the elementary reflector H(i). T (output) DOUBLE PRECISION array, dimension (LDT,K) The k by k triangular factor T of the block reflector. If DIRECT = 'F', T is upper triangular; if DIRECT = 'B', T is lower triangular. The rest of the array is not used. LDT (input) INTEGER The leading dimension of the array T. LDT >= K. Further Details =============== The shape of the matrix V and the storage of the vectors which define the H(i) is best illustrated by the following example with n = 5 and k = 3. The elements equal to 1 are not stored; the corresponding array elements are modified but restored on exit. The rest of the array is not used. DIRECT = 'F' and STOREV = 'C': DIRECT = 'F' and STOREV = 'R': V = ( 1 ) V = ( 1 v1 v1 v1 v1 ) ( v1 1 ) ( 1 v2 v2 v2 ) ( v1 v2 1 ) ( 1 v3 v3 ) ( v1 v2 v3 ) ( v1 v2 v3 ) DIRECT = 'B' and STOREV = 'C': DIRECT = 'B' and STOREV = 'R': V = ( v1 v2 v3 ) V = ( v1 v1 1 ) ( v1 v2 v3 ) ( v2 v2 v2 1 ) ( 1 v2 v3 ) ( v3 v3 v3 v3 1 ) ( 1 v3 ) ( 1 ) ===================================================================== Quick return if possible */ /* Parameter adjustments */ v_dim1 = *ldv; v_offset = 1 + v_dim1 * 1; v -= v_offset; --tau; t_dim1 = *ldt; t_offset = 1 + t_dim1 * 1; t -= t_offset; /* Function Body */ if (*n == 0) { return 0; } if (lsame_(direct, "F")) { i__1 = *k; for (i__ = 1; i__ <= i__1; ++i__) { if (tau[i__] == 0.) { /* H(i) = I */ i__2 = i__; for (j = 1; j <= i__2; ++j) { t[j + i__ * t_dim1] = 0.; /* L10: */ } } else { /* general case */ vii = v[i__ + i__ * v_dim1]; v[i__ + i__ * v_dim1] = 1.; if (lsame_(storev, "C")) { /* T(1:i-1,i) := - tau(i) * V(i:n,1:i-1)' * V(i:n,i) */ i__2 = *n - i__ + 1; i__3 = i__ - 1; d__1 = -tau[i__]; dgemv_("Transpose", &i__2, &i__3, &d__1, &v[i__ + v_dim1], ldv, &v[i__ + i__ * v_dim1], &c__1, &c_b29, &t[ i__ * t_dim1 + 1], &c__1); } else { /* T(1:i-1,i) := - tau(i) * V(1:i-1,i:n) * V(i,i:n)' */ i__2 = i__ - 1; i__3 = *n - i__ + 1; d__1 = -tau[i__]; dgemv_("No transpose", &i__2, &i__3, &d__1, &v[i__ * v_dim1 + 1], ldv, &v[i__ + i__ * v_dim1], ldv, & c_b29, &t[i__ * t_dim1 + 1], &c__1); } v[i__ + i__ * v_dim1] = vii; /* T(1:i-1,i) := T(1:i-1,1:i-1) * T(1:i-1,i) */ i__2 = i__ - 1; dtrmv_("Upper", "No transpose", "Non-unit", &i__2, &t[ t_offset], ldt, &t[i__ * t_dim1 + 1], &c__1); t[i__ + i__ * t_dim1] = tau[i__]; } /* L20: */ } } else { for (i__ = *k; i__ >= 1; --i__) { if (tau[i__] == 0.) 
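/* Illustrative note (added comment, not part of the original f2c output):
   in the backward (DIRECT = 'B') case the lower triangular factor T is
   built from its last column toward the first, so column i is formed from
   tau(i), the stored reflector vectors and the already computed trailing
   block T(i+1:k,i+1:k) via the DGEMV and DTRMV calls below. */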
{ /* H(i) = I */ i__1 = *k; for (j = i__; j <= i__1; ++j) { t[j + i__ * t_dim1] = 0.; /* L30: */ } } else { /* general case */ if (i__ < *k) { if (lsame_(storev, "C")) { vii = v[*n - *k + i__ + i__ * v_dim1]; v[*n - *k + i__ + i__ * v_dim1] = 1.; /* T(i+1:k,i) := - tau(i) * V(1:n-k+i,i+1:k)' * V(1:n-k+i,i) */ i__1 = *n - *k + i__; i__2 = *k - i__; d__1 = -tau[i__]; dgemv_("Transpose", &i__1, &i__2, &d__1, &v[(i__ + 1) * v_dim1 + 1], ldv, &v[i__ * v_dim1 + 1], & c__1, &c_b29, &t[i__ + 1 + i__ * t_dim1], & c__1); v[*n - *k + i__ + i__ * v_dim1] = vii; } else { vii = v[i__ + (*n - *k + i__) * v_dim1]; v[i__ + (*n - *k + i__) * v_dim1] = 1.; /* T(i+1:k,i) := - tau(i) * V(i+1:k,1:n-k+i) * V(i,1:n-k+i)' */ i__1 = *k - i__; i__2 = *n - *k + i__; d__1 = -tau[i__]; dgemv_("No transpose", &i__1, &i__2, &d__1, &v[i__ + 1 + v_dim1], ldv, &v[i__ + v_dim1], ldv, & c_b29, &t[i__ + 1 + i__ * t_dim1], &c__1); v[i__ + (*n - *k + i__) * v_dim1] = vii; } /* T(i+1:k,i) := T(i+1:k,i+1:k) * T(i+1:k,i) */ i__1 = *k - i__; dtrmv_("Lower", "No transpose", "Non-unit", &i__1, &t[i__ + 1 + (i__ + 1) * t_dim1], ldt, &t[i__ + 1 + i__ * t_dim1], &c__1) ; } t[i__ + i__ * t_dim1] = tau[i__]; } /* L40: */ } } return 0; /* End of DLARFT */ } /* dlarft_ */ /* Subroutine */ int dlarfx_(char *side, integer *m, integer *n, doublereal * v, doublereal *tau, doublereal *c__, integer *ldc, doublereal *work) { /* System generated locals */ integer c_dim1, c_offset, i__1; doublereal d__1; /* Local variables */ extern /* Subroutine */ int dger_(integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *); static integer j; extern logical lsame_(char *, char *); extern /* Subroutine */ int dgemv_(char *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *); static doublereal t1, t2, t3, t4, t5, t6, t7, t8, t9, v1, v2, v3, v4, v5, v6, v7, v8, v9, t10, v10, sum; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLARFX applies a real elementary reflector H to a real m by n matrix C, from either the left or the right. H is represented in the form H = I - tau * v * v' where tau is a real scalar and v is a real vector. If tau = 0, then H is taken to be the unit matrix This version uses inline code if H has order < 11. Arguments ========= SIDE (input) CHARACTER*1 = 'L': form H * C = 'R': form C * H M (input) INTEGER The number of rows of the matrix C. N (input) INTEGER The number of columns of the matrix C. V (input) DOUBLE PRECISION array, dimension (M) if SIDE = 'L' or (N) if SIDE = 'R' The vector v in the representation of H. TAU (input) DOUBLE PRECISION The value tau in the representation of H. C (input/output) DOUBLE PRECISION array, dimension (LDC,N) On entry, the m by n matrix C. On exit, C is overwritten by the matrix H * C if SIDE = 'L', or C * H if SIDE = 'R'. LDC (input) INTEGER The leading dimension of the array C. LDA >= (1,M). WORK (workspace) DOUBLE PRECISION array, dimension (N) if SIDE = 'L' or (M) if SIDE = 'R' WORK is not referenced if H has order < 11. ===================================================================== */ /* Parameter adjustments */ --v; c_dim1 = *ldc; c_offset = 1 + c_dim1 * 1; c__ -= c_offset; --work; /* Function Body */ if (*tau == 0.) { return 0; } if (lsame_(side, "L")) { /* Form H * C, where H has order m. 
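    (Illustrative aside, added text: DLARFX unrolls the rank-1 update for
    orders m <= 10; for each column j it forms sum = v'*C(:,j) once and then
    subtracts tau*v(i)*sum from each C(i,j), which is what the special-case
    blocks below spell out. Orders above 10 skip the switch and use the
    generic DGEMV and DGER path.)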
*/ switch (*m) { case 1: goto L10; case 2: goto L30; case 3: goto L50; case 4: goto L70; case 5: goto L90; case 6: goto L110; case 7: goto L130; case 8: goto L150; case 9: goto L170; case 10: goto L190; } /* Code for general M w := C'*v */ dgemv_("Transpose", m, n, &c_b15, &c__[c_offset], ldc, &v[1], &c__1, & c_b29, &work[1], &c__1); /* C := C - tau * v * w' */ d__1 = -(*tau); dger_(m, n, &d__1, &v[1], &c__1, &work[1], &c__1, &c__[c_offset], ldc) ; goto L410; L10: /* Special code for 1 x 1 Householder */ t1 = 1. - *tau * v[1] * v[1]; i__1 = *n; for (j = 1; j <= i__1; ++j) { c__[j * c_dim1 + 1] = t1 * c__[j * c_dim1 + 1]; /* L20: */ } goto L410; L30: /* Special code for 2 x 2 Householder */ v1 = v[1]; t1 = *tau * v1; v2 = v[2]; t2 = *tau * v2; i__1 = *n; for (j = 1; j <= i__1; ++j) { sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2]; c__[j * c_dim1 + 1] -= sum * t1; c__[j * c_dim1 + 2] -= sum * t2; /* L40: */ } goto L410; L50: /* Special code for 3 x 3 Householder */ v1 = v[1]; t1 = *tau * v1; v2 = v[2]; t2 = *tau * v2; v3 = v[3]; t3 = *tau * v3; i__1 = *n; for (j = 1; j <= i__1; ++j) { sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 * c__[j * c_dim1 + 3]; c__[j * c_dim1 + 1] -= sum * t1; c__[j * c_dim1 + 2] -= sum * t2; c__[j * c_dim1 + 3] -= sum * t3; /* L60: */ } goto L410; L70: /* Special code for 4 x 4 Householder */ v1 = v[1]; t1 = *tau * v1; v2 = v[2]; t2 = *tau * v2; v3 = v[3]; t3 = *tau * v3; v4 = v[4]; t4 = *tau * v4; i__1 = *n; for (j = 1; j <= i__1; ++j) { sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 * c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4]; c__[j * c_dim1 + 1] -= sum * t1; c__[j * c_dim1 + 2] -= sum * t2; c__[j * c_dim1 + 3] -= sum * t3; c__[j * c_dim1 + 4] -= sum * t4; /* L80: */ } goto L410; L90: /* Special code for 5 x 5 Householder */ v1 = v[1]; t1 = *tau * v1; v2 = v[2]; t2 = *tau * v2; v3 = v[3]; t3 = *tau * v3; v4 = v[4]; t4 = *tau * v4; v5 = v[5]; t5 = *tau * v5; i__1 = *n; for (j = 1; j <= i__1; ++j) { sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 * c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[ j * c_dim1 + 5]; c__[j * c_dim1 + 1] -= sum * t1; c__[j * c_dim1 + 2] -= sum * t2; c__[j * c_dim1 + 3] -= sum * t3; c__[j * c_dim1 + 4] -= sum * t4; c__[j * c_dim1 + 5] -= sum * t5; /* L100: */ } goto L410; L110: /* Special code for 6 x 6 Householder */ v1 = v[1]; t1 = *tau * v1; v2 = v[2]; t2 = *tau * v2; v3 = v[3]; t3 = *tau * v3; v4 = v[4]; t4 = *tau * v4; v5 = v[5]; t5 = *tau * v5; v6 = v[6]; t6 = *tau * v6; i__1 = *n; for (j = 1; j <= i__1; ++j) { sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 * c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[ j * c_dim1 + 5] + v6 * c__[j * c_dim1 + 6]; c__[j * c_dim1 + 1] -= sum * t1; c__[j * c_dim1 + 2] -= sum * t2; c__[j * c_dim1 + 3] -= sum * t3; c__[j * c_dim1 + 4] -= sum * t4; c__[j * c_dim1 + 5] -= sum * t5; c__[j * c_dim1 + 6] -= sum * t6; /* L120: */ } goto L410; L130: /* Special code for 7 x 7 Householder */ v1 = v[1]; t1 = *tau * v1; v2 = v[2]; t2 = *tau * v2; v3 = v[3]; t3 = *tau * v3; v4 = v[4]; t4 = *tau * v4; v5 = v[5]; t5 = *tau * v5; v6 = v[6]; t6 = *tau * v6; v7 = v[7]; t7 = *tau * v7; i__1 = *n; for (j = 1; j <= i__1; ++j) { sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 * c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[ j * c_dim1 + 5] + v6 * c__[j * c_dim1 + 6] + v7 * c__[j * c_dim1 + 7]; c__[j * c_dim1 + 1] -= sum * t1; c__[j * c_dim1 + 2] -= sum * t2; c__[j * c_dim1 + 3] -= sum * t3; c__[j 
* c_dim1 + 4] -= sum * t4; c__[j * c_dim1 + 5] -= sum * t5; c__[j * c_dim1 + 6] -= sum * t6; c__[j * c_dim1 + 7] -= sum * t7; /* L140: */ } goto L410; L150: /* Special code for 8 x 8 Householder */ v1 = v[1]; t1 = *tau * v1; v2 = v[2]; t2 = *tau * v2; v3 = v[3]; t3 = *tau * v3; v4 = v[4]; t4 = *tau * v4; v5 = v[5]; t5 = *tau * v5; v6 = v[6]; t6 = *tau * v6; v7 = v[7]; t7 = *tau * v7; v8 = v[8]; t8 = *tau * v8; i__1 = *n; for (j = 1; j <= i__1; ++j) { sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 * c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[ j * c_dim1 + 5] + v6 * c__[j * c_dim1 + 6] + v7 * c__[j * c_dim1 + 7] + v8 * c__[j * c_dim1 + 8]; c__[j * c_dim1 + 1] -= sum * t1; c__[j * c_dim1 + 2] -= sum * t2; c__[j * c_dim1 + 3] -= sum * t3; c__[j * c_dim1 + 4] -= sum * t4; c__[j * c_dim1 + 5] -= sum * t5; c__[j * c_dim1 + 6] -= sum * t6; c__[j * c_dim1 + 7] -= sum * t7; c__[j * c_dim1 + 8] -= sum * t8; /* L160: */ } goto L410; L170: /* Special code for 9 x 9 Householder */ v1 = v[1]; t1 = *tau * v1; v2 = v[2]; t2 = *tau * v2; v3 = v[3]; t3 = *tau * v3; v4 = v[4]; t4 = *tau * v4; v5 = v[5]; t5 = *tau * v5; v6 = v[6]; t6 = *tau * v6; v7 = v[7]; t7 = *tau * v7; v8 = v[8]; t8 = *tau * v8; v9 = v[9]; t9 = *tau * v9; i__1 = *n; for (j = 1; j <= i__1; ++j) { sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 * c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[ j * c_dim1 + 5] + v6 * c__[j * c_dim1 + 6] + v7 * c__[j * c_dim1 + 7] + v8 * c__[j * c_dim1 + 8] + v9 * c__[j * c_dim1 + 9]; c__[j * c_dim1 + 1] -= sum * t1; c__[j * c_dim1 + 2] -= sum * t2; c__[j * c_dim1 + 3] -= sum * t3; c__[j * c_dim1 + 4] -= sum * t4; c__[j * c_dim1 + 5] -= sum * t5; c__[j * c_dim1 + 6] -= sum * t6; c__[j * c_dim1 + 7] -= sum * t7; c__[j * c_dim1 + 8] -= sum * t8; c__[j * c_dim1 + 9] -= sum * t9; /* L180: */ } goto L410; L190: /* Special code for 10 x 10 Householder */ v1 = v[1]; t1 = *tau * v1; v2 = v[2]; t2 = *tau * v2; v3 = v[3]; t3 = *tau * v3; v4 = v[4]; t4 = *tau * v4; v5 = v[5]; t5 = *tau * v5; v6 = v[6]; t6 = *tau * v6; v7 = v[7]; t7 = *tau * v7; v8 = v[8]; t8 = *tau * v8; v9 = v[9]; t9 = *tau * v9; v10 = v[10]; t10 = *tau * v10; i__1 = *n; for (j = 1; j <= i__1; ++j) { sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 * c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[ j * c_dim1 + 5] + v6 * c__[j * c_dim1 + 6] + v7 * c__[j * c_dim1 + 7] + v8 * c__[j * c_dim1 + 8] + v9 * c__[j * c_dim1 + 9] + v10 * c__[j * c_dim1 + 10]; c__[j * c_dim1 + 1] -= sum * t1; c__[j * c_dim1 + 2] -= sum * t2; c__[j * c_dim1 + 3] -= sum * t3; c__[j * c_dim1 + 4] -= sum * t4; c__[j * c_dim1 + 5] -= sum * t5; c__[j * c_dim1 + 6] -= sum * t6; c__[j * c_dim1 + 7] -= sum * t7; c__[j * c_dim1 + 8] -= sum * t8; c__[j * c_dim1 + 9] -= sum * t9; c__[j * c_dim1 + 10] -= sum * t10; /* L200: */ } goto L410; } else { /* Form C * H, where H has order n. */ switch (*n) { case 1: goto L210; case 2: goto L230; case 3: goto L250; case 4: goto L270; case 5: goto L290; case 6: goto L310; case 7: goto L330; case 8: goto L350; case 9: goto L370; case 10: goto L390; } /* Code for general N w := C * v */ dgemv_("No transpose", m, n, &c_b15, &c__[c_offset], ldc, &v[1], & c__1, &c_b29, &work[1], &c__1); /* C := C - tau * w * v' */ d__1 = -(*tau); dger_(m, n, &d__1, &work[1], &c__1, &v[1], &c__1, &c__[c_offset], ldc) ; goto L410; L210: /* Special code for 1 x 1 Householder */ t1 = 1. 
- *tau * v[1] * v[1]; i__1 = *m; for (j = 1; j <= i__1; ++j) { c__[j + c_dim1] = t1 * c__[j + c_dim1]; /* L220: */ } goto L410; L230: /* Special code for 2 x 2 Householder */ v1 = v[1]; t1 = *tau * v1; v2 = v[2]; t2 = *tau * v2; i__1 = *m; for (j = 1; j <= i__1; ++j) { sum = v1 * c__[j + c_dim1] + v2 * c__[j + (c_dim1 << 1)]; c__[j + c_dim1] -= sum * t1; c__[j + (c_dim1 << 1)] -= sum * t2; /* L240: */ } goto L410; L250: /* Special code for 3 x 3 Householder */ v1 = v[1]; t1 = *tau * v1; v2 = v[2]; t2 = *tau * v2; v3 = v[3]; t3 = *tau * v3; i__1 = *m; for (j = 1; j <= i__1; ++j) { sum = v1 * c__[j + c_dim1] + v2 * c__[j + (c_dim1 << 1)] + v3 * c__[j + c_dim1 * 3]; c__[j + c_dim1] -= sum * t1; c__[j + (c_dim1 << 1)] -= sum * t2; c__[j + c_dim1 * 3] -= sum * t3; /* L260: */ } goto L410; L270: /* Special code for 4 x 4 Householder */ v1 = v[1]; t1 = *tau * v1; v2 = v[2]; t2 = *tau * v2; v3 = v[3]; t3 = *tau * v3; v4 = v[4]; t4 = *tau * v4; i__1 = *m; for (j = 1; j <= i__1; ++j) { sum = v1 * c__[j + c_dim1] + v2 * c__[j + (c_dim1 << 1)] + v3 * c__[j + c_dim1 * 3] + v4 * c__[j + (c_dim1 << 2)]; c__[j + c_dim1] -= sum * t1; c__[j + (c_dim1 << 1)] -= sum * t2; c__[j + c_dim1 * 3] -= sum * t3; c__[j + (c_dim1 << 2)] -= sum * t4; /* L280: */ } goto L410; L290: /* Special code for 5 x 5 Householder */ v1 = v[1]; t1 = *tau * v1; v2 = v[2]; t2 = *tau * v2; v3 = v[3]; t3 = *tau * v3; v4 = v[4]; t4 = *tau * v4; v5 = v[5]; t5 = *tau * v5; i__1 = *m; for (j = 1; j <= i__1; ++j) { sum = v1 * c__[j + c_dim1] + v2 * c__[j + (c_dim1 << 1)] + v3 * c__[j + c_dim1 * 3] + v4 * c__[j + (c_dim1 << 2)] + v5 * c__[j + c_dim1 * 5]; c__[j + c_dim1] -= sum * t1; c__[j + (c_dim1 << 1)] -= sum * t2; c__[j + c_dim1 * 3] -= sum * t3; c__[j + (c_dim1 << 2)] -= sum * t4; c__[j + c_dim1 * 5] -= sum * t5; /* L300: */ } goto L410; L310: /* Special code for 6 x 6 Householder */ v1 = v[1]; t1 = *tau * v1; v2 = v[2]; t2 = *tau * v2; v3 = v[3]; t3 = *tau * v3; v4 = v[4]; t4 = *tau * v4; v5 = v[5]; t5 = *tau * v5; v6 = v[6]; t6 = *tau * v6; i__1 = *m; for (j = 1; j <= i__1; ++j) { sum = v1 * c__[j + c_dim1] + v2 * c__[j + (c_dim1 << 1)] + v3 * c__[j + c_dim1 * 3] + v4 * c__[j + (c_dim1 << 2)] + v5 * c__[j + c_dim1 * 5] + v6 * c__[j + c_dim1 * 6]; c__[j + c_dim1] -= sum * t1; c__[j + (c_dim1 << 1)] -= sum * t2; c__[j + c_dim1 * 3] -= sum * t3; c__[j + (c_dim1 << 2)] -= sum * t4; c__[j + c_dim1 * 5] -= sum * t5; c__[j + c_dim1 * 6] -= sum * t6; /* L320: */ } goto L410; L330: /* Special code for 7 x 7 Householder */ v1 = v[1]; t1 = *tau * v1; v2 = v[2]; t2 = *tau * v2; v3 = v[3]; t3 = *tau * v3; v4 = v[4]; t4 = *tau * v4; v5 = v[5]; t5 = *tau * v5; v6 = v[6]; t6 = *tau * v6; v7 = v[7]; t7 = *tau * v7; i__1 = *m; for (j = 1; j <= i__1; ++j) { sum = v1 * c__[j + c_dim1] + v2 * c__[j + (c_dim1 << 1)] + v3 * c__[j + c_dim1 * 3] + v4 * c__[j + (c_dim1 << 2)] + v5 * c__[j + c_dim1 * 5] + v6 * c__[j + c_dim1 * 6] + v7 * c__[ j + c_dim1 * 7]; c__[j + c_dim1] -= sum * t1; c__[j + (c_dim1 << 1)] -= sum * t2; c__[j + c_dim1 * 3] -= sum * t3; c__[j + (c_dim1 << 2)] -= sum * t4; c__[j + c_dim1 * 5] -= sum * t5; c__[j + c_dim1 * 6] -= sum * t6; c__[j + c_dim1 * 7] -= sum * t7; /* L340: */ } goto L410; L350: /* Special code for 8 x 8 Householder */ v1 = v[1]; t1 = *tau * v1; v2 = v[2]; t2 = *tau * v2; v3 = v[3]; t3 = *tau * v3; v4 = v[4]; t4 = *tau * v4; v5 = v[5]; t5 = *tau * v5; v6 = v[6]; t6 = *tau * v6; v7 = v[7]; t7 = *tau * v7; v8 = v[8]; t8 = *tau * v8; i__1 = *m; for (j = 1; j <= i__1; ++j) { sum = v1 * c__[j + c_dim1] + v2 * c__[j + (c_dim1 << 
1)] + v3 * c__[j + c_dim1 * 3] + v4 * c__[j + (c_dim1 << 2)] + v5 * c__[j + c_dim1 * 5] + v6 * c__[j + c_dim1 * 6] + v7 * c__[ j + c_dim1 * 7] + v8 * c__[j + (c_dim1 << 3)]; c__[j + c_dim1] -= sum * t1; c__[j + (c_dim1 << 1)] -= sum * t2; c__[j + c_dim1 * 3] -= sum * t3; c__[j + (c_dim1 << 2)] -= sum * t4; c__[j + c_dim1 * 5] -= sum * t5; c__[j + c_dim1 * 6] -= sum * t6; c__[j + c_dim1 * 7] -= sum * t7; c__[j + (c_dim1 << 3)] -= sum * t8; /* L360: */ } goto L410; L370: /* Special code for 9 x 9 Householder */ v1 = v[1]; t1 = *tau * v1; v2 = v[2]; t2 = *tau * v2; v3 = v[3]; t3 = *tau * v3; v4 = v[4]; t4 = *tau * v4; v5 = v[5]; t5 = *tau * v5; v6 = v[6]; t6 = *tau * v6; v7 = v[7]; t7 = *tau * v7; v8 = v[8]; t8 = *tau * v8; v9 = v[9]; t9 = *tau * v9; i__1 = *m; for (j = 1; j <= i__1; ++j) { sum = v1 * c__[j + c_dim1] + v2 * c__[j + (c_dim1 << 1)] + v3 * c__[j + c_dim1 * 3] + v4 * c__[j + (c_dim1 << 2)] + v5 * c__[j + c_dim1 * 5] + v6 * c__[j + c_dim1 * 6] + v7 * c__[ j + c_dim1 * 7] + v8 * c__[j + (c_dim1 << 3)] + v9 * c__[ j + c_dim1 * 9]; c__[j + c_dim1] -= sum * t1; c__[j + (c_dim1 << 1)] -= sum * t2; c__[j + c_dim1 * 3] -= sum * t3; c__[j + (c_dim1 << 2)] -= sum * t4; c__[j + c_dim1 * 5] -= sum * t5; c__[j + c_dim1 * 6] -= sum * t6; c__[j + c_dim1 * 7] -= sum * t7; c__[j + (c_dim1 << 3)] -= sum * t8; c__[j + c_dim1 * 9] -= sum * t9; /* L380: */ } goto L410; L390: /* Special code for 10 x 10 Householder */ v1 = v[1]; t1 = *tau * v1; v2 = v[2]; t2 = *tau * v2; v3 = v[3]; t3 = *tau * v3; v4 = v[4]; t4 = *tau * v4; v5 = v[5]; t5 = *tau * v5; v6 = v[6]; t6 = *tau * v6; v7 = v[7]; t7 = *tau * v7; v8 = v[8]; t8 = *tau * v8; v9 = v[9]; t9 = *tau * v9; v10 = v[10]; t10 = *tau * v10; i__1 = *m; for (j = 1; j <= i__1; ++j) { sum = v1 * c__[j + c_dim1] + v2 * c__[j + (c_dim1 << 1)] + v3 * c__[j + c_dim1 * 3] + v4 * c__[j + (c_dim1 << 2)] + v5 * c__[j + c_dim1 * 5] + v6 * c__[j + c_dim1 * 6] + v7 * c__[ j + c_dim1 * 7] + v8 * c__[j + (c_dim1 << 3)] + v9 * c__[ j + c_dim1 * 9] + v10 * c__[j + c_dim1 * 10]; c__[j + c_dim1] -= sum * t1; c__[j + (c_dim1 << 1)] -= sum * t2; c__[j + c_dim1 * 3] -= sum * t3; c__[j + (c_dim1 << 2)] -= sum * t4; c__[j + c_dim1 * 5] -= sum * t5; c__[j + c_dim1 * 6] -= sum * t6; c__[j + c_dim1 * 7] -= sum * t7; c__[j + (c_dim1 << 3)] -= sum * t8; c__[j + c_dim1 * 9] -= sum * t9; c__[j + c_dim1 * 10] -= sum * t10; /* L400: */ } goto L410; } L410: return 0; /* End of DLARFX */ } /* dlarfx_ */ /* Subroutine */ int dlartg_(doublereal *f, doublereal *g, doublereal *cs, doublereal *sn, doublereal *r__) { /* System generated locals */ integer i__1; doublereal d__1, d__2; /* Builtin functions */ double log(doublereal), pow_di(doublereal *, integer *), sqrt(doublereal); /* Local variables */ static integer i__; static doublereal scale, f1; static integer count; static doublereal g1, safmn2, safmx2; static doublereal safmin, eps; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLARTG generate a plane rotation so that [ CS SN ] . [ F ] = [ R ] where CS**2 + SN**2 = 1. [ -SN CS ] [ G ] [ 0 ] This is a slower, more accurate version of the BLAS1 routine DROTG, with the following other differences: F and G are unchanged on return. If G=0, then CS=1 and SN=0. If F=0 and (G .ne. 0), then CS=0 and SN=1 without doing any floating point operations (saves work in DBDSQR when there are zeros on the diagonal). If F exceeds G in magnitude, CS will be positive. 
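    Illustrative aside (added text, not part of the original LAPACK comment):
    for F = 3 and G = 4 the routine returns CS = 0.6, SN = 0.8 and R = 5,
    since the rotation maps (3,4)' to (5,0)'. The SAFMN2 and SAFMX2
    rescaling loops below are only entered when max(|F|,|G|) is close to
    the overflow or underflow thresholds.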
Arguments ========= F (input) DOUBLE PRECISION The first component of vector to be rotated. G (input) DOUBLE PRECISION The second component of vector to be rotated. CS (output) DOUBLE PRECISION The cosine of the rotation. SN (output) DOUBLE PRECISION The sine of the rotation. R (output) DOUBLE PRECISION The nonzero component of the rotated vector. This version has a few statements commented out for thread safety (machine parameters are computed on each entry). 10 feb 03, SJH. ===================================================================== LOGICAL FIRST SAVE FIRST, SAFMX2, SAFMIN, SAFMN2 DATA FIRST / .TRUE. / IF( FIRST ) THEN */ safmin = SAFEMINIMUM; eps = EPSILON; d__1 = BASE; i__1 = (integer) (log(safmin / eps) / log(BASE) / 2.); safmn2 = pow_di(&d__1, &i__1); safmx2 = 1. / safmn2; /* FIRST = .FALSE. END IF */ if (*g == 0.) { *cs = 1.; *sn = 0.; *r__ = *f; } else if (*f == 0.) { *cs = 0.; *sn = 1.; *r__ = *g; } else { f1 = *f; g1 = *g; /* Computing MAX */ d__1 = abs(f1), d__2 = abs(g1); scale = max(d__1,d__2); if (scale >= safmx2) { count = 0; L10: ++count; f1 *= safmn2; g1 *= safmn2; /* Computing MAX */ d__1 = abs(f1), d__2 = abs(g1); scale = max(d__1,d__2); if (scale >= safmx2) { goto L10; } /* Computing 2nd power */ d__1 = f1; /* Computing 2nd power */ d__2 = g1; *r__ = sqrt(d__1 * d__1 + d__2 * d__2); *cs = f1 / *r__; *sn = g1 / *r__; i__1 = count; for (i__ = 1; i__ <= i__1; ++i__) { *r__ *= safmx2; /* L20: */ } } else if (scale <= safmn2) { count = 0; L30: ++count; f1 *= safmx2; g1 *= safmx2; /* Computing MAX */ d__1 = abs(f1), d__2 = abs(g1); scale = max(d__1,d__2); if (scale <= safmn2) { goto L30; } /* Computing 2nd power */ d__1 = f1; /* Computing 2nd power */ d__2 = g1; *r__ = sqrt(d__1 * d__1 + d__2 * d__2); *cs = f1 / *r__; *sn = g1 / *r__; i__1 = count; for (i__ = 1; i__ <= i__1; ++i__) { *r__ *= safmn2; /* L40: */ } } else { /* Computing 2nd power */ d__1 = f1; /* Computing 2nd power */ d__2 = g1; *r__ = sqrt(d__1 * d__1 + d__2 * d__2); *cs = f1 / *r__; *sn = g1 / *r__; } if (abs(*f) > abs(*g) && *cs < 0.) { *cs = -(*cs); *sn = -(*sn); *r__ = -(*r__); } } return 0; /* End of DLARTG */ } /* dlartg_ */ /* Subroutine */ int dlas2_(doublereal *f, doublereal *g, doublereal *h__, doublereal *ssmin, doublereal *ssmax) { /* System generated locals */ doublereal d__1, d__2; /* Builtin functions */ double sqrt(doublereal); /* Local variables */ static doublereal fhmn, fhmx, c__, fa, ga, ha, as, at, au; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLAS2 computes the singular values of the 2-by-2 matrix [ F G ] [ 0 H ]. On return, SSMIN is the smaller singular value and SSMAX is the larger singular value. Arguments ========= F (input) DOUBLE PRECISION The (1,1) element of the 2-by-2 matrix. G (input) DOUBLE PRECISION The (1,2) element of the 2-by-2 matrix. H (input) DOUBLE PRECISION The (2,2) element of the 2-by-2 matrix. SSMIN (output) DOUBLE PRECISION The smaller singular value. SSMAX (output) DOUBLE PRECISION The larger singular value. Further Details =============== Barring over/underflow, all output quantities are correct to within a few units in the last place (ulps), even in the absence of a guard digit in addition/subtraction. In IEEE arithmetic, the code works correctly if one matrix element is infinite. Overflow will not occur unless the largest singular value itself overflows, or is within a few ulps of overflow. 
(On machines with partial overflow, like the Cray, overflow may occur if the largest singular value is within a factor of 2 of overflow.) Underflow is harmless if underflow is gradual. Otherwise, results may correspond to a matrix modified by perturbations of size near the underflow threshold. ==================================================================== */ fa = abs(*f); ga = abs(*g); ha = abs(*h__); fhmn = min(fa,ha); fhmx = max(fa,ha); if (fhmn == 0.) { *ssmin = 0.; if (fhmx == 0.) { *ssmax = ga; } else { /* Computing 2nd power */ d__1 = min(fhmx,ga) / max(fhmx,ga); *ssmax = max(fhmx,ga) * sqrt(d__1 * d__1 + 1.); } } else { if (ga < fhmx) { as = fhmn / fhmx + 1.; at = (fhmx - fhmn) / fhmx; /* Computing 2nd power */ d__1 = ga / fhmx; au = d__1 * d__1; c__ = 2. / (sqrt(as * as + au) + sqrt(at * at + au)); *ssmin = fhmn * c__; *ssmax = fhmx / c__; } else { au = fhmx / ga; if (au == 0.) { /* Avoid possible harmful underflow if exponent range asymmetric (true SSMIN may not underflow even if AU underflows) */ *ssmin = fhmn * fhmx / ga; *ssmax = ga; } else { as = fhmn / fhmx + 1.; at = (fhmx - fhmn) / fhmx; /* Computing 2nd power */ d__1 = as * au; /* Computing 2nd power */ d__2 = at * au; c__ = 1. / (sqrt(d__1 * d__1 + 1.) + sqrt(d__2 * d__2 + 1.)); *ssmin = fhmn * c__ * au; *ssmin += *ssmin; *ssmax = ga / (c__ + c__); } } } return 0; /* End of DLAS2 */ } /* dlas2_ */ /* Subroutine */ int dlascl_(char *type__, integer *kl, integer *ku, doublereal *cfrom, doublereal *cto, integer *m, integer *n, doublereal *a, integer *lda, integer *info) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5; /* Local variables */ static logical done; static doublereal ctoc; static integer i__, j; extern logical lsame_(char *, char *); static integer itype, k1, k2, k3, k4; static doublereal cfrom1; static doublereal cfromc; extern /* Subroutine */ int xerbla_(char *, integer *); static doublereal bignum, smlnum, mul, cto1; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLASCL multiplies the M by N real matrix A by the real scalar CTO/CFROM. This is done without over/underflow as long as the final result CTO*A(I,J)/CFROM does not over/underflow. TYPE specifies that A may be full, upper triangular, lower triangular, upper Hessenberg, or banded. Arguments ========= TYPE (input) CHARACTER*1 TYPE indices the storage type of the input matrix. = 'G': A is a full matrix. = 'L': A is a lower triangular matrix. = 'U': A is an upper triangular matrix. = 'H': A is an upper Hessenberg matrix. = 'B': A is a symmetric band matrix with lower bandwidth KL and upper bandwidth KU and with the only the lower half stored. = 'Q': A is a symmetric band matrix with lower bandwidth KL and upper bandwidth KU and with the only the upper half stored. = 'Z': A is a band matrix with lower bandwidth KL and upper bandwidth KU. KL (input) INTEGER The lower bandwidth of A. Referenced only if TYPE = 'B', 'Q' or 'Z'. KU (input) INTEGER The upper bandwidth of A. Referenced only if TYPE = 'B', 'Q' or 'Z'. CFROM (input) DOUBLE PRECISION CTO (input) DOUBLE PRECISION The matrix A is multiplied by CTO/CFROM. A(I,J) is computed without over/underflow if the final result CTO*A(I,J)/CFROM can be represented without over/underflow. CFROM must be nonzero. M (input) INTEGER The number of rows of the matrix A. M >= 0. N (input) INTEGER The number of columns of the matrix A. N >= 0. 
A (input/output) DOUBLE PRECISION array, dimension (LDA,N) The matrix to be multiplied by CTO/CFROM. See TYPE for the storage type. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,M). INFO (output) INTEGER 0 - successful exit <0 - if INFO = -i, the i-th argument had an illegal value. ===================================================================== Test the input arguments */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; /* Function Body */ *info = 0; if (lsame_(type__, "G")) { itype = 0; } else if (lsame_(type__, "L")) { itype = 1; } else if (lsame_(type__, "U")) { itype = 2; } else if (lsame_(type__, "H")) { itype = 3; } else if (lsame_(type__, "B")) { itype = 4; } else if (lsame_(type__, "Q")) { itype = 5; } else if (lsame_(type__, "Z")) { itype = 6; } else { itype = -1; } if (itype == -1) { *info = -1; } else if (*cfrom == 0.) { *info = -4; } else if (*m < 0) { *info = -6; } else if (*n < 0 || itype == 4 && *n != *m || itype == 5 && *n != *m) { *info = -7; } else if (itype <= 3 && *lda < max(1,*m)) { *info = -9; } else if (itype >= 4) { /* Computing MAX */ i__1 = *m - 1; if (*kl < 0 || *kl > max(i__1,0)) { *info = -2; } else /* if(complicated condition) */ { /* Computing MAX */ i__1 = *n - 1; if (*ku < 0 || *ku > max(i__1,0) || (itype == 4 || itype == 5) && *kl != *ku) { *info = -3; } else if (itype == 4 && *lda < *kl + 1 || itype == 5 && *lda < * ku + 1 || itype == 6 && *lda < (*kl << 1) + *ku + 1) { *info = -9; } } } if (*info != 0) { i__1 = -(*info); xerbla_("DLASCL", &i__1); return 0; } /* Quick return if possible */ if (*n == 0 || *m == 0) { return 0; } /* Get machine parameters */ smlnum = SAFEMINIMUM; bignum = 1. / smlnum; cfromc = *cfrom; ctoc = *cto; L10: cfrom1 = cfromc * smlnum; cto1 = ctoc / bignum; if (abs(cfrom1) > abs(ctoc) && ctoc != 0.) 
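/* Illustrative note (added comment, not part of the original f2c output):
   rather than multiplying by CTO/CFROM in one step, the factor is applied
   in safe increments of SMLNUM or BIGNUM; DONE stays FALSE and control
   returns to L10 until the remaining ratio ctoc/cfromc can be applied
   without overflow or underflow. */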
{ mul = smlnum; done = FALSE_; cfromc = cfrom1; } else if (abs(cto1) > abs(cfromc)) { mul = bignum; done = FALSE_; ctoc = cto1; } else { mul = ctoc / cfromc; done = TRUE_; } if (itype == 0) { /* Full matrix */ i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { a[i__ + j * a_dim1] *= mul; /* L20: */ } /* L30: */ } } else if (itype == 1) { /* Lower triangular matrix */ i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = *m; for (i__ = j; i__ <= i__2; ++i__) { a[i__ + j * a_dim1] *= mul; /* L40: */ } /* L50: */ } } else if (itype == 2) { /* Upper triangular matrix */ i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = min(j,*m); for (i__ = 1; i__ <= i__2; ++i__) { a[i__ + j * a_dim1] *= mul; /* L60: */ } /* L70: */ } } else if (itype == 3) { /* Upper Hessenberg matrix */ i__1 = *n; for (j = 1; j <= i__1; ++j) { /* Computing MIN */ i__3 = j + 1; i__2 = min(i__3,*m); for (i__ = 1; i__ <= i__2; ++i__) { a[i__ + j * a_dim1] *= mul; /* L80: */ } /* L90: */ } } else if (itype == 4) { /* Lower half of a symmetric band matrix */ k3 = *kl + 1; k4 = *n + 1; i__1 = *n; for (j = 1; j <= i__1; ++j) { /* Computing MIN */ i__3 = k3, i__4 = k4 - j; i__2 = min(i__3,i__4); for (i__ = 1; i__ <= i__2; ++i__) { a[i__ + j * a_dim1] *= mul; /* L100: */ } /* L110: */ } } else if (itype == 5) { /* Upper half of a symmetric band matrix */ k1 = *ku + 2; k3 = *ku + 1; i__1 = *n; for (j = 1; j <= i__1; ++j) { /* Computing MAX */ i__2 = k1 - j; i__3 = k3; for (i__ = max(i__2,1); i__ <= i__3; ++i__) { a[i__ + j * a_dim1] *= mul; /* L120: */ } /* L130: */ } } else if (itype == 6) { /* Band matrix */ k1 = *kl + *ku + 2; k2 = *kl + 1; k3 = (*kl << 1) + *ku + 1; k4 = *kl + *ku + 1 + *m; i__1 = *n; for (j = 1; j <= i__1; ++j) { /* Computing MAX */ i__3 = k1 - j; /* Computing MIN */ i__4 = k3, i__5 = k4 - j; i__2 = min(i__4,i__5); for (i__ = max(i__3,k2); i__ <= i__2; ++i__) { a[i__ + j * a_dim1] *= mul; /* L140: */ } /* L150: */ } } if (! done) { goto L10; } return 0; /* End of DLASCL */ } /* dlascl_ */ /* Subroutine */ int dlasd0_(integer *n, integer *sqre, doublereal *d__, doublereal *e, doublereal *u, integer *ldu, doublereal *vt, integer * ldvt, integer *smlsiz, integer *iwork, doublereal *work, integer * info) { /* System generated locals */ integer u_dim1, u_offset, vt_dim1, vt_offset, i__1, i__2; /* Builtin functions */ integer pow_ii(integer *, integer *); /* Local variables */ static doublereal beta; static integer idxq, nlvl, i__, j, m; static doublereal alpha; static integer inode, ndiml, idxqc, ndimr, itemp, sqrei, i1; extern /* Subroutine */ int dlasd1_(integer *, integer *, integer *, doublereal *, doublereal *, doublereal *, doublereal *, integer *, doublereal *, integer *, integer *, integer *, doublereal *, integer *); static integer ic, lf, nd, ll, nl, nr; extern /* Subroutine */ int dlasdq_(char *, integer *, integer *, integer *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *), dlasdt_(integer *, integer *, integer *, integer *, integer *, integer *, integer *), xerbla_( char *, integer *); static integer im1, ncc, nlf, nrf, iwk, lvl, ndb1, nlp1, nrp1; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= Using a divide and conquer approach, DLASD0 computes the singular value decomposition (SVD) of a real upper bidiagonal N-by-M matrix B with diagonal D and offdiagonal E, where M = N + SQRE. 
The algorithm computes orthogonal matrices U and VT such that B = U * S * VT. The singular values S are overwritten on D. A related subroutine, DLASDA, computes only the singular values, and optionally, the singular vectors in compact form. Arguments ========= N (input) INTEGER On entry, the row dimension of the upper bidiagonal matrix. This is also the dimension of the main diagonal array D. SQRE (input) INTEGER Specifies the column dimension of the bidiagonal matrix. = 0: The bidiagonal matrix has column dimension M = N; = 1: The bidiagonal matrix has column dimension M = N+1; D (input/output) DOUBLE PRECISION array, dimension (N) On entry D contains the main diagonal of the bidiagonal matrix. On exit D, if INFO = 0, contains its singular values. E (input) DOUBLE PRECISION array, dimension (M-1) Contains the subdiagonal entries of the bidiagonal matrix. On exit, E has been destroyed. U (output) DOUBLE PRECISION array, dimension at least (LDQ, N) On exit, U contains the left singular vectors. LDU (input) INTEGER On entry, leading dimension of U. VT (output) DOUBLE PRECISION array, dimension at least (LDVT, M) On exit, VT' contains the right singular vectors. LDVT (input) INTEGER On entry, leading dimension of VT. SMLSIZ (input) INTEGER On entry, maximum size of the subproblems at the bottom of the computation tree. IWORK (workspace) INTEGER work array. Dimension must be at least (8 * N) WORK (workspace) DOUBLE PRECISION work array. Dimension must be at least (3 * M**2 + 2 * M) INFO (output) INTEGER = 0: successful exit. < 0: if INFO = -i, the i-th argument had an illegal value. > 0: if INFO = 1, an singular value did not converge Further Details =============== Based on contributions by Ming Gu and Huan Ren, Computer Science Division, University of California at Berkeley, USA ===================================================================== Test the input parameters. */ /* Parameter adjustments */ --d__; --e; u_dim1 = *ldu; u_offset = 1 + u_dim1 * 1; u -= u_offset; vt_dim1 = *ldvt; vt_offset = 1 + vt_dim1 * 1; vt -= vt_offset; --iwork; --work; /* Function Body */ *info = 0; if (*n < 0) { *info = -1; } else if (*sqre < 0 || *sqre > 1) { *info = -2; } m = *n + *sqre; if (*ldu < *n) { *info = -6; } else if (*ldvt < m) { *info = -8; } else if (*smlsiz < 3) { *info = -9; } if (*info != 0) { i__1 = -(*info); xerbla_("DLASD0", &i__1); return 0; } /* If the input matrix is too small, call DLASDQ to find the SVD. */ if (*n <= *smlsiz) { dlasdq_("U", sqre, n, &m, n, &c__0, &d__[1], &e[1], &vt[vt_offset], ldvt, &u[u_offset], ldu, &u[u_offset], ldu, &work[1], info); return 0; } /* Set up the computation tree. */ inode = 1; ndiml = inode + *n; ndimr = ndiml + *n; idxq = ndimr + *n; iwk = idxq + *n; dlasdt_(n, &nlvl, &nd, &iwork[inode], &iwork[ndiml], &iwork[ndimr], smlsiz); /* For the nodes on bottom level of the tree, solve their subproblems by DLASDQ. 
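    (Illustrative aside, added text: DLASDT above has split the bidiagonal
    matrix into a binary tree whose leaves have at most SMLSIZ rows; the
    loop that follows solves each leaf directly with DLASDQ, and the later
    bottom-up loop merges neighbouring solutions one tree level at a time
    with DLASD1.)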
*/ ndb1 = (nd + 1) / 2; ncc = 0; i__1 = nd; for (i__ = ndb1; i__ <= i__1; ++i__) { /* IC : center row of each node NL : number of rows of left subproblem NR : number of rows of right subproblem NLF: starting row of the left subproblem NRF: starting row of the right subproblem */ i1 = i__ - 1; ic = iwork[inode + i1]; nl = iwork[ndiml + i1]; nlp1 = nl + 1; nr = iwork[ndimr + i1]; nrp1 = nr + 1; nlf = ic - nl; nrf = ic + 1; sqrei = 1; dlasdq_("U", &sqrei, &nl, &nlp1, &nl, &ncc, &d__[nlf], &e[nlf], &vt[ nlf + nlf * vt_dim1], ldvt, &u[nlf + nlf * u_dim1], ldu, &u[ nlf + nlf * u_dim1], ldu, &work[1], info); if (*info != 0) { return 0; } itemp = idxq + nlf - 2; i__2 = nl; for (j = 1; j <= i__2; ++j) { iwork[itemp + j] = j; /* L10: */ } if (i__ == nd) { sqrei = *sqre; } else { sqrei = 1; } nrp1 = nr + sqrei; dlasdq_("U", &sqrei, &nr, &nrp1, &nr, &ncc, &d__[nrf], &e[nrf], &vt[ nrf + nrf * vt_dim1], ldvt, &u[nrf + nrf * u_dim1], ldu, &u[ nrf + nrf * u_dim1], ldu, &work[1], info); if (*info != 0) { return 0; } itemp = idxq + ic; i__2 = nr; for (j = 1; j <= i__2; ++j) { iwork[itemp + j - 1] = j; /* L20: */ } /* L30: */ } /* Now conquer each subproblem bottom-up. */ for (lvl = nlvl; lvl >= 1; --lvl) { /* Find the first node LF and last node LL on the current level LVL. */ if (lvl == 1) { lf = 1; ll = 1; } else { i__1 = lvl - 1; lf = pow_ii(&c__2, &i__1); ll = (lf << 1) - 1; } i__1 = ll; for (i__ = lf; i__ <= i__1; ++i__) { im1 = i__ - 1; ic = iwork[inode + im1]; nl = iwork[ndiml + im1]; nr = iwork[ndimr + im1]; nlf = ic - nl; if (*sqre == 0 && i__ == ll) { sqrei = *sqre; } else { sqrei = 1; } idxqc = idxq + nlf - 1; alpha = d__[ic]; beta = e[ic]; dlasd1_(&nl, &nr, &sqrei, &d__[nlf], &alpha, &beta, &u[nlf + nlf * u_dim1], ldu, &vt[nlf + nlf * vt_dim1], ldvt, &iwork[ idxqc], &iwork[iwk], &work[1], info); if (*info != 0) { return 0; } /* L40: */ } /* L50: */ } return 0; /* End of DLASD0 */ } /* dlasd0_ */ /* Subroutine */ int dlasd1_(integer *nl, integer *nr, integer *sqre, doublereal *d__, doublereal *alpha, doublereal *beta, doublereal *u, integer *ldu, doublereal *vt, integer *ldvt, integer *idxq, integer * iwork, doublereal *work, integer *info) { /* System generated locals */ integer u_dim1, u_offset, vt_dim1, vt_offset, i__1; doublereal d__1, d__2; /* Local variables */ static integer idxc, idxp, ldvt2, i__, k, m, n, n1, n2; extern /* Subroutine */ int dlasd2_(integer *, integer *, integer *, integer *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, integer *, integer *, integer *, integer *, integer *, integer *), dlasd3_( integer *, integer *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *, integer *, integer *, doublereal *, integer *); static integer iq; extern /* Subroutine */ int dlascl_(char *, integer *, integer *, doublereal *, doublereal *, integer *, integer *, doublereal *, integer *, integer *); static integer iz; extern /* Subroutine */ int dlamrg_(integer *, integer *, doublereal *, integer *, integer *, integer *); static integer isigma; extern /* Subroutine */ int xerbla_(char *, integer *); static doublereal orgnrm; static integer coltyp, iu2, ldq, idx, ldu2, ivt2; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. 
November 2006 Purpose ======= DLASD1 computes the SVD of an upper bidiagonal N-by-M matrix B, where N = NL + NR + 1 and M = N + SQRE. DLASD1 is called from DLASD0. A related subroutine DLASD7 handles the case in which the singular values (and the singular vectors in factored form) are desired. DLASD1 computes the SVD as follows: ( D1(in) 0 0 0 ) B = U(in) * ( Z1' a Z2' b ) * VT(in) ( 0 0 D2(in) 0 ) = U(out) * ( D(out) 0) * VT(out) where Z' = (Z1' a Z2' b) = u' VT', and u is a vector of dimension M with ALPHA and BETA in the NL+1 and NL+2 th entries and zeros elsewhere; and the entry b is empty if SQRE = 0. The left singular vectors of the original matrix are stored in U, and the transpose of the right singular vectors are stored in VT, and the singular values are in D. The algorithm consists of three stages: The first stage consists of deflating the size of the problem when there are multiple singular values or when there are zeros in the Z vector. For each such occurence the dimension of the secular equation problem is reduced by one. This stage is performed by the routine DLASD2. The second stage consists of calculating the updated singular values. This is done by finding the square roots of the roots of the secular equation via the routine DLASD4 (as called by DLASD3). This routine also calculates the singular vectors of the current problem. The final stage consists of computing the updated singular vectors directly using the updated singular values. The singular vectors for the current problem are multiplied with the singular vectors from the overall problem. Arguments ========= NL (input) INTEGER The row dimension of the upper block. NL >= 1. NR (input) INTEGER The row dimension of the lower block. NR >= 1. SQRE (input) INTEGER = 0: the lower block is an NR-by-NR square matrix. = 1: the lower block is an NR-by-(NR+1) rectangular matrix. The bidiagonal matrix has row dimension N = NL + NR + 1, and column dimension M = N + SQRE. D (input/output) DOUBLE PRECISION array, dimension (N = NL+NR+1). On entry D(1:NL,1:NL) contains the singular values of the upper block; and D(NL+2:N) contains the singular values of the lower block. On exit D(1:N) contains the singular values of the modified matrix. ALPHA (input/output) DOUBLE PRECISION Contains the diagonal element associated with the added row. BETA (input/output) DOUBLE PRECISION Contains the off-diagonal element associated with the added row. U (input/output) DOUBLE PRECISION array, dimension(LDU,N) On entry U(1:NL, 1:NL) contains the left singular vectors of the upper block; U(NL+2:N, NL+2:N) contains the left singular vectors of the lower block. On exit U contains the left singular vectors of the bidiagonal matrix. LDU (input) INTEGER The leading dimension of the array U. LDU >= max( 1, N ). VT (input/output) DOUBLE PRECISION array, dimension(LDVT,M) where M = N + SQRE. On entry VT(1:NL+1, 1:NL+1)' contains the right singular vectors of the upper block; VT(NL+2:M, NL+2:M)' contains the right singular vectors of the lower block. On exit VT' contains the right singular vectors of the bidiagonal matrix. LDVT (input) INTEGER The leading dimension of the array VT. LDVT >= max( 1, M ). IDXQ (output) INTEGER array, dimension(N) This contains the permutation which will reintegrate the subproblem just solved back into sorted order, i.e. D( IDXQ( I = 1, N ) ) will be in ascending order. IWORK (workspace) INTEGER array, dimension( 4 * N ) WORK (workspace) DOUBLE PRECISION array, dimension( 3*M**2 + 2*M ) INFO (output) INTEGER = 0: successful exit. 
< 0: if INFO = -i, the i-th argument had an illegal value. > 0: if INFO = 1, an singular value did not converge Further Details =============== Based on contributions by Ming Gu and Huan Ren, Computer Science Division, University of California at Berkeley, USA ===================================================================== Test the input parameters. */ /* Parameter adjustments */ --d__; u_dim1 = *ldu; u_offset = 1 + u_dim1 * 1; u -= u_offset; vt_dim1 = *ldvt; vt_offset = 1 + vt_dim1 * 1; vt -= vt_offset; --idxq; --iwork; --work; /* Function Body */ *info = 0; if (*nl < 1) { *info = -1; } else if (*nr < 1) { *info = -2; } else if (*sqre < 0 || *sqre > 1) { *info = -3; } if (*info != 0) { i__1 = -(*info); xerbla_("DLASD1", &i__1); return 0; } n = *nl + *nr + 1; m = n + *sqre; /* The following values are for bookkeeping purposes only. They are integer pointers which indicate the portion of the workspace used by a particular array in DLASD2 and DLASD3. */ ldu2 = n; ldvt2 = m; iz = 1; isigma = iz + m; iu2 = isigma + n; ivt2 = iu2 + ldu2 * n; iq = ivt2 + ldvt2 * m; idx = 1; idxc = idx + n; coltyp = idxc + n; idxp = coltyp + n; /* Scale. Computing MAX */ d__1 = abs(*alpha), d__2 = abs(*beta); orgnrm = max(d__1,d__2); d__[*nl + 1] = 0.; i__1 = n; for (i__ = 1; i__ <= i__1; ++i__) { if ((d__1 = d__[i__], abs(d__1)) > orgnrm) { orgnrm = (d__1 = d__[i__], abs(d__1)); } /* L10: */ } dlascl_("G", &c__0, &c__0, &orgnrm, &c_b15, &n, &c__1, &d__[1], &n, info); *alpha /= orgnrm; *beta /= orgnrm; /* Deflate singular values. */ dlasd2_(nl, nr, sqre, &k, &d__[1], &work[iz], alpha, beta, &u[u_offset], ldu, &vt[vt_offset], ldvt, &work[isigma], &work[iu2], &ldu2, & work[ivt2], &ldvt2, &iwork[idxp], &iwork[idx], &iwork[idxc], & idxq[1], &iwork[coltyp], info); /* Solve Secular Equation and update singular vectors. */ ldq = k; dlasd3_(nl, nr, sqre, &k, &d__[1], &work[iq], &ldq, &work[isigma], &u[ u_offset], ldu, &work[iu2], &ldu2, &vt[vt_offset], ldvt, &work[ ivt2], &ldvt2, &iwork[idxc], &iwork[coltyp], &work[iz], info); if (*info != 0) { return 0; } /* Unscale. */ dlascl_("G", &c__0, &c__0, &c_b15, &orgnrm, &n, &c__1, &d__[1], &n, info); /* Prepare the IDXQ sorting permutation. 
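   At this point D(1:K) holds the K secular-equation roots in ascending order
   and D(K+1:N) holds the N-K deflated values, themselves already sorted;
   DLAMRG merges the two sorted runs into the permutation IDXQ so that
   D(IDXQ(I)) is ascending.  Purely as an illustration -- the helper below is
   hypothetical, 0-based, and ignores DLAMRG's 1-based indexing and stride
   conventions -- the merge amounts to:

      // Merge d[0..n1-1] (ascending) and d[n1..n1+n2-1] (ascending) into an
      // index permutation idx[] such that d[idx[i]] is ascending.
      static void merge_index_sketch(int n1, int n2, const double *d, int *idx)
      {
          int i = 0, j = n1, k = 0;
          while (i < n1 && j < n1 + n2)
              idx[k++] = (d[i] <= d[j]) ? i++ : j++;
          while (i < n1)
              idx[k++] = i++;
          while (j < n1 + n2)
              idx[k++] = j++;
      }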
*/ n1 = k; n2 = n - k; dlamrg_(&n1, &n2, &d__[1], &c__1, &c_n1, &idxq[1]); return 0; /* End of DLASD1 */ } /* dlasd1_ */ /* Subroutine */ int dlasd2_(integer *nl, integer *nr, integer *sqre, integer *k, doublereal *d__, doublereal *z__, doublereal *alpha, doublereal * beta, doublereal *u, integer *ldu, doublereal *vt, integer *ldvt, doublereal *dsigma, doublereal *u2, integer *ldu2, doublereal *vt2, integer *ldvt2, integer *idxp, integer *idx, integer *idxc, integer * idxq, integer *coltyp, integer *info) { /* System generated locals */ integer u_dim1, u_offset, u2_dim1, u2_offset, vt_dim1, vt_offset, vt2_dim1, vt2_offset, i__1; doublereal d__1, d__2; /* Local variables */ static integer idxi, idxj; extern /* Subroutine */ int drot_(integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *); static integer ctot[4]; static doublereal c__; static integer i__, j, m, n; static doublereal s; static integer idxjp; extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *, doublereal *, integer *); static integer jprev, k2; static doublereal z1; extern doublereal dlapy2_(doublereal *, doublereal *); static integer ct; static integer jp; extern /* Subroutine */ int dlamrg_(integer *, integer *, doublereal *, integer *, integer *, integer *), dlacpy_(char *, integer *, integer *, doublereal *, integer *, doublereal *, integer *), dlaset_(char *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *); static doublereal hlftol, eps, tau, tol; static integer psm[4], nlp1, nlp2; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLASD2 merges the two sets of singular values together into a single sorted set. Then it tries to deflate the size of the problem. There are two ways in which deflation can occur: when two or more singular values are close together or if there is a tiny entry in the Z vector. For each such occurrence the order of the related secular equation problem is reduced by one. DLASD2 is called from DLASD1. Arguments ========= NL (input) INTEGER The row dimension of the upper block. NL >= 1. NR (input) INTEGER The row dimension of the lower block. NR >= 1. SQRE (input) INTEGER = 0: the lower block is an NR-by-NR square matrix. = 1: the lower block is an NR-by-(NR+1) rectangular matrix. The bidiagonal matrix has N = NL + NR + 1 rows and M = N + SQRE >= N columns. K (output) INTEGER Contains the dimension of the non-deflated matrix, This is the order of the related secular equation. 1 <= K <=N. D (input/output) DOUBLE PRECISION array, dimension(N) On entry D contains the singular values of the two submatrices to be combined. On exit D contains the trailing (N-K) updated singular values (those which were deflated) sorted into increasing order. Z (output) DOUBLE PRECISION array, dimension(N) On exit Z contains the updating row vector in the secular equation. ALPHA (input) DOUBLE PRECISION Contains the diagonal element associated with the added row. BETA (input) DOUBLE PRECISION Contains the off-diagonal element associated with the added row. U (input/output) DOUBLE PRECISION array, dimension(LDU,N) On entry U contains the left singular vectors of two submatrices in the two square blocks with corners at (1,1), (NL, NL), and (NL+2, NL+2), (N,N). On exit U contains the trailing (N-K) updated left singular vectors (those which were deflated) in its last N-K columns. 
LDU (input) INTEGER The leading dimension of the array U. LDU >= N. VT (input/output) DOUBLE PRECISION array, dimension(LDVT,M) On entry VT' contains the right singular vectors of two submatrices in the two square blocks with corners at (1,1), (NL+1, NL+1), and (NL+2, NL+2), (M,M). On exit VT' contains the trailing (N-K) updated right singular vectors (those which were deflated) in its last N-K columns. In case SQRE =1, the last row of VT spans the right null space. LDVT (input) INTEGER The leading dimension of the array VT. LDVT >= M. DSIGMA (output) DOUBLE PRECISION array, dimension (N) Contains a copy of the diagonal elements (K-1 singular values and one zero) in the secular equation. U2 (output) DOUBLE PRECISION array, dimension(LDU2,N) Contains a copy of the first K-1 left singular vectors which will be used by DLASD3 in a matrix multiply (DGEMM) to solve for the new left singular vectors. U2 is arranged into four blocks. The first block contains a column with 1 at NL+1 and zero everywhere else; the second block contains non-zero entries only at and above NL; the third contains non-zero entries only below NL+1; and the fourth is dense. LDU2 (input) INTEGER The leading dimension of the array U2. LDU2 >= N. VT2 (output) DOUBLE PRECISION array, dimension(LDVT2,N) VT2' contains a copy of the first K right singular vectors which will be used by DLASD3 in a matrix multiply (DGEMM) to solve for the new right singular vectors. VT2 is arranged into three blocks. The first block contains a row that corresponds to the special 0 diagonal element in SIGMA; the second block contains non-zeros only at and before NL +1; the third block contains non-zeros only at and after NL +2. LDVT2 (input) INTEGER The leading dimension of the array VT2. LDVT2 >= M. IDXP (workspace) INTEGER array dimension(N) This will contain the permutation used to place deflated values of D at the end of the array. On output IDXP(2:K) points to the nondeflated D-values and IDXP(K+1:N) points to the deflated singular values. IDX (workspace) INTEGER array dimension(N) This will contain the permutation used to sort the contents of D into ascending order. IDXC (output) INTEGER array dimension(N) This will contain the permutation used to arrange the columns of the deflated U matrix into three groups: the first group contains non-zero entries only at and above NL, the second contains non-zero entries only below NL+2, and the third is dense. IDXQ (input/output) INTEGER array dimension(N) This contains the permutation which separately sorts the two sub-problems in D into ascending order. Note that entries in the first hlaf of this permutation must first be moved one position backward; and entries in the second half must first have NL+1 added to their values. COLTYP (workspace/output) INTEGER array dimension(N) As workspace, this will contain a label which will indicate which of the following types a column in the U2 matrix or a row in the VT2 matrix is: 1 : non-zero in the upper half only 2 : non-zero in the lower half only 3 : dense 4 : deflated On exit, it is an array of dimension 4, with COLTYP(I) being the dimension of the I-th type columns. INFO (output) INTEGER = 0: successful exit. < 0: if INFO = -i, the i-th argument had an illegal value. Further Details =============== Based on contributions by Ming Gu and Huan Ren, Computer Science Division, University of California at Berkeley, USA ===================================================================== Test the input parameters. 
*/ /* Parameter adjustments */ --d__; --z__; u_dim1 = *ldu; u_offset = 1 + u_dim1 * 1; u -= u_offset; vt_dim1 = *ldvt; vt_offset = 1 + vt_dim1 * 1; vt -= vt_offset; --dsigma; u2_dim1 = *ldu2; u2_offset = 1 + u2_dim1 * 1; u2 -= u2_offset; vt2_dim1 = *ldvt2; vt2_offset = 1 + vt2_dim1 * 1; vt2 -= vt2_offset; --idxp; --idx; --idxc; --idxq; --coltyp; /* Function Body */ *info = 0; if (*nl < 1) { *info = -1; } else if (*nr < 1) { *info = -2; } else if (*sqre != 1 && *sqre != 0) { *info = -3; } n = *nl + *nr + 1; m = n + *sqre; if (*ldu < n) { *info = -10; } else if (*ldvt < m) { *info = -12; } else if (*ldu2 < n) { *info = -15; } else if (*ldvt2 < m) { *info = -17; } if (*info != 0) { i__1 = -(*info); xerbla_("DLASD2", &i__1); return 0; } nlp1 = *nl + 1; nlp2 = *nl + 2; /* Generate the first part of the vector Z; and move the singular values in the first part of D one position backward. */ z1 = *alpha * vt[nlp1 + nlp1 * vt_dim1]; z__[1] = z1; for (i__ = *nl; i__ >= 1; --i__) { z__[i__ + 1] = *alpha * vt[i__ + nlp1 * vt_dim1]; d__[i__ + 1] = d__[i__]; idxq[i__ + 1] = idxq[i__] + 1; /* L10: */ } /* Generate the second part of the vector Z. */ i__1 = m; for (i__ = nlp2; i__ <= i__1; ++i__) { z__[i__] = *beta * vt[i__ + nlp2 * vt_dim1]; /* L20: */ } /* Initialize some reference arrays. */ i__1 = nlp1; for (i__ = 2; i__ <= i__1; ++i__) { coltyp[i__] = 1; /* L30: */ } i__1 = n; for (i__ = nlp2; i__ <= i__1; ++i__) { coltyp[i__] = 2; /* L40: */ } /* Sort the singular values into increasing order */ i__1 = n; for (i__ = nlp2; i__ <= i__1; ++i__) { idxq[i__] += nlp1; /* L50: */ } /* DSIGMA, IDXC, IDXC, and the first column of U2 are used as storage space. */ i__1 = n; for (i__ = 2; i__ <= i__1; ++i__) { dsigma[i__] = d__[idxq[i__]]; u2[i__ + u2_dim1] = z__[idxq[i__]]; idxc[i__] = coltyp[idxq[i__]]; /* L60: */ } dlamrg_(nl, nr, &dsigma[2], &c__1, &c__1, &idx[2]); i__1 = n; for (i__ = 2; i__ <= i__1; ++i__) { idxi = idx[i__] + 1; d__[i__] = dsigma[idxi]; z__[i__] = u2[idxi + u2_dim1]; coltyp[i__] = idxc[idxi]; /* L70: */ } /* Calculate the allowable deflation tolerance */ eps = EPSILON; /* Computing MAX */ d__1 = abs(*alpha), d__2 = abs(*beta); tol = max(d__1,d__2); /* Computing MAX */ d__2 = (d__1 = d__[n], abs(d__1)); tol = eps * 8. * max(d__2,tol); /* There are 2 kinds of deflation -- first a value in the z-vector is small, second two (or more) singular values are very close together (their difference is small). If the value in the z-vector is small, we simply permute the array so that the corresponding singular value is moved to the end. If two values in the D-vector are close, we perform a two-sided rotation designed to make one of the corresponding z-vector entries zero, and then permute the array so that the deflated singular value is moved to the end. If there are multiple singular values then the problem deflates. Here the number of equal singular values are found. As each equal singular value is found, an elementary reflector is computed to rotate the corresponding singular subspace so that the corresponding components of Z are zero in this new basis. */ *k = 1; k2 = n + 1; i__1 = n; for (j = 2; j <= i__1; ++j) { if ((d__1 = z__[j], abs(d__1)) <= tol) { /* Deflate due to small z component. */ --k2; idxp[k2] = j; coltyp[j] = 4; if (j == n) { goto L120; } } else { jprev = j; goto L90; } /* L80: */ } L90: j = jprev; L100: ++j; if (j > n) { goto L110; } if ((d__1 = z__[j], abs(d__1)) <= tol) { /* Deflate due to small z component. 
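   A z-component is treated as negligible here when |Z(J)| <= TOL, with
   TOL = 8*EPS*max(|ALPHA|, |BETA|, |D(N)|) as computed above.  Purely as an
   illustration (hypothetical helper, not part of this translation, assuming
   the math.h fabs):

      static double deflation_tol_sketch(double eps, double alpha,
                                         double beta, double dn)
      {
          double t = fabs(alpha) > fabs(beta) ? fabs(alpha) : fabs(beta);
          if (fabs(dn) > t)
              t = fabs(dn);
          return 8.0 * eps * t;   // z-entries at or below this are deflated
      }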
*/ --k2; idxp[k2] = j; coltyp[j] = 4; } else { /* Check if singular values are close enough to allow deflation. */ if ((d__1 = d__[j] - d__[jprev], abs(d__1)) <= tol) { /* Deflation is possible. */ s = z__[jprev]; c__ = z__[j]; /* Find sqrt(a**2+b**2) without overflow or destructive underflow. */ tau = dlapy2_(&c__, &s); c__ /= tau; s = -s / tau; z__[j] = tau; z__[jprev] = 0.; /* Apply back the Givens rotation to the left and right singular vector matrices. */ idxjp = idxq[idx[jprev] + 1]; idxj = idxq[idx[j] + 1]; if (idxjp <= nlp1) { --idxjp; } if (idxj <= nlp1) { --idxj; } drot_(&n, &u[idxjp * u_dim1 + 1], &c__1, &u[idxj * u_dim1 + 1], & c__1, &c__, &s); drot_(&m, &vt[idxjp + vt_dim1], ldvt, &vt[idxj + vt_dim1], ldvt, & c__, &s); if (coltyp[j] != coltyp[jprev]) { coltyp[j] = 3; } coltyp[jprev] = 4; --k2; idxp[k2] = jprev; jprev = j; } else { ++(*k); u2[*k + u2_dim1] = z__[jprev]; dsigma[*k] = d__[jprev]; idxp[*k] = jprev; jprev = j; } } goto L100; L110: /* Record the last singular value. */ ++(*k); u2[*k + u2_dim1] = z__[jprev]; dsigma[*k] = d__[jprev]; idxp[*k] = jprev; L120: /* Count up the total number of the various types of columns, then form a permutation which positions the four column types into four groups of uniform structure (although one or more of these groups may be empty). */ for (j = 1; j <= 4; ++j) { ctot[j - 1] = 0; /* L130: */ } i__1 = n; for (j = 2; j <= i__1; ++j) { ct = coltyp[j]; ++ctot[ct - 1]; /* L140: */ } /* PSM(*) = Position in SubMatrix (of types 1 through 4) */ psm[0] = 2; psm[1] = ctot[0] + 2; psm[2] = psm[1] + ctot[1]; psm[3] = psm[2] + ctot[2]; /* Fill out the IDXC array so that the permutation which it induces will place all type-1 columns first, all type-2 columns next, then all type-3's, and finally all type-4's, starting from the second column. This applies similarly to the rows of VT. */ i__1 = n; for (j = 2; j <= i__1; ++j) { jp = idxp[j]; ct = coltyp[jp]; idxc[psm[ct - 1]] = j; ++psm[ct - 1]; /* L150: */ } /* Sort the singular values and corresponding singular vectors into DSIGMA, U2, and VT2 respectively. The singular values/vectors which were not deflated go into the first K slots of DSIGMA, U2, and VT2 respectively, while those which were deflated go into the last N - K slots, except that the first column/row will be treated separately. */ i__1 = n; for (j = 2; j <= i__1; ++j) { jp = idxp[j]; dsigma[j] = d__[jp]; idxj = idxq[idx[idxp[idxc[j]]] + 1]; if (idxj <= nlp1) { --idxj; } dcopy_(&n, &u[idxj * u_dim1 + 1], &c__1, &u2[j * u2_dim1 + 1], &c__1); dcopy_(&m, &vt[idxj + vt_dim1], ldvt, &vt2[j + vt2_dim1], ldvt2); /* L160: */ } /* Determine DSIGMA(1), DSIGMA(2) and Z(1) */ dsigma[1] = 0.; hlftol = tol / 2.; if (abs(dsigma[2]) <= hlftol) { dsigma[2] = hlftol; } if (m > n) { z__[1] = dlapy2_(&z1, &z__[m]); if (z__[1] <= tol) { c__ = 1.; s = 0.; z__[1] = tol; } else { c__ = z1 / z__[1]; s = z__[m] / z__[1]; } } else { if (abs(z1) <= tol) { z__[1] = tol; } else { z__[1] = z1; } } /* Move the rest of the updating row to Z. */ i__1 = *k - 1; dcopy_(&i__1, &u2[u2_dim1 + 2], &c__1, &z__[2], &c__1); /* Determine the first column of U2, the first row of VT2 and the last row of VT. 
*/ dlaset_("A", &n, &c__1, &c_b29, &c_b29, &u2[u2_offset], ldu2); u2[nlp1 + u2_dim1] = 1.; if (m > n) { i__1 = nlp1; for (i__ = 1; i__ <= i__1; ++i__) { vt[m + i__ * vt_dim1] = -s * vt[nlp1 + i__ * vt_dim1]; vt2[i__ * vt2_dim1 + 1] = c__ * vt[nlp1 + i__ * vt_dim1]; /* L170: */ } i__1 = m; for (i__ = nlp2; i__ <= i__1; ++i__) { vt2[i__ * vt2_dim1 + 1] = s * vt[m + i__ * vt_dim1]; vt[m + i__ * vt_dim1] = c__ * vt[m + i__ * vt_dim1]; /* L180: */ } } else { dcopy_(&m, &vt[nlp1 + vt_dim1], ldvt, &vt2[vt2_dim1 + 1], ldvt2); } if (m > n) { dcopy_(&m, &vt[m + vt_dim1], ldvt, &vt2[m + vt2_dim1], ldvt2); } /* The deflated singular values and their corresponding vectors go into the back of D, U, and V respectively. */ if (n > *k) { i__1 = n - *k; dcopy_(&i__1, &dsigma[*k + 1], &c__1, &d__[*k + 1], &c__1); i__1 = n - *k; dlacpy_("A", &n, &i__1, &u2[(*k + 1) * u2_dim1 + 1], ldu2, &u[(*k + 1) * u_dim1 + 1], ldu); i__1 = n - *k; dlacpy_("A", &i__1, &m, &vt2[*k + 1 + vt2_dim1], ldvt2, &vt[*k + 1 + vt_dim1], ldvt); } /* Copy CTOT into COLTYP for referencing in DLASD3. */ for (j = 1; j <= 4; ++j) { coltyp[j] = ctot[j - 1]; /* L190: */ } return 0; /* End of DLASD2 */ } /* dlasd2_ */ /* Subroutine */ int dlasd3_(integer *nl, integer *nr, integer *sqre, integer *k, doublereal *d__, doublereal *q, integer *ldq, doublereal *dsigma, doublereal *u, integer *ldu, doublereal *u2, integer *ldu2, doublereal *vt, integer *ldvt, doublereal *vt2, integer *ldvt2, integer *idxc, integer *ctot, doublereal *z__, integer *info) { /* System generated locals */ integer q_dim1, q_offset, u_dim1, u_offset, u2_dim1, u2_offset, vt_dim1, vt_offset, vt2_dim1, vt2_offset, i__1, i__2; doublereal d__1, d__2; /* Builtin functions */ double sqrt(doublereal), d_sign(doublereal *, doublereal *); /* Local variables */ static doublereal temp; extern doublereal dnrm2_(integer *, doublereal *, integer *); static integer i__, j, m, n; extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *); static integer ctemp; extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *, doublereal *, integer *); static integer ktemp; extern doublereal dlamc3_(doublereal *, doublereal *); extern /* Subroutine */ int dlasd4_(integer *, integer *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, integer *); static integer jc; extern /* Subroutine */ int dlascl_(char *, integer *, integer *, doublereal *, doublereal *, integer *, integer *, doublereal *, integer *, integer *), dlacpy_(char *, integer *, integer *, doublereal *, integer *, doublereal *, integer *), xerbla_(char *, integer *); static doublereal rho; static integer nlp1, nlp2, nrp1; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLASD3 finds all the square roots of the roots of the secular equation, as defined by the values in D and Z. It makes the appropriate calls to DLASD4 and then updates the singular vectors by matrix multiplication. This code makes very mild assumptions about floating point arithmetic. It will work on machines with a guard digit in add/subtract, or on those binary machines without guard digits which subtract like the Cray XMP, Cray YMP, Cray C 90, or Cray 2. It could conceivably fail on hexadecimal or decimal machines without guard digits, but we know of none. DLASD3 is called from DLASD1. 
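   In exact arithmetic the K updated singular values are the roots of the
   secular function
      f(sigma) = 1/RHO + sum_{j=1..K} Z(j)^2 / ((DSIGMA(j) - sigma)*(DSIGMA(j) + sigma)),
   and the roots interlace the poles DSIGMA(j).  Purely as an illustration
   (hypothetical 0-based helper; DLASD4 does not evaluate f this way, it uses
   shifted differences and rational interpolation for accuracy):

      static double secular_f_sketch(int k, const double *dsigma,
                                     const double *z, double rho, double sigma)
      {
          double f = 1.0 / rho;
          int j;
          for (j = 0; j < k; ++j)
              f += z[j] * z[j]
                   / ((dsigma[j] - sigma) * (dsigma[j] + sigma));
          return f;   // the updated singular values satisfy f(sigma) == 0
      }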
Arguments ========= NL (input) INTEGER The row dimension of the upper block. NL >= 1. NR (input) INTEGER The row dimension of the lower block. NR >= 1. SQRE (input) INTEGER = 0: the lower block is an NR-by-NR square matrix. = 1: the lower block is an NR-by-(NR+1) rectangular matrix. The bidiagonal matrix has N = NL + NR + 1 rows and M = N + SQRE >= N columns. K (input) INTEGER The size of the secular equation, 1 =< K = < N. D (output) DOUBLE PRECISION array, dimension(K) On exit the square roots of the roots of the secular equation, in ascending order. Q (workspace) DOUBLE PRECISION array, dimension at least (LDQ,K). LDQ (input) INTEGER The leading dimension of the array Q. LDQ >= K. DSIGMA (input) DOUBLE PRECISION array, dimension(K) The first K elements of this array contain the old roots of the deflated updating problem. These are the poles of the secular equation. U (output) DOUBLE PRECISION array, dimension (LDU, N) The last N - K columns of this matrix contain the deflated left singular vectors. LDU (input) INTEGER The leading dimension of the array U. LDU >= N. U2 (input/output) DOUBLE PRECISION array, dimension (LDU2, N) The first K columns of this matrix contain the non-deflated left singular vectors for the split problem. LDU2 (input) INTEGER The leading dimension of the array U2. LDU2 >= N. VT (output) DOUBLE PRECISION array, dimension (LDVT, M) The last M - K columns of VT' contain the deflated right singular vectors. LDVT (input) INTEGER The leading dimension of the array VT. LDVT >= N. VT2 (input/output) DOUBLE PRECISION array, dimension (LDVT2, N) The first K columns of VT2' contain the non-deflated right singular vectors for the split problem. LDVT2 (input) INTEGER The leading dimension of the array VT2. LDVT2 >= N. IDXC (input) INTEGER array, dimension ( N ) The permutation used to arrange the columns of U (and rows of VT) into three groups: the first group contains non-zero entries only at and above (or before) NL +1; the second contains non-zero entries only at and below (or after) NL+2; and the third is dense. The first column of U and the row of VT are treated separately, however. The rows of the singular vectors found by DLASD4 must be likewise permuted before the matrix multiplies can take place. CTOT (input) INTEGER array, dimension ( 4 ) A count of the total number of the various types of columns in U (or rows in VT), as described in IDXC. The fourth column type is any column which has been deflated. Z (input) DOUBLE PRECISION array, dimension (K) The first K elements of this array contain the components of the deflation-adjusted updating row vector. INFO (output) INTEGER = 0: successful exit. < 0: if INFO = -i, the i-th argument had an illegal value. > 0: if INFO = 1, an singular value did not converge Further Details =============== Based on contributions by Ming Gu and Huan Ren, Computer Science Division, University of California at Berkeley, USA ===================================================================== Test the input parameters. 
*/ /* Parameter adjustments */ --d__; q_dim1 = *ldq; q_offset = 1 + q_dim1 * 1; q -= q_offset; --dsigma; u_dim1 = *ldu; u_offset = 1 + u_dim1 * 1; u -= u_offset; u2_dim1 = *ldu2; u2_offset = 1 + u2_dim1 * 1; u2 -= u2_offset; vt_dim1 = *ldvt; vt_offset = 1 + vt_dim1 * 1; vt -= vt_offset; vt2_dim1 = *ldvt2; vt2_offset = 1 + vt2_dim1 * 1; vt2 -= vt2_offset; --idxc; --ctot; --z__; /* Function Body */ *info = 0; if (*nl < 1) { *info = -1; } else if (*nr < 1) { *info = -2; } else if (*sqre != 1 && *sqre != 0) { *info = -3; } n = *nl + *nr + 1; m = n + *sqre; nlp1 = *nl + 1; nlp2 = *nl + 2; if (*k < 1 || *k > n) { *info = -4; } else if (*ldq < *k) { *info = -7; } else if (*ldu < n) { *info = -10; } else if (*ldu2 < n) { *info = -12; } else if (*ldvt < m) { *info = -14; } else if (*ldvt2 < m) { *info = -16; } if (*info != 0) { i__1 = -(*info); xerbla_("DLASD3", &i__1); return 0; } /* Quick return if possible */ if (*k == 1) { d__[1] = abs(z__[1]); dcopy_(&m, &vt2[vt2_dim1 + 1], ldvt2, &vt[vt_dim1 + 1], ldvt); if (z__[1] > 0.) { dcopy_(&n, &u2[u2_dim1 + 1], &c__1, &u[u_dim1 + 1], &c__1); } else { i__1 = n; for (i__ = 1; i__ <= i__1; ++i__) { u[i__ + u_dim1] = -u2[i__ + u2_dim1]; /* L10: */ } } return 0; } /* Modify values DSIGMA(i) to make sure all DSIGMA(i)-DSIGMA(j) can be computed with high relative accuracy (barring over/underflow). This is a problem on machines without a guard digit in add/subtract (Cray XMP, Cray YMP, Cray C 90 and Cray 2). The following code replaces DSIGMA(I) by 2*DSIGMA(I)-DSIGMA(I), which on any of these machines zeros out the bottommost bit of DSIGMA(I) if it is 1; this makes the subsequent subtractions DSIGMA(I)-DSIGMA(J) unproblematic when cancellation occurs. On binary machines with a guard digit (almost all machines) it does not change DSIGMA(I) at all. On hexadecimal and decimal machines with a guard digit, it slightly changes the bottommost bits of DSIGMA(I). It does not account for hexadecimal or decimal machines without guard digits (we know of none). We use a subroutine call to compute 2*DSIGMA(I) to prevent optimizing compilers from eliminating this code. */ i__1 = *k; for (i__ = 1; i__ <= i__1; ++i__) { dsigma[i__] = dlamc3_(&dsigma[i__], &dsigma[i__]) - dsigma[i__]; /* L20: */ } /* Keep a copy of Z. */ dcopy_(k, &z__[1], &c__1, &q[q_offset], &c__1); /* Normalize Z. */ rho = dnrm2_(k, &z__[1], &c__1); dlascl_("G", &c__0, &c__0, &rho, &c_b15, k, &c__1, &z__[1], k, info); rho *= rho; /* Find the new singular values. */ i__1 = *k; for (j = 1; j <= i__1; ++j) { dlasd4_(k, &j, &dsigma[1], &z__[1], &u[j * u_dim1 + 1], &rho, &d__[j], &vt[j * vt_dim1 + 1], info); /* If the zero finder fails, the computation is terminated. */ if (*info != 0) { return 0; } /* L30: */ } /* Compute updated Z. */ i__1 = *k; for (i__ = 1; i__ <= i__1; ++i__) { z__[i__] = u[i__ + *k * u_dim1] * vt[i__ + *k * vt_dim1]; i__2 = i__ - 1; for (j = 1; j <= i__2; ++j) { z__[i__] *= u[i__ + j * u_dim1] * vt[i__ + j * vt_dim1] / (dsigma[ i__] - dsigma[j]) / (dsigma[i__] + dsigma[j]); /* L40: */ } i__2 = *k - 1; for (j = i__; j <= i__2; ++j) { z__[i__] *= u[i__ + j * u_dim1] * vt[i__ + j * vt_dim1] / (dsigma[ i__] - dsigma[j + 1]) / (dsigma[i__] + dsigma[j + 1]); /* L50: */ } d__2 = sqrt((d__1 = z__[i__], abs(d__1))); z__[i__] = d_sign(&d__2, &q[i__ + q_dim1]); /* L60: */ } /* Compute left singular vectors of the modified diagonal matrix, and store related information for the right singular vectors. 
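   For each root sigma_i found by DLASD4, the loop below overwrites column i
   with the ratios Z(j) / ((DSIGMA(j) - sigma_i)*(DSIGMA(j) + sigma_i)); the
   right singular vectors are these ratios normalized to unit length, and the
   left singular vectors use DSIGMA(j) times the same ratios (with the first
   entry pinned to -1) before normalization.  Purely as an illustration
   (hypothetical 0-based helper, not part of the LAPACK source):

      static void ratio_column_sketch(int k, const double *dsigma,
                                      const double *z, double sigma, double *r)
      {
          int j;
          for (j = 0; j < k; ++j)
              r[j] = z[j] / ((dsigma[j] - sigma) * (dsigma[j] + sigma));
      }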
*/ i__1 = *k; for (i__ = 1; i__ <= i__1; ++i__) { vt[i__ * vt_dim1 + 1] = z__[1] / u[i__ * u_dim1 + 1] / vt[i__ * vt_dim1 + 1]; u[i__ * u_dim1 + 1] = -1.; i__2 = *k; for (j = 2; j <= i__2; ++j) { vt[j + i__ * vt_dim1] = z__[j] / u[j + i__ * u_dim1] / vt[j + i__ * vt_dim1]; u[j + i__ * u_dim1] = dsigma[j] * vt[j + i__ * vt_dim1]; /* L70: */ } temp = dnrm2_(k, &u[i__ * u_dim1 + 1], &c__1); q[i__ * q_dim1 + 1] = u[i__ * u_dim1 + 1] / temp; i__2 = *k; for (j = 2; j <= i__2; ++j) { jc = idxc[j]; q[j + i__ * q_dim1] = u[jc + i__ * u_dim1] / temp; /* L80: */ } /* L90: */ } /* Update the left singular vector matrix. */ if (*k == 2) { dgemm_("N", "N", &n, k, k, &c_b15, &u2[u2_offset], ldu2, &q[q_offset], ldq, &c_b29, &u[u_offset], ldu); goto L100; } if (ctot[1] > 0) { dgemm_("N", "N", nl, k, &ctot[1], &c_b15, &u2[(u2_dim1 << 1) + 1], ldu2, &q[q_dim1 + 2], ldq, &c_b29, &u[u_dim1 + 1], ldu); if (ctot[3] > 0) { ktemp = ctot[1] + 2 + ctot[2]; dgemm_("N", "N", nl, k, &ctot[3], &c_b15, &u2[ktemp * u2_dim1 + 1] , ldu2, &q[ktemp + q_dim1], ldq, &c_b15, &u[u_dim1 + 1], ldu); } } else if (ctot[3] > 0) { ktemp = ctot[1] + 2 + ctot[2]; dgemm_("N", "N", nl, k, &ctot[3], &c_b15, &u2[ktemp * u2_dim1 + 1], ldu2, &q[ktemp + q_dim1], ldq, &c_b29, &u[u_dim1 + 1], ldu); } else { dlacpy_("F", nl, k, &u2[u2_offset], ldu2, &u[u_offset], ldu); } dcopy_(k, &q[q_dim1 + 1], ldq, &u[nlp1 + u_dim1], ldu); ktemp = ctot[1] + 2; ctemp = ctot[2] + ctot[3]; dgemm_("N", "N", nr, k, &ctemp, &c_b15, &u2[nlp2 + ktemp * u2_dim1], ldu2, &q[ktemp + q_dim1], ldq, &c_b29, &u[nlp2 + u_dim1], ldu); /* Generate the right singular vectors. */ L100: i__1 = *k; for (i__ = 1; i__ <= i__1; ++i__) { temp = dnrm2_(k, &vt[i__ * vt_dim1 + 1], &c__1); q[i__ + q_dim1] = vt[i__ * vt_dim1 + 1] / temp; i__2 = *k; for (j = 2; j <= i__2; ++j) { jc = idxc[j]; q[i__ + j * q_dim1] = vt[jc + i__ * vt_dim1] / temp; /* L110: */ } /* L120: */ } /* Update the right singular vector matrix. 
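   The DGEMM calls below form, in effect,
      VT(1:K, 1:M) = Q(1:K, 1:K) * VT2(1:K, 1:M),
   split according to the column types recorded in CTOT so that structurally
   zero blocks are skipped.  Purely as an illustration (hypothetical unblocked
   helper using LAPACK's column-major, leading-dimension layout):

      static void matmul_sketch(int k, int m,
                                const double *q, int ldq,
                                const double *vt2, int ldvt2,
                                double *vtout, int ldvtout)
      {
          int i, j, p;
          for (i = 0; i < k; ++i)
              for (j = 0; j < m; ++j) {
                  double s = 0.0;
                  for (p = 0; p < k; ++p)
                      s += q[i + p * ldq] * vt2[p + j * ldvt2];
                  vtout[i + j * ldvtout] = s;
              }
      }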
*/ if (*k == 2) { dgemm_("N", "N", k, &m, k, &c_b15, &q[q_offset], ldq, &vt2[vt2_offset] , ldvt2, &c_b29, &vt[vt_offset], ldvt); return 0; } ktemp = ctot[1] + 1; dgemm_("N", "N", k, &nlp1, &ktemp, &c_b15, &q[q_dim1 + 1], ldq, &vt2[ vt2_dim1 + 1], ldvt2, &c_b29, &vt[vt_dim1 + 1], ldvt); ktemp = ctot[1] + 2 + ctot[2]; if (ktemp <= *ldvt2) { dgemm_("N", "N", k, &nlp1, &ctot[3], &c_b15, &q[ktemp * q_dim1 + 1], ldq, &vt2[ktemp + vt2_dim1], ldvt2, &c_b15, &vt[vt_dim1 + 1], ldvt); } ktemp = ctot[1] + 1; nrp1 = *nr + *sqre; if (ktemp > 1) { i__1 = *k; for (i__ = 1; i__ <= i__1; ++i__) { q[i__ + ktemp * q_dim1] = q[i__ + q_dim1]; /* L130: */ } i__1 = m; for (i__ = nlp2; i__ <= i__1; ++i__) { vt2[ktemp + i__ * vt2_dim1] = vt2[i__ * vt2_dim1 + 1]; /* L140: */ } } ctemp = ctot[2] + 1 + ctot[3]; dgemm_("N", "N", k, &nrp1, &ctemp, &c_b15, &q[ktemp * q_dim1 + 1], ldq, & vt2[ktemp + nlp2 * vt2_dim1], ldvt2, &c_b29, &vt[nlp2 * vt_dim1 + 1], ldvt); return 0; /* End of DLASD3 */ } /* dlasd3_ */ /* Subroutine */ int dlasd4_(integer *n, integer *i__, doublereal *d__, doublereal *z__, doublereal *delta, doublereal *rho, doublereal * sigma, doublereal *work, integer *info) { /* System generated locals */ integer i__1; doublereal d__1; /* Builtin functions */ double sqrt(doublereal); /* Local variables */ static doublereal dphi, dpsi; static integer iter; static doublereal temp, prew, sg2lb, sg2ub, temp1, temp2, a, b, c__; static integer j; static doublereal w, dtiim, delsq, dtiip; static integer niter; static doublereal dtisq; static logical swtch; static doublereal dtnsq; extern /* Subroutine */ int dlaed6_(integer *, logical *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, integer *) , dlasd5_(integer *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *); static doublereal delsq2, dd[3], dtnsq1; static logical swtch3; static integer ii; static doublereal dw, zz[3]; static logical orgati; static doublereal erretm, dtipsq, rhoinv; static integer ip1; static doublereal eta, phi, eps, tau, psi; static integer iim1, iip1; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= This subroutine computes the square root of the I-th updated eigenvalue of a positive symmetric rank-one modification to a positive diagonal matrix whose entries are given as the squares of the corresponding entries in the array d, and that 0 <= D(i) < D(j) for i < j and that RHO > 0. This is arranged by the calling routine, and is no loss in generality. The rank-one modified system is thus diag( D ) * diag( D ) + RHO * Z * Z_transpose. where we assume the Euclidean norm of Z is 1. The method consists of approximating the rational functions in the secular equation by simpler interpolating rational functions. Arguments ========= N (input) INTEGER The length of all arrays. I (input) INTEGER The index of the eigenvalue to be computed. 1 <= I <= N. D (input) DOUBLE PRECISION array, dimension ( N ) The original eigenvalues. It is assumed that they are in order, 0 <= D(I) < D(J) for I < J. Z (input) DOUBLE PRECISION array, dimension ( N ) The components of the updating vector. DELTA (output) DOUBLE PRECISION array, dimension ( N ) If N .ne. 1, DELTA contains (D(j) - sigma_I) in its j-th component. If N = 1, then DELTA(1) = 1. The vector DELTA contains the information necessary to construct the (singular) eigenvectors. RHO (input) DOUBLE PRECISION The scalar in the symmetric updating formula. 
SIGMA (output) DOUBLE PRECISION The computed sigma_I, the I-th updated eigenvalue. WORK (workspace) DOUBLE PRECISION array, dimension ( N ) If N .ne. 1, WORK contains (D(j) + sigma_I) in its j-th component. If N = 1, then WORK( 1 ) = 1. INFO (output) INTEGER = 0: successful exit > 0: if INFO = 1, the updating process failed. Internal Parameters =================== Logical variable ORGATI (origin-at-i?) is used for distinguishing whether D(i) or D(i+1) is treated as the origin. ORGATI = .true. origin at i ORGATI = .false. origin at i+1 Logical variable SWTCH3 (switch-for-3-poles?) is for noting if we are working with THREE poles! MAXIT is the maximum number of iterations allowed for each eigenvalue. Further Details =============== Based on contributions by Ren-Cang Li, Computer Science Division, University of California at Berkeley, USA ===================================================================== Since this routine is called in an inner loop, we do no argument checking. Quick return for N=1 and 2. */ /* Parameter adjustments */ --work; --delta; --z__; --d__; /* Function Body */ *info = 0; if (*n == 1) { /* Presumably, I=1 upon entry */ *sigma = sqrt(d__[1] * d__[1] + *rho * z__[1] * z__[1]); delta[1] = 1.; work[1] = 1.; return 0; } if (*n == 2) { dlasd5_(i__, &d__[1], &z__[1], &delta[1], rho, sigma, &work[1]); return 0; } /* Compute machine epsilon */ eps = EPSILON; rhoinv = 1. / *rho; /* The case I = N */ if (*i__ == *n) { /* Initialize some basic variables */ ii = *n - 1; niter = 1; /* Calculate initial guess */ temp = *rho / 2.; /* If ||Z||_2 is not one, then TEMP should be set to RHO * ||Z||_2^2 / TWO */ temp1 = temp / (d__[*n] + sqrt(d__[*n] * d__[*n] + temp)); i__1 = *n; for (j = 1; j <= i__1; ++j) { work[j] = d__[j] + d__[*n] + temp1; delta[j] = d__[j] - d__[*n] - temp1; /* L10: */ } psi = 0.; i__1 = *n - 2; for (j = 1; j <= i__1; ++j) { psi += z__[j] * z__[j] / (delta[j] * work[j]); /* L20: */ } c__ = rhoinv + psi; w = c__ + z__[ii] * z__[ii] / (delta[ii] * work[ii]) + z__[*n] * z__[* n] / (delta[*n] * work[*n]); if (w <= 0.) { temp1 = sqrt(d__[*n] * d__[*n] + *rho); temp = z__[*n - 1] * z__[*n - 1] / ((d__[*n - 1] + temp1) * (d__[* n] - d__[*n - 1] + *rho / (d__[*n] + temp1))) + z__[*n] * z__[*n] / *rho; /* The following TAU is to approximate SIGMA_n^2 - D( N )*D( N ) */ if (c__ <= temp) { tau = *rho; } else { delsq = (d__[*n] - d__[*n - 1]) * (d__[*n] + d__[*n - 1]); a = -c__ * delsq + z__[*n - 1] * z__[*n - 1] + z__[*n] * z__[* n]; b = z__[*n] * z__[*n] * delsq; if (a < 0.) { tau = b * 2. / (sqrt(a * a + b * 4. * c__) - a); } else { tau = (a + sqrt(a * a + b * 4. * c__)) / (c__ * 2.); } } /* It can be proved that D(N)^2+RHO/2 <= SIGMA_n^2 < D(N)^2+TAU <= D(N)^2+RHO */ } else { delsq = (d__[*n] - d__[*n - 1]) * (d__[*n] + d__[*n - 1]); a = -c__ * delsq + z__[*n - 1] * z__[*n - 1] + z__[*n] * z__[*n]; b = z__[*n] * z__[*n] * delsq; /* The following TAU is to approximate SIGMA_n^2 - D( N )*D( N ) */ if (a < 0.) { tau = b * 2. / (sqrt(a * a + b * 4. * c__) - a); } else { tau = (a + sqrt(a * a + b * 4. 
* c__)) / (c__ * 2.); } /* It can be proved that D(N)^2 < D(N)^2+TAU < SIGMA(N)^2 < D(N)^2+RHO/2 */ } /* The following ETA is to approximate SIGMA_n - D( N ) */ eta = tau / (d__[*n] + sqrt(d__[*n] * d__[*n] + tau)); *sigma = d__[*n] + eta; i__1 = *n; for (j = 1; j <= i__1; ++j) { delta[j] = d__[j] - d__[*i__] - eta; work[j] = d__[j] + d__[*i__] + eta; /* L30: */ } /* Evaluate PSI and the derivative DPSI */ dpsi = 0.; psi = 0.; erretm = 0.; i__1 = ii; for (j = 1; j <= i__1; ++j) { temp = z__[j] / (delta[j] * work[j]); psi += z__[j] * temp; dpsi += temp * temp; erretm += psi; /* L40: */ } erretm = abs(erretm); /* Evaluate PHI and the derivative DPHI */ temp = z__[*n] / (delta[*n] * work[*n]); phi = z__[*n] * temp; dphi = temp * temp; erretm = (-phi - psi) * 8. + erretm - phi + rhoinv + abs(tau) * (dpsi + dphi); w = rhoinv + phi + psi; /* Test for convergence */ if (abs(w) <= eps * erretm) { goto L240; } /* Calculate the new step */ ++niter; dtnsq1 = work[*n - 1] * delta[*n - 1]; dtnsq = work[*n] * delta[*n]; c__ = w - dtnsq1 * dpsi - dtnsq * dphi; a = (dtnsq + dtnsq1) * w - dtnsq * dtnsq1 * (dpsi + dphi); b = dtnsq * dtnsq1 * w; if (c__ < 0.) { c__ = abs(c__); } if (c__ == 0.) { eta = *rho - *sigma * *sigma; } else if (a >= 0.) { eta = (a + sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / (c__ * 2.); } else { eta = b * 2. / (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1))) ); } /* Note, eta should be positive if w is negative, and eta should be negative otherwise. However, if for some reason caused by roundoff, eta*w > 0, we simply use one Newton step instead. This way will guarantee eta*w < 0. */ if (w * eta > 0.) { eta = -w / (dpsi + dphi); } temp = eta - dtnsq; if (temp > *rho) { eta = *rho + dtnsq; } tau += eta; eta /= *sigma + sqrt(eta + *sigma * *sigma); i__1 = *n; for (j = 1; j <= i__1; ++j) { delta[j] -= eta; work[j] += eta; /* L50: */ } *sigma += eta; /* Evaluate PSI and the derivative DPSI */ dpsi = 0.; psi = 0.; erretm = 0.; i__1 = ii; for (j = 1; j <= i__1; ++j) { temp = z__[j] / (work[j] * delta[j]); psi += z__[j] * temp; dpsi += temp * temp; erretm += psi; /* L60: */ } erretm = abs(erretm); /* Evaluate PHI and the derivative DPHI */ temp = z__[*n] / (work[*n] * delta[*n]); phi = z__[*n] * temp; dphi = temp * temp; erretm = (-phi - psi) * 8. + erretm - phi + rhoinv + abs(tau) * (dpsi + dphi); w = rhoinv + phi + psi; /* Main loop to update the values of the array DELTA */ iter = niter + 1; for (niter = iter; niter <= 20; ++niter) { /* Test for convergence */ if (abs(w) <= eps * erretm) { goto L240; } /* Calculate the new step */ dtnsq1 = work[*n - 1] * delta[*n - 1]; dtnsq = work[*n] * delta[*n]; c__ = w - dtnsq1 * dpsi - dtnsq * dphi; a = (dtnsq + dtnsq1) * w - dtnsq1 * dtnsq * (dpsi + dphi); b = dtnsq1 * dtnsq * w; if (a >= 0.) { eta = (a + sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / ( c__ * 2.); } else { eta = b * 2. / (a - sqrt((d__1 = a * a - b * 4. * c__, abs( d__1)))); } /* Note, eta should be positive if w is negative, and eta should be negative otherwise. However, if for some reason caused by roundoff, eta*w > 0, we simply use one Newton step instead. This way will guarantee eta*w < 0. */ if (w * eta > 0.) { eta = -w / (dpsi + dphi); } temp = eta - dtnsq; if (temp <= 0.) 
{ eta /= 2.; } tau += eta; eta /= *sigma + sqrt(eta + *sigma * *sigma); i__1 = *n; for (j = 1; j <= i__1; ++j) { delta[j] -= eta; work[j] += eta; /* L70: */ } *sigma += eta; /* Evaluate PSI and the derivative DPSI */ dpsi = 0.; psi = 0.; erretm = 0.; i__1 = ii; for (j = 1; j <= i__1; ++j) { temp = z__[j] / (work[j] * delta[j]); psi += z__[j] * temp; dpsi += temp * temp; erretm += psi; /* L80: */ } erretm = abs(erretm); /* Evaluate PHI and the derivative DPHI */ temp = z__[*n] / (work[*n] * delta[*n]); phi = z__[*n] * temp; dphi = temp * temp; erretm = (-phi - psi) * 8. + erretm - phi + rhoinv + abs(tau) * ( dpsi + dphi); w = rhoinv + phi + psi; /* L90: */ } /* Return with INFO = 1, NITER = MAXIT and not converged */ *info = 1; goto L240; /* End for the case I = N */ } else { /* The case for I < N */ niter = 1; ip1 = *i__ + 1; /* Calculate initial guess */ delsq = (d__[ip1] - d__[*i__]) * (d__[ip1] + d__[*i__]); delsq2 = delsq / 2.; temp = delsq2 / (d__[*i__] + sqrt(d__[*i__] * d__[*i__] + delsq2)); i__1 = *n; for (j = 1; j <= i__1; ++j) { work[j] = d__[j] + d__[*i__] + temp; delta[j] = d__[j] - d__[*i__] - temp; /* L100: */ } psi = 0.; i__1 = *i__ - 1; for (j = 1; j <= i__1; ++j) { psi += z__[j] * z__[j] / (work[j] * delta[j]); /* L110: */ } phi = 0.; i__1 = *i__ + 2; for (j = *n; j >= i__1; --j) { phi += z__[j] * z__[j] / (work[j] * delta[j]); /* L120: */ } c__ = rhoinv + psi + phi; w = c__ + z__[*i__] * z__[*i__] / (work[*i__] * delta[*i__]) + z__[ ip1] * z__[ip1] / (work[ip1] * delta[ip1]); if (w > 0.) { /* d(i)^2 < the ith sigma^2 < (d(i)^2+d(i+1)^2)/2 We choose d(i) as origin. */ orgati = TRUE_; sg2lb = 0.; sg2ub = delsq2; a = c__ * delsq + z__[*i__] * z__[*i__] + z__[ip1] * z__[ip1]; b = z__[*i__] * z__[*i__] * delsq; if (a > 0.) { tau = b * 2. / (a + sqrt((d__1 = a * a - b * 4. * c__, abs( d__1)))); } else { tau = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / ( c__ * 2.); } /* TAU now is an estimation of SIGMA^2 - D( I )^2. The following, however, is the corresponding estimation of SIGMA - D( I ). */ eta = tau / (d__[*i__] + sqrt(d__[*i__] * d__[*i__] + tau)); } else { /* (d(i)^2+d(i+1)^2)/2 <= the ith sigma^2 < d(i+1)^2/2 We choose d(i+1) as origin. */ orgati = FALSE_; sg2lb = -delsq2; sg2ub = 0.; a = c__ * delsq - z__[*i__] * z__[*i__] - z__[ip1] * z__[ip1]; b = z__[ip1] * z__[ip1] * delsq; if (a < 0.) { tau = b * 2. / (a - sqrt((d__1 = a * a + b * 4. * c__, abs( d__1)))); } else { tau = -(a + sqrt((d__1 = a * a + b * 4. * c__, abs(d__1)))) / (c__ * 2.); } /* TAU now is an estimation of SIGMA^2 - D( IP1 )^2. The following, however, is the corresponding estimation of SIGMA - D( IP1 ). 
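   Since SIGMA^2 = D(IP1)^2 + TAU in exact arithmetic, the difference can be
   rewritten without cancellation as
      SIGMA - D(IP1) = (SIGMA^2 - D(IP1)^2) / (SIGMA + D(IP1))
                     = TAU / (D(IP1) + sqrt(D(IP1)^2 + TAU)),
   which is the form used in the next statement; it remains accurate even when
   TAU is tiny compared with D(IP1)^2, where the naive
   sqrt(D(IP1)^2 + TAU) - D(IP1) would lose most of its significant digits.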
*/ eta = tau / (d__[ip1] + sqrt((d__1 = d__[ip1] * d__[ip1] + tau, abs(d__1)))); } if (orgati) { ii = *i__; *sigma = d__[*i__] + eta; i__1 = *n; for (j = 1; j <= i__1; ++j) { work[j] = d__[j] + d__[*i__] + eta; delta[j] = d__[j] - d__[*i__] - eta; /* L130: */ } } else { ii = *i__ + 1; *sigma = d__[ip1] + eta; i__1 = *n; for (j = 1; j <= i__1; ++j) { work[j] = d__[j] + d__[ip1] + eta; delta[j] = d__[j] - d__[ip1] - eta; /* L140: */ } } iim1 = ii - 1; iip1 = ii + 1; /* Evaluate PSI and the derivative DPSI */ dpsi = 0.; psi = 0.; erretm = 0.; i__1 = iim1; for (j = 1; j <= i__1; ++j) { temp = z__[j] / (work[j] * delta[j]); psi += z__[j] * temp; dpsi += temp * temp; erretm += psi; /* L150: */ } erretm = abs(erretm); /* Evaluate PHI and the derivative DPHI */ dphi = 0.; phi = 0.; i__1 = iip1; for (j = *n; j >= i__1; --j) { temp = z__[j] / (work[j] * delta[j]); phi += z__[j] * temp; dphi += temp * temp; erretm += phi; /* L160: */ } w = rhoinv + phi + psi; /* W is the value of the secular function with its ii-th element removed. */ swtch3 = FALSE_; if (orgati) { if (w < 0.) { swtch3 = TRUE_; } } else { if (w > 0.) { swtch3 = TRUE_; } } if (ii == 1 || ii == *n) { swtch3 = FALSE_; } temp = z__[ii] / (work[ii] * delta[ii]); dw = dpsi + dphi + temp * temp; temp = z__[ii] * temp; w += temp; erretm = (phi - psi) * 8. + erretm + rhoinv * 2. + abs(temp) * 3. + abs(tau) * dw; /* Test for convergence */ if (abs(w) <= eps * erretm) { goto L240; } if (w <= 0.) { sg2lb = max(sg2lb,tau); } else { sg2ub = min(sg2ub,tau); } /* Calculate the new step */ ++niter; if (! swtch3) { dtipsq = work[ip1] * delta[ip1]; dtisq = work[*i__] * delta[*i__]; if (orgati) { /* Computing 2nd power */ d__1 = z__[*i__] / dtisq; c__ = w - dtipsq * dw + delsq * (d__1 * d__1); } else { /* Computing 2nd power */ d__1 = z__[ip1] / dtipsq; c__ = w - dtisq * dw - delsq * (d__1 * d__1); } a = (dtipsq + dtisq) * w - dtipsq * dtisq * dw; b = dtipsq * dtisq * w; if (c__ == 0.) { if (a == 0.) { if (orgati) { a = z__[*i__] * z__[*i__] + dtipsq * dtipsq * (dpsi + dphi); } else { a = z__[ip1] * z__[ip1] + dtisq * dtisq * (dpsi + dphi); } } eta = b / a; } else if (a <= 0.) { eta = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / ( c__ * 2.); } else { eta = b * 2. / (a + sqrt((d__1 = a * a - b * 4. * c__, abs( d__1)))); } } else { /* Interpolation using THREE most relevant poles */ dtiim = work[iim1] * delta[iim1]; dtiip = work[iip1] * delta[iip1]; temp = rhoinv + psi + phi; if (orgati) { temp1 = z__[iim1] / dtiim; temp1 *= temp1; c__ = temp - dtiip * (dpsi + dphi) - (d__[iim1] - d__[iip1]) * (d__[iim1] + d__[iip1]) * temp1; zz[0] = z__[iim1] * z__[iim1]; if (dpsi < temp1) { zz[2] = dtiip * dtiip * dphi; } else { zz[2] = dtiip * dtiip * (dpsi - temp1 + dphi); } } else { temp1 = z__[iip1] / dtiip; temp1 *= temp1; c__ = temp - dtiim * (dpsi + dphi) - (d__[iip1] - d__[iim1]) * (d__[iim1] + d__[iip1]) * temp1; if (dphi < temp1) { zz[0] = dtiim * dtiim * dpsi; } else { zz[0] = dtiim * dtiim * (dpsi + (dphi - temp1)); } zz[2] = z__[iip1] * z__[iip1]; } zz[1] = z__[ii] * z__[ii]; dd[0] = dtiim; dd[1] = delta[ii] * work[ii]; dd[2] = dtiip; dlaed6_(&niter, &orgati, &c__, dd, zz, &w, &eta, info); if (*info != 0) { goto L240; } } /* Note, eta should be positive if w is negative, and eta should be negative otherwise. However, if for some reason caused by roundoff, eta*w > 0, we simply use one Newton step instead. This way will guarantee eta*w < 0. */ if (w * eta >= 0.) 
{ eta = -w / dw; } if (orgati) { temp1 = work[*i__] * delta[*i__]; temp = eta - temp1; } else { temp1 = work[ip1] * delta[ip1]; temp = eta - temp1; } if (temp > sg2ub || temp < sg2lb) { if (w < 0.) { eta = (sg2ub - tau) / 2.; } else { eta = (sg2lb - tau) / 2.; } } tau += eta; eta /= *sigma + sqrt(*sigma * *sigma + eta); prew = w; *sigma += eta; i__1 = *n; for (j = 1; j <= i__1; ++j) { work[j] += eta; delta[j] -= eta; /* L170: */ } /* Evaluate PSI and the derivative DPSI */ dpsi = 0.; psi = 0.; erretm = 0.; i__1 = iim1; for (j = 1; j <= i__1; ++j) { temp = z__[j] / (work[j] * delta[j]); psi += z__[j] * temp; dpsi += temp * temp; erretm += psi; /* L180: */ } erretm = abs(erretm); /* Evaluate PHI and the derivative DPHI */ dphi = 0.; phi = 0.; i__1 = iip1; for (j = *n; j >= i__1; --j) { temp = z__[j] / (work[j] * delta[j]); phi += z__[j] * temp; dphi += temp * temp; erretm += phi; /* L190: */ } temp = z__[ii] / (work[ii] * delta[ii]); dw = dpsi + dphi + temp * temp; temp = z__[ii] * temp; w = rhoinv + phi + psi + temp; erretm = (phi - psi) * 8. + erretm + rhoinv * 2. + abs(temp) * 3. + abs(tau) * dw; if (w <= 0.) { sg2lb = max(sg2lb,tau); } else { sg2ub = min(sg2ub,tau); } swtch = FALSE_; if (orgati) { if (-w > abs(prew) / 10.) { swtch = TRUE_; } } else { if (w > abs(prew) / 10.) { swtch = TRUE_; } } /* Main loop to update the values of the array DELTA and WORK */ iter = niter + 1; for (niter = iter; niter <= 20; ++niter) { /* Test for convergence */ if (abs(w) <= eps * erretm) { goto L240; } /* Calculate the new step */ if (! swtch3) { dtipsq = work[ip1] * delta[ip1]; dtisq = work[*i__] * delta[*i__]; if (! swtch) { if (orgati) { /* Computing 2nd power */ d__1 = z__[*i__] / dtisq; c__ = w - dtipsq * dw + delsq * (d__1 * d__1); } else { /* Computing 2nd power */ d__1 = z__[ip1] / dtipsq; c__ = w - dtisq * dw - delsq * (d__1 * d__1); } } else { temp = z__[ii] / (work[ii] * delta[ii]); if (orgati) { dpsi += temp * temp; } else { dphi += temp * temp; } c__ = w - dtisq * dpsi - dtipsq * dphi; } a = (dtipsq + dtisq) * w - dtipsq * dtisq * dw; b = dtipsq * dtisq * w; if (c__ == 0.) { if (a == 0.) { if (! swtch) { if (orgati) { a = z__[*i__] * z__[*i__] + dtipsq * dtipsq * (dpsi + dphi); } else { a = z__[ip1] * z__[ip1] + dtisq * dtisq * ( dpsi + dphi); } } else { a = dtisq * dtisq * dpsi + dtipsq * dtipsq * dphi; } } eta = b / a; } else if (a <= 0.) { eta = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / (c__ * 2.); } else { eta = b * 2. / (a + sqrt((d__1 = a * a - b * 4. 
* c__, abs(d__1)))); } } else { /* Interpolation using THREE most relevant poles */ dtiim = work[iim1] * delta[iim1]; dtiip = work[iip1] * delta[iip1]; temp = rhoinv + psi + phi; if (swtch) { c__ = temp - dtiim * dpsi - dtiip * dphi; zz[0] = dtiim * dtiim * dpsi; zz[2] = dtiip * dtiip * dphi; } else { if (orgati) { temp1 = z__[iim1] / dtiim; temp1 *= temp1; temp2 = (d__[iim1] - d__[iip1]) * (d__[iim1] + d__[ iip1]) * temp1; c__ = temp - dtiip * (dpsi + dphi) - temp2; zz[0] = z__[iim1] * z__[iim1]; if (dpsi < temp1) { zz[2] = dtiip * dtiip * dphi; } else { zz[2] = dtiip * dtiip * (dpsi - temp1 + dphi); } } else { temp1 = z__[iip1] / dtiip; temp1 *= temp1; temp2 = (d__[iip1] - d__[iim1]) * (d__[iim1] + d__[ iip1]) * temp1; c__ = temp - dtiim * (dpsi + dphi) - temp2; if (dphi < temp1) { zz[0] = dtiim * dtiim * dpsi; } else { zz[0] = dtiim * dtiim * (dpsi + (dphi - temp1)); } zz[2] = z__[iip1] * z__[iip1]; } } dd[0] = dtiim; dd[1] = delta[ii] * work[ii]; dd[2] = dtiip; dlaed6_(&niter, &orgati, &c__, dd, zz, &w, &eta, info); if (*info != 0) { goto L240; } } /* Note, eta should be positive if w is negative, and eta should be negative otherwise. However, if for some reason caused by roundoff, eta*w > 0, we simply use one Newton step instead. This way will guarantee eta*w < 0. */ if (w * eta >= 0.) { eta = -w / dw; } if (orgati) { temp1 = work[*i__] * delta[*i__]; temp = eta - temp1; } else { temp1 = work[ip1] * delta[ip1]; temp = eta - temp1; } if (temp > sg2ub || temp < sg2lb) { if (w < 0.) { eta = (sg2ub - tau) / 2.; } else { eta = (sg2lb - tau) / 2.; } } tau += eta; eta /= *sigma + sqrt(*sigma * *sigma + eta); *sigma += eta; i__1 = *n; for (j = 1; j <= i__1; ++j) { work[j] += eta; delta[j] -= eta; /* L200: */ } prew = w; /* Evaluate PSI and the derivative DPSI */ dpsi = 0.; psi = 0.; erretm = 0.; i__1 = iim1; for (j = 1; j <= i__1; ++j) { temp = z__[j] / (work[j] * delta[j]); psi += z__[j] * temp; dpsi += temp * temp; erretm += psi; /* L210: */ } erretm = abs(erretm); /* Evaluate PHI and the derivative DPHI */ dphi = 0.; phi = 0.; i__1 = iip1; for (j = *n; j >= i__1; --j) { temp = z__[j] / (work[j] * delta[j]); phi += z__[j] * temp; dphi += temp * temp; erretm += phi; /* L220: */ } temp = z__[ii] / (work[ii] * delta[ii]); dw = dpsi + dphi + temp * temp; temp = z__[ii] * temp; w = rhoinv + phi + psi + temp; erretm = (phi - psi) * 8. + erretm + rhoinv * 2. + abs(temp) * 3. + abs(tau) * dw; if (w * prew > 0. && abs(w) > abs(prew) / 10.) { swtch = ! swtch; } if (w <= 0.) { sg2lb = max(sg2lb,tau); } else { sg2ub = min(sg2ub,tau); } /* L230: */ } /* Return with INFO = 1, NITER = MAXIT and not converged */ *info = 1; } L240: return 0; /* End of DLASD4 */ } /* dlasd4_ */ /* Subroutine */ int dlasd5_(integer *i__, doublereal *d__, doublereal *z__, doublereal *delta, doublereal *rho, doublereal *dsigma, doublereal * work) { /* System generated locals */ doublereal d__1; /* Builtin functions */ double sqrt(doublereal); /* Local variables */ static doublereal b, c__, w, delsq, del, tau; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= This subroutine computes the square root of the I-th eigenvalue of a positive symmetric rank-one modification of a 2-by-2 diagonal matrix diag( D ) * diag( D ) + RHO * Z * transpose(Z) . The diagonal entries in the array D are assumed to satisfy 0 <= D(i) < D(j) for i < j . We also assume RHO > 0 and that the Euclidean norm of the vector Z is one. 
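   For this 2-by-2 case the secular equation
      1 + RHO*( Z(1)^2/(D(1)^2 - t) + Z(2)^2/(D(2)^2 - t) ) = 0
   is a quadratic in t = sigma^2:
      t^2 - (D(1)^2 + D(2)^2 + RHO*(Z(1)^2 + Z(2)^2))*t
          + D(1)^2*D(2)^2 + RHO*(Z(1)^2*D(2)^2 + Z(2)^2*D(1)^2) = 0,
   where the middle coefficient simplifies since the norm of Z is one.
   Purely as an illustration (hypothetical helper, assuming the math.h sqrt;
   DLASD5 computes the same roots through shifted, cancellation-free
   expressions instead):

      static void two_by_two_roots_sketch(const double d[2], const double z[2],
                                          double rho, double t_out[2])
      {
          double d1 = d[0] * d[0], d2 = d[1] * d[1];
          double b = d1 + d2 + rho * (z[0] * z[0] + z[1] * z[1]);
          double c = d1 * d2 + rho * (z[0] * z[0] * d2 + z[1] * z[1] * d1);
          double disc = sqrt(b * b - 4.0 * c);
          t_out[0] = 0.5 * (b - disc);   // smaller root: sigma_1 squared
          t_out[1] = 0.5 * (b + disc);   // larger root:  sigma_2 squared
      }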
Arguments ========= I (input) INTEGER The index of the eigenvalue to be computed. I = 1 or I = 2. D (input) DOUBLE PRECISION array, dimension ( 2 ) The original eigenvalues. We assume 0 <= D(1) < D(2). Z (input) DOUBLE PRECISION array, dimension ( 2 ) The components of the updating vector. DELTA (output) DOUBLE PRECISION array, dimension ( 2 ) Contains (D(j) - sigma_I) in its j-th component. The vector DELTA contains the information necessary to construct the eigenvectors. RHO (input) DOUBLE PRECISION The scalar in the symmetric updating formula. DSIGMA (output) DOUBLE PRECISION The computed sigma_I, the I-th updated eigenvalue. WORK (workspace) DOUBLE PRECISION array, dimension ( 2 ) WORK contains (D(j) + sigma_I) in its j-th component. Further Details =============== Based on contributions by Ren-Cang Li, Computer Science Division, University of California at Berkeley, USA ===================================================================== */ /* Parameter adjustments */ --work; --delta; --z__; --d__; /* Function Body */ del = d__[2] - d__[1]; delsq = del * (d__[2] + d__[1]); if (*i__ == 1) { w = *rho * 4. * (z__[2] * z__[2] / (d__[1] + d__[2] * 3.) - z__[1] * z__[1] / (d__[1] * 3. + d__[2])) / del + 1.; if (w > 0.) { b = delsq + *rho * (z__[1] * z__[1] + z__[2] * z__[2]); c__ = *rho * z__[1] * z__[1] * delsq; /* B > ZERO, always The following TAU is DSIGMA * DSIGMA - D( 1 ) * D( 1 ) */ tau = c__ * 2. / (b + sqrt((d__1 = b * b - c__ * 4., abs(d__1)))); /* The following TAU is DSIGMA - D( 1 ) */ tau /= d__[1] + sqrt(d__[1] * d__[1] + tau); *dsigma = d__[1] + tau; delta[1] = -tau; delta[2] = del - tau; work[1] = d__[1] * 2. + tau; work[2] = d__[1] + tau + d__[2]; /* DELTA( 1 ) = -Z( 1 ) / TAU DELTA( 2 ) = Z( 2 ) / ( DEL-TAU ) */ } else { b = -delsq + *rho * (z__[1] * z__[1] + z__[2] * z__[2]); c__ = *rho * z__[2] * z__[2] * delsq; /* The following TAU is DSIGMA * DSIGMA - D( 2 ) * D( 2 ) */ if (b > 0.) { tau = c__ * -2. / (b + sqrt(b * b + c__ * 4.)); } else { tau = (b - sqrt(b * b + c__ * 4.)) / 2.; } /* The following TAU is DSIGMA - D( 2 ) */ tau /= d__[2] + sqrt((d__1 = d__[2] * d__[2] + tau, abs(d__1))); *dsigma = d__[2] + tau; delta[1] = -(del + tau); delta[2] = -tau; work[1] = d__[1] + tau + d__[2]; work[2] = d__[2] * 2. + tau; /* DELTA( 1 ) = -Z( 1 ) / ( DEL+TAU ) DELTA( 2 ) = -Z( 2 ) / TAU */ } /* TEMP = SQRT( DELTA( 1 )*DELTA( 1 )+DELTA( 2 )*DELTA( 2 ) ) DELTA( 1 ) = DELTA( 1 ) / TEMP DELTA( 2 ) = DELTA( 2 ) / TEMP */ } else { /* Now I=2 */ b = -delsq + *rho * (z__[1] * z__[1] + z__[2] * z__[2]); c__ = *rho * z__[2] * z__[2] * delsq; /* The following TAU is DSIGMA * DSIGMA - D( 2 ) * D( 2 ) */ if (b > 0.) { tau = (b + sqrt(b * b + c__ * 4.)) / 2.; } else { tau = c__ * 2. / (-b + sqrt(b * b + c__ * 4.)); } /* The following TAU is DSIGMA - D( 2 ) */ tau /= d__[2] + sqrt(d__[2] * d__[2] + tau); *dsigma = d__[2] + tau; delta[1] = -(del + tau); delta[2] = -tau; work[1] = d__[1] + tau + d__[2]; work[2] = d__[2] * 2. 
+ tau; /* DELTA( 1 ) = -Z( 1 ) / ( DEL+TAU ) DELTA( 2 ) = -Z( 2 ) / TAU TEMP = SQRT( DELTA( 1 )*DELTA( 1 )+DELTA( 2 )*DELTA( 2 ) ) DELTA( 1 ) = DELTA( 1 ) / TEMP DELTA( 2 ) = DELTA( 2 ) / TEMP */ } return 0; /* End of DLASD5 */ } /* dlasd5_ */ /* Subroutine */ int dlasd6_(integer *icompq, integer *nl, integer *nr, integer *sqre, doublereal *d__, doublereal *vf, doublereal *vl, doublereal *alpha, doublereal *beta, integer *idxq, integer *perm, integer *givptr, integer *givcol, integer *ldgcol, doublereal *givnum, integer *ldgnum, doublereal *poles, doublereal *difl, doublereal * difr, doublereal *z__, integer *k, doublereal *c__, doublereal *s, doublereal *work, integer *iwork, integer *info) { /* System generated locals */ integer givcol_dim1, givcol_offset, givnum_dim1, givnum_offset, poles_dim1, poles_offset, i__1; doublereal d__1, d__2; /* Local variables */ static integer idxc, idxp, ivfw, ivlw, i__, m, n; extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *, doublereal *, integer *); static integer n1, n2; extern /* Subroutine */ int dlasd7_(integer *, integer *, integer *, integer *, integer *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, integer *, integer *, integer *, integer *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *), dlasd8_( integer *, integer *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, integer *, doublereal *, doublereal *, integer *); static integer iw; extern /* Subroutine */ int dlascl_(char *, integer *, integer *, doublereal *, doublereal *, integer *, integer *, doublereal *, integer *, integer *), dlamrg_(integer *, integer *, doublereal *, integer *, integer *, integer *); static integer isigma; extern /* Subroutine */ int xerbla_(char *, integer *); static doublereal orgnrm; static integer idx; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLASD6 computes the SVD of an updated upper bidiagonal matrix B obtained by merging two smaller ones by appending a row. This routine is used only for the problem which requires all singular values and optionally singular vector matrices in factored form. B is an N-by-M matrix with N = NL + NR + 1 and M = N + SQRE. A related subroutine, DLASD1, handles the case in which all singular values and singular vectors of the bidiagonal matrix are desired. DLASD6 computes the SVD as follows: ( D1(in) 0 0 0 ) B = U(in) * ( Z1' a Z2' b ) * VT(in) ( 0 0 D2(in) 0 ) = U(out) * ( D(out) 0) * VT(out) where Z' = (Z1' a Z2' b) = u' VT', and u is a vector of dimension M with ALPHA and BETA in the NL+1 and NL+2 th entries and zeros elsewhere; and the entry b is empty if SQRE = 0. The singular values of B can be computed using D1, D2, the first components of all the right singular vectors of the lower block, and the last components of all the right singular vectors of the upper block. These components are stored and updated in VF and VL, respectively, in DLASD6. Hence U and VT are not explicitly referenced. The singular values are stored in D. The algorithm consists of two stages: The first stage consists of deflating the size of the problem when there are multiple singular values or if there is a zero in the Z vector. For each such occurence the dimension of the secular equation problem is reduced by one. This stage is performed by the routine DLASD7. 
The second stage consists of calculating the updated singular values. This is done by finding the roots of the secular equation via the routine DLASD4 (as called by DLASD8). This routine also updates VF and VL and computes the distances between the updated singular values and the old singular values. DLASD6 is called from DLASDA. Arguments ========= ICOMPQ (input) INTEGER Specifies whether singular vectors are to be computed in factored form: = 0: Compute singular values only. = 1: Compute singular vectors in factored form as well. NL (input) INTEGER The row dimension of the upper block. NL >= 1. NR (input) INTEGER The row dimension of the lower block. NR >= 1. SQRE (input) INTEGER = 0: the lower block is an NR-by-NR square matrix. = 1: the lower block is an NR-by-(NR+1) rectangular matrix. The bidiagonal matrix has row dimension N = NL + NR + 1, and column dimension M = N + SQRE. D (input/output) DOUBLE PRECISION array, dimension ( NL+NR+1 ). On entry D(1:NL,1:NL) contains the singular values of the upper block, and D(NL+2:N) contains the singular values of the lower block. On exit D(1:N) contains the singular values of the modified matrix. VF (input/output) DOUBLE PRECISION array, dimension ( M ) On entry, VF(1:NL+1) contains the first components of all right singular vectors of the upper block; and VF(NL+2:M) contains the first components of all right singular vectors of the lower block. On exit, VF contains the first components of all right singular vectors of the bidiagonal matrix. VL (input/output) DOUBLE PRECISION array, dimension ( M ) On entry, VL(1:NL+1) contains the last components of all right singular vectors of the upper block; and VL(NL+2:M) contains the last components of all right singular vectors of the lower block. On exit, VL contains the last components of all right singular vectors of the bidiagonal matrix. ALPHA (input/output) DOUBLE PRECISION Contains the diagonal element associated with the added row. BETA (input/output) DOUBLE PRECISION Contains the off-diagonal element associated with the added row. IDXQ (output) INTEGER array, dimension ( N ) This contains the permutation which will reintegrate the subproblem just solved back into sorted order, i.e. D( IDXQ( I = 1, N ) ) will be in ascending order. PERM (output) INTEGER array, dimension ( N ) The permutations (from deflation and sorting) to be applied to each block. Not referenced if ICOMPQ = 0. GIVPTR (output) INTEGER The number of Givens rotations which took place in this subproblem. Not referenced if ICOMPQ = 0. GIVCOL (output) INTEGER array, dimension ( LDGCOL, 2 ) Each pair of numbers indicates a pair of columns to take place in a Givens rotation. Not referenced if ICOMPQ = 0. LDGCOL (input) INTEGER leading dimension of GIVCOL, must be at least N. GIVNUM (output) DOUBLE PRECISION array, dimension ( LDGNUM, 2 ) Each number indicates the C or S value to be used in the corresponding Givens rotation. Not referenced if ICOMPQ = 0. LDGNUM (input) INTEGER The leading dimension of GIVNUM and POLES, must be at least N. POLES (output) DOUBLE PRECISION array, dimension ( LDGNUM, 2 ) On exit, POLES(1,*) is an array containing the new singular values obtained from solving the secular equation, and POLES(2,*) is an array containing the poles in the secular equation. Not referenced if ICOMPQ = 0. DIFL (output) DOUBLE PRECISION array, dimension ( N ) On exit, DIFL(I) is the distance between I-th updated (undeflated) singular value and the I-th (undeflated) old singular value. 
DIFR (output) DOUBLE PRECISION array, dimension ( LDGNUM, 2 ) if ICOMPQ = 1 and dimension ( N ) if ICOMPQ = 0. On exit, DIFR(I, 1) is the distance between I-th updated (undeflated) singular value and the I+1-th (undeflated) old singular value. If ICOMPQ = 1, DIFR(1:K,2) is an array containing the normalizing factors for the right singular vector matrix. See DLASD8 for details on DIFL and DIFR. Z (output) DOUBLE PRECISION array, dimension ( M ) The first elements of this array contain the components of the deflation-adjusted updating row vector. K (output) INTEGER Contains the dimension of the non-deflated matrix, This is the order of the related secular equation. 1 <= K <=N. C (output) DOUBLE PRECISION C contains garbage if SQRE =0 and the C-value of a Givens rotation related to the right null space if SQRE = 1. S (output) DOUBLE PRECISION S contains garbage if SQRE =0 and the S-value of a Givens rotation related to the right null space if SQRE = 1. WORK (workspace) DOUBLE PRECISION array, dimension ( 4 * M ) IWORK (workspace) INTEGER array, dimension ( 3 * N ) INFO (output) INTEGER = 0: successful exit. < 0: if INFO = -i, the i-th argument had an illegal value. > 0: if INFO = 1, an singular value did not converge Further Details =============== Based on contributions by Ming Gu and Huan Ren, Computer Science Division, University of California at Berkeley, USA ===================================================================== Test the input parameters. */ /* Parameter adjustments */ --d__; --vf; --vl; --idxq; --perm; givcol_dim1 = *ldgcol; givcol_offset = 1 + givcol_dim1 * 1; givcol -= givcol_offset; poles_dim1 = *ldgnum; poles_offset = 1 + poles_dim1 * 1; poles -= poles_offset; givnum_dim1 = *ldgnum; givnum_offset = 1 + givnum_dim1 * 1; givnum -= givnum_offset; --difl; --difr; --z__; --work; --iwork; /* Function Body */ *info = 0; n = *nl + *nr + 1; m = n + *sqre; if (*icompq < 0 || *icompq > 1) { *info = -1; } else if (*nl < 1) { *info = -2; } else if (*nr < 1) { *info = -3; } else if (*sqre < 0 || *sqre > 1) { *info = -4; } else if (*ldgcol < n) { *info = -14; } else if (*ldgnum < n) { *info = -16; } if (*info != 0) { i__1 = -(*info); xerbla_("DLASD6", &i__1); return 0; } /* The following values are for bookkeeping purposes only. They are integer pointers which indicate the portion of the workspace used by a particular array in DLASD7 and DLASD8. */ isigma = 1; iw = isigma + n; ivfw = iw + m; ivlw = ivfw + m; idx = 1; idxc = idx + n; idxp = idxc + n; /* Scale. Computing MAX */ d__1 = abs(*alpha), d__2 = abs(*beta); orgnrm = max(d__1,d__2); d__[*nl + 1] = 0.; i__1 = n; for (i__ = 1; i__ <= i__1; ++i__) { if ((d__1 = d__[i__], abs(d__1)) > orgnrm) { orgnrm = (d__1 = d__[i__], abs(d__1)); } /* L10: */ } dlascl_("G", &c__0, &c__0, &orgnrm, &c_b15, &n, &c__1, &d__[1], &n, info); *alpha /= orgnrm; *beta /= orgnrm; /* Sort and Deflate singular values. */ dlasd7_(icompq, nl, nr, sqre, k, &d__[1], &z__[1], &work[iw], &vf[1], & work[ivfw], &vl[1], &work[ivlw], alpha, beta, &work[isigma], & iwork[idx], &iwork[idxp], &idxq[1], &perm[1], givptr, &givcol[ givcol_offset], ldgcol, &givnum[givnum_offset], ldgnum, c__, s, info); /* Solve Secular Equation, compute DIFL, DIFR, and update VF, VL. */ dlasd8_(icompq, k, &d__[1], &z__[1], &vf[1], &vl[1], &difl[1], &difr[1], ldgnum, &work[isigma], &work[iw], info); /* Save the poles if ICOMPQ = 1. 
*/ if (*icompq == 1) { dcopy_(k, &d__[1], &c__1, &poles[poles_dim1 + 1], &c__1); dcopy_(k, &work[isigma], &c__1, &poles[(poles_dim1 << 1) + 1], &c__1); } /* Unscale. */ dlascl_("G", &c__0, &c__0, &c_b15, &orgnrm, &n, &c__1, &d__[1], &n, info); /* Prepare the IDXQ sorting permutation. */ n1 = *k; n2 = n - *k; dlamrg_(&n1, &n2, &d__[1], &c__1, &c_n1, &idxq[1]); return 0; /* End of DLASD6 */ } /* dlasd6_ */ /* Subroutine */ int dlasd7_(integer *icompq, integer *nl, integer *nr, integer *sqre, integer *k, doublereal *d__, doublereal *z__, doublereal *zw, doublereal *vf, doublereal *vfw, doublereal *vl, doublereal *vlw, doublereal *alpha, doublereal *beta, doublereal * dsigma, integer *idx, integer *idxp, integer *idxq, integer *perm, integer *givptr, integer *givcol, integer *ldgcol, doublereal *givnum, integer *ldgnum, doublereal *c__, doublereal *s, integer *info) { /* System generated locals */ integer givcol_dim1, givcol_offset, givnum_dim1, givnum_offset, i__1; doublereal d__1, d__2; /* Local variables */ static integer idxi, idxj; extern /* Subroutine */ int drot_(integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *); static integer i__, j, m, n, idxjp; extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *, doublereal *, integer *); static integer jprev, k2; static doublereal z1; static integer jp; extern /* Subroutine */ int dlamrg_(integer *, integer *, doublereal *, integer *, integer *, integer *), xerbla_(char *, integer *); static doublereal hlftol, eps, tau, tol; static integer nlp1, nlp2; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLASD7 merges the two sets of singular values together into a single sorted set. Then it tries to deflate the size of the problem. There are two ways in which deflation can occur: when two or more singular values are close together or if there is a tiny entry in the Z vector. For each such occurrence the order of the related secular equation problem is reduced by one. DLASD7 is called from DLASD6. Arguments ========= ICOMPQ (input) INTEGER Specifies whether singular vectors are to be computed in compact form, as follows: = 0: Compute singular values only. = 1: Compute singular vectors of upper bidiagonal matrix in compact form. NL (input) INTEGER The row dimension of the upper block. NL >= 1. NR (input) INTEGER The row dimension of the lower block. NR >= 1. SQRE (input) INTEGER = 0: the lower block is an NR-by-NR square matrix. = 1: the lower block is an NR-by-(NR+1) rectangular matrix. The bidiagonal matrix has N = NL + NR + 1 rows and M = N + SQRE >= N columns. K (output) INTEGER Contains the dimension of the non-deflated matrix, this is the order of the related secular equation. 1 <= K <=N. D (input/output) DOUBLE PRECISION array, dimension ( N ) On entry D contains the singular values of the two submatrices to be combined. On exit D contains the trailing (N-K) updated singular values (those which were deflated) sorted into increasing order. Z (output) DOUBLE PRECISION array, dimension ( M ) On exit Z contains the updating row vector in the secular equation. ZW (workspace) DOUBLE PRECISION array, dimension ( M ) Workspace for Z. VF (input/output) DOUBLE PRECISION array, dimension ( M ) On entry, VF(1:NL+1) contains the first components of all right singular vectors of the upper block; and VF(NL+2:M) contains the first components of all right singular vectors of the lower block. 
On exit, VF contains the first components of all right singular vectors of the bidiagonal matrix. VFW (workspace) DOUBLE PRECISION array, dimension ( M ) Workspace for VF. VL (input/output) DOUBLE PRECISION array, dimension ( M ) On entry, VL(1:NL+1) contains the last components of all right singular vectors of the upper block; and VL(NL+2:M) contains the last components of all right singular vectors of the lower block. On exit, VL contains the last components of all right singular vectors of the bidiagonal matrix. VLW (workspace) DOUBLE PRECISION array, dimension ( M ) Workspace for VL. ALPHA (input) DOUBLE PRECISION Contains the diagonal element associated with the added row. BETA (input) DOUBLE PRECISION Contains the off-diagonal element associated with the added row. DSIGMA (output) DOUBLE PRECISION array, dimension ( N ) Contains a copy of the diagonal elements (K-1 singular values and one zero) in the secular equation. IDX (workspace) INTEGER array, dimension ( N ) This will contain the permutation used to sort the contents of D into ascending order. IDXP (workspace) INTEGER array, dimension ( N ) This will contain the permutation used to place deflated values of D at the end of the array. On output IDXP(2:K) points to the nondeflated D-values and IDXP(K+1:N) points to the deflated singular values. IDXQ (input) INTEGER array, dimension ( N ) This contains the permutation which separately sorts the two sub-problems in D into ascending order. Note that entries in the first half of this permutation must first be moved one position backward; and entries in the second half must first have NL+1 added to their values. PERM (output) INTEGER array, dimension ( N ) The permutations (from deflation and sorting) to be applied to each singular block. Not referenced if ICOMPQ = 0. GIVPTR (output) INTEGER The number of Givens rotations which took place in this subproblem. Not referenced if ICOMPQ = 0. GIVCOL (output) INTEGER array, dimension ( LDGCOL, 2 ) Each pair of numbers indicates a pair of columns to take place in a Givens rotation. Not referenced if ICOMPQ = 0. LDGCOL (input) INTEGER The leading dimension of GIVCOL, must be at least N. GIVNUM (output) DOUBLE PRECISION array, dimension ( LDGNUM, 2 ) Each number indicates the C or S value to be used in the corresponding Givens rotation. Not referenced if ICOMPQ = 0. LDGNUM (input) INTEGER The leading dimension of GIVNUM, must be at least N. C (output) DOUBLE PRECISION C contains garbage if SQRE =0 and the C-value of a Givens rotation related to the right null space if SQRE = 1. S (output) DOUBLE PRECISION S contains garbage if SQRE =0 and the S-value of a Givens rotation related to the right null space if SQRE = 1. INFO (output) INTEGER = 0: successful exit. < 0: if INFO = -i, the i-th argument had an illegal value. Further Details =============== Based on contributions by Ming Gu and Huan Ren, Computer Science Division, University of California at Berkeley, USA ===================================================================== Test the input parameters. 
*/ /* Parameter adjustments */ --d__; --z__; --zw; --vf; --vfw; --vl; --vlw; --dsigma; --idx; --idxp; --idxq; --perm; givcol_dim1 = *ldgcol; givcol_offset = 1 + givcol_dim1 * 1; givcol -= givcol_offset; givnum_dim1 = *ldgnum; givnum_offset = 1 + givnum_dim1 * 1; givnum -= givnum_offset; /* Function Body */ *info = 0; n = *nl + *nr + 1; m = n + *sqre; if (*icompq < 0 || *icompq > 1) { *info = -1; } else if (*nl < 1) { *info = -2; } else if (*nr < 1) { *info = -3; } else if (*sqre < 0 || *sqre > 1) { *info = -4; } else if (*ldgcol < n) { *info = -22; } else if (*ldgnum < n) { *info = -24; } if (*info != 0) { i__1 = -(*info); xerbla_("DLASD7", &i__1); return 0; } nlp1 = *nl + 1; nlp2 = *nl + 2; if (*icompq == 1) { *givptr = 0; } /* Generate the first part of the vector Z and move the singular values in the first part of D one position backward. */ z1 = *alpha * vl[nlp1]; vl[nlp1] = 0.; tau = vf[nlp1]; for (i__ = *nl; i__ >= 1; --i__) { z__[i__ + 1] = *alpha * vl[i__]; vl[i__] = 0.; vf[i__ + 1] = vf[i__]; d__[i__ + 1] = d__[i__]; idxq[i__ + 1] = idxq[i__] + 1; /* L10: */ } vf[1] = tau; /* Generate the second part of the vector Z. */ i__1 = m; for (i__ = nlp2; i__ <= i__1; ++i__) { z__[i__] = *beta * vf[i__]; vf[i__] = 0.; /* L20: */ } /* Sort the singular values into increasing order */ i__1 = n; for (i__ = nlp2; i__ <= i__1; ++i__) { idxq[i__] += nlp1; /* L30: */ } /* DSIGMA, IDXC, IDXC, and ZW are used as storage space. */ i__1 = n; for (i__ = 2; i__ <= i__1; ++i__) { dsigma[i__] = d__[idxq[i__]]; zw[i__] = z__[idxq[i__]]; vfw[i__] = vf[idxq[i__]]; vlw[i__] = vl[idxq[i__]]; /* L40: */ } dlamrg_(nl, nr, &dsigma[2], &c__1, &c__1, &idx[2]); i__1 = n; for (i__ = 2; i__ <= i__1; ++i__) { idxi = idx[i__] + 1; d__[i__] = dsigma[idxi]; z__[i__] = zw[idxi]; vf[i__] = vfw[idxi]; vl[i__] = vlw[idxi]; /* L50: */ } /* Calculate the allowable deflation tolerence */ eps = EPSILON; /* Computing MAX */ d__1 = abs(*alpha), d__2 = abs(*beta); tol = max(d__1,d__2); /* Computing MAX */ d__2 = (d__1 = d__[n], abs(d__1)); tol = eps * 64. * max(d__2,tol); /* There are 2 kinds of deflation -- first a value in the z-vector is small, second two (or more) singular values are very close together (their difference is small). If the value in the z-vector is small, we simply permute the array so that the corresponding singular value is moved to the end. If two values in the D-vector are close, we perform a two-sided rotation designed to make one of the corresponding z-vector entries zero, and then permute the array so that the deflated singular value is moved to the end. If there are multiple singular values then the problem deflates. Here the number of equal singular values are found. As each equal singular value is found, an elementary reflector is computed to rotate the corresponding singular subspace so that the corresponding components of Z are zero in this new basis. */ *k = 1; k2 = n + 1; i__1 = n; for (j = 2; j <= i__1; ++j) { if ((d__1 = z__[j], abs(d__1)) <= tol) { /* Deflate due to small z component. */ --k2; idxp[k2] = j; if (j == n) { goto L100; } } else { jprev = j; goto L70; } /* L60: */ } L70: j = jprev; L80: ++j; if (j > n) { goto L90; } if ((d__1 = z__[j], abs(d__1)) <= tol) { /* Deflate due to small z component. */ --k2; idxp[k2] = j; } else { /* Check if singular values are close enough to allow deflation. */ if ((d__1 = d__[j] - d__[jprev], abs(d__1)) <= tol) { /* Deflation is possible. */ *s = z__[jprev]; *c__ = z__[j]; /* Find sqrt(a**2+b**2) without overflow or destructive underflow. 
*/ tau = dlapy2_(c__, s); z__[j] = tau; z__[jprev] = 0.; *c__ /= tau; *s = -(*s) / tau; /* Record the appropriate Givens rotation */ if (*icompq == 1) { ++(*givptr); idxjp = idxq[idx[jprev] + 1]; idxj = idxq[idx[j] + 1]; if (idxjp <= nlp1) { --idxjp; } if (idxj <= nlp1) { --idxj; } givcol[*givptr + (givcol_dim1 << 1)] = idxjp; givcol[*givptr + givcol_dim1] = idxj; givnum[*givptr + (givnum_dim1 << 1)] = *c__; givnum[*givptr + givnum_dim1] = *s; } drot_(&c__1, &vf[jprev], &c__1, &vf[j], &c__1, c__, s); drot_(&c__1, &vl[jprev], &c__1, &vl[j], &c__1, c__, s); --k2; idxp[k2] = jprev; jprev = j; } else { ++(*k); zw[*k] = z__[jprev]; dsigma[*k] = d__[jprev]; idxp[*k] = jprev; jprev = j; } } goto L80; L90: /* Record the last singular value. */ ++(*k); zw[*k] = z__[jprev]; dsigma[*k] = d__[jprev]; idxp[*k] = jprev; L100: /* Sort the singular values into DSIGMA. The singular values which were not deflated go into the first K slots of DSIGMA, except that DSIGMA(1) is treated separately. */ i__1 = n; for (j = 2; j <= i__1; ++j) { jp = idxp[j]; dsigma[j] = d__[jp]; vfw[j] = vf[jp]; vlw[j] = vl[jp]; /* L110: */ } if (*icompq == 1) { i__1 = n; for (j = 2; j <= i__1; ++j) { jp = idxp[j]; perm[j] = idxq[idx[jp] + 1]; if (perm[j] <= nlp1) { --perm[j]; } /* L120: */ } } /* The deflated singular values go back into the last N - K slots of D. */ i__1 = n - *k; dcopy_(&i__1, &dsigma[*k + 1], &c__1, &d__[*k + 1], &c__1); /* Determine DSIGMA(1), DSIGMA(2), Z(1), VF(1), VL(1), VF(M), and VL(M). */ dsigma[1] = 0.; hlftol = tol / 2.; if (abs(dsigma[2]) <= hlftol) { dsigma[2] = hlftol; } if (m > n) { z__[1] = dlapy2_(&z1, &z__[m]); if (z__[1] <= tol) { *c__ = 1.; *s = 0.; z__[1] = tol; } else { *c__ = z1 / z__[1]; *s = -z__[m] / z__[1]; } drot_(&c__1, &vf[m], &c__1, &vf[1], &c__1, c__, s); drot_(&c__1, &vl[m], &c__1, &vl[1], &c__1, c__, s); } else { if (abs(z1) <= tol) { z__[1] = tol; } else { z__[1] = z1; } } /* Restore Z, VF, and VL. 
*/ i__1 = *k - 1; dcopy_(&i__1, &zw[2], &c__1, &z__[2], &c__1); i__1 = n - 1; dcopy_(&i__1, &vfw[2], &c__1, &vf[2], &c__1); i__1 = n - 1; dcopy_(&i__1, &vlw[2], &c__1, &vl[2], &c__1); return 0; /* End of DLASD7 */ } /* dlasd7_ */ /* Subroutine */ int dlasd8_(integer *icompq, integer *k, doublereal *d__, doublereal *z__, doublereal *vf, doublereal *vl, doublereal *difl, doublereal *difr, integer *lddifr, doublereal *dsigma, doublereal * work, integer *info) { /* System generated locals */ integer difr_dim1, difr_offset, i__1, i__2; doublereal d__1, d__2; /* Builtin functions */ double sqrt(doublereal), d_sign(doublereal *, doublereal *); /* Local variables */ extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *, integer *); static doublereal temp; extern doublereal dnrm2_(integer *, doublereal *, integer *); static integer iwk2i, iwk3i, i__, j; static doublereal diflj, difrj, dsigj; extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *, doublereal *, integer *); extern doublereal dlamc3_(doublereal *, doublereal *); extern /* Subroutine */ int dlasd4_(integer *, integer *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, integer *); static doublereal dj; extern /* Subroutine */ int dlascl_(char *, integer *, integer *, doublereal *, doublereal *, integer *, integer *, doublereal *, integer *, integer *), dlaset_(char *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *); static doublereal dsigjp, rho; static integer iwk1, iwk2, iwk3; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLASD8 finds the square roots of the roots of the secular equation, as defined by the values in DSIGMA and Z. It makes the appropriate calls to DLASD4, and stores, for each element in D, the distance to its two nearest poles (elements in DSIGMA). It also updates the arrays VF and VL, the first and last components of all the right singular vectors of the original bidiagonal matrix. DLASD8 is called from DLASD6. Arguments ========= ICOMPQ (input) INTEGER Specifies whether singular vectors are to be computed in factored form in the calling routine: = 0: Compute singular values only. = 1: Compute singular vectors in factored form as well. K (input) INTEGER The number of terms in the rational function to be solved by DLASD4. K >= 1. D (output) DOUBLE PRECISION array, dimension ( K ) On output, D contains the updated singular values. Z (input) DOUBLE PRECISION array, dimension ( K ) The first K elements of this array contain the components of the deflation-adjusted updating row vector. VF (input/output) DOUBLE PRECISION array, dimension ( K ) On entry, VF contains information passed through DBEDE8. On exit, VF contains the first K components of the first components of all right singular vectors of the bidiagonal matrix. VL (input/output) DOUBLE PRECISION array, dimension ( K ) On entry, VL contains information passed through DBEDE8. On exit, VL contains the first K components of the last components of all right singular vectors of the bidiagonal matrix. DIFL (output) DOUBLE PRECISION array, dimension ( K ) On exit, DIFL(I) = D(I) - DSIGMA(I). DIFR (output) DOUBLE PRECISION array, dimension ( LDDIFR, 2 ) if ICOMPQ = 1 and dimension ( K ) if ICOMPQ = 0. On exit, DIFR(I,1) = D(I) - DSIGMA(I+1), DIFR(K,1) is not defined and will not be referenced. 
If ICOMPQ = 1, DIFR(1:K,2) is an array containing the normalizing factors for the right singular vector matrix. LDDIFR (input) INTEGER The leading dimension of DIFR, must be at least K. DSIGMA (input) DOUBLE PRECISION array, dimension ( K ) The first K elements of this array contain the old roots of the deflated updating problem. These are the poles of the secular equation. WORK (workspace) DOUBLE PRECISION array, dimension at least 3 * K INFO (output) INTEGER = 0: successful exit. < 0: if INFO = -i, the i-th argument had an illegal value. > 0: if INFO = 1, an singular value did not converge Further Details =============== Based on contributions by Ming Gu and Huan Ren, Computer Science Division, University of California at Berkeley, USA ===================================================================== Test the input parameters. */ /* Parameter adjustments */ --d__; --z__; --vf; --vl; --difl; difr_dim1 = *lddifr; difr_offset = 1 + difr_dim1 * 1; difr -= difr_offset; --dsigma; --work; /* Function Body */ *info = 0; if (*icompq < 0 || *icompq > 1) { *info = -1; } else if (*k < 1) { *info = -2; } else if (*lddifr < *k) { *info = -9; } if (*info != 0) { i__1 = -(*info); xerbla_("DLASD8", &i__1); return 0; } /* Quick return if possible */ if (*k == 1) { d__[1] = abs(z__[1]); difl[1] = d__[1]; if (*icompq == 1) { difl[2] = 1.; difr[(difr_dim1 << 1) + 1] = 1.; } return 0; } /* Modify values DSIGMA(i) to make sure all DSIGMA(i)-DSIGMA(j) can be computed with high relative accuracy (barring over/underflow). This is a problem on machines without a guard digit in add/subtract (Cray XMP, Cray YMP, Cray C 90 and Cray 2). The following code replaces DSIGMA(I) by 2*DSIGMA(I)-DSIGMA(I), which on any of these machines zeros out the bottommost bit of DSIGMA(I) if it is 1; this makes the subsequent subtractions DSIGMA(I)-DSIGMA(J) unproblematic when cancellation occurs. On binary machines with a guard digit (almost all machines) it does not change DSIGMA(I) at all. On hexadecimal and decimal machines with a guard digit, it slightly changes the bottommost bits of DSIGMA(I). It does not account for hexadecimal or decimal machines without guard digits (we know of none). We use a subroutine call to compute 2*DSIGMA(I) to prevent optimizing compilers from eliminating this code. */ i__1 = *k; for (i__ = 1; i__ <= i__1; ++i__) { dsigma[i__] = dlamc3_(&dsigma[i__], &dsigma[i__]) - dsigma[i__]; /* L10: */ } /* Book keeping. */ iwk1 = 1; iwk2 = iwk1 + *k; iwk3 = iwk2 + *k; iwk2i = iwk2 - 1; iwk3i = iwk3 - 1; /* Normalize Z. */ rho = dnrm2_(k, &z__[1], &c__1); dlascl_("G", &c__0, &c__0, &rho, &c_b15, k, &c__1, &z__[1], k, info); rho *= rho; /* Initialize WORK(IWK3). */ dlaset_("A", k, &c__1, &c_b15, &c_b15, &work[iwk3], k); /* Compute the updated singular values, the arrays DIFL, DIFR, and the updated Z. */ i__1 = *k; for (j = 1; j <= i__1; ++j) { dlasd4_(k, &j, &dsigma[1], &z__[1], &work[iwk1], &rho, &d__[j], &work[ iwk2], info); /* If the root finder fails, the computation is terminated. 
*/ if (*info != 0) { return 0; } work[iwk3i + j] = work[iwk3i + j] * work[j] * work[iwk2i + j]; difl[j] = -work[j]; difr[j + difr_dim1] = -work[j + 1]; i__2 = j - 1; for (i__ = 1; i__ <= i__2; ++i__) { work[iwk3i + i__] = work[iwk3i + i__] * work[i__] * work[iwk2i + i__] / (dsigma[i__] - dsigma[j]) / (dsigma[i__] + dsigma[ j]); /* L20: */ } i__2 = *k; for (i__ = j + 1; i__ <= i__2; ++i__) { work[iwk3i + i__] = work[iwk3i + i__] * work[i__] * work[iwk2i + i__] / (dsigma[i__] - dsigma[j]) / (dsigma[i__] + dsigma[ j]); /* L30: */ } /* L40: */ } /* Compute updated Z. */ i__1 = *k; for (i__ = 1; i__ <= i__1; ++i__) { d__2 = sqrt((d__1 = work[iwk3i + i__], abs(d__1))); z__[i__] = d_sign(&d__2, &z__[i__]); /* L50: */ } /* Update VF and VL. */ i__1 = *k; for (j = 1; j <= i__1; ++j) { diflj = difl[j]; dj = d__[j]; dsigj = -dsigma[j]; if (j < *k) { difrj = -difr[j + difr_dim1]; dsigjp = -dsigma[j + 1]; } work[j] = -z__[j] / diflj / (dsigma[j] + dj); i__2 = j - 1; for (i__ = 1; i__ <= i__2; ++i__) { work[i__] = z__[i__] / (dlamc3_(&dsigma[i__], &dsigj) - diflj) / ( dsigma[i__] + dj); /* L60: */ } i__2 = *k; for (i__ = j + 1; i__ <= i__2; ++i__) { work[i__] = z__[i__] / (dlamc3_(&dsigma[i__], &dsigjp) + difrj) / (dsigma[i__] + dj); /* L70: */ } temp = dnrm2_(k, &work[1], &c__1); work[iwk2i + j] = ddot_(k, &work[1], &c__1, &vf[1], &c__1) / temp; work[iwk3i + j] = ddot_(k, &work[1], &c__1, &vl[1], &c__1) / temp; if (*icompq == 1) { difr[j + (difr_dim1 << 1)] = temp; } /* L80: */ } dcopy_(k, &work[iwk2], &c__1, &vf[1], &c__1); dcopy_(k, &work[iwk3], &c__1, &vl[1], &c__1); return 0; /* End of DLASD8 */ } /* dlasd8_ */ /* Subroutine */ int dlasda_(integer *icompq, integer *smlsiz, integer *n, integer *sqre, doublereal *d__, doublereal *e, doublereal *u, integer *ldu, doublereal *vt, integer *k, doublereal *difl, doublereal *difr, doublereal *z__, doublereal *poles, integer *givptr, integer *givcol, integer *ldgcol, integer *perm, doublereal *givnum, doublereal *c__, doublereal *s, doublereal *work, integer *iwork, integer *info) { /* System generated locals */ integer givcol_dim1, givcol_offset, perm_dim1, perm_offset, difl_dim1, difl_offset, difr_dim1, difr_offset, givnum_dim1, givnum_offset, poles_dim1, poles_offset, u_dim1, u_offset, vt_dim1, vt_offset, z_dim1, z_offset, i__1, i__2; /* Builtin functions */ integer pow_ii(integer *, integer *); /* Local variables */ static doublereal beta; static integer idxq, nlvl, i__, j, m; static doublereal alpha; static integer inode, ndiml, ndimr, idxqi, itemp; extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *, doublereal *, integer *); static integer sqrei, i1; extern /* Subroutine */ int dlasd6_(integer *, integer *, integer *, integer *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, integer *, integer *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, doublereal *, doublereal *, integer *, doublereal *, doublereal *, doublereal *, integer *, integer *); static integer ic, nwork1, lf, nd, nwork2, ll, nl, vf, nr, vl; extern /* Subroutine */ int dlasdq_(char *, integer *, integer *, integer *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *), dlasdt_(integer *, integer *, integer *, integer *, integer *, integer *, integer *), dlaset_( char *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *); static integer im1, smlszp, ncc, nlf, 
nrf, vfi, iwk, vli, lvl, nru, ndb1, nlp1, lvl2, nrp1; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= Using a divide and conquer approach, DLASDA computes the singular value decomposition (SVD) of a real upper bidiagonal N-by-M matrix B with diagonal D and offdiagonal E, where M = N + SQRE. The algorithm computes the singular values in the SVD B = U * S * VT. The orthogonal matrices U and VT are optionally computed in compact form. A related subroutine, DLASD0, computes the singular values and the singular vectors in explicit form. Arguments ========= ICOMPQ (input) INTEGER Specifies whether singular vectors are to be computed in compact form, as follows = 0: Compute singular values only. = 1: Compute singular vectors of upper bidiagonal matrix in compact form. SMLSIZ (input) INTEGER The maximum size of the subproblems at the bottom of the computation tree. N (input) INTEGER The row dimension of the upper bidiagonal matrix. This is also the dimension of the main diagonal array D. SQRE (input) INTEGER Specifies the column dimension of the bidiagonal matrix. = 0: The bidiagonal matrix has column dimension M = N; = 1: The bidiagonal matrix has column dimension M = N + 1. D (input/output) DOUBLE PRECISION array, dimension ( N ) On entry D contains the main diagonal of the bidiagonal matrix. On exit D, if INFO = 0, contains its singular values. E (input) DOUBLE PRECISION array, dimension ( M-1 ) Contains the subdiagonal entries of the bidiagonal matrix. On exit, E has been destroyed. U (output) DOUBLE PRECISION array, dimension ( LDU, SMLSIZ ) if ICOMPQ = 1, and not referenced if ICOMPQ = 0. If ICOMPQ = 1, on exit, U contains the left singular vector matrices of all subproblems at the bottom level. LDU (input) INTEGER, LDU = > N. The leading dimension of arrays U, VT, DIFL, DIFR, POLES, GIVNUM, and Z. VT (output) DOUBLE PRECISION array, dimension ( LDU, SMLSIZ+1 ) if ICOMPQ = 1, and not referenced if ICOMPQ = 0. If ICOMPQ = 1, on exit, VT' contains the right singular vector matrices of all subproblems at the bottom level. K (output) INTEGER array, dimension ( N ) if ICOMPQ = 1 and dimension 1 if ICOMPQ = 0. If ICOMPQ = 1, on exit, K(I) is the dimension of the I-th secular equation on the computation tree. DIFL (output) DOUBLE PRECISION array, dimension ( LDU, NLVL ), where NLVL = floor(log_2 (N/SMLSIZ))). DIFR (output) DOUBLE PRECISION array, dimension ( LDU, 2 * NLVL ) if ICOMPQ = 1 and dimension ( N ) if ICOMPQ = 0. If ICOMPQ = 1, on exit, DIFL(1:N, I) and DIFR(1:N, 2 * I - 1) record distances between singular values on the I-th level and singular values on the (I -1)-th level, and DIFR(1:N, 2 * I ) contains the normalizing factors for the right singular vector matrix. See DLASD8 for details. Z (output) DOUBLE PRECISION array, dimension ( LDU, NLVL ) if ICOMPQ = 1 and dimension ( N ) if ICOMPQ = 0. The first K elements of Z(1, I) contain the components of the deflation-adjusted updating row vector for subproblems on the I-th level. POLES (output) DOUBLE PRECISION array, dimension ( LDU, 2 * NLVL ) if ICOMPQ = 1, and not referenced if ICOMPQ = 0. If ICOMPQ = 1, on exit, POLES(1, 2*I - 1) and POLES(1, 2*I) contain the new and old singular values involved in the secular equations on the I-th level. GIVPTR (output) INTEGER array, dimension ( N ) if ICOMPQ = 1, and not referenced if ICOMPQ = 0. 
If ICOMPQ = 1, on exit, GIVPTR( I ) records the number of Givens rotations performed on the I-th problem on the computation tree. GIVCOL (output) INTEGER array, dimension ( LDGCOL, 2 * NLVL ) if ICOMPQ = 1, and not referenced if ICOMPQ = 0. If ICOMPQ = 1, on exit, for each I, GIVCOL(1, 2 *I - 1) and GIVCOL(1, 2 *I) record the locations of Givens rotations performed on the I-th level on the computation tree. LDGCOL (input) INTEGER, LDGCOL = > N. The leading dimension of arrays GIVCOL and PERM. PERM (output) INTEGER array, dimension ( LDGCOL, NLVL ) if ICOMPQ = 1, and not referenced if ICOMPQ = 0. If ICOMPQ = 1, on exit, PERM(1, I) records permutations done on the I-th level of the computation tree. GIVNUM (output) DOUBLE PRECISION array, dimension ( LDU, 2 * NLVL ) if ICOMPQ = 1, and not referenced if ICOMPQ = 0. If ICOMPQ = 1, on exit, for each I, GIVNUM(1, 2 *I - 1) and GIVNUM(1, 2 *I) record the C- and S- values of Givens rotations performed on the I-th level on the computation tree. C (output) DOUBLE PRECISION array, dimension ( N ) if ICOMPQ = 1, and dimension 1 if ICOMPQ = 0. If ICOMPQ = 1 and the I-th subproblem is not square, on exit, C( I ) contains the C-value of a Givens rotation related to the right null space of the I-th subproblem. S (output) DOUBLE PRECISION array, dimension ( N ) if ICOMPQ = 1, and dimension 1 if ICOMPQ = 0. If ICOMPQ = 1 and the I-th subproblem is not square, on exit, S( I ) contains the S-value of a Givens rotation related to the right null space of the I-th subproblem. WORK (workspace) DOUBLE PRECISION array, dimension (6 * N + (SMLSIZ + 1)*(SMLSIZ + 1)). IWORK (workspace) INTEGER array. Dimension must be at least (7 * N). INFO (output) INTEGER = 0: successful exit. < 0: if INFO = -i, the i-th argument had an illegal value. > 0: if INFO = 1, an singular value did not converge Further Details =============== Based on contributions by Ming Gu and Huan Ren, Computer Science Division, University of California at Berkeley, USA ===================================================================== Test the input parameters. */ /* Parameter adjustments */ --d__; --e; givnum_dim1 = *ldu; givnum_offset = 1 + givnum_dim1 * 1; givnum -= givnum_offset; poles_dim1 = *ldu; poles_offset = 1 + poles_dim1 * 1; poles -= poles_offset; z_dim1 = *ldu; z_offset = 1 + z_dim1 * 1; z__ -= z_offset; difr_dim1 = *ldu; difr_offset = 1 + difr_dim1 * 1; difr -= difr_offset; difl_dim1 = *ldu; difl_offset = 1 + difl_dim1 * 1; difl -= difl_offset; vt_dim1 = *ldu; vt_offset = 1 + vt_dim1 * 1; vt -= vt_offset; u_dim1 = *ldu; u_offset = 1 + u_dim1 * 1; u -= u_offset; --k; --givptr; perm_dim1 = *ldgcol; perm_offset = 1 + perm_dim1 * 1; perm -= perm_offset; givcol_dim1 = *ldgcol; givcol_offset = 1 + givcol_dim1 * 1; givcol -= givcol_offset; --c__; --s; --work; --iwork; /* Function Body */ *info = 0; if (*icompq < 0 || *icompq > 1) { *info = -1; } else if (*smlsiz < 3) { *info = -2; } else if (*n < 0) { *info = -3; } else if (*sqre < 0 || *sqre > 1) { *info = -4; } else if (*ldu < *n + *sqre) { *info = -8; } else if (*ldgcol < *n) { *info = -17; } if (*info != 0) { i__1 = -(*info); xerbla_("DLASDA", &i__1); return 0; } m = *n + *sqre; /* If the input matrix is too small, call DLASDQ to find the SVD. 
*/ if (*n <= *smlsiz) { if (*icompq == 0) { dlasdq_("U", sqre, n, &c__0, &c__0, &c__0, &d__[1], &e[1], &vt[ vt_offset], ldu, &u[u_offset], ldu, &u[u_offset], ldu, & work[1], info); } else { dlasdq_("U", sqre, n, &m, n, &c__0, &d__[1], &e[1], &vt[vt_offset] , ldu, &u[u_offset], ldu, &u[u_offset], ldu, &work[1], info); } return 0; } /* Book-keeping and set up the computation tree. */ inode = 1; ndiml = inode + *n; ndimr = ndiml + *n; idxq = ndimr + *n; iwk = idxq + *n; ncc = 0; nru = 0; smlszp = *smlsiz + 1; vf = 1; vl = vf + m; nwork1 = vl + m; nwork2 = nwork1 + smlszp * smlszp; dlasdt_(n, &nlvl, &nd, &iwork[inode], &iwork[ndiml], &iwork[ndimr], smlsiz); /* for the nodes on bottom level of the tree, solve their subproblems by DLASDQ. */ ndb1 = (nd + 1) / 2; i__1 = nd; for (i__ = ndb1; i__ <= i__1; ++i__) { /* IC : center row of each node NL : number of rows of left subproblem NR : number of rows of right subproblem NLF: starting row of the left subproblem NRF: starting row of the right subproblem */ i1 = i__ - 1; ic = iwork[inode + i1]; nl = iwork[ndiml + i1]; nlp1 = nl + 1; nr = iwork[ndimr + i1]; nlf = ic - nl; nrf = ic + 1; idxqi = idxq + nlf - 2; vfi = vf + nlf - 1; vli = vl + nlf - 1; sqrei = 1; if (*icompq == 0) { dlaset_("A", &nlp1, &nlp1, &c_b29, &c_b15, &work[nwork1], &smlszp); dlasdq_("U", &sqrei, &nl, &nlp1, &nru, &ncc, &d__[nlf], &e[nlf], & work[nwork1], &smlszp, &work[nwork2], &nl, &work[nwork2], &nl, &work[nwork2], info); itemp = nwork1 + nl * smlszp; dcopy_(&nlp1, &work[nwork1], &c__1, &work[vfi], &c__1); dcopy_(&nlp1, &work[itemp], &c__1, &work[vli], &c__1); } else { dlaset_("A", &nl, &nl, &c_b29, &c_b15, &u[nlf + u_dim1], ldu); dlaset_("A", &nlp1, &nlp1, &c_b29, &c_b15, &vt[nlf + vt_dim1], ldu); dlasdq_("U", &sqrei, &nl, &nlp1, &nl, &ncc, &d__[nlf], &e[nlf], & vt[nlf + vt_dim1], ldu, &u[nlf + u_dim1], ldu, &u[nlf + u_dim1], ldu, &work[nwork1], info); dcopy_(&nlp1, &vt[nlf + vt_dim1], &c__1, &work[vfi], &c__1); dcopy_(&nlp1, &vt[nlf + nlp1 * vt_dim1], &c__1, &work[vli], &c__1) ; } if (*info != 0) { return 0; } i__2 = nl; for (j = 1; j <= i__2; ++j) { iwork[idxqi + j] = j; /* L10: */ } if (i__ == nd && *sqre == 0) { sqrei = 0; } else { sqrei = 1; } idxqi += nlp1; vfi += nlp1; vli += nlp1; nrp1 = nr + sqrei; if (*icompq == 0) { dlaset_("A", &nrp1, &nrp1, &c_b29, &c_b15, &work[nwork1], &smlszp); dlasdq_("U", &sqrei, &nr, &nrp1, &nru, &ncc, &d__[nrf], &e[nrf], & work[nwork1], &smlszp, &work[nwork2], &nr, &work[nwork2], &nr, &work[nwork2], info); itemp = nwork1 + (nrp1 - 1) * smlszp; dcopy_(&nrp1, &work[nwork1], &c__1, &work[vfi], &c__1); dcopy_(&nrp1, &work[itemp], &c__1, &work[vli], &c__1); } else { dlaset_("A", &nr, &nr, &c_b29, &c_b15, &u[nrf + u_dim1], ldu); dlaset_("A", &nrp1, &nrp1, &c_b29, &c_b15, &vt[nrf + vt_dim1], ldu); dlasdq_("U", &sqrei, &nr, &nrp1, &nr, &ncc, &d__[nrf], &e[nrf], & vt[nrf + vt_dim1], ldu, &u[nrf + u_dim1], ldu, &u[nrf + u_dim1], ldu, &work[nwork1], info); dcopy_(&nrp1, &vt[nrf + vt_dim1], &c__1, &work[vfi], &c__1); dcopy_(&nrp1, &vt[nrf + nrp1 * vt_dim1], &c__1, &work[vli], &c__1) ; } if (*info != 0) { return 0; } i__2 = nr; for (j = 1; j <= i__2; ++j) { iwork[idxqi + j] = j; /* L20: */ } /* L30: */ } /* Now conquer each subproblem bottom-up. */ j = pow_ii(&c__2, &nlvl); for (lvl = nlvl; lvl >= 1; --lvl) { lvl2 = (lvl << 1) - 1; /* Find the first node LF and last node LL on the current level LVL. 
*/ if (lvl == 1) { lf = 1; ll = 1; } else { i__1 = lvl - 1; lf = pow_ii(&c__2, &i__1); ll = (lf << 1) - 1; } i__1 = ll; for (i__ = lf; i__ <= i__1; ++i__) { im1 = i__ - 1; ic = iwork[inode + im1]; nl = iwork[ndiml + im1]; nr = iwork[ndimr + im1]; nlf = ic - nl; nrf = ic + 1; if (i__ == ll) { sqrei = *sqre; } else { sqrei = 1; } vfi = vf + nlf - 1; vli = vl + nlf - 1; idxqi = idxq + nlf - 1; alpha = d__[ic]; beta = e[ic]; if (*icompq == 0) { dlasd6_(icompq, &nl, &nr, &sqrei, &d__[nlf], &work[vfi], & work[vli], &alpha, &beta, &iwork[idxqi], &perm[ perm_offset], &givptr[1], &givcol[givcol_offset], ldgcol, &givnum[givnum_offset], ldu, &poles[ poles_offset], &difl[difl_offset], &difr[difr_offset], &z__[z_offset], &k[1], &c__[1], &s[1], &work[nwork1], &iwork[iwk], info); } else { --j; dlasd6_(icompq, &nl, &nr, &sqrei, &d__[nlf], &work[vfi], & work[vli], &alpha, &beta, &iwork[idxqi], &perm[nlf + lvl * perm_dim1], &givptr[j], &givcol[nlf + lvl2 * givcol_dim1], ldgcol, &givnum[nlf + lvl2 * givnum_dim1], ldu, &poles[nlf + lvl2 * poles_dim1], & difl[nlf + lvl * difl_dim1], &difr[nlf + lvl2 * difr_dim1], &z__[nlf + lvl * z_dim1], &k[j], &c__[j], &s[j], &work[nwork1], &iwork[iwk], info); } if (*info != 0) { return 0; } /* L40: */ } /* L50: */ } return 0; /* End of DLASDA */ } /* dlasda_ */ /* Subroutine */ int dlasdq_(char *uplo, integer *sqre, integer *n, integer * ncvt, integer *nru, integer *ncc, doublereal *d__, doublereal *e, doublereal *vt, integer *ldvt, doublereal *u, integer *ldu, doublereal *c__, integer *ldc, doublereal *work, integer *info) { /* System generated locals */ integer c_dim1, c_offset, u_dim1, u_offset, vt_dim1, vt_offset, i__1, i__2; /* Local variables */ static integer isub; static doublereal smin; static integer sqre1, i__, j; static doublereal r__; extern logical lsame_(char *, char *); extern /* Subroutine */ int dlasr_(char *, char *, char *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *), dswap_(integer *, doublereal *, integer * , doublereal *, integer *); static integer iuplo; static doublereal cs, sn; extern /* Subroutine */ int dlartg_(doublereal *, doublereal *, doublereal *, doublereal *, doublereal *), xerbla_(char *, integer *), dbdsqr_(char *, integer *, integer *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *); static logical rotate; static integer np1; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLASDQ computes the singular value decomposition (SVD) of a real (upper or lower) bidiagonal matrix with diagonal D and offdiagonal E, accumulating the transformations if desired. Letting B denote the input bidiagonal matrix, the algorithm computes orthogonal matrices Q and P such that B = Q * S * P' (P' denotes the transpose of P). The singular values S are overwritten on D. The input matrix U is changed to U * Q if desired. The input matrix VT is changed to P' * VT if desired. The input matrix C is changed to Q' * C if desired. See "Computing Small Singular Values of Bidiagonal Matrices With Guaranteed High Relative Accuracy," by J. Demmel and W. Kahan, LAPACK Working Note #3, for a detailed description of the algorithm. Arguments ========= UPLO (input) CHARACTER*1 On entry, UPLO specifies whether the input bidiagonal matrix is upper or lower bidiagonal, and wether it is square are not. UPLO = 'U' or 'u' B is upper bidiagonal. 
UPLO = 'L' or 'l' B is lower bidiagonal. SQRE (input) INTEGER = 0: then the input matrix is N-by-N. = 1: then the input matrix is N-by-(N+1) if UPLU = 'U' and (N+1)-by-N if UPLU = 'L'. The bidiagonal matrix has N = NL + NR + 1 rows and M = N + SQRE >= N columns. N (input) INTEGER On entry, N specifies the number of rows and columns in the matrix. N must be at least 0. NCVT (input) INTEGER On entry, NCVT specifies the number of columns of the matrix VT. NCVT must be at least 0. NRU (input) INTEGER On entry, NRU specifies the number of rows of the matrix U. NRU must be at least 0. NCC (input) INTEGER On entry, NCC specifies the number of columns of the matrix C. NCC must be at least 0. D (input/output) DOUBLE PRECISION array, dimension (N) On entry, D contains the diagonal entries of the bidiagonal matrix whose SVD is desired. On normal exit, D contains the singular values in ascending order. E (input/output) DOUBLE PRECISION array. dimension is (N-1) if SQRE = 0 and N if SQRE = 1. On entry, the entries of E contain the offdiagonal entries of the bidiagonal matrix whose SVD is desired. On normal exit, E will contain 0. If the algorithm does not converge, D and E will contain the diagonal and superdiagonal entries of a bidiagonal matrix orthogonally equivalent to the one given as input. VT (input/output) DOUBLE PRECISION array, dimension (LDVT, NCVT) On entry, contains a matrix which on exit has been premultiplied by P', dimension N-by-NCVT if SQRE = 0 and (N+1)-by-NCVT if SQRE = 1 (not referenced if NCVT=0). LDVT (input) INTEGER On entry, LDVT specifies the leading dimension of VT as declared in the calling (sub) program. LDVT must be at least 1. If NCVT is nonzero LDVT must also be at least N. U (input/output) DOUBLE PRECISION array, dimension (LDU, N) On entry, contains a matrix which on exit has been postmultiplied by Q, dimension NRU-by-N if SQRE = 0 and NRU-by-(N+1) if SQRE = 1 (not referenced if NRU=0). LDU (input) INTEGER On entry, LDU specifies the leading dimension of U as declared in the calling (sub) program. LDU must be at least max( 1, NRU ) . C (input/output) DOUBLE PRECISION array, dimension (LDC, NCC) On entry, contains an N-by-NCC matrix which on exit has been premultiplied by Q' dimension N-by-NCC if SQRE = 0 and (N+1)-by-NCC if SQRE = 1 (not referenced if NCC=0). LDC (input) INTEGER On entry, LDC specifies the leading dimension of C as declared in the calling (sub) program. LDC must be at least 1. If NCC is nonzero, LDC must also be at least N. WORK (workspace) DOUBLE PRECISION array, dimension (4*N) Workspace. Only referenced if one of NCVT, NRU, or NCC is nonzero, and if N is at least 2. INFO (output) INTEGER On exit, a value of 0 indicates a successful exit. If INFO < 0, argument number -INFO is illegal. If INFO > 0, the algorithm did not converge, and INFO specifies how many superdiagonals did not converge. Further Details =============== Based on contributions by Ming Gu and Huan Ren, Computer Science Division, University of California at Berkeley, USA ===================================================================== Test the input parameters. 
*/ /* Parameter adjustments */ --d__; --e; vt_dim1 = *ldvt; vt_offset = 1 + vt_dim1 * 1; vt -= vt_offset; u_dim1 = *ldu; u_offset = 1 + u_dim1 * 1; u -= u_offset; c_dim1 = *ldc; c_offset = 1 + c_dim1 * 1; c__ -= c_offset; --work; /* Function Body */ *info = 0; iuplo = 0; if (lsame_(uplo, "U")) { iuplo = 1; } if (lsame_(uplo, "L")) { iuplo = 2; } if (iuplo == 0) { *info = -1; } else if (*sqre < 0 || *sqre > 1) { *info = -2; } else if (*n < 0) { *info = -3; } else if (*ncvt < 0) { *info = -4; } else if (*nru < 0) { *info = -5; } else if (*ncc < 0) { *info = -6; } else if (*ncvt == 0 && *ldvt < 1 || *ncvt > 0 && *ldvt < max(1,*n)) { *info = -10; } else if (*ldu < max(1,*nru)) { *info = -12; } else if (*ncc == 0 && *ldc < 1 || *ncc > 0 && *ldc < max(1,*n)) { *info = -14; } if (*info != 0) { i__1 = -(*info); xerbla_("DLASDQ", &i__1); return 0; } if (*n == 0) { return 0; } /* ROTATE is true if any singular vectors desired, false otherwise */ rotate = *ncvt > 0 || *nru > 0 || *ncc > 0; np1 = *n + 1; sqre1 = *sqre; /* If matrix non-square upper bidiagonal, rotate to be lower bidiagonal. The rotations are on the right. */ if (iuplo == 1 && sqre1 == 1) { i__1 = *n - 1; for (i__ = 1; i__ <= i__1; ++i__) { dlartg_(&d__[i__], &e[i__], &cs, &sn, &r__); d__[i__] = r__; e[i__] = sn * d__[i__ + 1]; d__[i__ + 1] = cs * d__[i__ + 1]; if (rotate) { work[i__] = cs; work[*n + i__] = sn; } /* L10: */ } dlartg_(&d__[*n], &e[*n], &cs, &sn, &r__); d__[*n] = r__; e[*n] = 0.; if (rotate) { work[*n] = cs; work[*n + *n] = sn; } iuplo = 2; sqre1 = 0; /* Update singular vectors if desired. */ if (*ncvt > 0) { dlasr_("L", "V", "F", &np1, ncvt, &work[1], &work[np1], &vt[ vt_offset], ldvt); } } /* If matrix lower bidiagonal, rotate to be upper bidiagonal by applying Givens rotations on the left. */ if (iuplo == 2) { i__1 = *n - 1; for (i__ = 1; i__ <= i__1; ++i__) { dlartg_(&d__[i__], &e[i__], &cs, &sn, &r__); d__[i__] = r__; e[i__] = sn * d__[i__ + 1]; d__[i__ + 1] = cs * d__[i__ + 1]; if (rotate) { work[i__] = cs; work[*n + i__] = sn; } /* L20: */ } /* If matrix (N+1)-by-N lower bidiagonal, one additional rotation is needed. */ if (sqre1 == 1) { dlartg_(&d__[*n], &e[*n], &cs, &sn, &r__); d__[*n] = r__; if (rotate) { work[*n] = cs; work[*n + *n] = sn; } } /* Update singular vectors if desired. */ if (*nru > 0) { if (sqre1 == 0) { dlasr_("R", "V", "F", nru, n, &work[1], &work[np1], &u[ u_offset], ldu); } else { dlasr_("R", "V", "F", nru, &np1, &work[1], &work[np1], &u[ u_offset], ldu); } } if (*ncc > 0) { if (sqre1 == 0) { dlasr_("L", "V", "F", n, ncc, &work[1], &work[np1], &c__[ c_offset], ldc); } else { dlasr_("L", "V", "F", &np1, ncc, &work[1], &work[np1], &c__[ c_offset], ldc); } } } /* Call DBDSQR to compute the SVD of the reduced real N-by-N upper bidiagonal matrix. */ dbdsqr_("U", n, ncvt, nru, ncc, &d__[1], &e[1], &vt[vt_offset], ldvt, &u[ u_offset], ldu, &c__[c_offset], ldc, &work[1], info); /* Sort the singular values into ascending order (insertion sort on singular values, but only one transposition per singular vector) */ i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { /* Scan for smallest D(I). */ isub = i__; smin = d__[i__]; i__2 = *n; for (j = i__ + 1; j <= i__2; ++j) { if (d__[j] < smin) { isub = j; smin = d__[j]; } /* L30: */ } if (isub != i__) { /* Swap singular values and vectors. 
*/ d__[isub] = d__[i__]; d__[i__] = smin; if (*ncvt > 0) { dswap_(ncvt, &vt[isub + vt_dim1], ldvt, &vt[i__ + vt_dim1], ldvt); } if (*nru > 0) { dswap_(nru, &u[isub * u_dim1 + 1], &c__1, &u[i__ * u_dim1 + 1] , &c__1); } if (*ncc > 0) { dswap_(ncc, &c__[isub + c_dim1], ldc, &c__[i__ + c_dim1], ldc) ; } } /* L40: */ } return 0; /* End of DLASDQ */ } /* dlasdq_ */ /* Subroutine */ int dlasdt_(integer *n, integer *lvl, integer *nd, integer * inode, integer *ndiml, integer *ndimr, integer *msub) { /* System generated locals */ integer i__1, i__2; /* Builtin functions */ double log(doublereal); /* Local variables */ static integer maxn; static doublereal temp; static integer nlvl, llst, i__, ncrnt, il, ir; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLASDT creates a tree of subproblems for bidiagonal divide and conquer. Arguments ========= N (input) INTEGER On entry, the number of diagonal elements of the bidiagonal matrix. LVL (output) INTEGER On exit, the number of levels on the computation tree. ND (output) INTEGER On exit, the number of nodes on the tree. INODE (output) INTEGER array, dimension ( N ) On exit, centers of subproblems. NDIML (output) INTEGER array, dimension ( N ) On exit, row dimensions of left children. NDIMR (output) INTEGER array, dimension ( N ) On exit, row dimensions of right children. MSUB (input) INTEGER. On entry, the maximum row dimension each subproblem at the bottom of the tree can be of. Further Details =============== Based on contributions by Ming Gu and Huan Ren, Computer Science Division, University of California at Berkeley, USA ===================================================================== Find the number of levels on the tree. */ /* Parameter adjustments */ --ndimr; --ndiml; --inode; /* Function Body */ maxn = max(1,*n); temp = log((doublereal) maxn / (doublereal) (*msub + 1)) / log(2.); *lvl = (integer) temp + 1; i__ = *n / 2; inode[1] = i__ + 1; ndiml[1] = i__; ndimr[1] = *n - i__ - 1; il = 0; ir = 1; llst = 1; i__1 = *lvl - 1; for (nlvl = 1; nlvl <= i__1; ++nlvl) { /* Constructing the tree at (NLVL+1)-st level. The number of nodes created on this level is LLST * 2. */ i__2 = llst - 1; for (i__ = 0; i__ <= i__2; ++i__) { il += 2; ir += 2; ncrnt = llst + i__; ndiml[il] = ndiml[ncrnt] / 2; ndimr[il] = ndiml[ncrnt] - ndiml[il] - 1; inode[il] = inode[ncrnt] - ndimr[il] - 1; ndiml[ir] = ndimr[ncrnt] / 2; ndimr[ir] = ndimr[ncrnt] - ndiml[ir] - 1; inode[ir] = inode[ncrnt] + ndiml[ir] + 1; /* L10: */ } llst <<= 1; /* L20: */ } *nd = (llst << 1) - 1; return 0; /* End of DLASDT */ } /* dlasdt_ */ /* Subroutine */ int dlaset_(char *uplo, integer *m, integer *n, doublereal * alpha, doublereal *beta, doublereal *a, integer *lda) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2, i__3; /* Local variables */ static integer i__, j; extern logical lsame_(char *, char *); /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLASET initializes an m-by-n matrix A to BETA on the diagonal and ALPHA on the offdiagonals. Arguments ========= UPLO (input) CHARACTER*1 Specifies the part of the matrix A to be set. = 'U': Upper triangular part is set; the strictly lower triangular part of A is not changed. = 'L': Lower triangular part is set; the strictly upper triangular part of A is not changed. Otherwise: All of the matrix A is set. 
M (input) INTEGER The number of rows of the matrix A. M >= 0. N (input) INTEGER The number of columns of the matrix A. N >= 0. ALPHA (input) DOUBLE PRECISION The constant to which the offdiagonal elements are to be set. BETA (input) DOUBLE PRECISION The constant to which the diagonal elements are to be set. A (input/output) DOUBLE PRECISION array, dimension (LDA,N) On exit, the leading m-by-n submatrix of A is set as follows: if UPLO = 'U', A(i,j) = ALPHA, 1<=i<=j-1, 1<=j<=n, if UPLO = 'L', A(i,j) = ALPHA, j+1<=i<=m, 1<=j<=n, otherwise, A(i,j) = ALPHA, 1<=i<=m, 1<=j<=n, i.ne.j, and, for all UPLO, A(i,i) = BETA, 1<=i<=min(m,n). LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,M). ===================================================================== */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; /* Function Body */ if (lsame_(uplo, "U")) { /* Set the strictly upper triangular or trapezoidal part of the array to ALPHA. */ i__1 = *n; for (j = 2; j <= i__1; ++j) { /* Computing MIN */ i__3 = j - 1; i__2 = min(i__3,*m); for (i__ = 1; i__ <= i__2; ++i__) { a[i__ + j * a_dim1] = *alpha; /* L10: */ } /* L20: */ } } else if (lsame_(uplo, "L")) { /* Set the strictly lower triangular or trapezoidal part of the array to ALPHA. */ i__1 = min(*m,*n); for (j = 1; j <= i__1; ++j) { i__2 = *m; for (i__ = j + 1; i__ <= i__2; ++i__) { a[i__ + j * a_dim1] = *alpha; /* L30: */ } /* L40: */ } } else { /* Set the leading m-by-n submatrix to ALPHA. */ i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { a[i__ + j * a_dim1] = *alpha; /* L50: */ } /* L60: */ } } /* Set the first min(M,N) diagonal elements to BETA. */ i__1 = min(*m,*n); for (i__ = 1; i__ <= i__1; ++i__) { a[i__ + i__ * a_dim1] = *beta; /* L70: */ } return 0; /* End of DLASET */ } /* dlaset_ */ /* Subroutine */ int dlasq1_(integer *n, doublereal *d__, doublereal *e, doublereal *work, integer *info) { /* System generated locals */ integer i__1, i__2; doublereal d__1, d__2, d__3; /* Builtin functions */ double sqrt(doublereal); /* Local variables */ extern /* Subroutine */ int dlas2_(doublereal *, doublereal *, doublereal *, doublereal *, doublereal *); static integer i__; static doublereal scale; static integer iinfo; static doublereal sigmn; extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *, doublereal *, integer *); static doublereal sigmx; extern /* Subroutine */ int dlasq2_(integer *, doublereal *, integer *); extern /* Subroutine */ int dlascl_(char *, integer *, integer *, doublereal *, doublereal *, integer *, integer *, doublereal *, integer *, integer *); static doublereal safmin; extern /* Subroutine */ int xerbla_(char *, integer *), dlasrt_( char *, integer *, doublereal *, integer *); static doublereal eps; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLASQ1 computes the singular values of a real N-by-N bidiagonal matrix with diagonal D and off-diagonal E. The singular values are computed to high relative accuracy, in the absence of denormalization, underflow and overflow. The algorithm was first presented in "Accurate singular values and differential qd algorithms" by K. V. Fernando and B. N. Parlett, Numer. Math., Vol-67, No. 2, pp. 191-230, 1994, and the present implementation is described in "An implementation of the dqds Algorithm (Positive Case)", LAPACK Working Note. 
Arguments ========= N (input) INTEGER The number of rows and columns in the matrix. N >= 0. D (input/output) DOUBLE PRECISION array, dimension (N) On entry, D contains the diagonal elements of the bidiagonal matrix whose SVD is desired. On normal exit, D contains the singular values in decreasing order. E (input/output) DOUBLE PRECISION array, dimension (N) On entry, elements E(1:N-1) contain the off-diagonal elements of the bidiagonal matrix whose SVD is desired. On exit, E is overwritten. WORK (workspace) DOUBLE PRECISION array, dimension (4*N) INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value > 0: the algorithm failed = 1, a split was marked by a positive value in E = 2, current block of Z not diagonalized after 30*N iterations (in inner while loop) = 3, termination criterion of outer while loop not met (program created more than N unreduced blocks) ===================================================================== */ /* Parameter adjustments */ --work; --e; --d__; /* Function Body */ *info = 0; if (*n < 0) { *info = -2; i__1 = -(*info); xerbla_("DLASQ1", &i__1); return 0; } else if (*n == 0) { return 0; } else if (*n == 1) { d__[1] = abs(d__[1]); return 0; } else if (*n == 2) { dlas2_(&d__[1], &e[1], &d__[2], &sigmn, &sigmx); d__[1] = sigmx; d__[2] = sigmn; return 0; } /* Estimate the largest singular value. */ sigmx = 0.; i__1 = *n - 1; for (i__ = 1; i__ <= i__1; ++i__) { d__[i__] = (d__1 = d__[i__], abs(d__1)); /* Computing MAX */ d__2 = sigmx, d__3 = (d__1 = e[i__], abs(d__1)); sigmx = max(d__2,d__3); /* L10: */ } d__[*n] = (d__1 = d__[*n], abs(d__1)); /* Early return if SIGMX is zero (matrix is already diagonal). */ if (sigmx == 0.) { dlasrt_("D", n, &d__[1], &iinfo); return 0; } i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { /* Computing MAX */ d__1 = sigmx, d__2 = d__[i__]; sigmx = max(d__1,d__2); /* L20: */ } /* Copy D and E into WORK (in the Z format) and scale (squaring the input data makes scaling by a power of the radix pointless). */ eps = PRECISION; safmin = SAFEMINIMUM; scale = sqrt(eps / safmin); dcopy_(n, &d__[1], &c__1, &work[1], &c__2); i__1 = *n - 1; dcopy_(&i__1, &e[1], &c__1, &work[2], &c__2); i__1 = (*n << 1) - 1; i__2 = (*n << 1) - 1; dlascl_("G", &c__0, &c__0, &sigmx, &scale, &i__1, &c__1, &work[1], &i__2, &iinfo); /* Compute the q's and e's. 
*/ i__1 = (*n << 1) - 1; for (i__ = 1; i__ <= i__1; ++i__) { /* Computing 2nd power */ d__1 = work[i__]; work[i__] = d__1 * d__1; /* L30: */ } work[*n * 2] = 0.; dlasq2_(n, &work[1], info); if (*info == 0) { i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { d__[i__] = sqrt(work[i__]); /* L40: */ } dlascl_("G", &c__0, &c__0, &scale, &sigmx, n, &c__1, &d__[1], n, & iinfo); } return 0; /* End of DLASQ1 */ } /* dlasq1_ */ /* Subroutine */ int dlasq2_(integer *n, doublereal *z__, integer *info) { /* System generated locals */ integer i__1, i__2, i__3; doublereal d__1, d__2; /* Builtin functions */ double sqrt(doublereal); /* Local variables */ static logical ieee; static integer nbig; static doublereal dmin__, emin, emax; static integer ndiv, iter; static doublereal qmin, temp, qmax, zmax; static integer splt; static doublereal dmin1, dmin2, d__, e; static integer k; static doublereal s, t; static integer nfail; static doublereal desig, trace, sigma; static integer iinfo, i0, i4, n0, ttype; extern /* Subroutine */ int dlazq3_(integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, doublereal *, doublereal *, integer *, integer *, integer *, logical *, integer *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *); static doublereal dn; static integer pp, iwhila, iwhilb; static doublereal oldemn, safmin; extern /* Subroutine */ int xerbla_(char *, integer *); extern integer ilaenv_(integer *, char *, char *, integer *, integer *, integer *, integer *, ftnlen, ftnlen); extern /* Subroutine */ int dlasrt_(char *, integer *, doublereal *, integer *); static doublereal dn1, dn2, eps, tau, tol; static integer ipn4; static doublereal tol2; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Modified to call DLAZQ3 in place of DLASQ3, 13 Feb 03, SJH. Purpose ======= DLASQ2 computes all the eigenvalues of the symmetric positive definite tridiagonal matrix associated with the qd array Z to high relative accuracy are computed to high relative accuracy, in the absence of denormalization, underflow and overflow. To see the relation of Z to the tridiagonal matrix, let L be a unit lower bidiagonal matrix with subdiagonals Z(2,4,6,,..) and let U be an upper bidiagonal matrix with 1's above and diagonal Z(1,3,5,,..). The tridiagonal is L*U or, if you prefer, the symmetric tridiagonal to which it is similar. Note : DLASQ2 defines a logical variable, IEEE, which is true on machines which follow ieee-754 floating-point standard in their handling of infinities and NaNs, and false otherwise. This variable is passed to DLAZQ3. Arguments ========= N (input) INTEGER The number of rows and columns in the matrix. N >= 0. Z (workspace) DOUBLE PRECISION array, dimension ( 4*N ) On entry Z holds the qd array. On exit, entries 1 to N hold the eigenvalues in decreasing order, Z( 2*N+1 ) holds the trace, and Z( 2*N+2 ) holds the sum of the eigenvalues. If N > 2, then Z( 2*N+3 ) holds the iteration count, Z( 2*N+4 ) holds NDIVS/NIN^2, and Z( 2*N+5 ) holds the percentage of shifts that failed. 
INFO (output) INTEGER = 0: successful exit < 0: if the i-th argument is a scalar and had an illegal value, then INFO = -i, if the i-th argument is an array and the j-entry had an illegal value, then INFO = -(i*100+j) > 0: the algorithm failed = 1, a split was marked by a positive value in E = 2, current block of Z not diagonalized after 30*N iterations (in inner while loop) = 3, termination criterion of outer while loop not met (program created more than N unreduced blocks) Further Details =============== Local Variables: I0:N0 defines a current unreduced segment of Z. The shifts are accumulated in SIGMA. Iteration count is in ITER. Ping-pong is controlled by PP (alternates between 0 and 1). ===================================================================== Test the input arguments. (in case DLASQ2 is not called by DLASQ1) */ /* Parameter adjustments */ --z__; /* Function Body */ *info = 0; eps = PRECISION; safmin = SAFEMINIMUM; tol = eps * 100.; /* Computing 2nd power */ d__1 = tol; tol2 = d__1 * d__1; if (*n < 0) { *info = -1; xerbla_("DLASQ2", &c__1); return 0; } else if (*n == 0) { return 0; } else if (*n == 1) { /* 1-by-1 case. */ if (z__[1] < 0.) { *info = -201; xerbla_("DLASQ2", &c__2); } return 0; } else if (*n == 2) { /* 2-by-2 case. */ if (z__[2] < 0. || z__[3] < 0.) { *info = -2; xerbla_("DLASQ2", &c__2); return 0; } else if (z__[3] > z__[1]) { d__ = z__[3]; z__[3] = z__[1]; z__[1] = d__; } z__[5] = z__[1] + z__[2] + z__[3]; if (z__[2] > z__[3] * tol2) { t = (z__[1] - z__[3] + z__[2]) * .5; s = z__[3] * (z__[2] / t); if (s <= t) { s = z__[3] * (z__[2] / (t * (sqrt(s / t + 1.) + 1.))); } else { s = z__[3] * (z__[2] / (t + sqrt(t) * sqrt(t + s))); } t = z__[1] + (s + z__[2]); z__[3] *= z__[1] / t; z__[1] = t; } z__[2] = z__[3]; z__[6] = z__[2] + z__[1]; return 0; } /* Check for negative data and compute sums of q's and e's. */ z__[*n * 2] = 0.; emin = z__[2]; qmax = 0.; zmax = 0.; d__ = 0.; e = 0.; i__1 = *n - 1 << 1; for (k = 1; k <= i__1; k += 2) { if (z__[k] < 0.) { *info = -(k + 200); xerbla_("DLASQ2", &c__2); return 0; } else if (z__[k + 1] < 0.) { *info = -(k + 201); xerbla_("DLASQ2", &c__2); return 0; } d__ += z__[k]; e += z__[k + 1]; /* Computing MAX */ d__1 = qmax, d__2 = z__[k]; qmax = max(d__1,d__2); /* Computing MIN */ d__1 = emin, d__2 = z__[k + 1]; emin = min(d__1,d__2); /* Computing MAX */ d__1 = max(qmax,zmax), d__2 = z__[k + 1]; zmax = max(d__1,d__2); /* L10: */ } if (z__[(*n << 1) - 1] < 0.) { *info = -((*n << 1) + 199); xerbla_("DLASQ2", &c__2); return 0; } d__ += z__[(*n << 1) - 1]; /* Computing MAX */ d__1 = qmax, d__2 = z__[(*n << 1) - 1]; qmax = max(d__1,d__2); zmax = max(qmax,zmax); /* Check for diagonality. */ if (e == 0.) { i__1 = *n; for (k = 2; k <= i__1; ++k) { z__[k] = z__[(k << 1) - 1]; /* L20: */ } dlasrt_("D", n, &z__[1], &iinfo); z__[(*n << 1) - 1] = d__; return 0; } trace = d__ + e; /* Check for zero data. */ if (trace == 0.) { z__[(*n << 1) - 1] = 0.; return 0; } /* Check whether the machine is IEEE conformable. */ ieee = ilaenv_(&c__10, "DLASQ2", "N", &c__1, &c__2, &c__3, &c__4, (ftnlen) 6, (ftnlen)1) == 1 && ilaenv_(&c__11, "DLASQ2", "N", &c__1, &c__2, &c__3, &c__4, (ftnlen)6, (ftnlen)1) == 1; /* Rearrange data for locality: Z=(q1,qq1,e1,ee1,q2,qq2,e2,ee2,...). */ for (k = *n << 1; k >= 2; k += -2) { z__[k * 2] = 0.; z__[(k << 1) - 1] = z__[k]; z__[(k << 1) - 2] = 0.; z__[(k << 1) - 3] = z__[k - 1]; /* L30: */ } i0 = 1; n0 = *n; /* Reverse the qd-array, if warranted. 
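   The test below compares the first and last q entries of the packed Z array,
   which stores four slots per index.  A minimal sketch of the same idea with
   separate 0-based q and e arrays instead of the packed layout, using the
   hypothetical name example_reverse_qd_if_warranted:

       static void example_reverse_qd_if_warranted(int n, double *q, double *e)
       {
           int i;
           double t;
           if (q[0] * 1.5 < q[n - 1]) {       // leading q much smaller than trailing q
               for (i = 0; i < n / 2; ++i) {
                   t = q[i]; q[i] = q[n - 1 - i]; q[n - 1 - i] = t;
               }
               for (i = 0; i < (n - 1) / 2; ++i) {
                   t = e[i]; e[i] = e[n - 2 - i]; e[n - 2 - i] = t;
               }
           }
       }

   The usual motivation is to move the smaller entries toward the end of the
   array, where the dqds iterations deflate.
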
*/ if (z__[(i0 << 2) - 3] * 1.5 < z__[(n0 << 2) - 3]) { ipn4 = i0 + n0 << 2; i__1 = i0 + n0 - 1 << 1; for (i4 = i0 << 2; i4 <= i__1; i4 += 4) { temp = z__[i4 - 3]; z__[i4 - 3] = z__[ipn4 - i4 - 3]; z__[ipn4 - i4 - 3] = temp; temp = z__[i4 - 1]; z__[i4 - 1] = z__[ipn4 - i4 - 5]; z__[ipn4 - i4 - 5] = temp; /* L40: */ } } /* Initial split checking via dqd and Li's test. */ pp = 0; for (k = 1; k <= 2; ++k) { d__ = z__[(n0 << 2) + pp - 3]; i__1 = (i0 << 2) + pp; for (i4 = (n0 - 1 << 2) + pp; i4 >= i__1; i4 += -4) { if (z__[i4 - 1] <= tol2 * d__) { z__[i4 - 1] = 0.; d__ = z__[i4 - 3]; } else { d__ = z__[i4 - 3] * (d__ / (d__ + z__[i4 - 1])); } /* L50: */ } /* dqd maps Z to ZZ plus Li's test. */ emin = z__[(i0 << 2) + pp + 1]; d__ = z__[(i0 << 2) + pp - 3]; i__1 = (n0 - 1 << 2) + pp; for (i4 = (i0 << 2) + pp; i4 <= i__1; i4 += 4) { z__[i4 - (pp << 1) - 2] = d__ + z__[i4 - 1]; if (z__[i4 - 1] <= tol2 * d__) { z__[i4 - 1] = 0.; z__[i4 - (pp << 1) - 2] = d__; z__[i4 - (pp << 1)] = 0.; d__ = z__[i4 + 1]; } else if (safmin * z__[i4 + 1] < z__[i4 - (pp << 1) - 2] && safmin * z__[i4 - (pp << 1) - 2] < z__[i4 + 1]) { temp = z__[i4 + 1] / z__[i4 - (pp << 1) - 2]; z__[i4 - (pp << 1)] = z__[i4 - 1] * temp; d__ *= temp; } else { z__[i4 - (pp << 1)] = z__[i4 + 1] * (z__[i4 - 1] / z__[i4 - ( pp << 1) - 2]); d__ = z__[i4 + 1] * (d__ / z__[i4 - (pp << 1) - 2]); } /* Computing MIN */ d__1 = emin, d__2 = z__[i4 - (pp << 1)]; emin = min(d__1,d__2); /* L60: */ } z__[(n0 << 2) - pp - 2] = d__; /* Now find qmax. */ qmax = z__[(i0 << 2) - pp - 2]; i__1 = (n0 << 2) - pp - 2; for (i4 = (i0 << 2) - pp + 2; i4 <= i__1; i4 += 4) { /* Computing MAX */ d__1 = qmax, d__2 = z__[i4]; qmax = max(d__1,d__2); /* L70: */ } /* Prepare for the next iteration on K. */ pp = 1 - pp; /* L80: */ } /* Initialise variables to pass to DLAZQ3 */ ttype = 0; dmin1 = 0.; dmin2 = 0.; dn = 0.; dn1 = 0.; dn2 = 0.; tau = 0.; iter = 2; nfail = 0; ndiv = n0 - i0 << 1; i__1 = *n + 1; for (iwhila = 1; iwhila <= i__1; ++iwhila) { if (n0 < 1) { goto L150; } /* While array unfinished do E(N0) holds the value of SIGMA when submatrix in I0:N0 splits from the rest of the array, but is negated. */ desig = 0.; if (n0 == *n) { sigma = 0.; } else { sigma = -z__[(n0 << 2) - 1]; } if (sigma < 0.) { *info = 1; return 0; } /* Find last unreduced submatrix's top index I0, find QMAX and EMIN. Find Gershgorin-type bound if Q's much greater than E's. */ emax = 0.; if (n0 > i0) { emin = (d__1 = z__[(n0 << 2) - 5], abs(d__1)); } else { emin = 0.; } qmin = z__[(n0 << 2) - 3]; qmax = qmin; for (i4 = n0 << 2; i4 >= 8; i4 += -4) { if (z__[i4 - 5] <= 0.) { goto L100; } if (qmin >= emax * 4.) { /* Computing MIN */ d__1 = qmin, d__2 = z__[i4 - 3]; qmin = min(d__1,d__2); /* Computing MAX */ d__1 = emax, d__2 = z__[i4 - 5]; emax = max(d__1,d__2); } /* Computing MAX */ d__1 = qmax, d__2 = z__[i4 - 7] + z__[i4 - 5]; qmax = max(d__1,d__2); /* Computing MIN */ d__1 = emin, d__2 = z__[i4 - 5]; emin = min(d__1,d__2); /* L90: */ } i4 = 4; L100: i0 = i4 / 4; /* Store EMIN for passing to DLAZQ3. */ z__[(n0 << 2) - 1] = emin; /* Put -(initial shift) into DMIN. Computing MAX */ d__1 = 0., d__2 = qmin - sqrt(qmin) * 2. * sqrt(emax); dmin__ = -max(d__1,d__2); /* Now I0:N0 is unreduced. PP = 0 for ping, PP = 1 for pong. */ pp = 0; nbig = (n0 - i0 + 1) * 30; i__2 = nbig; for (iwhilb = 1; iwhilb <= i__2; ++iwhilb) { if (i0 > n0) { goto L130; } /* While submatrix unfinished take a good dqds step. 
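*/

/*           A few lines above, DMIN is seeded with minus a Gershgorin-type
             bound computed from QMIN and EMAX.  As a standalone sketch of that
             bound (hypothetical name example_initial_shift, sqrt from <math.h>):

                 static double example_initial_shift(double qmin, double emax)
                 {
                     double s = qmin - 2.0 * sqrt(qmin) * sqrt(emax);
                     return s > 0.0 ? s : 0.0;   // DMIN below is the negative of this
                 }

             Each pass of the loop that follows takes one dqds step via DLAZQ3,
             within the iteration budget NBIG = 30*(N0-I0+1) set just above. */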
*/ dlazq3_(&i0, &n0, &z__[1], &pp, &dmin__, &sigma, &desig, &qmax, & nfail, &iter, &ndiv, &ieee, &ttype, &dmin1, &dmin2, &dn, & dn1, &dn2, &tau); pp = 1 - pp; /* When EMIN is very small check for splits. */ if (pp == 0 && n0 - i0 >= 3) { if (z__[n0 * 4] <= tol2 * qmax || z__[(n0 << 2) - 1] <= tol2 * sigma) { splt = i0 - 1; qmax = z__[(i0 << 2) - 3]; emin = z__[(i0 << 2) - 1]; oldemn = z__[i0 * 4]; i__3 = n0 - 3 << 2; for (i4 = i0 << 2; i4 <= i__3; i4 += 4) { if (z__[i4] <= tol2 * z__[i4 - 3] || z__[i4 - 1] <= tol2 * sigma) { z__[i4 - 1] = -sigma; splt = i4 / 4; qmax = 0.; emin = z__[i4 + 3]; oldemn = z__[i4 + 4]; } else { /* Computing MAX */ d__1 = qmax, d__2 = z__[i4 + 1]; qmax = max(d__1,d__2); /* Computing MIN */ d__1 = emin, d__2 = z__[i4 - 1]; emin = min(d__1,d__2); /* Computing MIN */ d__1 = oldemn, d__2 = z__[i4]; oldemn = min(d__1,d__2); } /* L110: */ } z__[(n0 << 2) - 1] = emin; z__[n0 * 4] = oldemn; i0 = splt + 1; } } /* L120: */ } *info = 2; return 0; /* end IWHILB */ L130: /* L140: */ ; } *info = 3; return 0; /* end IWHILA */ L150: /* Move q's to the front. */ i__1 = *n; for (k = 2; k <= i__1; ++k) { z__[k] = z__[(k << 2) - 3]; /* L160: */ } /* Sort and compute sum of eigenvalues. */ dlasrt_("D", n, &z__[1], &iinfo); e = 0.; for (k = *n; k >= 1; --k) { e += z__[k]; /* L170: */ } /* Store trace, sum(eigenvalues) and information on performance. */ z__[(*n << 1) + 1] = trace; z__[(*n << 1) + 2] = e; z__[(*n << 1) + 3] = (doublereal) iter; /* Computing 2nd power */ i__1 = *n; z__[(*n << 1) + 4] = (doublereal) ndiv / (doublereal) (i__1 * i__1); z__[(*n << 1) + 5] = nfail * 100. / (doublereal) iter; return 0; /* End of DLASQ2 */ } /* dlasq2_ */ /* Subroutine */ int dlasq5_(integer *i0, integer *n0, doublereal *z__, integer *pp, doublereal *tau, doublereal *dmin__, doublereal *dmin1, doublereal *dmin2, doublereal *dn, doublereal *dnm1, doublereal *dnm2, logical *ieee) { /* System generated locals */ integer i__1; doublereal d__1, d__2; /* Local variables */ static doublereal emin, temp, d__; static integer j4, j4p2; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLASQ5 computes one dqds transform in ping-pong form, one version for IEEE machines another for non IEEE machines. Arguments ========= I0 (input) INTEGER First index. N0 (input) INTEGER Last index. Z (input) DOUBLE PRECISION array, dimension ( 4*N ) Z holds the qd array. EMIN is stored in Z(4*N0) to avoid an extra argument. PP (input) INTEGER PP=0 for ping, PP=1 for pong. TAU (input) DOUBLE PRECISION This is the shift. DMIN (output) DOUBLE PRECISION Minimum value of d. DMIN1 (output) DOUBLE PRECISION Minimum value of d, excluding D( N0 ). DMIN2 (output) DOUBLE PRECISION Minimum value of d, excluding D( N0 ) and D( N0-1 ). DN (output) DOUBLE PRECISION d(N0), the last value of d. DNM1 (output) DOUBLE PRECISION d(N0-1). DNM2 (output) DOUBLE PRECISION d(N0-2). IEEE (input) LOGICAL Flag for IEEE or non IEEE arithmetic. ===================================================================== */ /* Parameter adjustments */ --z__; /* Function Body */ if (*n0 - *i0 - 1 <= 0) { return 0; } j4 = (*i0 << 2) + *pp - 3; emin = z__[j4 + 4]; d__ = z__[j4] - *tau; *dmin__ = d__; *dmin1 = -z__[j4]; if (*ieee) { /* Code for IEEE arithmetic. 
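*/

/*           The ping-pong loops below implement one shifted dqds transform on
             the interleaved Z array.  A minimal sketch with separate input
             arrays q, e and output arrays qq, ee (hypothetical name
             example_dqds_step; the real code keeps everything interleaved in Z
             and alternates the roles of the two copies via PP):

                 static double example_dqds_step(int n, const double *q,
                                                 const double *e, double tau,
                                                 double *qq, double *ee)
                 {
                     int k;
                     double t, d = q[0] - tau;
                     double dmin = d;
                     for (k = 0; k < n - 1; ++k) {
                         qq[k] = d + e[k];
                         t = q[k + 1] / qq[k];
                         ee[k] = e[k] * t;
                         d = d * t - tau;
                         if (d < dmin)
                             dmin = d;
                     }
                     qq[n - 1] = d;
                     return dmin;
                 }

             A negative result plays the role of DMIN < 0 for the caller: the
             shift TAU was too large and must be retried (see DLAZQ3 below). */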
*/ if (*pp == 0) { i__1 = *n0 - 3 << 2; for (j4 = *i0 << 2; j4 <= i__1; j4 += 4) { z__[j4 - 2] = d__ + z__[j4 - 1]; temp = z__[j4 + 1] / z__[j4 - 2]; d__ = d__ * temp - *tau; *dmin__ = min(*dmin__,d__); z__[j4] = z__[j4 - 1] * temp; /* Computing MIN */ d__1 = z__[j4]; emin = min(d__1,emin); /* L10: */ } } else { i__1 = *n0 - 3 << 2; for (j4 = *i0 << 2; j4 <= i__1; j4 += 4) { z__[j4 - 3] = d__ + z__[j4]; temp = z__[j4 + 2] / z__[j4 - 3]; d__ = d__ * temp - *tau; *dmin__ = min(*dmin__,d__); z__[j4 - 1] = z__[j4] * temp; /* Computing MIN */ d__1 = z__[j4 - 1]; emin = min(d__1,emin); /* L20: */ } } /* Unroll last two steps. */ *dnm2 = d__; *dmin2 = *dmin__; j4 = (*n0 - 2 << 2) - *pp; j4p2 = j4 + (*pp << 1) - 1; z__[j4 - 2] = *dnm2 + z__[j4p2]; z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]); *dnm1 = z__[j4p2 + 2] * (*dnm2 / z__[j4 - 2]) - *tau; *dmin__ = min(*dmin__,*dnm1); *dmin1 = *dmin__; j4 += 4; j4p2 = j4 + (*pp << 1) - 1; z__[j4 - 2] = *dnm1 + z__[j4p2]; z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]); *dn = z__[j4p2 + 2] * (*dnm1 / z__[j4 - 2]) - *tau; *dmin__ = min(*dmin__,*dn); } else { /* Code for non IEEE arithmetic. */ if (*pp == 0) { i__1 = *n0 - 3 << 2; for (j4 = *i0 << 2; j4 <= i__1; j4 += 4) { z__[j4 - 2] = d__ + z__[j4 - 1]; if (d__ < 0.) { return 0; } else { z__[j4] = z__[j4 + 1] * (z__[j4 - 1] / z__[j4 - 2]); d__ = z__[j4 + 1] * (d__ / z__[j4 - 2]) - *tau; } *dmin__ = min(*dmin__,d__); /* Computing MIN */ d__1 = emin, d__2 = z__[j4]; emin = min(d__1,d__2); /* L30: */ } } else { i__1 = *n0 - 3 << 2; for (j4 = *i0 << 2; j4 <= i__1; j4 += 4) { z__[j4 - 3] = d__ + z__[j4]; if (d__ < 0.) { return 0; } else { z__[j4 - 1] = z__[j4 + 2] * (z__[j4] / z__[j4 - 3]); d__ = z__[j4 + 2] * (d__ / z__[j4 - 3]) - *tau; } *dmin__ = min(*dmin__,d__); /* Computing MIN */ d__1 = emin, d__2 = z__[j4 - 1]; emin = min(d__1,d__2); /* L40: */ } } /* Unroll last two steps. */ *dnm2 = d__; *dmin2 = *dmin__; j4 = (*n0 - 2 << 2) - *pp; j4p2 = j4 + (*pp << 1) - 1; z__[j4 - 2] = *dnm2 + z__[j4p2]; if (*dnm2 < 0.) { return 0; } else { z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]); *dnm1 = z__[j4p2 + 2] * (*dnm2 / z__[j4 - 2]) - *tau; } *dmin__ = min(*dmin__,*dnm1); *dmin1 = *dmin__; j4 += 4; j4p2 = j4 + (*pp << 1) - 1; z__[j4 - 2] = *dnm1 + z__[j4p2]; if (*dnm1 < 0.) { return 0; } else { z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]); *dn = z__[j4p2 + 2] * (*dnm1 / z__[j4 - 2]) - *tau; } *dmin__ = min(*dmin__,*dn); } z__[j4 + 2] = *dn; z__[(*n0 << 2) - *pp] = emin; return 0; /* End of DLASQ5 */ } /* dlasq5_ */ /* Subroutine */ int dlasq6_(integer *i0, integer *n0, doublereal *z__, integer *pp, doublereal *dmin__, doublereal *dmin1, doublereal *dmin2, doublereal *dn, doublereal *dnm1, doublereal *dnm2) { /* System generated locals */ integer i__1; doublereal d__1, d__2; /* Local variables */ static doublereal emin, temp, d__; static integer j4; static doublereal safmin; static integer j4p2; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLASQ6 computes one dqd (shift equal to zero) transform in ping-pong form, with protection against underflow and overflow. Arguments ========= I0 (input) INTEGER First index. N0 (input) INTEGER Last index. Z (input) DOUBLE PRECISION array, dimension ( 4*N ) Z holds the qd array. EMIN is stored in Z(4*N0) to avoid an extra argument. PP (input) INTEGER PP=0 for ping, PP=1 for pong. DMIN (output) DOUBLE PRECISION Minimum value of d. 
DMIN1 (output) DOUBLE PRECISION Minimum value of d, excluding D( N0 ). DMIN2 (output) DOUBLE PRECISION Minimum value of d, excluding D( N0 ) and D( N0-1 ). DN (output) DOUBLE PRECISION d(N0), the last value of d. DNM1 (output) DOUBLE PRECISION d(N0-1). DNM2 (output) DOUBLE PRECISION d(N0-2). ===================================================================== */ /* Parameter adjustments */ --z__; /* Function Body */ if (*n0 - *i0 - 1 <= 0) { return 0; } safmin = SAFEMINIMUM; j4 = (*i0 << 2) + *pp - 3; emin = z__[j4 + 4]; d__ = z__[j4]; *dmin__ = d__; if (*pp == 0) { i__1 = *n0 - 3 << 2; for (j4 = *i0 << 2; j4 <= i__1; j4 += 4) { z__[j4 - 2] = d__ + z__[j4 - 1]; if (z__[j4 - 2] == 0.) { z__[j4] = 0.; d__ = z__[j4 + 1]; *dmin__ = d__; emin = 0.; } else if (safmin * z__[j4 + 1] < z__[j4 - 2] && safmin * z__[j4 - 2] < z__[j4 + 1]) { temp = z__[j4 + 1] / z__[j4 - 2]; z__[j4] = z__[j4 - 1] * temp; d__ *= temp; } else { z__[j4] = z__[j4 + 1] * (z__[j4 - 1] / z__[j4 - 2]); d__ = z__[j4 + 1] * (d__ / z__[j4 - 2]); } *dmin__ = min(*dmin__,d__); /* Computing MIN */ d__1 = emin, d__2 = z__[j4]; emin = min(d__1,d__2); /* L10: */ } } else { i__1 = *n0 - 3 << 2; for (j4 = *i0 << 2; j4 <= i__1; j4 += 4) { z__[j4 - 3] = d__ + z__[j4]; if (z__[j4 - 3] == 0.) { z__[j4 - 1] = 0.; d__ = z__[j4 + 2]; *dmin__ = d__; emin = 0.; } else if (safmin * z__[j4 + 2] < z__[j4 - 3] && safmin * z__[j4 - 3] < z__[j4 + 2]) { temp = z__[j4 + 2] / z__[j4 - 3]; z__[j4 - 1] = z__[j4] * temp; d__ *= temp; } else { z__[j4 - 1] = z__[j4 + 2] * (z__[j4] / z__[j4 - 3]); d__ = z__[j4 + 2] * (d__ / z__[j4 - 3]); } *dmin__ = min(*dmin__,d__); /* Computing MIN */ d__1 = emin, d__2 = z__[j4 - 1]; emin = min(d__1,d__2); /* L20: */ } } /* Unroll last two steps. */ *dnm2 = d__; *dmin2 = *dmin__; j4 = (*n0 - 2 << 2) - *pp; j4p2 = j4 + (*pp << 1) - 1; z__[j4 - 2] = *dnm2 + z__[j4p2]; if (z__[j4 - 2] == 0.) { z__[j4] = 0.; *dnm1 = z__[j4p2 + 2]; *dmin__ = *dnm1; emin = 0.; } else if (safmin * z__[j4p2 + 2] < z__[j4 - 2] && safmin * z__[j4 - 2] < z__[j4p2 + 2]) { temp = z__[j4p2 + 2] / z__[j4 - 2]; z__[j4] = z__[j4p2] * temp; *dnm1 = *dnm2 * temp; } else { z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]); *dnm1 = z__[j4p2 + 2] * (*dnm2 / z__[j4 - 2]); } *dmin__ = min(*dmin__,*dnm1); *dmin1 = *dmin__; j4 += 4; j4p2 = j4 + (*pp << 1) - 1; z__[j4 - 2] = *dnm1 + z__[j4p2]; if (z__[j4 - 2] == 0.) { z__[j4] = 0.; *dn = z__[j4p2 + 2]; *dmin__ = *dn; emin = 0.; } else if (safmin * z__[j4p2 + 2] < z__[j4 - 2] && safmin * z__[j4 - 2] < z__[j4p2 + 2]) { temp = z__[j4p2 + 2] / z__[j4 - 2]; z__[j4] = z__[j4p2] * temp; *dn = *dnm1 * temp; } else { z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]); *dn = z__[j4p2 + 2] * (*dnm1 / z__[j4 - 2]); } *dmin__ = min(*dmin__,*dn); z__[j4 + 2] = *dn; z__[(*n0 << 2) - *pp] = emin; return 0; /* End of DLASQ6 */ } /* dlasq6_ */ /* Subroutine */ int dlasr_(char *side, char *pivot, char *direct, integer *m, integer *n, doublereal *c__, doublereal *s, doublereal *a, integer * lda) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2; /* Local variables */ static integer info; static doublereal temp; static integer i__, j; extern logical lsame_(char *, char *); static doublereal ctemp, stemp; extern /* Subroutine */ int xerbla_(char *, integer *); /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLASR applies a sequence of plane rotations to a real matrix A, from either the left or the right. 
When SIDE = 'L', the transformation takes the form A := P*A and when SIDE = 'R', the transformation takes the form A := A*P**T where P is an orthogonal matrix consisting of a sequence of z plane rotations, with z = M when SIDE = 'L' and z = N when SIDE = 'R', and P**T is the transpose of P. When DIRECT = 'F' (Forward sequence), then P = P(z-1) * ... * P(2) * P(1) and when DIRECT = 'B' (Backward sequence), then P = P(1) * P(2) * ... * P(z-1) where P(k) is a plane rotation matrix defined by the 2-by-2 rotation R(k) = ( c(k) s(k) ) = ( -s(k) c(k) ). When PIVOT = 'V' (Variable pivot), the rotation is performed for the plane (k,k+1), i.e., P(k) has the form P(k) = ( 1 ) ( ... ) ( 1 ) ( c(k) s(k) ) ( -s(k) c(k) ) ( 1 ) ( ... ) ( 1 ) where R(k) appears as a rank-2 modification to the identity matrix in rows and columns k and k+1. When PIVOT = 'T' (Top pivot), the rotation is performed for the plane (1,k+1), so P(k) has the form P(k) = ( c(k) s(k) ) ( 1 ) ( ... ) ( 1 ) ( -s(k) c(k) ) ( 1 ) ( ... ) ( 1 ) where R(k) appears in rows and columns 1 and k+1. Similarly, when PIVOT = 'B' (Bottom pivot), the rotation is performed for the plane (k,z), giving P(k) the form P(k) = ( 1 ) ( ... ) ( 1 ) ( c(k) s(k) ) ( 1 ) ( ... ) ( 1 ) ( -s(k) c(k) ) where R(k) appears in rows and columns k and z. The rotations are performed without ever forming P(k) explicitly. Arguments ========= SIDE (input) CHARACTER*1 Specifies whether the plane rotation matrix P is applied to A on the left or the right. = 'L': Left, compute A := P*A = 'R': Right, compute A:= A*P**T PIVOT (input) CHARACTER*1 Specifies the plane for which P(k) is a plane rotation matrix. = 'V': Variable pivot, the plane (k,k+1) = 'T': Top pivot, the plane (1,k+1) = 'B': Bottom pivot, the plane (k,z) DIRECT (input) CHARACTER*1 Specifies whether P is a forward or backward sequence of plane rotations. = 'F': Forward, P = P(z-1)*...*P(2)*P(1) = 'B': Backward, P = P(1)*P(2)*...*P(z-1) M (input) INTEGER The number of rows of the matrix A. If m <= 1, an immediate return is effected. N (input) INTEGER The number of columns of the matrix A. If n <= 1, an immediate return is effected. C (input) DOUBLE PRECISION array, dimension (M-1) if SIDE = 'L' (N-1) if SIDE = 'R' The cosines c(k) of the plane rotations. S (input) DOUBLE PRECISION array, dimension (M-1) if SIDE = 'L' (N-1) if SIDE = 'R' The sines s(k) of the plane rotations. The 2-by-2 plane rotation part of the matrix P(k), R(k), has the form R(k) = ( c(k) s(k) ) ( -s(k) c(k) ). A (input/output) DOUBLE PRECISION array, dimension (LDA,N) The M-by-N matrix A. On exit, A is overwritten by P*A if SIDE = 'R' or by A*P**T if SIDE = 'L'. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,M). ===================================================================== Test the input parameters */ /* Parameter adjustments */ --c__; --s; a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; /* Function Body */ info = 0; if (! (lsame_(side, "L") || lsame_(side, "R"))) { info = 1; } else if (! (lsame_(pivot, "V") || lsame_(pivot, "T") || lsame_(pivot, "B"))) { info = 2; } else if (! 
(lsame_(direct, "F") || lsame_(direct, "B"))) { info = 3; } else if (*m < 0) { info = 4; } else if (*n < 0) { info = 5; } else if (*lda < max(1,*m)) { info = 9; } if (info != 0) { xerbla_("DLASR ", &info); return 0; } /* Quick return if possible */ if (*m == 0 || *n == 0) { return 0; } if (lsame_(side, "L")) { /* Form P * A */ if (lsame_(pivot, "V")) { if (lsame_(direct, "F")) { i__1 = *m - 1; for (j = 1; j <= i__1; ++j) { ctemp = c__[j]; stemp = s[j]; if (ctemp != 1. || stemp != 0.) { i__2 = *n; for (i__ = 1; i__ <= i__2; ++i__) { temp = a[j + 1 + i__ * a_dim1]; a[j + 1 + i__ * a_dim1] = ctemp * temp - stemp * a[j + i__ * a_dim1]; a[j + i__ * a_dim1] = stemp * temp + ctemp * a[j + i__ * a_dim1]; /* L10: */ } } /* L20: */ } } else if (lsame_(direct, "B")) { for (j = *m - 1; j >= 1; --j) { ctemp = c__[j]; stemp = s[j]; if (ctemp != 1. || stemp != 0.) { i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { temp = a[j + 1 + i__ * a_dim1]; a[j + 1 + i__ * a_dim1] = ctemp * temp - stemp * a[j + i__ * a_dim1]; a[j + i__ * a_dim1] = stemp * temp + ctemp * a[j + i__ * a_dim1]; /* L30: */ } } /* L40: */ } } } else if (lsame_(pivot, "T")) { if (lsame_(direct, "F")) { i__1 = *m; for (j = 2; j <= i__1; ++j) { ctemp = c__[j - 1]; stemp = s[j - 1]; if (ctemp != 1. || stemp != 0.) { i__2 = *n; for (i__ = 1; i__ <= i__2; ++i__) { temp = a[j + i__ * a_dim1]; a[j + i__ * a_dim1] = ctemp * temp - stemp * a[ i__ * a_dim1 + 1]; a[i__ * a_dim1 + 1] = stemp * temp + ctemp * a[ i__ * a_dim1 + 1]; /* L50: */ } } /* L60: */ } } else if (lsame_(direct, "B")) { for (j = *m; j >= 2; --j) { ctemp = c__[j - 1]; stemp = s[j - 1]; if (ctemp != 1. || stemp != 0.) { i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { temp = a[j + i__ * a_dim1]; a[j + i__ * a_dim1] = ctemp * temp - stemp * a[ i__ * a_dim1 + 1]; a[i__ * a_dim1 + 1] = stemp * temp + ctemp * a[ i__ * a_dim1 + 1]; /* L70: */ } } /* L80: */ } } } else if (lsame_(pivot, "B")) { if (lsame_(direct, "F")) { i__1 = *m - 1; for (j = 1; j <= i__1; ++j) { ctemp = c__[j]; stemp = s[j]; if (ctemp != 1. || stemp != 0.) { i__2 = *n; for (i__ = 1; i__ <= i__2; ++i__) { temp = a[j + i__ * a_dim1]; a[j + i__ * a_dim1] = stemp * a[*m + i__ * a_dim1] + ctemp * temp; a[*m + i__ * a_dim1] = ctemp * a[*m + i__ * a_dim1] - stemp * temp; /* L90: */ } } /* L100: */ } } else if (lsame_(direct, "B")) { for (j = *m - 1; j >= 1; --j) { ctemp = c__[j]; stemp = s[j]; if (ctemp != 1. || stemp != 0.) { i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { temp = a[j + i__ * a_dim1]; a[j + i__ * a_dim1] = stemp * a[*m + i__ * a_dim1] + ctemp * temp; a[*m + i__ * a_dim1] = ctemp * a[*m + i__ * a_dim1] - stemp * temp; /* L110: */ } } /* L120: */ } } } } else if (lsame_(side, "R")) { /* Form A * P' */ if (lsame_(pivot, "V")) { if (lsame_(direct, "F")) { i__1 = *n - 1; for (j = 1; j <= i__1; ++j) { ctemp = c__[j]; stemp = s[j]; if (ctemp != 1. || stemp != 0.) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { temp = a[i__ + (j + 1) * a_dim1]; a[i__ + (j + 1) * a_dim1] = ctemp * temp - stemp * a[i__ + j * a_dim1]; a[i__ + j * a_dim1] = stemp * temp + ctemp * a[ i__ + j * a_dim1]; /* L130: */ } } /* L140: */ } } else if (lsame_(direct, "B")) { for (j = *n - 1; j >= 1; --j) { ctemp = c__[j]; stemp = s[j]; if (ctemp != 1. || stemp != 0.) 
{ i__1 = *m; for (i__ = 1; i__ <= i__1; ++i__) { temp = a[i__ + (j + 1) * a_dim1]; a[i__ + (j + 1) * a_dim1] = ctemp * temp - stemp * a[i__ + j * a_dim1]; a[i__ + j * a_dim1] = stemp * temp + ctemp * a[ i__ + j * a_dim1]; /* L150: */ } } /* L160: */ } } } else if (lsame_(pivot, "T")) { if (lsame_(direct, "F")) { i__1 = *n; for (j = 2; j <= i__1; ++j) { ctemp = c__[j - 1]; stemp = s[j - 1]; if (ctemp != 1. || stemp != 0.) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { temp = a[i__ + j * a_dim1]; a[i__ + j * a_dim1] = ctemp * temp - stemp * a[ i__ + a_dim1]; a[i__ + a_dim1] = stemp * temp + ctemp * a[i__ + a_dim1]; /* L170: */ } } /* L180: */ } } else if (lsame_(direct, "B")) { for (j = *n; j >= 2; --j) { ctemp = c__[j - 1]; stemp = s[j - 1]; if (ctemp != 1. || stemp != 0.) { i__1 = *m; for (i__ = 1; i__ <= i__1; ++i__) { temp = a[i__ + j * a_dim1]; a[i__ + j * a_dim1] = ctemp * temp - stemp * a[ i__ + a_dim1]; a[i__ + a_dim1] = stemp * temp + ctemp * a[i__ + a_dim1]; /* L190: */ } } /* L200: */ } } } else if (lsame_(pivot, "B")) { if (lsame_(direct, "F")) { i__1 = *n - 1; for (j = 1; j <= i__1; ++j) { ctemp = c__[j]; stemp = s[j]; if (ctemp != 1. || stemp != 0.) { i__2 = *m; for (i__ = 1; i__ <= i__2; ++i__) { temp = a[i__ + j * a_dim1]; a[i__ + j * a_dim1] = stemp * a[i__ + *n * a_dim1] + ctemp * temp; a[i__ + *n * a_dim1] = ctemp * a[i__ + *n * a_dim1] - stemp * temp; /* L210: */ } } /* L220: */ } } else if (lsame_(direct, "B")) { for (j = *n - 1; j >= 1; --j) { ctemp = c__[j]; stemp = s[j]; if (ctemp != 1. || stemp != 0.) { i__1 = *m; for (i__ = 1; i__ <= i__1; ++i__) { temp = a[i__ + j * a_dim1]; a[i__ + j * a_dim1] = stemp * a[i__ + *n * a_dim1] + ctemp * temp; a[i__ + *n * a_dim1] = ctemp * a[i__ + *n * a_dim1] - stemp * temp; /* L230: */ } } /* L240: */ } } } } return 0; /* End of DLASR */ } /* dlasr_ */ /* Subroutine */ int dlasrt_(char *id, integer *n, doublereal *d__, integer * info) { /* System generated locals */ integer i__1, i__2; /* Local variables */ static integer endd, i__, j; extern logical lsame_(char *, char *); static integer stack[64] /* was [2][32] */; static doublereal dmnmx, d1, d2, d3; static integer start; extern /* Subroutine */ int xerbla_(char *, integer *); static integer stkpnt, dir; static doublereal tmp; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= Sort the numbers in D in increasing order (if ID = 'I') or in decreasing order (if ID = 'D' ). Use Quick Sort, reverting to Insertion sort on arrays of size <= 20. Dimension of STACK limits N to about 2**32. Arguments ========= ID (input) CHARACTER*1 = 'I': sort D in increasing order; = 'D': sort D in decreasing order. N (input) INTEGER The length of the array D. D (input/output) DOUBLE PRECISION array, dimension (N) On entry, the array to be sorted. On exit, D has been sorted into increasing order (D(1) <= ... <= D(N) ) or into decreasing order (D(1) >= ... >= D(N) ), depending on ID. INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value ===================================================================== Test the input paramters. 
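   Example
   =======

   A minimal call sketch, assuming the f2c typedefs (integer, doublereal) used
   throughout this file and the external name dlasrt_ defined below:

       integer n = 5, info = 0;
       doublereal vals[5] = { 3.0, 1.0, 4.0, 1.5, 9.0 };
       dlasrt_("D", &n, vals, &info);
       // vals is now { 9.0, 4.0, 3.0, 1.5, 1.0 } and info == 0

   DLASQ1 and DLASQ2 above use exactly this descending sort to order the
   computed singular values and eigenvalues.
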
*/ /* Parameter adjustments */ --d__; /* Function Body */ *info = 0; dir = -1; if (lsame_(id, "D")) { dir = 0; } else if (lsame_(id, "I")) { dir = 1; } if (dir == -1) { *info = -1; } else if (*n < 0) { *info = -2; } if (*info != 0) { i__1 = -(*info); xerbla_("DLASRT", &i__1); return 0; } /* Quick return if possible */ if (*n <= 1) { return 0; } stkpnt = 1; stack[0] = 1; stack[1] = *n; L10: start = stack[(stkpnt << 1) - 2]; endd = stack[(stkpnt << 1) - 1]; --stkpnt; if (endd - start <= 20 && endd - start > 0) { /* Do Insertion sort on D( START:ENDD ) */ if (dir == 0) { /* Sort into decreasing order */ i__1 = endd; for (i__ = start + 1; i__ <= i__1; ++i__) { i__2 = start + 1; for (j = i__; j >= i__2; --j) { if (d__[j] > d__[j - 1]) { dmnmx = d__[j]; d__[j] = d__[j - 1]; d__[j - 1] = dmnmx; } else { goto L30; } /* L20: */ } L30: ; } } else { /* Sort into increasing order */ i__1 = endd; for (i__ = start + 1; i__ <= i__1; ++i__) { i__2 = start + 1; for (j = i__; j >= i__2; --j) { if (d__[j] < d__[j - 1]) { dmnmx = d__[j]; d__[j] = d__[j - 1]; d__[j - 1] = dmnmx; } else { goto L50; } /* L40: */ } L50: ; } } } else if (endd - start > 20) { /* Partition D( START:ENDD ) and stack parts, largest one first Choose partition entry as median of 3 */ d1 = d__[start]; d2 = d__[endd]; i__ = (start + endd) / 2; d3 = d__[i__]; if (d1 < d2) { if (d3 < d1) { dmnmx = d1; } else if (d3 < d2) { dmnmx = d3; } else { dmnmx = d2; } } else { if (d3 < d2) { dmnmx = d2; } else if (d3 < d1) { dmnmx = d3; } else { dmnmx = d1; } } if (dir == 0) { /* Sort into decreasing order */ i__ = start - 1; j = endd + 1; L60: L70: --j; if (d__[j] < dmnmx) { goto L70; } L80: ++i__; if (d__[i__] > dmnmx) { goto L80; } if (i__ < j) { tmp = d__[i__]; d__[i__] = d__[j]; d__[j] = tmp; goto L60; } if (j - start > endd - j - 1) { ++stkpnt; stack[(stkpnt << 1) - 2] = start; stack[(stkpnt << 1) - 1] = j; ++stkpnt; stack[(stkpnt << 1) - 2] = j + 1; stack[(stkpnt << 1) - 1] = endd; } else { ++stkpnt; stack[(stkpnt << 1) - 2] = j + 1; stack[(stkpnt << 1) - 1] = endd; ++stkpnt; stack[(stkpnt << 1) - 2] = start; stack[(stkpnt << 1) - 1] = j; } } else { /* Sort into increasing order */ i__ = start - 1; j = endd + 1; L90: L100: --j; if (d__[j] > dmnmx) { goto L100; } L110: ++i__; if (d__[i__] < dmnmx) { goto L110; } if (i__ < j) { tmp = d__[i__]; d__[i__] = d__[j]; d__[j] = tmp; goto L90; } if (j - start > endd - j - 1) { ++stkpnt; stack[(stkpnt << 1) - 2] = start; stack[(stkpnt << 1) - 1] = j; ++stkpnt; stack[(stkpnt << 1) - 2] = j + 1; stack[(stkpnt << 1) - 1] = endd; } else { ++stkpnt; stack[(stkpnt << 1) - 2] = j + 1; stack[(stkpnt << 1) - 1] = endd; ++stkpnt; stack[(stkpnt << 1) - 2] = start; stack[(stkpnt << 1) - 1] = j; } } } if (stkpnt > 0) { goto L10; } return 0; /* End of DLASRT */ } /* dlasrt_ */ /* Subroutine */ int dlassq_(integer *n, doublereal *x, integer *incx, doublereal *scale, doublereal *sumsq) { /* System generated locals */ integer i__1, i__2; doublereal d__1; /* Local variables */ static doublereal absxi; static integer ix; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLASSQ returns the values scl and smsq such that ( scl**2 )*smsq = x( 1 )**2 +...+ x( n )**2 + ( scale**2 )*sumsq, where x( i ) = X( 1 + ( i - 1 )*INCX ). The value of sumsq is assumed to be non-negative and scl returns the value scl = max( scale, abs( x( i ) ) ). 
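   For illustration, the same running update written as a free-standing helper
   (hypothetical name example_scaled_norm2, sqrt from <math.h>; DLASSQ itself
   returns scl and smsq separately and leaves the square root to the caller):

       static double example_scaled_norm2(int n, const double *x)
       {
           int i;
           double scale = 0., sumsq = 1.;
           for (i = 0; i < n; ++i) {
               double absxi = x[i] < 0. ? -x[i] : x[i];
               if (absxi == 0.)
                   continue;
               if (scale < absxi) {
                   double r = scale / absxi;
                   sumsq = 1. + sumsq * r * r;   // rescale the old sum
                   scale = absxi;
               } else {
                   double r = absxi / scale;
                   sumsq += r * r;
               }
           }
           return scale * sqrt(sumsq);           // the 2-norm, without overflow
       }
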
scale and sumsq must be supplied in SCALE and SUMSQ and scl and smsq are overwritten on SCALE and SUMSQ respectively. The routine makes only one pass through the vector x. Arguments ========= N (input) INTEGER The number of elements to be used from the vector X. X (input) DOUBLE PRECISION array, dimension (N) The vector for which a scaled sum of squares is computed. x( i ) = X( 1 + ( i - 1 )*INCX ), 1 <= i <= n. INCX (input) INTEGER The increment between successive values of the vector X. INCX > 0. SCALE (input/output) DOUBLE PRECISION On entry, the value scale in the equation above. On exit, SCALE is overwritten with scl , the scaling factor for the sum of squares. SUMSQ (input/output) DOUBLE PRECISION On entry, the value sumsq in the equation above. On exit, SUMSQ is overwritten with smsq , the basic sum of squares from which scl has been factored out. ===================================================================== */ /* Parameter adjustments */ --x; /* Function Body */ if (*n > 0) { i__1 = (*n - 1) * *incx + 1; i__2 = *incx; for (ix = 1; i__2 < 0 ? ix >= i__1 : ix <= i__1; ix += i__2) { if (x[ix] != 0.) { absxi = (d__1 = x[ix], abs(d__1)); if (*scale < absxi) { /* Computing 2nd power */ d__1 = *scale / absxi; *sumsq = *sumsq * (d__1 * d__1) + 1; *scale = absxi; } else { /* Computing 2nd power */ d__1 = absxi / *scale; *sumsq += d__1 * d__1; } } /* L10: */ } } return 0; /* End of DLASSQ */ } /* dlassq_ */ /* Subroutine */ int dlasv2_(doublereal *f, doublereal *g, doublereal *h__, doublereal *ssmin, doublereal *ssmax, doublereal *snr, doublereal * csr, doublereal *snl, doublereal *csl) { /* System generated locals */ doublereal d__1; /* Builtin functions */ double sqrt(doublereal), d_sign(doublereal *, doublereal *); /* Local variables */ static integer pmax; static doublereal temp; static logical swap; static doublereal a, d__, l, m, r__, s, t, tsign, fa, ga, ha; static doublereal ft, gt, ht, mm; static logical gasmal; static doublereal tt, clt, crt, slt, srt; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLASV2 computes the singular value decomposition of a 2-by-2 triangular matrix [ F G ] [ 0 H ]. On return, abs(SSMAX) is the larger singular value, abs(SSMIN) is the smaller singular value, and (CSL,SNL) and (CSR,SNR) are the left and right singular vectors for abs(SSMAX), giving the decomposition [ CSL SNL ] [ F G ] [ CSR -SNR ] = [ SSMAX 0 ] [-SNL CSL ] [ 0 H ] [ SNR CSR ] [ 0 SSMIN ]. Arguments ========= F (input) DOUBLE PRECISION The (1,1) element of the 2-by-2 matrix. G (input) DOUBLE PRECISION The (1,2) element of the 2-by-2 matrix. H (input) DOUBLE PRECISION The (2,2) element of the 2-by-2 matrix. SSMIN (output) DOUBLE PRECISION abs(SSMIN) is the smaller singular value. SSMAX (output) DOUBLE PRECISION abs(SSMAX) is the larger singular value. SNL (output) DOUBLE PRECISION CSL (output) DOUBLE PRECISION The vector (CSL, SNL) is a unit left singular vector for the singular value abs(SSMAX). SNR (output) DOUBLE PRECISION CSR (output) DOUBLE PRECISION The vector (CSR, SNR) is a unit right singular vector for the singular value abs(SSMAX). Further Details =============== Any input parameter may be aliased with any output parameter. Barring over/underflow and assuming a guard digit in subtraction, all output quantities are correct to within a few units in the last place (ulps). In IEEE arithmetic, the code works correctly if one matrix element is infinite. 
Overflow will not occur unless the largest singular value itself overflows or is within a few ulps of overflow. (On machines with partial overflow, like the Cray, overflow may occur if the largest singular value is within a factor of 2 of overflow.) Underflow is harmless if underflow is gradual. Otherwise, results may correspond to a matrix modified by perturbations of size near the underflow threshold. ===================================================================== */ ft = *f; fa = abs(ft); ht = *h__; ha = abs(*h__); /* PMAX points to the maximum absolute element of matrix PMAX = 1 if F largest in absolute values PMAX = 2 if G largest in absolute values PMAX = 3 if H largest in absolute values */ pmax = 1; swap = ha > fa; if (swap) { pmax = 3; temp = ft; ft = ht; ht = temp; temp = fa; fa = ha; ha = temp; /* Now FA .ge. HA */ } gt = *g; ga = abs(gt); if (ga == 0.) { /* Diagonal matrix */ *ssmin = ha; *ssmax = fa; clt = 1.; crt = 1.; slt = 0.; srt = 0.; } else { gasmal = TRUE_; if (ga > fa) { pmax = 2; if (fa / ga < EPSILON) { /* Case of very large GA */ gasmal = FALSE_; *ssmax = ga; if (ha > 1.) { *ssmin = fa / (ga / ha); } else { *ssmin = fa / ga * ha; } clt = 1.; slt = ht / gt; srt = 1.; crt = ft / gt; } } if (gasmal) { /* Normal case */ d__ = fa - ha; if (d__ == fa) { /* Copes with infinite F or H */ l = 1.; } else { l = d__ / fa; } /* Note that 0 .le. L .le. 1 */ m = gt / ft; /* Note that abs(M) .le. 1/macheps */ t = 2. - l; /* Note that T .ge. 1 */ mm = m * m; tt = t * t; s = sqrt(tt + mm); /* Note that 1 .le. S .le. 1 + 1/macheps */ if (l == 0.) { r__ = abs(m); } else { r__ = sqrt(l * l + mm); } /* Note that 0 .le. R .le. 1 + 1/macheps */ a = (s + r__) * .5; /* Note that 1 .le. A .le. 1 + abs(M) */ *ssmin = ha / a; *ssmax = fa * a; if (mm == 0.) { /* Note that M is very tiny */ if (l == 0.) { t = d_sign(&c_b3176, &ft) * d_sign(&c_b15, >); } else { t = gt / d_sign(&d__, &ft) + m / t; } } else { t = (m / (s + t) + m / (r__ + l)) * (a + 1.); } l = sqrt(t * t + 4.); crt = 2. / l; srt = t / l; clt = (crt + srt * m) / a; slt = ht / ft * srt / a; } } if (swap) { *csl = srt; *snl = crt; *csr = slt; *snr = clt; } else { *csl = clt; *snl = slt; *csr = crt; *snr = srt; } /* Correct signs of SSMAX and SSMIN */ if (pmax == 1) { tsign = d_sign(&c_b15, csr) * d_sign(&c_b15, csl) * d_sign(&c_b15, f); } if (pmax == 2) { tsign = d_sign(&c_b15, snr) * d_sign(&c_b15, csl) * d_sign(&c_b15, g); } if (pmax == 3) { tsign = d_sign(&c_b15, snr) * d_sign(&c_b15, snl) * d_sign(&c_b15, h__); } *ssmax = d_sign(ssmax, &tsign); d__1 = tsign * d_sign(&c_b15, f) * d_sign(&c_b15, h__); *ssmin = d_sign(ssmin, &d__1); return 0; /* End of DLASV2 */ } /* dlasv2_ */ /* Subroutine */ int dlaswp_(integer *n, doublereal *a, integer *lda, integer *k1, integer *k2, integer *ipiv, integer *incx) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2, i__3, i__4; /* Local variables */ static doublereal temp; static integer i__, j, k, i1, i2, n32, ip, ix, ix0, inc; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLASWP performs a series of row interchanges on the matrix A. One row interchange is initiated for each of rows K1 through K2 of A. Arguments ========= N (input) INTEGER The number of columns of the matrix A. A (input/output) DOUBLE PRECISION array, dimension (LDA,N) On entry, the matrix of column dimension N to which the row interchanges will be applied. On exit, the permuted matrix. 
LDA (input) INTEGER The leading dimension of the array A. K1 (input) INTEGER The first element of IPIV for which a row interchange will be done. K2 (input) INTEGER The last element of IPIV for which a row interchange will be done. IPIV (input) INTEGER array, dimension (K2*abs(INCX)) The vector of pivot indices. Only the elements in positions K1 through K2 of IPIV are accessed. IPIV(K) = L implies rows K and L are to be interchanged. INCX (input) INTEGER The increment between successive values of IPIV. If IPIV is negative, the pivots are applied in reverse order. Further Details =============== Modified by R. C. Whaley, Computer Science Dept., Univ. of Tenn., Knoxville, USA ===================================================================== Interchange row I with row IPIV(I) for each of rows K1 through K2. */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --ipiv; /* Function Body */ if (*incx > 0) { ix0 = *k1; i1 = *k1; i2 = *k2; inc = 1; } else if (*incx < 0) { ix0 = (1 - *k2) * *incx + 1; i1 = *k2; i2 = *k1; inc = -1; } else { return 0; } n32 = *n / 32 << 5; if (n32 != 0) { i__1 = n32; for (j = 1; j <= i__1; j += 32) { ix = ix0; i__2 = i2; i__3 = inc; for (i__ = i1; i__3 < 0 ? i__ >= i__2 : i__ <= i__2; i__ += i__3) { ip = ipiv[ix]; if (ip != i__) { i__4 = j + 31; for (k = j; k <= i__4; ++k) { temp = a[i__ + k * a_dim1]; a[i__ + k * a_dim1] = a[ip + k * a_dim1]; a[ip + k * a_dim1] = temp; /* L10: */ } } ix += *incx; /* L20: */ } /* L30: */ } } if (n32 != *n) { ++n32; ix = ix0; i__1 = i2; i__3 = inc; for (i__ = i1; i__3 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__3) { ip = ipiv[ix]; if (ip != i__) { i__2 = *n; for (k = n32; k <= i__2; ++k) { temp = a[i__ + k * a_dim1]; a[i__ + k * a_dim1] = a[ip + k * a_dim1]; a[ip + k * a_dim1] = temp; /* L40: */ } } ix += *incx; /* L50: */ } } return 0; /* End of DLASWP */ } /* dlaswp_ */ /* Subroutine */ int dlasy2_(logical *ltranl, logical *ltranr, integer *isgn, integer *n1, integer *n2, doublereal *tl, integer *ldtl, doublereal * tr, integer *ldtr, doublereal *b, integer *ldb, doublereal *scale, doublereal *x, integer *ldx, doublereal *xnorm, integer *info) { /* Initialized data */ static integer locu12[4] = { 3,4,1,2 }; static integer locl21[4] = { 2,1,4,3 }; static integer locu22[4] = { 4,3,2,1 }; static logical xswpiv[4] = { FALSE_,FALSE_,TRUE_,TRUE_ }; static logical bswpiv[4] = { FALSE_,TRUE_,FALSE_,TRUE_ }; /* System generated locals */ integer b_dim1, b_offset, tl_dim1, tl_offset, tr_dim1, tr_offset, x_dim1, x_offset; doublereal d__1, d__2, d__3, d__4, d__5, d__6, d__7, d__8; /* Local variables */ static doublereal btmp[4], smin; static integer ipiv; static doublereal temp; static integer jpiv[4]; static doublereal xmax; static integer ipsv, jpsv, i__, j, k; static logical bswap; extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *, doublereal *, integer *), dswap_(integer *, doublereal *, integer *, doublereal *, integer *); static logical xswap; static doublereal x2[2], l21, u11, u12; static integer ip, jp; static doublereal u22, t16[16] /* was [4][4] */; extern integer idamax_(integer *, doublereal *, integer *); static doublereal smlnum, gam, bet, eps, sgn, tmp[4], tau1; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLASY2 solves for the N1 by N2 matrix X, 1 <= N1,N2 <= 2, in op(TL)*X + ISGN*X*op(TR) = SCALE*B, where TL is N1 by N1, TR is N2 by N2, B is N1 by N2, and ISGN = 1 or -1. 
op(T) = T or T', where T' denotes the transpose of T. Arguments ========= LTRANL (input) LOGICAL On entry, LTRANL specifies the op(TL): = .FALSE., op(TL) = TL, = .TRUE., op(TL) = TL'. LTRANR (input) LOGICAL On entry, LTRANR specifies the op(TR): = .FALSE., op(TR) = TR, = .TRUE., op(TR) = TR'. ISGN (input) INTEGER On entry, ISGN specifies the sign of the equation as described before. ISGN may only be 1 or -1. N1 (input) INTEGER On entry, N1 specifies the order of matrix TL. N1 may only be 0, 1 or 2. N2 (input) INTEGER On entry, N2 specifies the order of matrix TR. N2 may only be 0, 1 or 2. TL (input) DOUBLE PRECISION array, dimension (LDTL,2) On entry, TL contains an N1 by N1 matrix. LDTL (input) INTEGER The leading dimension of the matrix TL. LDTL >= max(1,N1). TR (input) DOUBLE PRECISION array, dimension (LDTR,2) On entry, TR contains an N2 by N2 matrix. LDTR (input) INTEGER The leading dimension of the matrix TR. LDTR >= max(1,N2). B (input) DOUBLE PRECISION array, dimension (LDB,2) On entry, the N1 by N2 matrix B contains the right-hand side of the equation. LDB (input) INTEGER The leading dimension of the matrix B. LDB >= max(1,N1). SCALE (output) DOUBLE PRECISION On exit, SCALE contains the scale factor. SCALE is chosen less than or equal to 1 to prevent the solution overflowing. X (output) DOUBLE PRECISION array, dimension (LDX,2) On exit, X contains the N1 by N2 solution. LDX (input) INTEGER The leading dimension of the matrix X. LDX >= max(1,N1). XNORM (output) DOUBLE PRECISION On exit, XNORM is the infinity-norm of the solution. INFO (output) INTEGER On exit, INFO is set to 0: successful exit. 1: TL and TR have too close eigenvalues, so TL or TR is perturbed to get a nonsingular equation. NOTE: In the interests of speed, this routine does not check the inputs for errors. ===================================================================== */ /* Parameter adjustments */ tl_dim1 = *ldtl; tl_offset = 1 + tl_dim1 * 1; tl -= tl_offset; tr_dim1 = *ldtr; tr_offset = 1 + tr_dim1 * 1; tr -= tr_offset; b_dim1 = *ldb; b_offset = 1 + b_dim1 * 1; b -= b_offset; x_dim1 = *ldx; x_offset = 1 + x_dim1 * 1; x -= x_offset; /* Function Body */ /* Do not check the input parameters for errors */ *info = 0; /* Quick return if possible */ if (*n1 == 0 || *n2 == 0) { return 0; } /* Set constants to control overflow */ eps = PRECISION; smlnum = SAFEMINIMUM / eps; sgn = (doublereal) (*isgn); k = *n1 + *n1 + *n2 - 2; switch (k) { case 1: goto L10; case 2: goto L20; case 3: goto L30; case 4: goto L50; } /* 1 by 1: TL11*X + SGN*X*TR11 = B11 */ L10: tau1 = tl[tl_dim1 + 1] + sgn * tr[tr_dim1 + 1]; bet = abs(tau1); if (bet <= smlnum) { tau1 = smlnum; bet = smlnum; *info = 1; } *scale = 1.; gam = (d__1 = b[b_dim1 + 1], abs(d__1)); if (smlnum * gam > bet) { *scale = 1. 
/ gam; } x[x_dim1 + 1] = b[b_dim1 + 1] * *scale / tau1; *xnorm = (d__1 = x[x_dim1 + 1], abs(d__1)); return 0; /* 1 by 2: TL11*[X11 X12] + ISGN*[X11 X12]*op[TR11 TR12] = [B11 B12] [TR21 TR22] */ L20: /* Computing MAX Computing MAX */ d__7 = (d__1 = tl[tl_dim1 + 1], abs(d__1)), d__8 = (d__2 = tr[tr_dim1 + 1] , abs(d__2)), d__7 = max(d__7,d__8), d__8 = (d__3 = tr[(tr_dim1 << 1) + 1], abs(d__3)), d__7 = max(d__7,d__8), d__8 = (d__4 = tr[ tr_dim1 + 2], abs(d__4)), d__7 = max(d__7,d__8), d__8 = (d__5 = tr[(tr_dim1 << 1) + 2], abs(d__5)); d__6 = eps * max(d__7,d__8); smin = max(d__6,smlnum); tmp[0] = tl[tl_dim1 + 1] + sgn * tr[tr_dim1 + 1]; tmp[3] = tl[tl_dim1 + 1] + sgn * tr[(tr_dim1 << 1) + 2]; if (*ltranr) { tmp[1] = sgn * tr[tr_dim1 + 2]; tmp[2] = sgn * tr[(tr_dim1 << 1) + 1]; } else { tmp[1] = sgn * tr[(tr_dim1 << 1) + 1]; tmp[2] = sgn * tr[tr_dim1 + 2]; } btmp[0] = b[b_dim1 + 1]; btmp[1] = b[(b_dim1 << 1) + 1]; goto L40; /* 2 by 1: op[TL11 TL12]*[X11] + ISGN* [X11]*TR11 = [B11] [TL21 TL22] [X21] [X21] [B21] */ L30: /* Computing MAX Computing MAX */ d__7 = (d__1 = tr[tr_dim1 + 1], abs(d__1)), d__8 = (d__2 = tl[tl_dim1 + 1] , abs(d__2)), d__7 = max(d__7,d__8), d__8 = (d__3 = tl[(tl_dim1 << 1) + 1], abs(d__3)), d__7 = max(d__7,d__8), d__8 = (d__4 = tl[ tl_dim1 + 2], abs(d__4)), d__7 = max(d__7,d__8), d__8 = (d__5 = tl[(tl_dim1 << 1) + 2], abs(d__5)); d__6 = eps * max(d__7,d__8); smin = max(d__6,smlnum); tmp[0] = tl[tl_dim1 + 1] + sgn * tr[tr_dim1 + 1]; tmp[3] = tl[(tl_dim1 << 1) + 2] + sgn * tr[tr_dim1 + 1]; if (*ltranl) { tmp[1] = tl[(tl_dim1 << 1) + 1]; tmp[2] = tl[tl_dim1 + 2]; } else { tmp[1] = tl[tl_dim1 + 2]; tmp[2] = tl[(tl_dim1 << 1) + 1]; } btmp[0] = b[b_dim1 + 1]; btmp[1] = b[b_dim1 + 2]; L40: /* Solve 2 by 2 system using complete pivoting. Set pivots less than SMIN to SMIN. */ ipiv = idamax_(&c__4, tmp, &c__1); u11 = tmp[ipiv - 1]; if (abs(u11) <= smin) { *info = 1; u11 = smin; } u12 = tmp[locu12[ipiv - 1] - 1]; l21 = tmp[locl21[ipiv - 1] - 1] / u11; u22 = tmp[locu22[ipiv - 1] - 1] - u12 * l21; xswap = xswpiv[ipiv - 1]; bswap = bswpiv[ipiv - 1]; if (abs(u22) <= smin) { *info = 1; u22 = smin; } if (bswap) { temp = btmp[1]; btmp[1] = btmp[0] - l21 * temp; btmp[0] = temp; } else { btmp[1] -= l21 * btmp[0]; } *scale = 1.; if (smlnum * 2. * abs(btmp[1]) > abs(u22) || smlnum * 2. * abs(btmp[0]) > abs(u11)) { /* Computing MAX */ d__1 = abs(btmp[0]), d__2 = abs(btmp[1]); *scale = .5 / max(d__1,d__2); btmp[0] *= *scale; btmp[1] *= *scale; } x2[1] = btmp[1] / u22; x2[0] = btmp[0] / u11 - u12 / u11 * x2[1]; if (xswap) { temp = x2[1]; x2[1] = x2[0]; x2[0] = temp; } x[x_dim1 + 1] = x2[0]; if (*n1 == 1) { x[(x_dim1 << 1) + 1] = x2[1]; *xnorm = (d__1 = x[x_dim1 + 1], abs(d__1)) + (d__2 = x[(x_dim1 << 1) + 1], abs(d__2)); } else { x[x_dim1 + 2] = x2[1]; /* Computing MAX */ d__3 = (d__1 = x[x_dim1 + 1], abs(d__1)), d__4 = (d__2 = x[x_dim1 + 2] , abs(d__2)); *xnorm = max(d__3,d__4); } return 0; /* 2 by 2: op[TL11 TL12]*[X11 X12] +ISGN* [X11 X12]*op[TR11 TR12] = [B11 B12] [TL21 TL22] [X21 X22] [X21 X22] [TR21 TR22] [B21 B22] Solve equivalent 4 by 4 system using complete pivoting. Set pivots less than SMIN to SMIN. 
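   For the untransposed case (LTRANL = LTRANR = .FALSE.), the 4 by 4 matrix
   assembled in T16 below is the Kronecker-structured operator acting on
   vec(X) = (x11, x21, x12, x22), with right-hand side (b11, b21, b12, b22).
   A sketch of that assembly with 0-based C 2 by 2 arrays (hypothetical name
   example_build_kron_system):

       static void example_build_kron_system(const double tl[2][2],
                                             const double tr[2][2],
                                             double sgn, double t[4][4])
       {
           int i, j, k, l;
           for (i = 0; i < 2; ++i)
               for (j = 0; j < 2; ++j)
                   for (k = 0; k < 2; ++k)
                       for (l = 0; l < 2; ++l)
                           t[i + 2 * j][k + 2 * l] =
                               (j == l ? tl[i][k] : 0.)
                             + sgn * (i == k ? tr[l][j] : 0.);
       }

   That is, T = I (x) TL + ISGN * (TR transposed) (x) I, which is then factored
   with complete pivoting as described above.
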
*/ L50: /* Computing MAX */ d__5 = (d__1 = tr[tr_dim1 + 1], abs(d__1)), d__6 = (d__2 = tr[(tr_dim1 << 1) + 1], abs(d__2)), d__5 = max(d__5,d__6), d__6 = (d__3 = tr[ tr_dim1 + 2], abs(d__3)), d__5 = max(d__5,d__6), d__6 = (d__4 = tr[(tr_dim1 << 1) + 2], abs(d__4)); smin = max(d__5,d__6); /* Computing MAX */ d__5 = smin, d__6 = (d__1 = tl[tl_dim1 + 1], abs(d__1)), d__5 = max(d__5, d__6), d__6 = (d__2 = tl[(tl_dim1 << 1) + 1], abs(d__2)), d__5 = max(d__5,d__6), d__6 = (d__3 = tl[tl_dim1 + 2], abs(d__3)), d__5 = max(d__5,d__6), d__6 = (d__4 = tl[(tl_dim1 << 1) + 2], abs(d__4)) ; smin = max(d__5,d__6); /* Computing MAX */ d__1 = eps * smin; smin = max(d__1,smlnum); btmp[0] = 0.; dcopy_(&c__16, btmp, &c__0, t16, &c__1); t16[0] = tl[tl_dim1 + 1] + sgn * tr[tr_dim1 + 1]; t16[5] = tl[(tl_dim1 << 1) + 2] + sgn * tr[tr_dim1 + 1]; t16[10] = tl[tl_dim1 + 1] + sgn * tr[(tr_dim1 << 1) + 2]; t16[15] = tl[(tl_dim1 << 1) + 2] + sgn * tr[(tr_dim1 << 1) + 2]; if (*ltranl) { t16[4] = tl[tl_dim1 + 2]; t16[1] = tl[(tl_dim1 << 1) + 1]; t16[14] = tl[tl_dim1 + 2]; t16[11] = tl[(tl_dim1 << 1) + 1]; } else { t16[4] = tl[(tl_dim1 << 1) + 1]; t16[1] = tl[tl_dim1 + 2]; t16[14] = tl[(tl_dim1 << 1) + 1]; t16[11] = tl[tl_dim1 + 2]; } if (*ltranr) { t16[8] = sgn * tr[(tr_dim1 << 1) + 1]; t16[13] = sgn * tr[(tr_dim1 << 1) + 1]; t16[2] = sgn * tr[tr_dim1 + 2]; t16[7] = sgn * tr[tr_dim1 + 2]; } else { t16[8] = sgn * tr[tr_dim1 + 2]; t16[13] = sgn * tr[tr_dim1 + 2]; t16[2] = sgn * tr[(tr_dim1 << 1) + 1]; t16[7] = sgn * tr[(tr_dim1 << 1) + 1]; } btmp[0] = b[b_dim1 + 1]; btmp[1] = b[b_dim1 + 2]; btmp[2] = b[(b_dim1 << 1) + 1]; btmp[3] = b[(b_dim1 << 1) + 2]; /* Perform elimination */ for (i__ = 1; i__ <= 3; ++i__) { xmax = 0.; for (ip = i__; ip <= 4; ++ip) { for (jp = i__; jp <= 4; ++jp) { if ((d__1 = t16[ip + (jp << 2) - 5], abs(d__1)) >= xmax) { xmax = (d__1 = t16[ip + (jp << 2) - 5], abs(d__1)); ipsv = ip; jpsv = jp; } /* L60: */ } /* L70: */ } if (ipsv != i__) { dswap_(&c__4, &t16[ipsv - 1], &c__4, &t16[i__ - 1], &c__4); temp = btmp[i__ - 1]; btmp[i__ - 1] = btmp[ipsv - 1]; btmp[ipsv - 1] = temp; } if (jpsv != i__) { dswap_(&c__4, &t16[(jpsv << 2) - 4], &c__1, &t16[(i__ << 2) - 4], &c__1); } jpiv[i__ - 1] = jpsv; if ((d__1 = t16[i__ + (i__ << 2) - 5], abs(d__1)) < smin) { *info = 1; t16[i__ + (i__ << 2) - 5] = smin; } for (j = i__ + 1; j <= 4; ++j) { t16[j + (i__ << 2) - 5] /= t16[i__ + (i__ << 2) - 5]; btmp[j - 1] -= t16[j + (i__ << 2) - 5] * btmp[i__ - 1]; for (k = i__ + 1; k <= 4; ++k) { t16[j + (k << 2) - 5] -= t16[j + (i__ << 2) - 5] * t16[i__ + ( k << 2) - 5]; /* L80: */ } /* L90: */ } /* L100: */ } if (abs(t16[15]) < smin) { t16[15] = smin; } *scale = 1.; if (smlnum * 8. * abs(btmp[0]) > abs(t16[0]) || smlnum * 8. * abs(btmp[1]) > abs(t16[5]) || smlnum * 8. * abs(btmp[2]) > abs(t16[10]) || smlnum * 8. * abs(btmp[3]) > abs(t16[15])) { /* Computing MAX */ d__1 = abs(btmp[0]), d__2 = abs(btmp[1]), d__1 = max(d__1,d__2), d__2 = abs(btmp[2]), d__1 = max(d__1,d__2), d__2 = abs(btmp[3]); *scale = .125 / max(d__1,d__2); btmp[0] *= *scale; btmp[1] *= *scale; btmp[2] *= *scale; btmp[3] *= *scale; } for (i__ = 1; i__ <= 4; ++i__) { k = 5 - i__; temp = 1. 
/ t16[k + (k << 2) - 5]; tmp[k - 1] = btmp[k - 1] * temp; for (j = k + 1; j <= 4; ++j) { tmp[k - 1] -= temp * t16[k + (j << 2) - 5] * tmp[j - 1]; /* L110: */ } /* L120: */ } for (i__ = 1; i__ <= 3; ++i__) { if (jpiv[4 - i__ - 1] != 4 - i__) { temp = tmp[4 - i__ - 1]; tmp[4 - i__ - 1] = tmp[jpiv[4 - i__ - 1] - 1]; tmp[jpiv[4 - i__ - 1] - 1] = temp; } /* L130: */ } x[x_dim1 + 1] = tmp[0]; x[x_dim1 + 2] = tmp[1]; x[(x_dim1 << 1) + 1] = tmp[2]; x[(x_dim1 << 1) + 2] = tmp[3]; /* Computing MAX */ d__1 = abs(tmp[0]) + abs(tmp[2]), d__2 = abs(tmp[1]) + abs(tmp[3]); *xnorm = max(d__1,d__2); return 0; /* End of DLASY2 */ } /* dlasy2_ */ /* Subroutine */ int dlatrd_(char *uplo, integer *n, integer *nb, doublereal * a, integer *lda, doublereal *e, doublereal *tau, doublereal *w, integer *ldw) { /* System generated locals */ integer a_dim1, a_offset, w_dim1, w_offset, i__1, i__2, i__3; /* Local variables */ extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *, integer *); static integer i__; static doublereal alpha; extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, integer *); extern logical lsame_(char *, char *); extern /* Subroutine */ int dgemv_(char *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *), daxpy_(integer *, doublereal *, doublereal *, integer *, doublereal *, integer *), dsymv_(char *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *), dlarfg_(integer *, doublereal *, doublereal *, integer *, doublereal *); static integer iw; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLATRD reduces NB rows and columns of a real symmetric matrix A to symmetric tridiagonal form by an orthogonal similarity transformation Q' * A * Q, and returns the matrices V and W which are needed to apply the transformation to the unreduced part of A. If UPLO = 'U', DLATRD reduces the last NB rows and columns of a matrix, of which the upper triangle is supplied; if UPLO = 'L', DLATRD reduces the first NB rows and columns of a matrix, of which the lower triangle is supplied. This is an auxiliary routine called by DSYTRD. Arguments ========= UPLO (input) CHARACTER*1 Specifies whether the upper or lower triangular part of the symmetric matrix A is stored: = 'U': Upper triangular = 'L': Lower triangular N (input) INTEGER The order of the matrix A. NB (input) INTEGER The number of rows and columns to be reduced. A (input/output) DOUBLE PRECISION array, dimension (LDA,N) On entry, the symmetric matrix A. If UPLO = 'U', the leading n-by-n upper triangular part of A contains the upper triangular part of the matrix A, and the strictly lower triangular part of A is not referenced. If UPLO = 'L', the leading n-by-n lower triangular part of A contains the lower triangular part of the matrix A, and the strictly upper triangular part of A is not referenced. 
On exit: if UPLO = 'U', the last NB columns have been reduced to tridiagonal form, with the diagonal elements overwriting the diagonal elements of A; the elements above the diagonal with the array TAU, represent the orthogonal matrix Q as a product of elementary reflectors; if UPLO = 'L', the first NB columns have been reduced to tridiagonal form, with the diagonal elements overwriting the diagonal elements of A; the elements below the diagonal with the array TAU, represent the orthogonal matrix Q as a product of elementary reflectors. See Further Details. LDA (input) INTEGER The leading dimension of the array A. LDA >= (1,N). E (output) DOUBLE PRECISION array, dimension (N-1) If UPLO = 'U', E(n-nb:n-1) contains the superdiagonal elements of the last NB columns of the reduced matrix; if UPLO = 'L', E(1:nb) contains the subdiagonal elements of the first NB columns of the reduced matrix. TAU (output) DOUBLE PRECISION array, dimension (N-1) The scalar factors of the elementary reflectors, stored in TAU(n-nb:n-1) if UPLO = 'U', and in TAU(1:nb) if UPLO = 'L'. See Further Details. W (output) DOUBLE PRECISION array, dimension (LDW,NB) The n-by-nb matrix W required to update the unreduced part of A. LDW (input) INTEGER The leading dimension of the array W. LDW >= max(1,N). Further Details =============== If UPLO = 'U', the matrix Q is represented as a product of elementary reflectors Q = H(n) H(n-1) . . . H(n-nb+1). Each H(i) has the form H(i) = I - tau * v * v' where tau is a real scalar, and v is a real vector with v(i:n) = 0 and v(i-1) = 1; v(1:i-1) is stored on exit in A(1:i-1,i), and tau in TAU(i-1). If UPLO = 'L', the matrix Q is represented as a product of elementary reflectors Q = H(1) H(2) . . . H(nb). Each H(i) has the form H(i) = I - tau * v * v' where tau is a real scalar, and v is a real vector with v(1:i) = 0 and v(i+1) = 1; v(i+1:n) is stored on exit in A(i+1:n,i), and tau in TAU(i). The elements of the vectors v together form the n-by-nb matrix V which is needed, with W, to apply the transformation to the unreduced part of the matrix, using a symmetric rank-2k update of the form: A := A - V*W' - W*V'. The contents of A on exit are illustrated by the following examples with n = 5 and nb = 2: if UPLO = 'U': if UPLO = 'L': ( a a a v4 v5 ) ( d ) ( a a v4 v5 ) ( 1 d ) ( a 1 v5 ) ( v1 1 a ) ( d 1 ) ( v1 v2 a a ) ( d ) ( v1 v2 a a a ) where d denotes a diagonal element of the reduced matrix, a denotes an element of the original matrix that is unchanged, and vi denotes an element of the vector defining H(i). 
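   The blocked factorization that uses these V and W panels updates the trailing
   part of A with the symmetric rank-2k formula A := A - V*W' - W*V' quoted
   above.  Written out naively for column-major n by nb panels (hypothetical
   name example_rank2k_update; LAPACK performs this step with a BLAS-3 routine
   rather than explicit loops):

       static void example_rank2k_update(int n, int nb, double *a, int lda,
                                         const double *v, int ldv,
                                         const double *w, int ldw)
       {
           int i, j, k;
           for (j = 0; j < n; ++j)
               for (i = 0; i < n; ++i)
                   for (k = 0; k < nb; ++k)
                       a[i + j * lda] -= v[i + k * ldv] * w[j + k * ldw]
                                       + w[i + k * ldw] * v[j + k * ldv];
       }
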
===================================================================== Quick return if possible */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --e; --tau; w_dim1 = *ldw; w_offset = 1 + w_dim1 * 1; w -= w_offset; /* Function Body */ if (*n <= 0) { return 0; } if (lsame_(uplo, "U")) { /* Reduce last NB columns of upper triangle */ i__1 = *n - *nb + 1; for (i__ = *n; i__ >= i__1; --i__) { iw = i__ - *n + *nb; if (i__ < *n) { /* Update A(1:i,i) */ i__2 = *n - i__; dgemv_("No transpose", &i__, &i__2, &c_b151, &a[(i__ + 1) * a_dim1 + 1], lda, &w[i__ + (iw + 1) * w_dim1], ldw, & c_b15, &a[i__ * a_dim1 + 1], &c__1); i__2 = *n - i__; dgemv_("No transpose", &i__, &i__2, &c_b151, &w[(iw + 1) * w_dim1 + 1], ldw, &a[i__ + (i__ + 1) * a_dim1], lda, & c_b15, &a[i__ * a_dim1 + 1], &c__1); } if (i__ > 1) { /* Generate elementary reflector H(i) to annihilate A(1:i-2,i) */ i__2 = i__ - 1; dlarfg_(&i__2, &a[i__ - 1 + i__ * a_dim1], &a[i__ * a_dim1 + 1], &c__1, &tau[i__ - 1]); e[i__ - 1] = a[i__ - 1 + i__ * a_dim1]; a[i__ - 1 + i__ * a_dim1] = 1.; /* Compute W(1:i-1,i) */ i__2 = i__ - 1; dsymv_("Upper", &i__2, &c_b15, &a[a_offset], lda, &a[i__ * a_dim1 + 1], &c__1, &c_b29, &w[iw * w_dim1 + 1], & c__1); if (i__ < *n) { i__2 = i__ - 1; i__3 = *n - i__; dgemv_("Transpose", &i__2, &i__3, &c_b15, &w[(iw + 1) * w_dim1 + 1], ldw, &a[i__ * a_dim1 + 1], &c__1, & c_b29, &w[i__ + 1 + iw * w_dim1], &c__1); i__2 = i__ - 1; i__3 = *n - i__; dgemv_("No transpose", &i__2, &i__3, &c_b151, &a[(i__ + 1) * a_dim1 + 1], lda, &w[i__ + 1 + iw * w_dim1], & c__1, &c_b15, &w[iw * w_dim1 + 1], &c__1); i__2 = i__ - 1; i__3 = *n - i__; dgemv_("Transpose", &i__2, &i__3, &c_b15, &a[(i__ + 1) * a_dim1 + 1], lda, &a[i__ * a_dim1 + 1], &c__1, & c_b29, &w[i__ + 1 + iw * w_dim1], &c__1); i__2 = i__ - 1; i__3 = *n - i__; dgemv_("No transpose", &i__2, &i__3, &c_b151, &w[(iw + 1) * w_dim1 + 1], ldw, &w[i__ + 1 + iw * w_dim1], & c__1, &c_b15, &w[iw * w_dim1 + 1], &c__1); } i__2 = i__ - 1; dscal_(&i__2, &tau[i__ - 1], &w[iw * w_dim1 + 1], &c__1); i__2 = i__ - 1; alpha = tau[i__ - 1] * -.5 * ddot_(&i__2, &w[iw * w_dim1 + 1], &c__1, &a[i__ * a_dim1 + 1], &c__1); i__2 = i__ - 1; daxpy_(&i__2, &alpha, &a[i__ * a_dim1 + 1], &c__1, &w[iw * w_dim1 + 1], &c__1); } /* L10: */ } } else { /* Reduce first NB columns of lower triangle */ i__1 = *nb; for (i__ = 1; i__ <= i__1; ++i__) { /* Update A(i:n,i) */ i__2 = *n - i__ + 1; i__3 = i__ - 1; dgemv_("No transpose", &i__2, &i__3, &c_b151, &a[i__ + a_dim1], lda, &w[i__ + w_dim1], ldw, &c_b15, &a[i__ + i__ * a_dim1] , &c__1); i__2 = *n - i__ + 1; i__3 = i__ - 1; dgemv_("No transpose", &i__2, &i__3, &c_b151, &w[i__ + w_dim1], ldw, &a[i__ + a_dim1], lda, &c_b15, &a[i__ + i__ * a_dim1] , &c__1); if (i__ < *n) { /* Generate elementary reflector H(i) to annihilate A(i+2:n,i) */ i__2 = *n - i__; /* Computing MIN */ i__3 = i__ + 2; dlarfg_(&i__2, &a[i__ + 1 + i__ * a_dim1], &a[min(i__3,*n) + i__ * a_dim1], &c__1, &tau[i__]); e[i__] = a[i__ + 1 + i__ * a_dim1]; a[i__ + 1 + i__ * a_dim1] = 1.; /* Compute W(i+1:n,i) */ i__2 = *n - i__; dsymv_("Lower", &i__2, &c_b15, &a[i__ + 1 + (i__ + 1) * a_dim1], lda, &a[i__ + 1 + i__ * a_dim1], &c__1, & c_b29, &w[i__ + 1 + i__ * w_dim1], &c__1); i__2 = *n - i__; i__3 = i__ - 1; dgemv_("Transpose", &i__2, &i__3, &c_b15, &w[i__ + 1 + w_dim1] , ldw, &a[i__ + 1 + i__ * a_dim1], &c__1, &c_b29, &w[ i__ * w_dim1 + 1], &c__1); i__2 = *n - i__; i__3 = i__ - 1; dgemv_("No transpose", &i__2, &i__3, &c_b151, &a[i__ + 1 + a_dim1], lda, &w[i__ * w_dim1 + 1], 
&c__1, &c_b15, &w[ i__ + 1 + i__ * w_dim1], &c__1); i__2 = *n - i__; i__3 = i__ - 1; dgemv_("Transpose", &i__2, &i__3, &c_b15, &a[i__ + 1 + a_dim1] , lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &c_b29, &w[ i__ * w_dim1 + 1], &c__1); i__2 = *n - i__; i__3 = i__ - 1; dgemv_("No transpose", &i__2, &i__3, &c_b151, &w[i__ + 1 + w_dim1], ldw, &w[i__ * w_dim1 + 1], &c__1, &c_b15, &w[ i__ + 1 + i__ * w_dim1], &c__1); i__2 = *n - i__; dscal_(&i__2, &tau[i__], &w[i__ + 1 + i__ * w_dim1], &c__1); i__2 = *n - i__; alpha = tau[i__] * -.5 * ddot_(&i__2, &w[i__ + 1 + i__ * w_dim1], &c__1, &a[i__ + 1 + i__ * a_dim1], &c__1); i__2 = *n - i__; daxpy_(&i__2, &alpha, &a[i__ + 1 + i__ * a_dim1], &c__1, &w[ i__ + 1 + i__ * w_dim1], &c__1); } /* L20: */ } } return 0; /* End of DLATRD */ } /* dlatrd_ */ /* Subroutine */ int dlazq3_(integer *i0, integer *n0, doublereal *z__, integer *pp, doublereal *dmin__, doublereal *sigma, doublereal *desig, doublereal *qmax, integer *nfail, integer *iter, integer *ndiv, logical *ieee, integer *ttype, doublereal *dmin1, doublereal *dmin2, doublereal *dn, doublereal *dn1, doublereal *dn2, doublereal *tau) { /* System generated locals */ integer i__1; doublereal d__1, d__2; /* Builtin functions */ double sqrt(doublereal); /* Local variables */ static doublereal temp, g, s, t; static integer j4; extern /* Subroutine */ int dlasq5_(integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, logical *), dlasq6_( integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *), dlazq4_(integer *, integer *, doublereal *, integer *, integer *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, integer *, doublereal *); static integer nn; static doublereal safmin, eps, tol; static integer n0in, ipn4; static doublereal tol2; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLAZQ3 checks for deflation, computes a shift (TAU) and calls dqds. In case of failure it changes shifts, and tries again until output is positive. Arguments ========= I0 (input) INTEGER First index. N0 (input) INTEGER Last index. Z (input) DOUBLE PRECISION array, dimension ( 4*N ) Z holds the qd array. PP (input) INTEGER PP=0 for ping, PP=1 for pong. DMIN (output) DOUBLE PRECISION Minimum value of d. SIGMA (output) DOUBLE PRECISION Sum of shifts used in current segment. DESIG (input/output) DOUBLE PRECISION Lower order part of SIGMA QMAX (input) DOUBLE PRECISION Maximum value of q. NFAIL (output) INTEGER Number of times shift was too big. ITER (output) INTEGER Number of iterations. NDIV (output) INTEGER Number of divisions. IEEE (input) LOGICAL Flag for IEEE or non IEEE arithmetic (passed to DLASQ5). TTYPE (input/output) INTEGER Shift type. TTYPE is passed as an argument in order to save its value between calls to DLAZQ3 DMIN1 (input/output) REAL DMIN2 (input/output) REAL DN (input/output) REAL DN1 (input/output) REAL DN2 (input/output) REAL TAU (input/output) REAL These are passed as arguments in order to save their values between calls to DLAZQ3 This is a thread safe version of DLASQ3, which passes TTYPE, DMIN1, DMIN2, DN, DN1. DN2 and TAU through the argument list in place of declaring them in a SAVE statment. 
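    Illustrative sketch (not part of the LAPACK source) of the shift/retry
    strategy implemented below, written in terms of this routine's own
    parameters: after the deflation checks, a shift TAU is chosen by DLAZQ4
    and the dqds transform DLASQ5 is repeated, adjusting TAU whenever the
    resulting DMIN is negative:

        dlazq4_(i0, n0, &z__[1], pp, &n0in, dmin__, dmin1, dmin2,
                dn, dn1, dn2, tau, ttype, &g);
        do {
            dlasq5_(i0, n0, &z__[1], pp, tau, dmin__, dmin1, dmin2,
                    dn, dn1, dn2, ieee);
            if (*dmin__ < 0.) {
                *tau *= .25;
            }
        } while (*dmin__ < 0.);

    The sketch collapses the failure handling: the real code below
    distinguishes late failures (which refine TAU instead of shrinking it),
    NaN shifts, repeated failures (TAU set to zero) and possible underflow,
    in which case it falls back to the unshifted transform DLASQ6.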
===================================================================== */ /* Parameter adjustments */ --z__; /* Function Body */ n0in = *n0; eps = PRECISION; safmin = SAFEMINIMUM; tol = eps * 100.; /* Computing 2nd power */ d__1 = tol; tol2 = d__1 * d__1; g = 0.; /* Check for deflation. */ L10: if (*n0 < *i0) { return 0; } if (*n0 == *i0) { goto L20; } nn = (*n0 << 2) + *pp; if (*n0 == *i0 + 1) { goto L40; } /* Check whether E(N0-1) is negligible, 1 eigenvalue. */ if (z__[nn - 5] > tol2 * (*sigma + z__[nn - 3]) && z__[nn - (*pp << 1) - 4] > tol2 * z__[nn - 7]) { goto L30; } L20: z__[(*n0 << 2) - 3] = z__[(*n0 << 2) + *pp - 3] + *sigma; --(*n0); goto L10; /* Check whether E(N0-2) is negligible, 2 eigenvalues. */ L30: if (z__[nn - 9] > tol2 * *sigma && z__[nn - (*pp << 1) - 8] > tol2 * z__[ nn - 11]) { goto L50; } L40: if (z__[nn - 3] > z__[nn - 7]) { s = z__[nn - 3]; z__[nn - 3] = z__[nn - 7]; z__[nn - 7] = s; } if (z__[nn - 5] > z__[nn - 3] * tol2) { t = (z__[nn - 7] - z__[nn - 3] + z__[nn - 5]) * .5; s = z__[nn - 3] * (z__[nn - 5] / t); if (s <= t) { s = z__[nn - 3] * (z__[nn - 5] / (t * (sqrt(s / t + 1.) + 1.))); } else { s = z__[nn - 3] * (z__[nn - 5] / (t + sqrt(t) * sqrt(t + s))); } t = z__[nn - 7] + (s + z__[nn - 5]); z__[nn - 3] *= z__[nn - 7] / t; z__[nn - 7] = t; } z__[(*n0 << 2) - 7] = z__[nn - 7] + *sigma; z__[(*n0 << 2) - 3] = z__[nn - 3] + *sigma; *n0 += -2; goto L10; L50: /* Reverse the qd-array, if warranted. */ if (*dmin__ <= 0. || *n0 < n0in) { if (z__[(*i0 << 2) + *pp - 3] * 1.5 < z__[(*n0 << 2) + *pp - 3]) { ipn4 = *i0 + *n0 << 2; i__1 = *i0 + *n0 - 1 << 1; for (j4 = *i0 << 2; j4 <= i__1; j4 += 4) { temp = z__[j4 - 3]; z__[j4 - 3] = z__[ipn4 - j4 - 3]; z__[ipn4 - j4 - 3] = temp; temp = z__[j4 - 2]; z__[j4 - 2] = z__[ipn4 - j4 - 2]; z__[ipn4 - j4 - 2] = temp; temp = z__[j4 - 1]; z__[j4 - 1] = z__[ipn4 - j4 - 5]; z__[ipn4 - j4 - 5] = temp; temp = z__[j4]; z__[j4] = z__[ipn4 - j4 - 4]; z__[ipn4 - j4 - 4] = temp; /* L60: */ } if (*n0 - *i0 <= 4) { z__[(*n0 << 2) + *pp - 1] = z__[(*i0 << 2) + *pp - 1]; z__[(*n0 << 2) - *pp] = z__[(*i0 << 2) - *pp]; } /* Computing MIN */ d__1 = *dmin2, d__2 = z__[(*n0 << 2) + *pp - 1]; *dmin2 = min(d__1,d__2); /* Computing MIN */ d__1 = z__[(*n0 << 2) + *pp - 1], d__2 = z__[(*i0 << 2) + *pp - 1] , d__1 = min(d__1,d__2), d__2 = z__[(*i0 << 2) + *pp + 3]; z__[(*n0 << 2) + *pp - 1] = min(d__1,d__2); /* Computing MIN */ d__1 = z__[(*n0 << 2) - *pp], d__2 = z__[(*i0 << 2) - *pp], d__1 = min(d__1,d__2), d__2 = z__[(*i0 << 2) - *pp + 4]; z__[(*n0 << 2) - *pp] = min(d__1,d__2); /* Computing MAX */ d__1 = *qmax, d__2 = z__[(*i0 << 2) + *pp - 3], d__1 = max(d__1, d__2), d__2 = z__[(*i0 << 2) + *pp + 1]; *qmax = max(d__1,d__2); *dmin__ = 0.; } } /* Computing MIN */ d__1 = z__[(*n0 << 2) + *pp - 1], d__2 = z__[(*n0 << 2) + *pp - 9], d__1 = min(d__1,d__2), d__2 = *dmin2 + z__[(*n0 << 2) - *pp]; if (*dmin__ < 0. || safmin * *qmax < min(d__1,d__2)) { /* Choose a shift. */ dlazq4_(i0, n0, &z__[1], pp, &n0in, dmin__, dmin1, dmin2, dn, dn1, dn2, tau, ttype, &g); /* Call dqds until DMIN > 0. */ L80: dlasq5_(i0, n0, &z__[1], pp, tau, dmin__, dmin1, dmin2, dn, dn1, dn2, ieee); *ndiv += *n0 - *i0 + 2; ++(*iter); /* Check status. */ if (*dmin__ >= 0. && *dmin1 > 0.) { /* Success. */ goto L100; } else if (*dmin__ < 0. && *dmin1 > 0. && z__[(*n0 - 1 << 2) - *pp] < tol * (*sigma + *dn1) && abs(*dn) < tol * *sigma) { /* Convergence hidden by negative DN. */ z__[(*n0 - 1 << 2) - *pp + 2] = 0.; *dmin__ = 0.; goto L100; } else if (*dmin__ < 0.) { /* TAU too big. 
Select new TAU and try again. */ ++(*nfail); if (*ttype < -22) { /* Failed twice. Play it safe. */ *tau = 0.; } else if (*dmin1 > 0.) { /* Late failure. Gives excellent shift. */ *tau = (*tau + *dmin__) * (1. - eps * 2.); *ttype += -11; } else { /* Early failure. Divide by 4. */ *tau *= .25; *ttype += -12; } goto L80; } else if (*dmin__ != *dmin__) { /* NaN. */ *tau = 0.; goto L80; } else { /* Possible underflow. Play it safe. */ goto L90; } } /* Risk of underflow. */ L90: dlasq6_(i0, n0, &z__[1], pp, dmin__, dmin1, dmin2, dn, dn1, dn2); *ndiv += *n0 - *i0 + 2; ++(*iter); *tau = 0.; L100: if (*tau < *sigma) { *desig += *tau; t = *sigma + *desig; *desig -= t - *sigma; } else { t = *sigma + *tau; *desig = *sigma - (t - *tau) + *desig; } *sigma = t; return 0; /* End of DLAZQ3 */ } /* dlazq3_ */ /* Subroutine */ int dlazq4_(integer *i0, integer *n0, doublereal *z__, integer *pp, integer *n0in, doublereal *dmin__, doublereal *dmin1, doublereal *dmin2, doublereal *dn, doublereal *dn1, doublereal *dn2, doublereal *tau, integer *ttype, doublereal *g) { /* System generated locals */ integer i__1; doublereal d__1, d__2; /* Builtin functions */ double sqrt(doublereal); /* Local variables */ static doublereal s, a2, b1, b2; static integer i4, nn, np; static doublereal gam, gap1, gap2; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DLAZQ4 computes an approximation TAU to the smallest eigenvalue using values of d from the previous transform. I0 (input) INTEGER First index. N0 (input) INTEGER Last index. Z (input) DOUBLE PRECISION array, dimension ( 4*N ) Z holds the qd array. PP (input) INTEGER PP=0 for ping, PP=1 for pong. N0IN (input) INTEGER The value of N0 at start of EIGTEST. DMIN (input) DOUBLE PRECISION Minimum value of d. DMIN1 (input) DOUBLE PRECISION Minimum value of d, excluding D( N0 ). DMIN2 (input) DOUBLE PRECISION Minimum value of d, excluding D( N0 ) and D( N0-1 ). DN (input) DOUBLE PRECISION d(N) DN1 (input) DOUBLE PRECISION d(N-1) DN2 (input) DOUBLE PRECISION d(N-2) TAU (output) DOUBLE PRECISION This is the shift. TTYPE (output) INTEGER Shift type. G (input/output) DOUBLE PRECISION G is passed as an argument in order to save its value between calls to DLAZQ4 Further Details =============== CNST1 = 9/16 This is a thread safe version of DLASQ4, which passes G through the argument list in place of declaring G in a SAVE statment. ===================================================================== A negative DMIN forces the shift to take that absolute value TTYPE records the type of shift. */ /* Parameter adjustments */ --z__; /* Function Body */ if (*dmin__ <= 0.) { *tau = -(*dmin__); *ttype = -1; return 0; } nn = (*n0 << 2) + *pp; if (*n0in == *n0) { /* No eigenvalues deflated. */ if (*dmin__ == *dn || *dmin__ == *dn1) { b1 = sqrt(z__[nn - 3]) * sqrt(z__[nn - 5]); b2 = sqrt(z__[nn - 7]) * sqrt(z__[nn - 9]); a2 = z__[nn - 7] + z__[nn - 5]; /* Cases 2 and 3. */ if (*dmin__ == *dn && *dmin1 == *dn1) { gap2 = *dmin2 - a2 - *dmin2 * .25; if (gap2 > 0. && gap2 > b2) { gap1 = a2 - *dn - b2 / gap2 * b2; } else { gap1 = a2 - *dn - (b1 + b2); } if (gap1 > 0. 
&& gap1 > b1) { /* Computing MAX */ d__1 = *dn - b1 / gap1 * b1, d__2 = *dmin__ * .5; s = max(d__1,d__2); *ttype = -2; } else { s = 0.; if (*dn > b1) { s = *dn - b1; } if (a2 > b1 + b2) { /* Computing MIN */ d__1 = s, d__2 = a2 - (b1 + b2); s = min(d__1,d__2); } /* Computing MAX */ d__1 = s, d__2 = *dmin__ * .333; s = max(d__1,d__2); *ttype = -3; } } else { /* Case 4. */ *ttype = -4; s = *dmin__ * .25; if (*dmin__ == *dn) { gam = *dn; a2 = 0.; if (z__[nn - 5] > z__[nn - 7]) { return 0; } b2 = z__[nn - 5] / z__[nn - 7]; np = nn - 9; } else { np = nn - (*pp << 1); b2 = z__[np - 2]; gam = *dn1; if (z__[np - 4] > z__[np - 2]) { return 0; } a2 = z__[np - 4] / z__[np - 2]; if (z__[nn - 9] > z__[nn - 11]) { return 0; } b2 = z__[nn - 9] / z__[nn - 11]; np = nn - 13; } /* Approximate contribution to norm squared from I < NN-1. */ a2 += b2; i__1 = (*i0 << 2) - 1 + *pp; for (i4 = np; i4 >= i__1; i4 += -4) { if (b2 == 0.) { goto L20; } b1 = b2; if (z__[i4] > z__[i4 - 2]) { return 0; } b2 *= z__[i4] / z__[i4 - 2]; a2 += b2; if (max(b2,b1) * 100. < a2 || .563 < a2) { goto L20; } /* L10: */ } L20: a2 *= 1.05; /* Rayleigh quotient residual bound. */ if (a2 < .563) { s = gam * (1. - sqrt(a2)) / (a2 + 1.); } } } else if (*dmin__ == *dn2) { /* Case 5. */ *ttype = -5; s = *dmin__ * .25; /* Compute contribution to norm squared from I > NN-2. */ np = nn - (*pp << 1); b1 = z__[np - 2]; b2 = z__[np - 6]; gam = *dn2; if (z__[np - 8] > b2 || z__[np - 4] > b1) { return 0; } a2 = z__[np - 8] / b2 * (z__[np - 4] / b1 + 1.); /* Approximate contribution to norm squared from I < NN-2. */ if (*n0 - *i0 > 2) { b2 = z__[nn - 13] / z__[nn - 15]; a2 += b2; i__1 = (*i0 << 2) - 1 + *pp; for (i4 = nn - 17; i4 >= i__1; i4 += -4) { if (b2 == 0.) { goto L40; } b1 = b2; if (z__[i4] > z__[i4 - 2]) { return 0; } b2 *= z__[i4] / z__[i4 - 2]; a2 += b2; if (max(b2,b1) * 100. < a2 || .563 < a2) { goto L40; } /* L30: */ } L40: a2 *= 1.05; } if (a2 < .563) { s = gam * (1. - sqrt(a2)) / (a2 + 1.); } } else { /* Case 6, no information to guide us. */ if (*ttype == -6) { *g += (1. - *g) * .333; } else if (*ttype == -18) { *g = .083250000000000005; } else { *g = .25; } s = *g * *dmin__; *ttype = -6; } } else if (*n0in == *n0 + 1) { /* One eigenvalue just deflated. Use DMIN1, DN1 for DMIN and DN. */ if (*dmin1 == *dn1 && *dmin2 == *dn2) { /* Cases 7 and 8. */ *ttype = -7; s = *dmin1 * .333; if (z__[nn - 5] > z__[nn - 7]) { return 0; } b1 = z__[nn - 5] / z__[nn - 7]; b2 = b1; if (b2 == 0.) { goto L60; } i__1 = (*i0 << 2) - 1 + *pp; for (i4 = (*n0 << 2) - 9 + *pp; i4 >= i__1; i4 += -4) { a2 = b1; if (z__[i4] > z__[i4 - 2]) { return 0; } b1 *= z__[i4] / z__[i4 - 2]; b2 += b1; if (max(b1,a2) * 100. < b2) { goto L60; } /* L50: */ } L60: b2 = sqrt(b2 * 1.05); /* Computing 2nd power */ d__1 = b2; a2 = *dmin1 / (d__1 * d__1 + 1.); gap2 = *dmin2 * .5 - a2; if (gap2 > 0. && gap2 > b2 * a2) { /* Computing MAX */ d__1 = s, d__2 = a2 * (1. - a2 * 1.01 * (b2 / gap2) * b2); s = max(d__1,d__2); } else { /* Computing MAX */ d__1 = s, d__2 = a2 * (1. - b2 * 1.01); s = max(d__1,d__2); *ttype = -8; } } else { /* Case 9. */ s = *dmin1 * .25; if (*dmin1 == *dn1) { s = *dmin1 * .5; } *ttype = -9; } } else if (*n0in == *n0 + 2) { /* Two eigenvalues deflated. Use DMIN2, DN2 for DMIN and DN. Cases 10 and 11. */ if (*dmin2 == *dn2 && z__[nn - 5] * 2. < z__[nn - 7]) { *ttype = -10; s = *dmin2 * .333; if (z__[nn - 5] > z__[nn - 7]) { return 0; } b1 = z__[nn - 5] / z__[nn - 7]; b2 = b1; if (b2 == 0.) 
{ goto L80; } i__1 = (*i0 << 2) - 1 + *pp; for (i4 = (*n0 << 2) - 9 + *pp; i4 >= i__1; i4 += -4) { if (z__[i4] > z__[i4 - 2]) { return 0; } b1 *= z__[i4] / z__[i4 - 2]; b2 += b1; if (b1 * 100. < b2) { goto L80; } /* L70: */ } L80: b2 = sqrt(b2 * 1.05); /* Computing 2nd power */ d__1 = b2; a2 = *dmin2 / (d__1 * d__1 + 1.); gap2 = z__[nn - 7] + z__[nn - 9] - sqrt(z__[nn - 11]) * sqrt(z__[ nn - 9]) - a2; if (gap2 > 0. && gap2 > b2 * a2) { /* Computing MAX */ d__1 = s, d__2 = a2 * (1. - a2 * 1.01 * (b2 / gap2) * b2); s = max(d__1,d__2); } else { /* Computing MAX */ d__1 = s, d__2 = a2 * (1. - b2 * 1.01); s = max(d__1,d__2); } } else { s = *dmin2 * .25; *ttype = -11; } } else if (*n0in > *n0 + 2) { /* Case 12, more than two eigenvalues deflated. No information. */ s = 0.; *ttype = -12; } *tau = s; return 0; /* End of DLAZQ4 */ } /* dlazq4_ */ /* Subroutine */ int dorg2r_(integer *m, integer *n, integer *k, doublereal * a, integer *lda, doublereal *tau, doublereal *work, integer *info) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2; doublereal d__1; /* Local variables */ static integer i__, j, l; extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, integer *), dlarf_(char *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, doublereal *), xerbla_(char *, integer *); /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DORG2R generates an m by n real matrix Q with orthonormal columns, which is defined as the first n columns of a product of k elementary reflectors of order m Q = H(1) H(2) . . . H(k) as returned by DGEQRF. Arguments ========= M (input) INTEGER The number of rows of the matrix Q. M >= 0. N (input) INTEGER The number of columns of the matrix Q. M >= N >= 0. K (input) INTEGER The number of elementary reflectors whose product defines the matrix Q. N >= K >= 0. A (input/output) DOUBLE PRECISION array, dimension (LDA,N) On entry, the i-th column must contain the vector which defines the elementary reflector H(i), for i = 1,2,...,k, as returned by DGEQRF in the first k columns of its array argument A. On exit, the m-by-n matrix Q. LDA (input) INTEGER The first dimension of the array A. LDA >= max(1,M). TAU (input) DOUBLE PRECISION array, dimension (K) TAU(i) must contain the scalar factor of the elementary reflector H(i), as returned by DGEQRF. 
WORK (workspace) DOUBLE PRECISION array, dimension (N) INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument has an illegal value ===================================================================== Test the input arguments */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --tau; --work; /* Function Body */ *info = 0; if (*m < 0) { *info = -1; } else if (*n < 0 || *n > *m) { *info = -2; } else if (*k < 0 || *k > *n) { *info = -3; } else if (*lda < max(1,*m)) { *info = -5; } if (*info != 0) { i__1 = -(*info); xerbla_("DORG2R", &i__1); return 0; } /* Quick return if possible */ if (*n <= 0) { return 0; } /* Initialise columns k+1:n to columns of the unit matrix */ i__1 = *n; for (j = *k + 1; j <= i__1; ++j) { i__2 = *m; for (l = 1; l <= i__2; ++l) { a[l + j * a_dim1] = 0.; /* L10: */ } a[j + j * a_dim1] = 1.; /* L20: */ } for (i__ = *k; i__ >= 1; --i__) { /* Apply H(i) to A(i:m,i:n) from the left */ if (i__ < *n) { a[i__ + i__ * a_dim1] = 1.; i__1 = *m - i__ + 1; i__2 = *n - i__; dlarf_("Left", &i__1, &i__2, &a[i__ + i__ * a_dim1], &c__1, &tau[ i__], &a[i__ + (i__ + 1) * a_dim1], lda, &work[1]); } if (i__ < *m) { i__1 = *m - i__; d__1 = -tau[i__]; dscal_(&i__1, &d__1, &a[i__ + 1 + i__ * a_dim1], &c__1); } a[i__ + i__ * a_dim1] = 1. - tau[i__]; /* Set A(1:i-1,i) to zero */ i__1 = i__ - 1; for (l = 1; l <= i__1; ++l) { a[l + i__ * a_dim1] = 0.; /* L30: */ } /* L40: */ } return 0; /* End of DORG2R */ } /* dorg2r_ */ /* Subroutine */ int dorgbr_(char *vect, integer *m, integer *n, integer *k, doublereal *a, integer *lda, doublereal *tau, doublereal *work, integer *lwork, integer *info) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2, i__3; /* Local variables */ static integer i__, j; extern logical lsame_(char *, char *); static integer iinfo; static logical wantq; static integer nb, mn; extern /* Subroutine */ int xerbla_(char *, integer *); extern integer ilaenv_(integer *, char *, char *, integer *, integer *, integer *, integer *, ftnlen, ftnlen); extern /* Subroutine */ int dorglq_(integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, integer *), dorgqr_(integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, integer *); static integer lwkopt; static logical lquery; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DORGBR generates one of the real orthogonal matrices Q or P**T determined by DGEBRD when reducing a real matrix A to bidiagonal form: A = Q * B * P**T. Q and P**T are defined as products of elementary reflectors H(i) or G(i) respectively. If VECT = 'Q', A is assumed to have been an M-by-K matrix, and Q is of order M: if m >= k, Q = H(1) H(2) . . . H(k) and DORGBR returns the first n columns of Q, where m >= n >= k; if m < k, Q = H(1) H(2) . . . H(m-1) and DORGBR returns Q as an M-by-M matrix. If VECT = 'P', A is assumed to have been a K-by-N matrix, and P**T is of order N: if k < n, P**T = G(k) . . . G(2) G(1) and DORGBR returns the first m rows of P**T, where n >= m >= k; if k >= n, P**T = G(n-1) . . . G(2) G(1) and DORGBR returns P**T as an N-by-N matrix. Arguments ========= VECT (input) CHARACTER*1 Specifies whether the matrix Q or the matrix P**T is required, as defined in the transformation applied by DGEBRD: = 'Q': generate Q; = 'P': generate P**T. M (input) INTEGER The number of rows of the matrix Q or P**T to be returned. 
M >= 0. N (input) INTEGER The number of columns of the matrix Q or P**T to be returned. N >= 0. If VECT = 'Q', M >= N >= min(M,K); if VECT = 'P', N >= M >= min(N,K). K (input) INTEGER If VECT = 'Q', the number of columns in the original M-by-K matrix reduced by DGEBRD. If VECT = 'P', the number of rows in the original K-by-N matrix reduced by DGEBRD. K >= 0. A (input/output) DOUBLE PRECISION array, dimension (LDA,N) On entry, the vectors which define the elementary reflectors, as returned by DGEBRD. On exit, the M-by-N matrix Q or P**T. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,M). TAU (input) DOUBLE PRECISION array, dimension (min(M,K)) if VECT = 'Q' (min(N,K)) if VECT = 'P' TAU(i) must contain the scalar factor of the elementary reflector H(i) or G(i), which determines Q or P**T, as returned by DGEBRD in its array argument TAUQ or TAUP. WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) On exit, if INFO = 0, WORK(1) returns the optimal LWORK. LWORK (input) INTEGER The dimension of the array WORK. LWORK >= max(1,min(M,N)). For optimum performance LWORK >= min(M,N)*NB, where NB is the optimal blocksize. If LWORK = -1, then a workspace query is assumed; the routine only calculates the optimal size of the WORK array, returns this value as the first entry of the WORK array, and no error message related to LWORK is issued by XERBLA. INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value ===================================================================== Test the input arguments */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --tau; --work; /* Function Body */ *info = 0; wantq = lsame_(vect, "Q"); mn = min(*m,*n); lquery = *lwork == -1; if (! wantq && ! lsame_(vect, "P")) { *info = -1; } else if (*m < 0) { *info = -2; } else if (*n < 0 || wantq && (*n > *m || *n < min(*m,*k)) || ! wantq && ( *m > *n || *m < min(*n,*k))) { *info = -3; } else if (*k < 0) { *info = -4; } else if (*lda < max(1,*m)) { *info = -6; } else if (*lwork < max(1,mn) && ! 
lquery) { *info = -9; } if (*info == 0) { if (wantq) { nb = ilaenv_(&c__1, "DORGQR", " ", m, n, k, &c_n1, (ftnlen)6, ( ftnlen)1); } else { nb = ilaenv_(&c__1, "DORGLQ", " ", m, n, k, &c_n1, (ftnlen)6, ( ftnlen)1); } lwkopt = max(1,mn) * nb; work[1] = (doublereal) lwkopt; } if (*info != 0) { i__1 = -(*info); xerbla_("DORGBR", &i__1); return 0; } else if (lquery) { return 0; } /* Quick return if possible */ if (*m == 0 || *n == 0) { work[1] = 1.; return 0; } if (wantq) { /* Form Q, determined by a call to DGEBRD to reduce an m-by-k matrix */ if (*m >= *k) { /* If m >= k, assume m >= n >= k */ dorgqr_(m, n, k, &a[a_offset], lda, &tau[1], &work[1], lwork, & iinfo); } else { /* If m < k, assume m = n Shift the vectors which define the elementary reflectors one column to the right, and set the first row and column of Q to those of the unit matrix */ for (j = *m; j >= 2; --j) { a[j * a_dim1 + 1] = 0.; i__1 = *m; for (i__ = j + 1; i__ <= i__1; ++i__) { a[i__ + j * a_dim1] = a[i__ + (j - 1) * a_dim1]; /* L10: */ } /* L20: */ } a[a_dim1 + 1] = 1.; i__1 = *m; for (i__ = 2; i__ <= i__1; ++i__) { a[i__ + a_dim1] = 0.; /* L30: */ } if (*m > 1) { /* Form Q(2:m,2:m) */ i__1 = *m - 1; i__2 = *m - 1; i__3 = *m - 1; dorgqr_(&i__1, &i__2, &i__3, &a[(a_dim1 << 1) + 2], lda, &tau[ 1], &work[1], lwork, &iinfo); } } } else { /* Form P', determined by a call to DGEBRD to reduce a k-by-n matrix */ if (*k < *n) { /* If k < n, assume k <= m <= n */ dorglq_(m, n, k, &a[a_offset], lda, &tau[1], &work[1], lwork, & iinfo); } else { /* If k >= n, assume m = n Shift the vectors which define the elementary reflectors one row downward, and set the first row and column of P' to those of the unit matrix */ a[a_dim1 + 1] = 1.; i__1 = *n; for (i__ = 2; i__ <= i__1; ++i__) { a[i__ + a_dim1] = 0.; /* L40: */ } i__1 = *n; for (j = 2; j <= i__1; ++j) { for (i__ = j - 1; i__ >= 2; --i__) { a[i__ + j * a_dim1] = a[i__ - 1 + j * a_dim1]; /* L50: */ } a[j * a_dim1 + 1] = 0.; /* L60: */ } if (*n > 1) { /* Form P'(2:n,2:n) */ i__1 = *n - 1; i__2 = *n - 1; i__3 = *n - 1; dorglq_(&i__1, &i__2, &i__3, &a[(a_dim1 << 1) + 2], lda, &tau[ 1], &work[1], lwork, &iinfo); } } } work[1] = (doublereal) lwkopt; return 0; /* End of DORGBR */ } /* dorgbr_ */ /* Subroutine */ int dorghr_(integer *n, integer *ilo, integer *ihi, doublereal *a, integer *lda, doublereal *tau, doublereal *work, integer *lwork, integer *info) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2; /* Local variables */ static integer i__, j, iinfo, nb, nh; extern /* Subroutine */ int xerbla_(char *, integer *); extern integer ilaenv_(integer *, char *, char *, integer *, integer *, integer *, integer *, ftnlen, ftnlen); extern /* Subroutine */ int dorgqr_(integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, integer *); static integer lwkopt; static logical lquery; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DORGHR generates a real orthogonal matrix Q which is defined as the product of IHI-ILO elementary reflectors of order N, as returned by DGEHRD: Q = H(ilo) H(ilo+1) . . . H(ihi-1). Arguments ========= N (input) INTEGER The order of the matrix Q. N >= 0. ILO (input) INTEGER IHI (input) INTEGER ILO and IHI must have the same values as in the previous call of DGEHRD. Q is equal to the unit matrix except in the submatrix Q(ilo+1:ihi,ilo+1:ihi). 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0. 
A (input/output) DOUBLE PRECISION array, dimension (LDA,N) On entry, the vectors which define the elementary reflectors, as returned by DGEHRD. On exit, the N-by-N orthogonal matrix Q. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,N). TAU (input) DOUBLE PRECISION array, dimension (N-1) TAU(i) must contain the scalar factor of the elementary reflector H(i), as returned by DGEHRD. WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) On exit, if INFO = 0, WORK(1) returns the optimal LWORK. LWORK (input) INTEGER The dimension of the array WORK. LWORK >= IHI-ILO. For optimum performance LWORK >= (IHI-ILO)*NB, where NB is the optimal blocksize. If LWORK = -1, then a workspace query is assumed; the routine only calculates the optimal size of the WORK array, returns this value as the first entry of the WORK array, and no error message related to LWORK is issued by XERBLA. INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value ===================================================================== Test the input arguments */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --tau; --work; /* Function Body */ *info = 0; nh = *ihi - *ilo; lquery = *lwork == -1; if (*n < 0) { *info = -1; } else if (*ilo < 1 || *ilo > max(1,*n)) { *info = -2; } else if (*ihi < min(*ilo,*n) || *ihi > *n) { *info = -3; } else if (*lda < max(1,*n)) { *info = -5; } else if (*lwork < max(1,nh) && ! lquery) { *info = -8; } if (*info == 0) { nb = ilaenv_(&c__1, "DORGQR", " ", &nh, &nh, &nh, &c_n1, (ftnlen)6, ( ftnlen)1); lwkopt = max(1,nh) * nb; work[1] = (doublereal) lwkopt; } if (*info != 0) { i__1 = -(*info); xerbla_("DORGHR", &i__1); return 0; } else if (lquery) { return 0; } /* Quick return if possible */ if (*n == 0) { work[1] = 1.; return 0; } /* Shift the vectors which define the elementary reflectors one column to the right, and set the first ilo and the last n-ihi rows and columns to those of the unit matrix */ i__1 = *ilo + 1; for (j = *ihi; j >= i__1; --j) { i__2 = j - 1; for (i__ = 1; i__ <= i__2; ++i__) { a[i__ + j * a_dim1] = 0.; /* L10: */ } i__2 = *ihi; for (i__ = j + 1; i__ <= i__2; ++i__) { a[i__ + j * a_dim1] = a[i__ + (j - 1) * a_dim1]; /* L20: */ } i__2 = *n; for (i__ = *ihi + 1; i__ <= i__2; ++i__) { a[i__ + j * a_dim1] = 0.; /* L30: */ } /* L40: */ } i__1 = *ilo; for (j = 1; j <= i__1; ++j) { i__2 = *n; for (i__ = 1; i__ <= i__2; ++i__) { a[i__ + j * a_dim1] = 0.; /* L50: */ } a[j + j * a_dim1] = 1.; /* L60: */ } i__1 = *n; for (j = *ihi + 1; j <= i__1; ++j) { i__2 = *n; for (i__ = 1; i__ <= i__2; ++i__) { a[i__ + j * a_dim1] = 0.; /* L70: */ } a[j + j * a_dim1] = 1.; /* L80: */ } if (nh > 0) { /* Generate Q(ilo+1:ihi,ilo+1:ihi) */ dorgqr_(&nh, &nh, &nh, &a[*ilo + 1 + (*ilo + 1) * a_dim1], lda, &tau[* ilo], &work[1], lwork, &iinfo); } work[1] = (doublereal) lwkopt; return 0; /* End of DORGHR */ } /* dorghr_ */ /* Subroutine */ int dorgl2_(integer *m, integer *n, integer *k, doublereal * a, integer *lda, doublereal *tau, doublereal *work, integer *info) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2; doublereal d__1; /* Local variables */ static integer i__, j, l; extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, integer *), dlarf_(char *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, doublereal *), xerbla_(char *, integer *); /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. 
of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DORGL2 generates an m by n real matrix Q with orthonormal rows, which is defined as the first m rows of a product of k elementary reflectors of order n Q = H(k) . . . H(2) H(1) as returned by DGELQF. Arguments ========= M (input) INTEGER The number of rows of the matrix Q. M >= 0. N (input) INTEGER The number of columns of the matrix Q. N >= M. K (input) INTEGER The number of elementary reflectors whose product defines the matrix Q. M >= K >= 0. A (input/output) DOUBLE PRECISION array, dimension (LDA,N) On entry, the i-th row must contain the vector which defines the elementary reflector H(i), for i = 1,2,...,k, as returned by DGELQF in the first k rows of its array argument A. On exit, the m-by-n matrix Q. LDA (input) INTEGER The first dimension of the array A. LDA >= max(1,M). TAU (input) DOUBLE PRECISION array, dimension (K) TAU(i) must contain the scalar factor of the elementary reflector H(i), as returned by DGELQF. WORK (workspace) DOUBLE PRECISION array, dimension (M) INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument has an illegal value ===================================================================== Test the input arguments */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --tau; --work; /* Function Body */ *info = 0; if (*m < 0) { *info = -1; } else if (*n < *m) { *info = -2; } else if (*k < 0 || *k > *m) { *info = -3; } else if (*lda < max(1,*m)) { *info = -5; } if (*info != 0) { i__1 = -(*info); xerbla_("DORGL2", &i__1); return 0; } /* Quick return if possible */ if (*m <= 0) { return 0; } if (*k < *m) { /* Initialise rows k+1:m to rows of the unit matrix */ i__1 = *n; for (j = 1; j <= i__1; ++j) { i__2 = *m; for (l = *k + 1; l <= i__2; ++l) { a[l + j * a_dim1] = 0.; /* L10: */ } if (j > *k && j <= *m) { a[j + j * a_dim1] = 1.; } /* L20: */ } } for (i__ = *k; i__ >= 1; --i__) { /* Apply H(i) to A(i:m,i:n) from the right */ if (i__ < *n) { if (i__ < *m) { a[i__ + i__ * a_dim1] = 1.; i__1 = *m - i__; i__2 = *n - i__ + 1; dlarf_("Right", &i__1, &i__2, &a[i__ + i__ * a_dim1], lda, & tau[i__], &a[i__ + 1 + i__ * a_dim1], lda, &work[1]); } i__1 = *n - i__; d__1 = -tau[i__]; dscal_(&i__1, &d__1, &a[i__ + (i__ + 1) * a_dim1], lda); } a[i__ + i__ * a_dim1] = 1. 
- tau[i__]; /* Set A(i,1:i-1) to zero */ i__1 = i__ - 1; for (l = 1; l <= i__1; ++l) { a[i__ + l * a_dim1] = 0.; /* L30: */ } /* L40: */ } return 0; /* End of DORGL2 */ } /* dorgl2_ */ /* Subroutine */ int dorglq_(integer *m, integer *n, integer *k, doublereal * a, integer *lda, doublereal *tau, doublereal *work, integer *lwork, integer *info) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2, i__3; /* Local variables */ static integer i__, j, l, nbmin, iinfo; extern /* Subroutine */ int dorgl2_(integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *); static integer ib, nb, ki, kk; extern /* Subroutine */ int dlarfb_(char *, char *, char *, char *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *); static integer nx; extern /* Subroutine */ int dlarft_(char *, char *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *); extern integer ilaenv_(integer *, char *, char *, integer *, integer *, integer *, integer *, ftnlen, ftnlen); static integer ldwork, lwkopt; static logical lquery; static integer iws; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DORGLQ generates an M-by-N real matrix Q with orthonormal rows, which is defined as the first M rows of a product of K elementary reflectors of order N Q = H(k) . . . H(2) H(1) as returned by DGELQF. Arguments ========= M (input) INTEGER The number of rows of the matrix Q. M >= 0. N (input) INTEGER The number of columns of the matrix Q. N >= M. K (input) INTEGER The number of elementary reflectors whose product defines the matrix Q. M >= K >= 0. A (input/output) DOUBLE PRECISION array, dimension (LDA,N) On entry, the i-th row must contain the vector which defines the elementary reflector H(i), for i = 1,2,...,k, as returned by DGELQF in the first k rows of its array argument A. On exit, the M-by-N matrix Q. LDA (input) INTEGER The first dimension of the array A. LDA >= max(1,M). TAU (input) DOUBLE PRECISION array, dimension (K) TAU(i) must contain the scalar factor of the elementary reflector H(i), as returned by DGELQF. WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) On exit, if INFO = 0, WORK(1) returns the optimal LWORK. LWORK (input) INTEGER The dimension of the array WORK. LWORK >= max(1,M). For optimum performance LWORK >= M*NB, where NB is the optimal blocksize. If LWORK = -1, then a workspace query is assumed; the routine only calculates the optimal size of the WORK array, returns this value as the first entry of the WORK array, and no error message related to LWORK is issued by XERBLA. INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument has an illegal value ===================================================================== Test the input arguments */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --tau; --work; /* Function Body */ *info = 0; nb = ilaenv_(&c__1, "DORGLQ", " ", m, n, k, &c_n1, (ftnlen)6, (ftnlen)1); lwkopt = max(1,*m) * nb; work[1] = (doublereal) lwkopt; lquery = *lwork == -1; if (*m < 0) { *info = -1; } else if (*n < *m) { *info = -2; } else if (*k < 0 || *k > *m) { *info = -3; } else if (*lda < max(1,*m)) { *info = -5; } else if (*lwork < max(1,*m) && ! 
lquery) { *info = -8; } if (*info != 0) { i__1 = -(*info); xerbla_("DORGLQ", &i__1); return 0; } else if (lquery) { return 0; } /* Quick return if possible */ if (*m <= 0) { work[1] = 1.; return 0; } nbmin = 2; nx = 0; iws = *m; if (nb > 1 && nb < *k) { /* Determine when to cross over from blocked to unblocked code. Computing MAX */ i__1 = 0, i__2 = ilaenv_(&c__3, "DORGLQ", " ", m, n, k, &c_n1, ( ftnlen)6, (ftnlen)1); nx = max(i__1,i__2); if (nx < *k) { /* Determine if workspace is large enough for blocked code. */ ldwork = *m; iws = ldwork * nb; if (*lwork < iws) { /* Not enough workspace to use optimal NB: reduce NB and determine the minimum value of NB. */ nb = *lwork / ldwork; /* Computing MAX */ i__1 = 2, i__2 = ilaenv_(&c__2, "DORGLQ", " ", m, n, k, &c_n1, (ftnlen)6, (ftnlen)1); nbmin = max(i__1,i__2); } } } if (nb >= nbmin && nb < *k && nx < *k) { /* Use blocked code after the last block. The first kk rows are handled by the block method. */ ki = (*k - nx - 1) / nb * nb; /* Computing MIN */ i__1 = *k, i__2 = ki + nb; kk = min(i__1,i__2); /* Set A(kk+1:m,1:kk) to zero. */ i__1 = kk; for (j = 1; j <= i__1; ++j) { i__2 = *m; for (i__ = kk + 1; i__ <= i__2; ++i__) { a[i__ + j * a_dim1] = 0.; /* L10: */ } /* L20: */ } } else { kk = 0; } /* Use unblocked code for the last or only block. */ if (kk < *m) { i__1 = *m - kk; i__2 = *n - kk; i__3 = *k - kk; dorgl2_(&i__1, &i__2, &i__3, &a[kk + 1 + (kk + 1) * a_dim1], lda, & tau[kk + 1], &work[1], &iinfo); } if (kk > 0) { /* Use blocked code */ i__1 = -nb; for (i__ = ki + 1; i__1 < 0 ? i__ >= 1 : i__ <= 1; i__ += i__1) { /* Computing MIN */ i__2 = nb, i__3 = *k - i__ + 1; ib = min(i__2,i__3); if (i__ + ib <= *m) { /* Form the triangular factor of the block reflector H = H(i) H(i+1) . . . H(i+ib-1) */ i__2 = *n - i__ + 1; dlarft_("Forward", "Rowwise", &i__2, &ib, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[1], &ldwork); /* Apply H' to A(i+ib:m,i:n) from the right */ i__2 = *m - i__ - ib + 1; i__3 = *n - i__ + 1; dlarfb_("Right", "Transpose", "Forward", "Rowwise", &i__2, & i__3, &ib, &a[i__ + i__ * a_dim1], lda, &work[1], & ldwork, &a[i__ + ib + i__ * a_dim1], lda, &work[ib + 1], &ldwork); } /* Apply H' to columns i:n of current block */ i__2 = *n - i__ + 1; dorgl2_(&ib, &i__2, &ib, &a[i__ + i__ * a_dim1], lda, &tau[i__], & work[1], &iinfo); /* Set columns 1:i-1 of current block to zero */ i__2 = i__ - 1; for (j = 1; j <= i__2; ++j) { i__3 = i__ + ib - 1; for (l = i__; l <= i__3; ++l) { a[l + j * a_dim1] = 0.; /* L30: */ } /* L40: */ } /* L50: */ } } work[1] = (doublereal) iws; return 0; /* End of DORGLQ */ } /* dorglq_ */ /* Subroutine */ int dorgqr_(integer *m, integer *n, integer *k, doublereal * a, integer *lda, doublereal *tau, doublereal *work, integer *lwork, integer *info) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2, i__3; /* Local variables */ static integer i__, j, l, nbmin, iinfo; extern /* Subroutine */ int dorg2r_(integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *); static integer ib, nb, ki, kk; extern /* Subroutine */ int dlarfb_(char *, char *, char *, char *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *); static integer nx; extern /* Subroutine */ int dlarft_(char *, char *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *); extern integer ilaenv_(integer *, char *, char *, integer *, integer *, integer *, 
integer *, ftnlen, ftnlen); static integer ldwork, lwkopt; static logical lquery; static integer iws; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DORGQR generates an M-by-N real matrix Q with orthonormal columns, which is defined as the first N columns of a product of K elementary reflectors of order M Q = H(1) H(2) . . . H(k) as returned by DGEQRF. Arguments ========= M (input) INTEGER The number of rows of the matrix Q. M >= 0. N (input) INTEGER The number of columns of the matrix Q. M >= N >= 0. K (input) INTEGER The number of elementary reflectors whose product defines the matrix Q. N >= K >= 0. A (input/output) DOUBLE PRECISION array, dimension (LDA,N) On entry, the i-th column must contain the vector which defines the elementary reflector H(i), for i = 1,2,...,k, as returned by DGEQRF in the first k columns of its array argument A. On exit, the M-by-N matrix Q. LDA (input) INTEGER The first dimension of the array A. LDA >= max(1,M). TAU (input) DOUBLE PRECISION array, dimension (K) TAU(i) must contain the scalar factor of the elementary reflector H(i), as returned by DGEQRF. WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) On exit, if INFO = 0, WORK(1) returns the optimal LWORK. LWORK (input) INTEGER The dimension of the array WORK. LWORK >= max(1,N). For optimum performance LWORK >= N*NB, where NB is the optimal blocksize. If LWORK = -1, then a workspace query is assumed; the routine only calculates the optimal size of the WORK array, returns this value as the first entry of the WORK array, and no error message related to LWORK is issued by XERBLA. INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument has an illegal value ===================================================================== Test the input arguments */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --tau; --work; /* Function Body */ *info = 0; nb = ilaenv_(&c__1, "DORGQR", " ", m, n, k, &c_n1, (ftnlen)6, (ftnlen)1); lwkopt = max(1,*n) * nb; work[1] = (doublereal) lwkopt; lquery = *lwork == -1; if (*m < 0) { *info = -1; } else if (*n < 0 || *n > *m) { *info = -2; } else if (*k < 0 || *k > *n) { *info = -3; } else if (*lda < max(1,*m)) { *info = -5; } else if (*lwork < max(1,*n) && ! lquery) { *info = -8; } if (*info != 0) { i__1 = -(*info); xerbla_("DORGQR", &i__1); return 0; } else if (lquery) { return 0; } /* Quick return if possible */ if (*n <= 0) { work[1] = 1.; return 0; } nbmin = 2; nx = 0; iws = *n; if (nb > 1 && nb < *k) { /* Determine when to cross over from blocked to unblocked code. Computing MAX */ i__1 = 0, i__2 = ilaenv_(&c__3, "DORGQR", " ", m, n, k, &c_n1, ( ftnlen)6, (ftnlen)1); nx = max(i__1,i__2); if (nx < *k) { /* Determine if workspace is large enough for blocked code. */ ldwork = *n; iws = ldwork * nb; if (*lwork < iws) { /* Not enough workspace to use optimal NB: reduce NB and determine the minimum value of NB. */ nb = *lwork / ldwork; /* Computing MAX */ i__1 = 2, i__2 = ilaenv_(&c__2, "DORGQR", " ", m, n, k, &c_n1, (ftnlen)6, (ftnlen)1); nbmin = max(i__1,i__2); } } } if (nb >= nbmin && nb < *k && nx < *k) { /* Use blocked code after the last block. The first kk columns are handled by the block method. */ ki = (*k - nx - 1) / nb * nb; /* Computing MIN */ i__1 = *k, i__2 = ki + nb; kk = min(i__1,i__2); /* Set A(1:kk,kk+1:n) to zero. 
*/ i__1 = *n; for (j = kk + 1; j <= i__1; ++j) { i__2 = kk; for (i__ = 1; i__ <= i__2; ++i__) { a[i__ + j * a_dim1] = 0.; /* L10: */ } /* L20: */ } } else { kk = 0; } /* Use unblocked code for the last or only block. */ if (kk < *n) { i__1 = *m - kk; i__2 = *n - kk; i__3 = *k - kk; dorg2r_(&i__1, &i__2, &i__3, &a[kk + 1 + (kk + 1) * a_dim1], lda, & tau[kk + 1], &work[1], &iinfo); } if (kk > 0) { /* Use blocked code */ i__1 = -nb; for (i__ = ki + 1; i__1 < 0 ? i__ >= 1 : i__ <= 1; i__ += i__1) { /* Computing MIN */ i__2 = nb, i__3 = *k - i__ + 1; ib = min(i__2,i__3); if (i__ + ib <= *n) { /* Form the triangular factor of the block reflector H = H(i) H(i+1) . . . H(i+ib-1) */ i__2 = *m - i__ + 1; dlarft_("Forward", "Columnwise", &i__2, &ib, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[1], &ldwork); /* Apply H to A(i:m,i+ib:n) from the left */ i__2 = *m - i__ + 1; i__3 = *n - i__ - ib + 1; dlarfb_("Left", "No transpose", "Forward", "Columnwise", & i__2, &i__3, &ib, &a[i__ + i__ * a_dim1], lda, &work[ 1], &ldwork, &a[i__ + (i__ + ib) * a_dim1], lda, & work[ib + 1], &ldwork); } /* Apply H to rows i:m of current block */ i__2 = *m - i__ + 1; dorg2r_(&i__2, &ib, &ib, &a[i__ + i__ * a_dim1], lda, &tau[i__], & work[1], &iinfo); /* Set rows 1:i-1 of current block to zero */ i__2 = i__ + ib - 1; for (j = i__; j <= i__2; ++j) { i__3 = i__ - 1; for (l = 1; l <= i__3; ++l) { a[l + j * a_dim1] = 0.; /* L30: */ } /* L40: */ } /* L50: */ } } work[1] = (doublereal) iws; return 0; /* End of DORGQR */ } /* dorgqr_ */ /* Subroutine */ int dorm2l_(char *side, char *trans, integer *m, integer *n, integer *k, doublereal *a, integer *lda, doublereal *tau, doublereal * c__, integer *ldc, doublereal *work, integer *info) { /* System generated locals */ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2; /* Local variables */ static logical left; static integer i__; extern /* Subroutine */ int dlarf_(char *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, doublereal *); extern logical lsame_(char *, char *); static integer i1, i2, i3, mi, ni, nq; extern /* Subroutine */ int xerbla_(char *, integer *); static logical notran; static doublereal aii; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DORM2L overwrites the general real m by n matrix C with Q * C if SIDE = 'L' and TRANS = 'N', or Q'* C if SIDE = 'L' and TRANS = 'T', or C * Q if SIDE = 'R' and TRANS = 'N', or C * Q' if SIDE = 'R' and TRANS = 'T', where Q is a real orthogonal matrix defined as the product of k elementary reflectors Q = H(k) . . . H(2) H(1) as returned by DGEQLF. Q is of order m if SIDE = 'L' and of order n if SIDE = 'R'. Arguments ========= SIDE (input) CHARACTER*1 = 'L': apply Q or Q' from the Left = 'R': apply Q or Q' from the Right TRANS (input) CHARACTER*1 = 'N': apply Q (No transpose) = 'T': apply Q' (Transpose) M (input) INTEGER The number of rows of the matrix C. M >= 0. N (input) INTEGER The number of columns of the matrix C. N >= 0. K (input) INTEGER The number of elementary reflectors whose product defines the matrix Q. If SIDE = 'L', M >= K >= 0; if SIDE = 'R', N >= K >= 0. A (input) DOUBLE PRECISION array, dimension (LDA,K) The i-th column must contain the vector which defines the elementary reflector H(i), for i = 1,2,...,k, as returned by DGEQLF in the last k columns of its array argument A. A is modified by the routine but restored on exit. LDA (input) INTEGER The leading dimension of the array A. 
If SIDE = 'L', LDA >= max(1,M); if SIDE = 'R', LDA >= max(1,N). TAU (input) DOUBLE PRECISION array, dimension (K) TAU(i) must contain the scalar factor of the elementary reflector H(i), as returned by DGEQLF. C (input/output) DOUBLE PRECISION array, dimension (LDC,N) On entry, the m by n matrix C. On exit, C is overwritten by Q*C or Q'*C or C*Q' or C*Q. LDC (input) INTEGER The leading dimension of the array C. LDC >= max(1,M). WORK (workspace) DOUBLE PRECISION array, dimension (N) if SIDE = 'L', (M) if SIDE = 'R' INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value ===================================================================== Test the input arguments */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --tau; c_dim1 = *ldc; c_offset = 1 + c_dim1 * 1; c__ -= c_offset; --work; /* Function Body */ *info = 0; left = lsame_(side, "L"); notran = lsame_(trans, "N"); /* NQ is the order of Q */ if (left) { nq = *m; } else { nq = *n; } if (! left && ! lsame_(side, "R")) { *info = -1; } else if (! notran && ! lsame_(trans, "T")) { *info = -2; } else if (*m < 0) { *info = -3; } else if (*n < 0) { *info = -4; } else if (*k < 0 || *k > nq) { *info = -5; } else if (*lda < max(1,nq)) { *info = -7; } else if (*ldc < max(1,*m)) { *info = -10; } if (*info != 0) { i__1 = -(*info); xerbla_("DORM2L", &i__1); return 0; } /* Quick return if possible */ if (*m == 0 || *n == 0 || *k == 0) { return 0; } if (left && notran || ! left && ! notran) { i1 = 1; i2 = *k; i3 = 1; } else { i1 = *k; i2 = 1; i3 = -1; } if (left) { ni = *n; } else { mi = *m; } i__1 = i2; i__2 = i3; for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) { if (left) { /* H(i) is applied to C(1:m-k+i,1:n) */ mi = *m - *k + i__; } else { /* H(i) is applied to C(1:m,1:n-k+i) */ ni = *n - *k + i__; } /* Apply H(i) */ aii = a[nq - *k + i__ + i__ * a_dim1]; a[nq - *k + i__ + i__ * a_dim1] = 1.; dlarf_(side, &mi, &ni, &a[i__ * a_dim1 + 1], &c__1, &tau[i__], &c__[ c_offset], ldc, &work[1]); a[nq - *k + i__ + i__ * a_dim1] = aii; /* L10: */ } return 0; /* End of DORM2L */ } /* dorm2l_ */ /* Subroutine */ int dorm2r_(char *side, char *trans, integer *m, integer *n, integer *k, doublereal *a, integer *lda, doublereal *tau, doublereal * c__, integer *ldc, doublereal *work, integer *info) { /* System generated locals */ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2; /* Local variables */ static logical left; static integer i__; extern /* Subroutine */ int dlarf_(char *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, doublereal *); extern logical lsame_(char *, char *); static integer i1, i2, i3, ic, jc, mi, ni, nq; extern /* Subroutine */ int xerbla_(char *, integer *); static logical notran; static doublereal aii; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DORM2R overwrites the general real m by n matrix C with Q * C if SIDE = 'L' and TRANS = 'N', or Q'* C if SIDE = 'L' and TRANS = 'T', or C * Q if SIDE = 'R' and TRANS = 'N', or C * Q' if SIDE = 'R' and TRANS = 'T', where Q is a real orthogonal matrix defined as the product of k elementary reflectors Q = H(1) H(2) . . . H(k) as returned by DGEQRF. Q is of order m if SIDE = 'L' and of order n if SIDE = 'R'. 
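    Example (illustrative sketch only, not part of the LAPACK source): after
    a QR factorization computed by DGEQRF, Q' can be applied from the left
    to an m-by-nrhs right-hand-side matrix C with

        integer info;
        dorm2r_("Left", "Transpose", &m, &nrhs, &k, a, &lda, tau,
                c, &ldc, work, &info);

    where A and TAU hold the DGEQRF output, WORK has length at least NRHS,
    and the variable names are purely illustrative. The blocked routine
    DORMQR is normally preferred for large problems.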
Arguments ========= SIDE (input) CHARACTER*1 = 'L': apply Q or Q' from the Left = 'R': apply Q or Q' from the Right TRANS (input) CHARACTER*1 = 'N': apply Q (No transpose) = 'T': apply Q' (Transpose) M (input) INTEGER The number of rows of the matrix C. M >= 0. N (input) INTEGER The number of columns of the matrix C. N >= 0. K (input) INTEGER The number of elementary reflectors whose product defines the matrix Q. If SIDE = 'L', M >= K >= 0; if SIDE = 'R', N >= K >= 0. A (input) DOUBLE PRECISION array, dimension (LDA,K) The i-th column must contain the vector which defines the elementary reflector H(i), for i = 1,2,...,k, as returned by DGEQRF in the first k columns of its array argument A. A is modified by the routine but restored on exit. LDA (input) INTEGER The leading dimension of the array A. If SIDE = 'L', LDA >= max(1,M); if SIDE = 'R', LDA >= max(1,N). TAU (input) DOUBLE PRECISION array, dimension (K) TAU(i) must contain the scalar factor of the elementary reflector H(i), as returned by DGEQRF. C (input/output) DOUBLE PRECISION array, dimension (LDC,N) On entry, the m by n matrix C. On exit, C is overwritten by Q*C or Q'*C or C*Q' or C*Q. LDC (input) INTEGER The leading dimension of the array C. LDC >= max(1,M). WORK (workspace) DOUBLE PRECISION array, dimension (N) if SIDE = 'L', (M) if SIDE = 'R' INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value ===================================================================== Test the input arguments */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --tau; c_dim1 = *ldc; c_offset = 1 + c_dim1 * 1; c__ -= c_offset; --work; /* Function Body */ *info = 0; left = lsame_(side, "L"); notran = lsame_(trans, "N"); /* NQ is the order of Q */ if (left) { nq = *m; } else { nq = *n; } if (! left && ! lsame_(side, "R")) { *info = -1; } else if (! notran && ! lsame_(trans, "T")) { *info = -2; } else if (*m < 0) { *info = -3; } else if (*n < 0) { *info = -4; } else if (*k < 0 || *k > nq) { *info = -5; } else if (*lda < max(1,nq)) { *info = -7; } else if (*ldc < max(1,*m)) { *info = -10; } if (*info != 0) { i__1 = -(*info); xerbla_("DORM2R", &i__1); return 0; } /* Quick return if possible */ if (*m == 0 || *n == 0 || *k == 0) { return 0; } if (left && ! notran || ! left && notran) { i1 = 1; i2 = *k; i3 = 1; } else { i1 = *k; i2 = 1; i3 = -1; } if (left) { ni = *n; jc = 1; } else { mi = *m; ic = 1; } i__1 = i2; i__2 = i3; for (i__ = i1; i__2 < 0 ? 
i__ >= i__1 : i__ <= i__1; i__ += i__2) { if (left) { /* H(i) is applied to C(i:m,1:n) */ mi = *m - i__ + 1; ic = i__; } else { /* H(i) is applied to C(1:m,i:n) */ ni = *n - i__ + 1; jc = i__; } /* Apply H(i) */ aii = a[i__ + i__ * a_dim1]; a[i__ + i__ * a_dim1] = 1.; dlarf_(side, &mi, &ni, &a[i__ + i__ * a_dim1], &c__1, &tau[i__], &c__[ ic + jc * c_dim1], ldc, &work[1]); a[i__ + i__ * a_dim1] = aii; /* L10: */ } return 0; /* End of DORM2R */ } /* dorm2r_ */ /* Subroutine */ int dormbr_(char *vect, char *side, char *trans, integer *m, integer *n, integer *k, doublereal *a, integer *lda, doublereal *tau, doublereal *c__, integer *ldc, doublereal *work, integer *lwork, integer *info) { /* System generated locals */ address a__1[2]; integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2]; char ch__1[2]; /* Builtin functions */ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen); /* Local variables */ static logical left; extern logical lsame_(char *, char *); static integer iinfo, i1, i2, nb, mi, ni, nq, nw; extern /* Subroutine */ int xerbla_(char *, integer *); extern integer ilaenv_(integer *, char *, char *, integer *, integer *, integer *, integer *, ftnlen, ftnlen); extern /* Subroutine */ int dormlq_(char *, char *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, integer *); static logical notran; extern /* Subroutine */ int dormqr_(char *, char *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, integer *); static logical applyq; static char transt[1]; static integer lwkopt; static logical lquery; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= If VECT = 'Q', DORMBR overwrites the general real M-by-N matrix C with SIDE = 'L' SIDE = 'R' TRANS = 'N': Q * C C * Q TRANS = 'T': Q**T * C C * Q**T If VECT = 'P', DORMBR overwrites the general real M-by-N matrix C with SIDE = 'L' SIDE = 'R' TRANS = 'N': P * C C * P TRANS = 'T': P**T * C C * P**T Here Q and P**T are the orthogonal matrices determined by DGEBRD when reducing a real matrix A to bidiagonal form: A = Q * B * P**T. Q and P**T are defined as products of elementary reflectors H(i) and G(i) respectively. Let nq = m if SIDE = 'L' and nq = n if SIDE = 'R'. Thus nq is the order of the orthogonal matrix Q or P**T that is applied. If VECT = 'Q', A is assumed to have been an NQ-by-K matrix: if nq >= k, Q = H(1) H(2) . . . H(k); if nq < k, Q = H(1) H(2) . . . H(nq-1). If VECT = 'P', A is assumed to have been a K-by-NQ matrix: if k < nq, P = G(1) G(2) . . . G(k); if k >= nq, P = G(1) G(2) . . . G(nq-1). Arguments ========= VECT (input) CHARACTER*1 = 'Q': apply Q or Q**T; = 'P': apply P or P**T. SIDE (input) CHARACTER*1 = 'L': apply Q, Q**T, P or P**T from the Left; = 'R': apply Q, Q**T, P or P**T from the Right. TRANS (input) CHARACTER*1 = 'N': No transpose, apply Q or P; = 'T': Transpose, apply Q**T or P**T. M (input) INTEGER The number of rows of the matrix C. M >= 0. N (input) INTEGER The number of columns of the matrix C. N >= 0. K (input) INTEGER If VECT = 'Q', the number of columns in the original matrix reduced by DGEBRD. If VECT = 'P', the number of rows in the original matrix reduced by DGEBRD. K >= 0. 
A (input) DOUBLE PRECISION array, dimension (LDA,min(nq,K)) if VECT = 'Q' (LDA,nq) if VECT = 'P' The vectors which define the elementary reflectors H(i) and G(i), whose products determine the matrices Q and P, as returned by DGEBRD. LDA (input) INTEGER The leading dimension of the array A. If VECT = 'Q', LDA >= max(1,nq); if VECT = 'P', LDA >= max(1,min(nq,K)). TAU (input) DOUBLE PRECISION array, dimension (min(nq,K)) TAU(i) must contain the scalar factor of the elementary reflector H(i) or G(i) which determines Q or P, as returned by DGEBRD in the array argument TAUQ or TAUP. C (input/output) DOUBLE PRECISION array, dimension (LDC,N) On entry, the M-by-N matrix C. On exit, C is overwritten by Q*C or Q**T*C or C*Q**T or C*Q or P*C or P**T*C or C*P or C*P**T. LDC (input) INTEGER The leading dimension of the array C. LDC >= max(1,M). WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) On exit, if INFO = 0, WORK(1) returns the optimal LWORK. LWORK (input) INTEGER The dimension of the array WORK. If SIDE = 'L', LWORK >= max(1,N); if SIDE = 'R', LWORK >= max(1,M). For optimum performance LWORK >= N*NB if SIDE = 'L', and LWORK >= M*NB if SIDE = 'R', where NB is the optimal blocksize. If LWORK = -1, then a workspace query is assumed; the routine only calculates the optimal size of the WORK array, returns this value as the first entry of the WORK array, and no error message related to LWORK is issued by XERBLA. INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value ===================================================================== Test the input arguments */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --tau; c_dim1 = *ldc; c_offset = 1 + c_dim1 * 1; c__ -= c_offset; --work; /* Function Body */ *info = 0; applyq = lsame_(vect, "Q"); left = lsame_(side, "L"); notran = lsame_(trans, "N"); lquery = *lwork == -1; /* NQ is the order of Q or P and NW is the minimum dimension of WORK */ if (left) { nq = *m; nw = *n; } else { nq = *n; nw = *m; } if (! applyq && ! lsame_(vect, "P")) { *info = -1; } else if (! left && ! lsame_(side, "R")) { *info = -2; } else if (! notran && ! lsame_(trans, "T")) { *info = -3; } else if (*m < 0) { *info = -4; } else if (*n < 0) { *info = -5; } else if (*k < 0) { *info = -6; } else /* if(complicated condition) */ { /* Computing MAX */ i__1 = 1, i__2 = min(nq,*k); if (applyq && *lda < max(1,nq) || ! applyq && *lda < max(i__1,i__2)) { *info = -8; } else if (*ldc < max(1,*m)) { *info = -11; } else if (*lwork < max(1,nw) && ! 
lquery) { *info = -13; } } if (*info == 0) { if (applyq) { if (left) { /* Writing concatenation */ i__3[0] = 1, a__1[0] = side; i__3[1] = 1, a__1[1] = trans; s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2); i__1 = *m - 1; i__2 = *m - 1; nb = ilaenv_(&c__1, "DORMQR", ch__1, &i__1, n, &i__2, &c_n1, ( ftnlen)6, (ftnlen)2); } else { /* Writing concatenation */ i__3[0] = 1, a__1[0] = side; i__3[1] = 1, a__1[1] = trans; s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2); i__1 = *n - 1; i__2 = *n - 1; nb = ilaenv_(&c__1, "DORMQR", ch__1, m, &i__1, &i__2, &c_n1, ( ftnlen)6, (ftnlen)2); } } else { if (left) { /* Writing concatenation */ i__3[0] = 1, a__1[0] = side; i__3[1] = 1, a__1[1] = trans; s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2); i__1 = *m - 1; i__2 = *m - 1; nb = ilaenv_(&c__1, "DORMLQ", ch__1, &i__1, n, &i__2, &c_n1, ( ftnlen)6, (ftnlen)2); } else { /* Writing concatenation */ i__3[0] = 1, a__1[0] = side; i__3[1] = 1, a__1[1] = trans; s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2); i__1 = *n - 1; i__2 = *n - 1; nb = ilaenv_(&c__1, "DORMLQ", ch__1, m, &i__1, &i__2, &c_n1, ( ftnlen)6, (ftnlen)2); } } lwkopt = max(1,nw) * nb; work[1] = (doublereal) lwkopt; } if (*info != 0) { i__1 = -(*info); xerbla_("DORMBR", &i__1); return 0; } else if (lquery) { return 0; } /* Quick return if possible */ work[1] = 1.; if (*m == 0 || *n == 0) { return 0; } if (applyq) { /* Apply Q */ if (nq >= *k) { /* Q was determined by a call to DGEBRD with nq >= k */ dormqr_(side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[ c_offset], ldc, &work[1], lwork, &iinfo); } else if (nq > 1) { /* Q was determined by a call to DGEBRD with nq < k */ if (left) { mi = *m - 1; ni = *n; i1 = 2; i2 = 1; } else { mi = *m; ni = *n - 1; i1 = 1; i2 = 2; } i__1 = nq - 1; dormqr_(side, trans, &mi, &ni, &i__1, &a[a_dim1 + 2], lda, &tau[1] , &c__[i1 + i2 * c_dim1], ldc, &work[1], lwork, &iinfo); } } else { /* Apply P */ if (notran) { *(unsigned char *)transt = 'T'; } else { *(unsigned char *)transt = 'N'; } if (nq > *k) { /* P was determined by a call to DGEBRD with nq > k */ dormlq_(side, transt, m, n, k, &a[a_offset], lda, &tau[1], &c__[ c_offset], ldc, &work[1], lwork, &iinfo); } else if (nq > 1) { /* P was determined by a call to DGEBRD with nq <= k */ if (left) { mi = *m - 1; ni = *n; i1 = 2; i2 = 1; } else { mi = *m; ni = *n - 1; i1 = 1; i2 = 2; } i__1 = nq - 1; dormlq_(side, transt, &mi, &ni, &i__1, &a[(a_dim1 << 1) + 1], lda, &tau[1], &c__[i1 + i2 * c_dim1], ldc, &work[1], lwork, & iinfo); } } work[1] = (doublereal) lwkopt; return 0; /* End of DORMBR */ } /* dormbr_ */ /* Subroutine */ int dorml2_(char *side, char *trans, integer *m, integer *n, integer *k, doublereal *a, integer *lda, doublereal *tau, doublereal * c__, integer *ldc, doublereal *work, integer *info) { /* System generated locals */ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2; /* Local variables */ static logical left; static integer i__; extern /* Subroutine */ int dlarf_(char *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, doublereal *); extern logical lsame_(char *, char *); static integer i1, i2, i3, ic, jc, mi, ni, nq; extern /* Subroutine */ int xerbla_(char *, integer *); static logical notran; static doublereal aii; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. 
November 2006 Purpose ======= DORML2 overwrites the general real m by n matrix C with Q * C if SIDE = 'L' and TRANS = 'N', or Q'* C if SIDE = 'L' and TRANS = 'T', or C * Q if SIDE = 'R' and TRANS = 'N', or C * Q' if SIDE = 'R' and TRANS = 'T', where Q is a real orthogonal matrix defined as the product of k elementary reflectors Q = H(k) . . . H(2) H(1) as returned by DGELQF. Q is of order m if SIDE = 'L' and of order n if SIDE = 'R'. Arguments ========= SIDE (input) CHARACTER*1 = 'L': apply Q or Q' from the Left = 'R': apply Q or Q' from the Right TRANS (input) CHARACTER*1 = 'N': apply Q (No transpose) = 'T': apply Q' (Transpose) M (input) INTEGER The number of rows of the matrix C. M >= 0. N (input) INTEGER The number of columns of the matrix C. N >= 0. K (input) INTEGER The number of elementary reflectors whose product defines the matrix Q. If SIDE = 'L', M >= K >= 0; if SIDE = 'R', N >= K >= 0. A (input) DOUBLE PRECISION array, dimension (LDA,M) if SIDE = 'L', (LDA,N) if SIDE = 'R' The i-th row must contain the vector which defines the elementary reflector H(i), for i = 1,2,...,k, as returned by DGELQF in the first k rows of its array argument A. A is modified by the routine but restored on exit. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,K). TAU (input) DOUBLE PRECISION array, dimension (K) TAU(i) must contain the scalar factor of the elementary reflector H(i), as returned by DGELQF. C (input/output) DOUBLE PRECISION array, dimension (LDC,N) On entry, the m by n matrix C. On exit, C is overwritten by Q*C or Q'*C or C*Q' or C*Q. LDC (input) INTEGER The leading dimension of the array C. LDC >= max(1,M). WORK (workspace) DOUBLE PRECISION array, dimension (N) if SIDE = 'L', (M) if SIDE = 'R' INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value ===================================================================== Test the input arguments */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --tau; c_dim1 = *ldc; c_offset = 1 + c_dim1 * 1; c__ -= c_offset; --work; /* Function Body */ *info = 0; left = lsame_(side, "L"); notran = lsame_(trans, "N"); /* NQ is the order of Q */ if (left) { nq = *m; } else { nq = *n; } if (! left && ! lsame_(side, "R")) { *info = -1; } else if (! notran && ! lsame_(trans, "T")) { *info = -2; } else if (*m < 0) { *info = -3; } else if (*n < 0) { *info = -4; } else if (*k < 0 || *k > nq) { *info = -5; } else if (*lda < max(1,*k)) { *info = -7; } else if (*ldc < max(1,*m)) { *info = -10; } if (*info != 0) { i__1 = -(*info); xerbla_("DORML2", &i__1); return 0; } /* Quick return if possible */ if (*m == 0 || *n == 0 || *k == 0) { return 0; } if (left && notran || ! left && ! notran) { i1 = 1; i2 = *k; i3 = 1; } else { i1 = *k; i2 = 1; i3 = -1; } if (left) { ni = *n; jc = 1; } else { mi = *m; ic = 1; } i__1 = i2; i__2 = i3; for (i__ = i1; i__2 < 0 ? 
i__ >= i__1 : i__ <= i__1; i__ += i__2) { if (left) { /* H(i) is applied to C(i:m,1:n) */ mi = *m - i__ + 1; ic = i__; } else { /* H(i) is applied to C(1:m,i:n) */ ni = *n - i__ + 1; jc = i__; } /* Apply H(i) */ aii = a[i__ + i__ * a_dim1]; a[i__ + i__ * a_dim1] = 1.; dlarf_(side, &mi, &ni, &a[i__ + i__ * a_dim1], lda, &tau[i__], &c__[ ic + jc * c_dim1], ldc, &work[1]); a[i__ + i__ * a_dim1] = aii; /* L10: */ } return 0; /* End of DORML2 */ } /* dorml2_ */ /* Subroutine */ int dormlq_(char *side, char *trans, integer *m, integer *n, integer *k, doublereal *a, integer *lda, doublereal *tau, doublereal * c__, integer *ldc, doublereal *work, integer *lwork, integer *info) { /* System generated locals */ address a__1[2]; integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2], i__4, i__5; char ch__1[2]; /* Builtin functions */ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen); /* Local variables */ static logical left; static integer i__; static doublereal t[4160] /* was [65][64] */; extern logical lsame_(char *, char *); static integer nbmin, iinfo, i1, i2, i3; extern /* Subroutine */ int dorml2_(char *, char *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *); static integer ib, ic, jc, nb, mi, ni; extern /* Subroutine */ int dlarfb_(char *, char *, char *, char *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *); static integer nq, nw; extern /* Subroutine */ int dlarft_(char *, char *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *); extern integer ilaenv_(integer *, char *, char *, integer *, integer *, integer *, integer *, ftnlen, ftnlen); static logical notran; static integer ldwork; static char transt[1]; static integer lwkopt; static logical lquery; static integer iws; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DORMLQ overwrites the general real M-by-N matrix C with SIDE = 'L' SIDE = 'R' TRANS = 'N': Q * C C * Q TRANS = 'T': Q**T * C C * Q**T where Q is a real orthogonal matrix defined as the product of k elementary reflectors Q = H(k) . . . H(2) H(1) as returned by DGELQF. Q is of order M if SIDE = 'L' and of order N if SIDE = 'R'. Arguments ========= SIDE (input) CHARACTER*1 = 'L': apply Q or Q**T from the Left; = 'R': apply Q or Q**T from the Right. TRANS (input) CHARACTER*1 = 'N': No transpose, apply Q; = 'T': Transpose, apply Q**T. M (input) INTEGER The number of rows of the matrix C. M >= 0. N (input) INTEGER The number of columns of the matrix C. N >= 0. K (input) INTEGER The number of elementary reflectors whose product defines the matrix Q. If SIDE = 'L', M >= K >= 0; if SIDE = 'R', N >= K >= 0. A (input) DOUBLE PRECISION array, dimension (LDA,M) if SIDE = 'L', (LDA,N) if SIDE = 'R' The i-th row must contain the vector which defines the elementary reflector H(i), for i = 1,2,...,k, as returned by DGELQF in the first k rows of its array argument A. A is modified by the routine but restored on exit. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,K). TAU (input) DOUBLE PRECISION array, dimension (K) TAU(i) must contain the scalar factor of the elementary reflector H(i), as returned by DGELQF. C (input/output) DOUBLE PRECISION array, dimension (LDC,N) On entry, the M-by-N matrix C. 
On exit, C is overwritten by Q*C or Q**T*C or C*Q**T or C*Q. LDC (input) INTEGER The leading dimension of the array C. LDC >= max(1,M). WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) On exit, if INFO = 0, WORK(1) returns the optimal LWORK. LWORK (input) INTEGER The dimension of the array WORK. If SIDE = 'L', LWORK >= max(1,N); if SIDE = 'R', LWORK >= max(1,M). For optimum performance LWORK >= N*NB if SIDE = 'L', and LWORK >= M*NB if SIDE = 'R', where NB is the optimal blocksize. If LWORK = -1, then a workspace query is assumed; the routine only calculates the optimal size of the WORK array, returns this value as the first entry of the WORK array, and no error message related to LWORK is issued by XERBLA. INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value ===================================================================== Test the input arguments */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --tau; c_dim1 = *ldc; c_offset = 1 + c_dim1 * 1; c__ -= c_offset; --work; /* Function Body */ *info = 0; left = lsame_(side, "L"); notran = lsame_(trans, "N"); lquery = *lwork == -1; /* NQ is the order of Q and NW is the minimum dimension of WORK */ if (left) { nq = *m; nw = *n; } else { nq = *n; nw = *m; } if (! left && ! lsame_(side, "R")) { *info = -1; } else if (! notran && ! lsame_(trans, "T")) { *info = -2; } else if (*m < 0) { *info = -3; } else if (*n < 0) { *info = -4; } else if (*k < 0 || *k > nq) { *info = -5; } else if (*lda < max(1,*k)) { *info = -7; } else if (*ldc < max(1,*m)) { *info = -10; } else if (*lwork < max(1,nw) && ! lquery) { *info = -12; } if (*info == 0) { /* Determine the block size. NB may be at most NBMAX, where NBMAX is used to define the local array T. Computing MIN Writing concatenation */ i__3[0] = 1, a__1[0] = side; i__3[1] = 1, a__1[1] = trans; s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2); i__1 = 64, i__2 = ilaenv_(&c__1, "DORMLQ", ch__1, m, n, k, &c_n1, ( ftnlen)6, (ftnlen)2); nb = min(i__1,i__2); lwkopt = max(1,nw) * nb; work[1] = (doublereal) lwkopt; } if (*info != 0) { i__1 = -(*info); xerbla_("DORMLQ", &i__1); return 0; } else if (lquery) { return 0; } /* Quick return if possible */ if (*m == 0 || *n == 0 || *k == 0) { work[1] = 1.; return 0; } nbmin = 2; ldwork = nw; if (nb > 1 && nb < *k) { iws = nw * nb; if (*lwork < iws) { nb = *lwork / ldwork; /* Computing MAX Writing concatenation */ i__3[0] = 1, a__1[0] = side; i__3[1] = 1, a__1[1] = trans; s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2); i__1 = 2, i__2 = ilaenv_(&c__2, "DORMLQ", ch__1, m, n, k, &c_n1, ( ftnlen)6, (ftnlen)2); nbmin = max(i__1,i__2); } } else { iws = nw; } if (nb < nbmin || nb >= *k) { /* Use unblocked code */ dorml2_(side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[ c_offset], ldc, &work[1], &iinfo); } else { /* Use blocked code */ if (left && notran || ! left && ! notran) { i1 = 1; i2 = *k; i3 = nb; } else { i1 = (*k - 1) / nb * nb + 1; i2 = 1; i3 = -nb; } if (left) { ni = *n; jc = 1; } else { mi = *m; ic = 1; } if (notran) { *(unsigned char *)transt = 'T'; } else { *(unsigned char *)transt = 'N'; } i__1 = i2; i__2 = i3; for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) { /* Computing MIN */ i__4 = nb, i__5 = *k - i__ + 1; ib = min(i__4,i__5); /* Form the triangular factor of the block reflector H = H(i) H(i+1) . . . 
H(i+ib-1) */ i__4 = nq - i__ + 1; dlarft_("Forward", "Rowwise", &i__4, &ib, &a[i__ + i__ * a_dim1], lda, &tau[i__], t, &c__65); if (left) { /* H or H' is applied to C(i:m,1:n) */ mi = *m - i__ + 1; ic = i__; } else { /* H or H' is applied to C(1:m,i:n) */ ni = *n - i__ + 1; jc = i__; } /* Apply H or H' */ dlarfb_(side, transt, "Forward", "Rowwise", &mi, &ni, &ib, &a[i__ + i__ * a_dim1], lda, t, &c__65, &c__[ic + jc * c_dim1], ldc, &work[1], &ldwork); /* L10: */ } } work[1] = (doublereal) lwkopt; return 0; /* End of DORMLQ */ } /* dormlq_ */ /* Subroutine */ int dormql_(char *side, char *trans, integer *m, integer *n, integer *k, doublereal *a, integer *lda, doublereal *tau, doublereal * c__, integer *ldc, doublereal *work, integer *lwork, integer *info) { /* System generated locals */ address a__1[2]; integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2], i__4, i__5; char ch__1[2]; /* Builtin functions */ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen); /* Local variables */ static logical left; static integer i__; static doublereal t[4160] /* was [65][64] */; extern logical lsame_(char *, char *); static integer nbmin, iinfo, i1, i2, i3; extern /* Subroutine */ int dorm2l_(char *, char *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *); static integer ib, nb, mi, ni; extern /* Subroutine */ int dlarfb_(char *, char *, char *, char *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *); static integer nq, nw; extern /* Subroutine */ int dlarft_(char *, char *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *); extern integer ilaenv_(integer *, char *, char *, integer *, integer *, integer *, integer *, ftnlen, ftnlen); static logical notran; static integer ldwork, lwkopt; static logical lquery; static integer iws; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DORMQL overwrites the general real M-by-N matrix C with SIDE = 'L' SIDE = 'R' TRANS = 'N': Q * C C * Q TRANS = 'T': Q**T * C C * Q**T where Q is a real orthogonal matrix defined as the product of k elementary reflectors Q = H(k) . . . H(2) H(1) as returned by DGEQLF. Q is of order M if SIDE = 'L' and of order N if SIDE = 'R'. Arguments ========= SIDE (input) CHARACTER*1 = 'L': apply Q or Q**T from the Left; = 'R': apply Q or Q**T from the Right. TRANS (input) CHARACTER*1 = 'N': No transpose, apply Q; = 'T': Transpose, apply Q**T. M (input) INTEGER The number of rows of the matrix C. M >= 0. N (input) INTEGER The number of columns of the matrix C. N >= 0. K (input) INTEGER The number of elementary reflectors whose product defines the matrix Q. If SIDE = 'L', M >= K >= 0; if SIDE = 'R', N >= K >= 0. A (input) DOUBLE PRECISION array, dimension (LDA,K) The i-th column must contain the vector which defines the elementary reflector H(i), for i = 1,2,...,k, as returned by DGEQLF in the last k columns of its array argument A. A is modified by the routine but restored on exit. LDA (input) INTEGER The leading dimension of the array A. If SIDE = 'L', LDA >= max(1,M); if SIDE = 'R', LDA >= max(1,N). TAU (input) DOUBLE PRECISION array, dimension (K) TAU(i) must contain the scalar factor of the elementary reflector H(i), as returned by DGEQLF. 
C (input/output) DOUBLE PRECISION array, dimension (LDC,N) On entry, the M-by-N matrix C. On exit, C is overwritten by Q*C or Q**T*C or C*Q**T or C*Q. LDC (input) INTEGER The leading dimension of the array C. LDC >= max(1,M). WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) On exit, if INFO = 0, WORK(1) returns the optimal LWORK. LWORK (input) INTEGER The dimension of the array WORK. If SIDE = 'L', LWORK >= max(1,N); if SIDE = 'R', LWORK >= max(1,M). For optimum performance LWORK >= N*NB if SIDE = 'L', and LWORK >= M*NB if SIDE = 'R', where NB is the optimal blocksize. If LWORK = -1, then a workspace query is assumed; the routine only calculates the optimal size of the WORK array, returns this value as the first entry of the WORK array, and no error message related to LWORK is issued by XERBLA. INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value ===================================================================== Test the input arguments */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --tau; c_dim1 = *ldc; c_offset = 1 + c_dim1 * 1; c__ -= c_offset; --work; /* Function Body */ *info = 0; left = lsame_(side, "L"); notran = lsame_(trans, "N"); lquery = *lwork == -1; /* NQ is the order of Q and NW is the minimum dimension of WORK */ if (left) { nq = *m; nw = max(1,*n); } else { nq = *n; nw = max(1,*m); } if (! left && ! lsame_(side, "R")) { *info = -1; } else if (! notran && ! lsame_(trans, "T")) { *info = -2; } else if (*m < 0) { *info = -3; } else if (*n < 0) { *info = -4; } else if (*k < 0 || *k > nq) { *info = -5; } else if (*lda < max(1,nq)) { *info = -7; } else if (*ldc < max(1,*m)) { *info = -10; } if (*info == 0) { if (*m == 0 || *n == 0) { lwkopt = 1; } else { /* Determine the block size. NB may be at most NBMAX, where NBMAX is used to define the local array T. Computing MIN Writing concatenation */ i__3[0] = 1, a__1[0] = side; i__3[1] = 1, a__1[1] = trans; s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2); i__1 = 64, i__2 = ilaenv_(&c__1, "DORMQL", ch__1, m, n, k, &c_n1, (ftnlen)6, (ftnlen)2); nb = min(i__1,i__2); lwkopt = nw * nb; } work[1] = (doublereal) lwkopt; if (*lwork < nw && ! lquery) { *info = -12; } } if (*info != 0) { i__1 = -(*info); xerbla_("DORMQL", &i__1); return 0; } else if (lquery) { return 0; } /* Quick return if possible */ if (*m == 0 || *n == 0) { return 0; } nbmin = 2; ldwork = nw; if (nb > 1 && nb < *k) { iws = nw * nb; if (*lwork < iws) { nb = *lwork / ldwork; /* Computing MAX Writing concatenation */ i__3[0] = 1, a__1[0] = side; i__3[1] = 1, a__1[1] = trans; s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2); i__1 = 2, i__2 = ilaenv_(&c__2, "DORMQL", ch__1, m, n, k, &c_n1, ( ftnlen)6, (ftnlen)2); nbmin = max(i__1,i__2); } } else { iws = nw; } if (nb < nbmin || nb >= *k) { /* Use unblocked code */ dorm2l_(side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[ c_offset], ldc, &work[1], &iinfo); } else { /* Use blocked code */ if (left && notran || ! left && ! notran) { i1 = 1; i2 = *k; i3 = nb; } else { i1 = (*k - 1) / nb * nb + 1; i2 = 1; i3 = -nb; } if (left) { ni = *n; } else { mi = *m; } i__1 = i2; i__2 = i3; for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) { /* Computing MIN */ i__4 = nb, i__5 = *k - i__ + 1; ib = min(i__4,i__5); /* Form the triangular factor of the block reflector H = H(i+ib-1) . . . 
H(i+1) H(i) */ i__4 = nq - *k + i__ + ib - 1; dlarft_("Backward", "Columnwise", &i__4, &ib, &a[i__ * a_dim1 + 1] , lda, &tau[i__], t, &c__65); if (left) { /* H or H' is applied to C(1:m-k+i+ib-1,1:n) */ mi = *m - *k + i__ + ib - 1; } else { /* H or H' is applied to C(1:m,1:n-k+i+ib-1) */ ni = *n - *k + i__ + ib - 1; } /* Apply H or H' */ dlarfb_(side, trans, "Backward", "Columnwise", &mi, &ni, &ib, &a[ i__ * a_dim1 + 1], lda, t, &c__65, &c__[c_offset], ldc, & work[1], &ldwork); /* L10: */ } } work[1] = (doublereal) lwkopt; return 0; /* End of DORMQL */ } /* dormql_ */ /* Subroutine */ int dormqr_(char *side, char *trans, integer *m, integer *n, integer *k, doublereal *a, integer *lda, doublereal *tau, doublereal * c__, integer *ldc, doublereal *work, integer *lwork, integer *info) { /* System generated locals */ address a__1[2]; integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2], i__4, i__5; char ch__1[2]; /* Builtin functions */ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen); /* Local variables */ static logical left; static integer i__; static doublereal t[4160] /* was [65][64] */; extern logical lsame_(char *, char *); static integer nbmin, iinfo, i1, i2, i3; extern /* Subroutine */ int dorm2r_(char *, char *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *); static integer ib, ic, jc, nb, mi, ni; extern /* Subroutine */ int dlarfb_(char *, char *, char *, char *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *); static integer nq, nw; extern /* Subroutine */ int dlarft_(char *, char *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *); extern integer ilaenv_(integer *, char *, char *, integer *, integer *, integer *, integer *, ftnlen, ftnlen); static logical notran; static integer ldwork, lwkopt; static logical lquery; static integer iws; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DORMQR overwrites the general real M-by-N matrix C with SIDE = 'L' SIDE = 'R' TRANS = 'N': Q * C C * Q TRANS = 'T': Q**T * C C * Q**T where Q is a real orthogonal matrix defined as the product of k elementary reflectors Q = H(1) H(2) . . . H(k) as returned by DGEQRF. Q is of order M if SIDE = 'L' and of order N if SIDE = 'R'. Arguments ========= SIDE (input) CHARACTER*1 = 'L': apply Q or Q**T from the Left; = 'R': apply Q or Q**T from the Right. TRANS (input) CHARACTER*1 = 'N': No transpose, apply Q; = 'T': Transpose, apply Q**T. M (input) INTEGER The number of rows of the matrix C. M >= 0. N (input) INTEGER The number of columns of the matrix C. N >= 0. K (input) INTEGER The number of elementary reflectors whose product defines the matrix Q. If SIDE = 'L', M >= K >= 0; if SIDE = 'R', N >= K >= 0. A (input) DOUBLE PRECISION array, dimension (LDA,K) The i-th column must contain the vector which defines the elementary reflector H(i), for i = 1,2,...,k, as returned by DGEQRF in the first k columns of its array argument A. A is modified by the routine but restored on exit. LDA (input) INTEGER The leading dimension of the array A. If SIDE = 'L', LDA >= max(1,M); if SIDE = 'R', LDA >= max(1,N). TAU (input) DOUBLE PRECISION array, dimension (K) TAU(i) must contain the scalar factor of the elementary reflector H(i), as returned by DGEQRF. 
C (input/output) DOUBLE PRECISION array, dimension (LDC,N) On entry, the M-by-N matrix C. On exit, C is overwritten by Q*C or Q**T*C or C*Q**T or C*Q. LDC (input) INTEGER The leading dimension of the array C. LDC >= max(1,M). WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) On exit, if INFO = 0, WORK(1) returns the optimal LWORK. LWORK (input) INTEGER The dimension of the array WORK. If SIDE = 'L', LWORK >= max(1,N); if SIDE = 'R', LWORK >= max(1,M). For optimum performance LWORK >= N*NB if SIDE = 'L', and LWORK >= M*NB if SIDE = 'R', where NB is the optimal blocksize. If LWORK = -1, then a workspace query is assumed; the routine only calculates the optimal size of the WORK array, returns this value as the first entry of the WORK array, and no error message related to LWORK is issued by XERBLA. INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value ===================================================================== Test the input arguments */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --tau; c_dim1 = *ldc; c_offset = 1 + c_dim1 * 1; c__ -= c_offset; --work; /* Function Body */ *info = 0; left = lsame_(side, "L"); notran = lsame_(trans, "N"); lquery = *lwork == -1; /* NQ is the order of Q and NW is the minimum dimension of WORK */ if (left) { nq = *m; nw = *n; } else { nq = *n; nw = *m; } if (! left && ! lsame_(side, "R")) { *info = -1; } else if (! notran && ! lsame_(trans, "T")) { *info = -2; } else if (*m < 0) { *info = -3; } else if (*n < 0) { *info = -4; } else if (*k < 0 || *k > nq) { *info = -5; } else if (*lda < max(1,nq)) { *info = -7; } else if (*ldc < max(1,*m)) { *info = -10; } else if (*lwork < max(1,nw) && ! lquery) { *info = -12; } if (*info == 0) { /* Determine the block size. NB may be at most NBMAX, where NBMAX is used to define the local array T. Computing MIN Writing concatenation */ i__3[0] = 1, a__1[0] = side; i__3[1] = 1, a__1[1] = trans; s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2); i__1 = 64, i__2 = ilaenv_(&c__1, "DORMQR", ch__1, m, n, k, &c_n1, ( ftnlen)6, (ftnlen)2); nb = min(i__1,i__2); lwkopt = max(1,nw) * nb; work[1] = (doublereal) lwkopt; } if (*info != 0) { i__1 = -(*info); xerbla_("DORMQR", &i__1); return 0; } else if (lquery) { return 0; } /* Quick return if possible */ if (*m == 0 || *n == 0 || *k == 0) { work[1] = 1.; return 0; } nbmin = 2; ldwork = nw; if (nb > 1 && nb < *k) { iws = nw * nb; if (*lwork < iws) { nb = *lwork / ldwork; /* Computing MAX Writing concatenation */ i__3[0] = 1, a__1[0] = side; i__3[1] = 1, a__1[1] = trans; s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2); i__1 = 2, i__2 = ilaenv_(&c__2, "DORMQR", ch__1, m, n, k, &c_n1, ( ftnlen)6, (ftnlen)2); nbmin = max(i__1,i__2); } } else { iws = nw; } if (nb < nbmin || nb >= *k) { /* Use unblocked code */ dorm2r_(side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[ c_offset], ldc, &work[1], &iinfo); } else { /* Use blocked code */ if (left && ! notran || ! left && notran) { i1 = 1; i2 = *k; i3 = nb; } else { i1 = (*k - 1) / nb * nb + 1; i2 = 1; i3 = -nb; } if (left) { ni = *n; jc = 1; } else { mi = *m; ic = 1; } i__1 = i2; i__2 = i3; for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) { /* Computing MIN */ i__4 = nb, i__5 = *k - i__ + 1; ib = min(i__4,i__5); /* Form the triangular factor of the block reflector H = H(i) H(i+1) . . . 
H(i+ib-1) */ i__4 = nq - i__ + 1; dlarft_("Forward", "Columnwise", &i__4, &ib, &a[i__ + i__ * a_dim1], lda, &tau[i__], t, &c__65) ; if (left) { /* H or H' is applied to C(i:m,1:n) */ mi = *m - i__ + 1; ic = i__; } else { /* H or H' is applied to C(1:m,i:n) */ ni = *n - i__ + 1; jc = i__; } /* Apply H or H' */ dlarfb_(side, trans, "Forward", "Columnwise", &mi, &ni, &ib, &a[ i__ + i__ * a_dim1], lda, t, &c__65, &c__[ic + jc * c_dim1], ldc, &work[1], &ldwork); /* L10: */ } } work[1] = (doublereal) lwkopt; return 0; /* End of DORMQR */ } /* dormqr_ */ /* Subroutine */ int dormtr_(char *side, char *uplo, char *trans, integer *m, integer *n, doublereal *a, integer *lda, doublereal *tau, doublereal * c__, integer *ldc, doublereal *work, integer *lwork, integer *info) { /* System generated locals */ address a__1[2]; integer a_dim1, a_offset, c_dim1, c_offset, i__1[2], i__2, i__3; char ch__1[2]; /* Builtin functions */ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen); /* Local variables */ static logical left; extern logical lsame_(char *, char *); static integer iinfo, i1; static logical upper; static integer i2, nb, mi, ni, nq, nw; extern /* Subroutine */ int xerbla_(char *, integer *); extern integer ilaenv_(integer *, char *, char *, integer *, integer *, integer *, integer *, ftnlen, ftnlen); extern /* Subroutine */ int dormql_(char *, char *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, integer *), dormqr_(char *, char *, integer *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, integer *); static integer lwkopt; static logical lquery; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DORMTR overwrites the general real M-by-N matrix C with SIDE = 'L' SIDE = 'R' TRANS = 'N': Q * C C * Q TRANS = 'T': Q**T * C C * Q**T where Q is a real orthogonal matrix of order nq, with nq = m if SIDE = 'L' and nq = n if SIDE = 'R'. Q is defined as the product of nq-1 elementary reflectors, as returned by DSYTRD: if UPLO = 'U', Q = H(nq-1) . . . H(2) H(1); if UPLO = 'L', Q = H(1) H(2) . . . H(nq-1). Arguments ========= SIDE (input) CHARACTER*1 = 'L': apply Q or Q**T from the Left; = 'R': apply Q or Q**T from the Right. UPLO (input) CHARACTER*1 = 'U': Upper triangle of A contains elementary reflectors from DSYTRD; = 'L': Lower triangle of A contains elementary reflectors from DSYTRD. TRANS (input) CHARACTER*1 = 'N': No transpose, apply Q; = 'T': Transpose, apply Q**T. M (input) INTEGER The number of rows of the matrix C. M >= 0. N (input) INTEGER The number of columns of the matrix C. N >= 0. A (input) DOUBLE PRECISION array, dimension (LDA,M) if SIDE = 'L' (LDA,N) if SIDE = 'R' The vectors which define the elementary reflectors, as returned by DSYTRD. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,M) if SIDE = 'L'; LDA >= max(1,N) if SIDE = 'R'. TAU (input) DOUBLE PRECISION array, dimension (M-1) if SIDE = 'L' (N-1) if SIDE = 'R' TAU(i) must contain the scalar factor of the elementary reflector H(i), as returned by DSYTRD. C (input/output) DOUBLE PRECISION array, dimension (LDC,N) On entry, the M-by-N matrix C. On exit, C is overwritten by Q*C or Q**T*C or C*Q**T or C*Q. LDC (input) INTEGER The leading dimension of the array C. LDC >= max(1,M). 
WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) On exit, if INFO = 0, WORK(1) returns the optimal LWORK. LWORK (input) INTEGER The dimension of the array WORK. If SIDE = 'L', LWORK >= max(1,N); if SIDE = 'R', LWORK >= max(1,M). For optimum performance LWORK >= N*NB if SIDE = 'L', and LWORK >= M*NB if SIDE = 'R', where NB is the optimal blocksize. If LWORK = -1, then a workspace query is assumed; the routine only calculates the optimal size of the WORK array, returns this value as the first entry of the WORK array, and no error message related to LWORK is issued by XERBLA. INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value ===================================================================== Test the input arguments */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --tau; c_dim1 = *ldc; c_offset = 1 + c_dim1 * 1; c__ -= c_offset; --work; /* Function Body */ *info = 0; left = lsame_(side, "L"); upper = lsame_(uplo, "U"); lquery = *lwork == -1; /* NQ is the order of Q and NW is the minimum dimension of WORK */ if (left) { nq = *m; nw = *n; } else { nq = *n; nw = *m; } if (! left && ! lsame_(side, "R")) { *info = -1; } else if (! upper && ! lsame_(uplo, "L")) { *info = -2; } else if (! lsame_(trans, "N") && ! lsame_(trans, "T")) { *info = -3; } else if (*m < 0) { *info = -4; } else if (*n < 0) { *info = -5; } else if (*lda < max(1,nq)) { *info = -7; } else if (*ldc < max(1,*m)) { *info = -10; } else if (*lwork < max(1,nw) && ! lquery) { *info = -12; } if (*info == 0) { if (upper) { if (left) { /* Writing concatenation */ i__1[0] = 1, a__1[0] = side; i__1[1] = 1, a__1[1] = trans; s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2); i__2 = *m - 1; i__3 = *m - 1; nb = ilaenv_(&c__1, "DORMQL", ch__1, &i__2, n, &i__3, &c_n1, ( ftnlen)6, (ftnlen)2); } else { /* Writing concatenation */ i__1[0] = 1, a__1[0] = side; i__1[1] = 1, a__1[1] = trans; s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2); i__2 = *n - 1; i__3 = *n - 1; nb = ilaenv_(&c__1, "DORMQL", ch__1, m, &i__2, &i__3, &c_n1, ( ftnlen)6, (ftnlen)2); } } else { if (left) { /* Writing concatenation */ i__1[0] = 1, a__1[0] = side; i__1[1] = 1, a__1[1] = trans; s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2); i__2 = *m - 1; i__3 = *m - 1; nb = ilaenv_(&c__1, "DORMQR", ch__1, &i__2, n, &i__3, &c_n1, ( ftnlen)6, (ftnlen)2); } else { /* Writing concatenation */ i__1[0] = 1, a__1[0] = side; i__1[1] = 1, a__1[1] = trans; s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2); i__2 = *n - 1; i__3 = *n - 1; nb = ilaenv_(&c__1, "DORMQR", ch__1, m, &i__2, &i__3, &c_n1, ( ftnlen)6, (ftnlen)2); } } lwkopt = max(1,nw) * nb; work[1] = (doublereal) lwkopt; } if (*info != 0) { i__2 = -(*info); xerbla_("DORMTR", &i__2); return 0; } else if (lquery) { return 0; } /* Quick return if possible */ if (*m == 0 || *n == 0 || nq == 1) { work[1] = 1.; return 0; } if (left) { mi = *m - 1; ni = *n; } else { mi = *m; ni = *n - 1; } if (upper) { /* Q was determined by a call to DSYTRD with UPLO = 'U' */ i__2 = nq - 1; dormql_(side, trans, &mi, &ni, &i__2, &a[(a_dim1 << 1) + 1], lda, & tau[1], &c__[c_offset], ldc, &work[1], lwork, &iinfo); } else { /* Q was determined by a call to DSYTRD with UPLO = 'L' */ if (left) { i1 = 2; i2 = 1; } else { i1 = 1; i2 = 2; } i__2 = nq - 1; dormqr_(side, trans, &mi, &ni, &i__2, &a[a_dim1 + 2], lda, &tau[1], & c__[i1 + i2 * c_dim1], ldc, &work[1], lwork, &iinfo); } work[1] = (doublereal) lwkopt; return 0; /* End of DORMTR */ } /* dormtr_ */ /* Subroutine */ int 
dpotf2_(char *uplo, integer *n, doublereal *a, integer * lda, integer *info) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2, i__3; doublereal d__1; /* Builtin functions */ double sqrt(doublereal); /* Local variables */ extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *, integer *); static integer j; extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, integer *); extern logical lsame_(char *, char *); extern /* Subroutine */ int dgemv_(char *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *); static logical upper; extern /* Subroutine */ int xerbla_(char *, integer *); static doublereal ajj; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DPOTF2 computes the Cholesky factorization of a real symmetric positive definite matrix A. The factorization has the form A = U' * U , if UPLO = 'U', or A = L * L', if UPLO = 'L', where U is an upper triangular matrix and L is lower triangular. This is the unblocked version of the algorithm, calling Level 2 BLAS. Arguments ========= UPLO (input) CHARACTER*1 Specifies whether the upper or lower triangular part of the symmetric matrix A is stored. = 'U': Upper triangular = 'L': Lower triangular N (input) INTEGER The order of the matrix A. N >= 0. A (input/output) DOUBLE PRECISION array, dimension (LDA,N) On entry, the symmetric matrix A. If UPLO = 'U', the leading n by n upper triangular part of A contains the upper triangular part of the matrix A, and the strictly lower triangular part of A is not referenced. If UPLO = 'L', the leading n by n lower triangular part of A contains the lower triangular part of the matrix A, and the strictly upper triangular part of A is not referenced. On exit, if INFO = 0, the factor U or L from the Cholesky factorization A = U'*U or A = L*L'. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,N). INFO (output) INTEGER = 0: successful exit < 0: if INFO = -k, the k-th argument had an illegal value > 0: if INFO = k, the leading minor of order k is not positive definite, and the factorization could not be completed. ===================================================================== Test the input parameters. */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; /* Function Body */ *info = 0; upper = lsame_(uplo, "U"); if (! upper && ! lsame_(uplo, "L")) { *info = -1; } else if (*n < 0) { *info = -2; } else if (*lda < max(1,*n)) { *info = -4; } if (*info != 0) { i__1 = -(*info); xerbla_("DPOTF2", &i__1); return 0; } /* Quick return if possible */ if (*n == 0) { return 0; } if (upper) { /* Compute the Cholesky factorization A = U'*U. */ i__1 = *n; for (j = 1; j <= i__1; ++j) { /* Compute U(J,J) and test for non-positive-definiteness. */ i__2 = j - 1; ajj = a[j + j * a_dim1] - ddot_(&i__2, &a[j * a_dim1 + 1], &c__1, &a[j * a_dim1 + 1], &c__1); if (ajj <= 0.) { a[j + j * a_dim1] = ajj; goto L30; } ajj = sqrt(ajj); a[j + j * a_dim1] = ajj; /* Compute elements J+1:N of row J. */ if (j < *n) { i__2 = j - 1; i__3 = *n - j; dgemv_("Transpose", &i__2, &i__3, &c_b151, &a[(j + 1) * a_dim1 + 1], lda, &a[j * a_dim1 + 1], &c__1, &c_b15, & a[j + (j + 1) * a_dim1], lda); i__2 = *n - j; d__1 = 1. / ajj; dscal_(&i__2, &d__1, &a[j + (j + 1) * a_dim1], lda); } /* L10: */ } } else { /* Compute the Cholesky factorization A = L*L'. 
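For each column J, the dot product of the previously computed part of row J is subtracted from A(J,J); a non-positive result aborts with INFO = J, otherwise its square root becomes L(J,J) and the remainder of column J is updated with DGEMV and scaled by 1/L(J,J) with DSCAL.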
*/ i__1 = *n; for (j = 1; j <= i__1; ++j) { /* Compute L(J,J) and test for non-positive-definiteness. */ i__2 = j - 1; ajj = a[j + j * a_dim1] - ddot_(&i__2, &a[j + a_dim1], lda, &a[j + a_dim1], lda); if (ajj <= 0.) { a[j + j * a_dim1] = ajj; goto L30; } ajj = sqrt(ajj); a[j + j * a_dim1] = ajj; /* Compute elements J+1:N of column J. */ if (j < *n) { i__2 = *n - j; i__3 = j - 1; dgemv_("No transpose", &i__2, &i__3, &c_b151, &a[j + 1 + a_dim1], lda, &a[j + a_dim1], lda, &c_b15, &a[j + 1 + j * a_dim1], &c__1); i__2 = *n - j; d__1 = 1. / ajj; dscal_(&i__2, &d__1, &a[j + 1 + j * a_dim1], &c__1); } /* L20: */ } } goto L40; L30: *info = j; L40: return 0; /* End of DPOTF2 */ } /* dpotf2_ */ /* Subroutine */ int dpotrf_(char *uplo, integer *n, doublereal *a, integer * lda, integer *info) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2, i__3, i__4; /* Local variables */ static integer j; extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *); extern logical lsame_(char *, char *); extern /* Subroutine */ int dtrsm_(char *, char *, char *, char *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *); static logical upper; extern /* Subroutine */ int dsyrk_(char *, char *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, doublereal *, integer *), dpotf2_(char *, integer *, doublereal *, integer *, integer *); static integer jb, nb; extern /* Subroutine */ int xerbla_(char *, integer *); extern integer ilaenv_(integer *, char *, char *, integer *, integer *, integer *, integer *, ftnlen, ftnlen); /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DPOTRF computes the Cholesky factorization of a real symmetric positive definite matrix A. The factorization has the form A = U**T * U, if UPLO = 'U', or A = L * L**T, if UPLO = 'L', where U is an upper triangular matrix and L is lower triangular. This is the block version of the algorithm, calling Level 3 BLAS. Arguments ========= UPLO (input) CHARACTER*1 = 'U': Upper triangle of A is stored; = 'L': Lower triangle of A is stored. N (input) INTEGER The order of the matrix A. N >= 0. A (input/output) DOUBLE PRECISION array, dimension (LDA,N) On entry, the symmetric matrix A. If UPLO = 'U', the leading N-by-N upper triangular part of A contains the upper triangular part of the matrix A, and the strictly lower triangular part of A is not referenced. If UPLO = 'L', the leading N-by-N lower triangular part of A contains the lower triangular part of the matrix A, and the strictly upper triangular part of A is not referenced. On exit, if INFO = 0, the factor U or L from the Cholesky factorization A = U**T*U or A = L*L**T. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,N). INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value > 0: if INFO = i, the leading minor of order i is not positive definite, and the factorization could not be completed. ===================================================================== Test the input parameters. */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; /* Function Body */ *info = 0; upper = lsame_(uplo, "U"); if (! upper && ! 
lsame_(uplo, "L")) { *info = -1; } else if (*n < 0) { *info = -2; } else if (*lda < max(1,*n)) { *info = -4; } if (*info != 0) { i__1 = -(*info); xerbla_("DPOTRF", &i__1); return 0; } /* Quick return if possible */ if (*n == 0) { return 0; } /* Determine the block size for this environment. */ nb = ilaenv_(&c__1, "DPOTRF", uplo, n, &c_n1, &c_n1, &c_n1, (ftnlen)6, ( ftnlen)1); if (nb <= 1 || nb >= *n) { /* Use unblocked code. */ dpotf2_(uplo, n, &a[a_offset], lda, info); } else { /* Use blocked code. */ if (upper) { /* Compute the Cholesky factorization A = U'*U. */ i__1 = *n; i__2 = nb; for (j = 1; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) { /* Update and factorize the current diagonal block and test for non-positive-definiteness. Computing MIN */ i__3 = nb, i__4 = *n - j + 1; jb = min(i__3,i__4); i__3 = j - 1; dsyrk_("Upper", "Transpose", &jb, &i__3, &c_b151, &a[j * a_dim1 + 1], lda, &c_b15, &a[j + j * a_dim1], lda); dpotf2_("Upper", &jb, &a[j + j * a_dim1], lda, info); if (*info != 0) { goto L30; } if (j + jb <= *n) { /* Compute the current block row. */ i__3 = *n - j - jb + 1; i__4 = j - 1; dgemm_("Transpose", "No transpose", &jb, &i__3, &i__4, & c_b151, &a[j * a_dim1 + 1], lda, &a[(j + jb) * a_dim1 + 1], lda, &c_b15, &a[j + (j + jb) * a_dim1], lda); i__3 = *n - j - jb + 1; dtrsm_("Left", "Upper", "Transpose", "Non-unit", &jb, & i__3, &c_b15, &a[j + j * a_dim1], lda, &a[j + (j + jb) * a_dim1], lda); } /* L10: */ } } else { /* Compute the Cholesky factorization A = L*L'. */ i__2 = *n; i__1 = nb; for (j = 1; i__1 < 0 ? j >= i__2 : j <= i__2; j += i__1) { /* Update and factorize the current diagonal block and test for non-positive-definiteness. Computing MIN */ i__3 = nb, i__4 = *n - j + 1; jb = min(i__3,i__4); i__3 = j - 1; dsyrk_("Lower", "No transpose", &jb, &i__3, &c_b151, &a[j + a_dim1], lda, &c_b15, &a[j + j * a_dim1], lda); dpotf2_("Lower", &jb, &a[j + j * a_dim1], lda, info); if (*info != 0) { goto L30; } if (j + jb <= *n) { /* Compute the current block column. 
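The sub-diagonal block A(J+JB:N,J:J+JB-1) is updated with DGEMM using the previously factored block columns and then solved against the transpose of the just-factored diagonal block with DTRSM.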
*/ i__3 = *n - j - jb + 1; i__4 = j - 1; dgemm_("No transpose", "Transpose", &i__3, &jb, &i__4, & c_b151, &a[j + jb + a_dim1], lda, &a[j + a_dim1], lda, &c_b15, &a[j + jb + j * a_dim1], lda); i__3 = *n - j - jb + 1; dtrsm_("Right", "Lower", "Transpose", "Non-unit", &i__3, & jb, &c_b15, &a[j + j * a_dim1], lda, &a[j + jb + j * a_dim1], lda); } /* L20: */ } } } goto L40; L30: *info = *info + j - 1; L40: return 0; /* End of DPOTRF */ } /* dpotrf_ */ /* Subroutine */ int dstedc_(char *compz, integer *n, doublereal *d__, doublereal *e, doublereal *z__, integer *ldz, doublereal *work, integer *lwork, integer *iwork, integer *liwork, integer *info) { /* System generated locals */ integer z_dim1, z_offset, i__1, i__2; doublereal d__1, d__2; /* Builtin functions */ double log(doublereal); integer pow_ii(integer *, integer *); double sqrt(doublereal); /* Local variables */ static doublereal tiny; static integer i__, j, k, m; static doublereal p; extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *); extern logical lsame_(char *, char *); extern /* Subroutine */ int dswap_(integer *, doublereal *, integer *, doublereal *, integer *); static integer lwmin; extern /* Subroutine */ int dlaed0_(integer *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *, integer *); static integer start, ii; extern /* Subroutine */ int dlascl_(char *, integer *, integer *, doublereal *, doublereal *, integer *, integer *, doublereal *, integer *, integer *), dlacpy_(char *, integer *, integer *, doublereal *, integer *, doublereal *, integer *), dlaset_(char *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *); extern integer ilaenv_(integer *, char *, char *, integer *, integer *, integer *, integer *, ftnlen, ftnlen); extern /* Subroutine */ int xerbla_(char *, integer *); static integer finish; extern doublereal dlanst_(char *, integer *, doublereal *, doublereal *); extern /* Subroutine */ int dsterf_(integer *, doublereal *, doublereal *, integer *), dlasrt_(char *, integer *, doublereal *, integer *); static integer liwmin, icompz; extern /* Subroutine */ int dsteqr_(char *, integer *, doublereal *, doublereal *, doublereal *, integer *, doublereal *, integer *); static doublereal orgnrm; static logical lquery; static integer smlsiz, storez, strtrw, lgn; static doublereal eps; /* -- LAPACK driver routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DSTEDC computes all eigenvalues and, optionally, eigenvectors of a symmetric tridiagonal matrix using the divide and conquer method. The eigenvectors of a full or band real symmetric matrix can also be found if DSYTRD or DSPTRD or DSBTRD has been used to reduce this matrix to tridiagonal form. This code makes very mild assumptions about floating point arithmetic. It will work on machines with a guard digit in add/subtract, or on those binary machines without guard digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or Cray-2. It could conceivably fail on hexadecimal or decimal machines without guard digits, but we know of none. See DLAED3 for details. Arguments ========= COMPZ (input) CHARACTER*1 = 'N': Compute eigenvalues only. = 'I': Compute eigenvectors of tridiagonal matrix also. = 'V': Compute eigenvectors of original dense symmetric matrix also. 
On entry, Z contains the orthogonal matrix used to reduce the original matrix to tridiagonal form. N (input) INTEGER The dimension of the symmetric tridiagonal matrix. N >= 0. D (input/output) DOUBLE PRECISION array, dimension (N) On entry, the diagonal elements of the tridiagonal matrix. On exit, if INFO = 0, the eigenvalues in ascending order. E (input/output) DOUBLE PRECISION array, dimension (N-1) On entry, the subdiagonal elements of the tridiagonal matrix. On exit, E has been destroyed. Z (input/output) DOUBLE PRECISION array, dimension (LDZ,N) On entry, if COMPZ = 'V', then Z contains the orthogonal matrix used in the reduction to tridiagonal form. On exit, if INFO = 0, then if COMPZ = 'V', Z contains the orthonormal eigenvectors of the original symmetric matrix, and if COMPZ = 'I', Z contains the orthonormal eigenvectors of the symmetric tridiagonal matrix. If COMPZ = 'N', then Z is not referenced. LDZ (input) INTEGER The leading dimension of the array Z. LDZ >= 1. If eigenvectors are desired, then LDZ >= max(1,N). WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK) On exit, if INFO = 0, WORK(1) returns the optimal LWORK. LWORK (input) INTEGER The dimension of the array WORK. If COMPZ = 'N' or N <= 1 then LWORK must be at least 1. If COMPZ = 'V' and N > 1 then LWORK must be at least ( 1 + 3*N + 2*N*lg N + 3*N**2 ), where lg( N ) = smallest integer k such that 2**k >= N. If COMPZ = 'I' and N > 1 then LWORK must be at least ( 1 + 4*N + N**2 ). Note that for COMPZ = 'I' or 'V', then if N is less than or equal to the minimum divide size, usually 25, then LWORK need only be max(1,2*(N-1)). If LWORK = -1, then a workspace query is assumed; the routine only calculates the optimal size of the WORK array, returns this value as the first entry of the WORK array, and no error message related to LWORK is issued by XERBLA. IWORK (workspace/output) INTEGER array, dimension (MAX(1,LIWORK)) On exit, if INFO = 0, IWORK(1) returns the optimal LIWORK. LIWORK (input) INTEGER The dimension of the array IWORK. If COMPZ = 'N' or N <= 1 then LIWORK must be at least 1. If COMPZ = 'V' and N > 1 then LIWORK must be at least ( 6 + 6*N + 5*N*lg N ). If COMPZ = 'I' and N > 1 then LIWORK must be at least ( 3 + 5*N ). Note that for COMPZ = 'I' or 'V', then if N is less than or equal to the minimum divide size, usually 25, then LIWORK need only be 1. If LIWORK = -1, then a workspace query is assumed; the routine only calculates the optimal size of the IWORK array, returns this value as the first entry of the IWORK array, and no error message related to LIWORK is issued by XERBLA. INFO (output) INTEGER = 0: successful exit. < 0: if INFO = -i, the i-th argument had an illegal value. > 0: The algorithm failed to compute an eigenvalue while working on the submatrix lying in rows and columns INFO/(N+1) through mod(INFO,N+1). Further Details =============== Based on contributions by Jeff Rutter, Computer Science Division, University of California at Berkeley, USA Modified by Francoise Tisseur, University of Tennessee. ===================================================================== Test the input parameters. 
*/ /* Parameter adjustments */ --d__; --e; z_dim1 = *ldz; z_offset = 1 + z_dim1 * 1; z__ -= z_offset; --work; --iwork; /* Function Body */ *info = 0; lquery = *lwork == -1 || *liwork == -1; if (lsame_(compz, "N")) { icompz = 0; } else if (lsame_(compz, "V")) { icompz = 1; } else if (lsame_(compz, "I")) { icompz = 2; } else { icompz = -1; } if (icompz < 0) { *info = -1; } else if (*n < 0) { *info = -2; } else if (*ldz < 1 || icompz > 0 && *ldz < max(1,*n)) { *info = -6; } if (*info == 0) { /* Compute the workspace requirements */ smlsiz = ilaenv_(&c__9, "DSTEDC", " ", &c__0, &c__0, &c__0, &c__0, ( ftnlen)6, (ftnlen)1); if (*n <= 1 || icompz == 0) { liwmin = 1; lwmin = 1; } else if (*n <= smlsiz) { liwmin = 1; lwmin = *n - 1 << 1; } else { lgn = (integer) (log((doublereal) (*n)) / log(2.)); if (pow_ii(&c__2, &lgn) < *n) { ++lgn; } if (pow_ii(&c__2, &lgn) < *n) { ++lgn; } if (icompz == 1) { /* Computing 2nd power */ i__1 = *n; lwmin = *n * 3 + 1 + (*n << 1) * lgn + i__1 * i__1 * 3; liwmin = *n * 6 + 6 + *n * 5 * lgn; } else if (icompz == 2) { /* Computing 2nd power */ i__1 = *n; lwmin = (*n << 2) + 1 + i__1 * i__1; liwmin = *n * 5 + 3; } } work[1] = (doublereal) lwmin; iwork[1] = liwmin; if (*lwork < lwmin && ! lquery) { *info = -8; } else if (*liwork < liwmin && ! lquery) { *info = -10; } } if (*info != 0) { i__1 = -(*info); xerbla_("DSTEDC", &i__1); return 0; } else if (lquery) { return 0; } /* Quick return if possible */ if (*n == 0) { return 0; } if (*n == 1) { if (icompz != 0) { z__[z_dim1 + 1] = 1.; } return 0; } /* If the following conditional clause is removed, then the routine will use the Divide and Conquer routine to compute only the eigenvalues, which requires (3N + 3N**2) real workspace and (2 + 5N + 2N lg(N)) integer workspace. Since on many architectures DSTERF is much faster than any other algorithm for finding eigenvalues only, it is used here as the default. If the conditional clause is removed, then information on the size of workspace needs to be changed. If COMPZ = 'N', use DSTERF to compute the eigenvalues. */ if (icompz == 0) { dsterf_(n, &d__[1], &e[1], info); goto L50; } /* If N is smaller than the minimum divide size (SMLSIZ+1), then solve the problem with another solver. */ if (*n <= smlsiz) { dsteqr_(compz, n, &d__[1], &e[1], &z__[z_offset], ldz, &work[1], info); } else { /* If COMPZ = 'V', the Z matrix must be stored elsewhere for later use. */ if (icompz == 1) { storez = *n * *n + 1; } else { storez = 1; } if (icompz == 2) { dlaset_("Full", n, n, &c_b29, &c_b15, &z__[z_offset], ldz); } /* Scale. */ orgnrm = dlanst_("M", n, &d__[1], &e[1]); if (orgnrm == 0.) { goto L50; } eps = EPSILON; start = 1; /* while ( START <= N ) */ L10: if (start <= *n) { /* Let FINISH be the position of the next subdiagonal entry such that E( FINISH ) <= TINY or FINISH = N if no such subdiagonal exists. The matrix identified by the elements between START and FINISH constitutes an independent sub-problem. */ finish = start; L20: if (finish < *n) { tiny = eps * sqrt((d__1 = d__[finish], abs(d__1))) * sqrt(( d__2 = d__[finish + 1], abs(d__2))); if ((d__1 = e[finish], abs(d__1)) > tiny) { ++finish; goto L20; } } /* (Sub) Problem determined. Compute its size and solve it. */ m = finish - start + 1; if (m == 1) { start = finish + 1; goto L10; } if (m > smlsiz) { /* Scale. 
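The current sub-problem D(START:FINISH), E(START:FINISH-1) is first normalized with DLASCL so that its largest entry has magnitude one, DLAED0 then performs the divide and conquer step, and the computed eigenvalues are scaled back afterwards.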
*/ orgnrm = dlanst_("M", &m, &d__[start], &e[start]); dlascl_("G", &c__0, &c__0, &orgnrm, &c_b15, &m, &c__1, &d__[ start], &m, info); i__1 = m - 1; i__2 = m - 1; dlascl_("G", &c__0, &c__0, &orgnrm, &c_b15, &i__1, &c__1, &e[ start], &i__2, info); if (icompz == 1) { strtrw = 1; } else { strtrw = start; } dlaed0_(&icompz, n, &m, &d__[start], &e[start], &z__[strtrw + start * z_dim1], ldz, &work[1], n, &work[storez], & iwork[1], info); if (*info != 0) { *info = (*info / (m + 1) + start - 1) * (*n + 1) + *info % (m + 1) + start - 1; goto L50; } /* Scale back. */ dlascl_("G", &c__0, &c__0, &c_b15, &orgnrm, &m, &c__1, &d__[ start], &m, info); } else { if (icompz == 1) { /* Since QR won't update a Z matrix which is larger than the length of D, we must solve the sub-problem in a workspace and then multiply back into Z. */ dsteqr_("I", &m, &d__[start], &e[start], &work[1], &m, & work[m * m + 1], info); dlacpy_("A", n, &m, &z__[start * z_dim1 + 1], ldz, &work[ storez], n); dgemm_("N", "N", n, &m, &m, &c_b15, &work[storez], n, & work[1], &m, &c_b29, &z__[start * z_dim1 + 1], ldz); } else if (icompz == 2) { dsteqr_("I", &m, &d__[start], &e[start], &z__[start + start * z_dim1], ldz, &work[1], info); } else { dsterf_(&m, &d__[start], &e[start], info); } if (*info != 0) { *info = start * (*n + 1) + finish; goto L50; } } start = finish + 1; goto L10; } /* endwhile If the problem split any number of times, then the eigenvalues will not be properly ordered. Here we permute the eigenvalues (and the associated eigenvectors) into ascending order. */ if (m != *n) { if (icompz == 0) { /* Use Quick Sort */ dlasrt_("I", n, &d__[1], info); } else { /* Use Selection Sort to minimize swaps of eigenvectors */ i__1 = *n; for (ii = 2; ii <= i__1; ++ii) { i__ = ii - 1; k = i__; p = d__[i__]; i__2 = *n; for (j = ii; j <= i__2; ++j) { if (d__[j] < p) { k = j; p = d__[j]; } /* L30: */ } if (k != i__) { d__[k] = d__[i__]; d__[i__] = p; dswap_(n, &z__[i__ * z_dim1 + 1], &c__1, &z__[k * z_dim1 + 1], &c__1); } /* L40: */ } } } } L50: work[1] = (doublereal) lwmin; iwork[1] = liwmin; return 0; /* End of DSTEDC */ } /* dstedc_ */ /* Subroutine */ int dsteqr_(char *compz, integer *n, doublereal *d__, doublereal *e, doublereal *z__, integer *ldz, doublereal *work, integer *info) { /* System generated locals */ integer z_dim1, z_offset, i__1, i__2; doublereal d__1, d__2; /* Builtin functions */ double sqrt(doublereal), d_sign(doublereal *, doublereal *); /* Local variables */ static integer lend, jtot; extern /* Subroutine */ int dlae2_(doublereal *, doublereal *, doublereal *, doublereal *, doublereal *); static doublereal b, c__, f, g; static integer i__, j, k, l, m; static doublereal p, r__, s; extern logical lsame_(char *, char *); extern /* Subroutine */ int dlasr_(char *, char *, char *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *); static doublereal anorm; extern /* Subroutine */ int dswap_(integer *, doublereal *, integer *, doublereal *, integer *); static integer l1; extern /* Subroutine */ int dlaev2_(doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *, doublereal *); static integer lendm1, lendp1; extern doublereal dlapy2_(doublereal *, doublereal *); static integer ii; static integer mm, iscale; extern /* Subroutine */ int dlascl_(char *, integer *, integer *, doublereal *, doublereal *, integer *, integer *, doublereal *, integer *, integer *), dlaset_(char *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *); static doublereal safmin; 
extern /* Subroutine */ int dlartg_(doublereal *, doublereal *, doublereal *, doublereal *, doublereal *); static doublereal safmax; extern /* Subroutine */ int xerbla_(char *, integer *); extern doublereal dlanst_(char *, integer *, doublereal *, doublereal *); extern /* Subroutine */ int dlasrt_(char *, integer *, doublereal *, integer *); static integer lendsv; static doublereal ssfmin; static integer nmaxit, icompz; static doublereal ssfmax; static integer lm1, mm1, nm1; static doublereal rt1, rt2, eps; static integer lsv; static doublereal tst, eps2; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DSTEQR computes all eigenvalues and, optionally, eigenvectors of a symmetric tridiagonal matrix using the implicit QL or QR method. The eigenvectors of a full or band symmetric matrix can also be found if DSYTRD or DSPTRD or DSBTRD has been used to reduce this matrix to tridiagonal form. Arguments ========= COMPZ (input) CHARACTER*1 = 'N': Compute eigenvalues only. = 'V': Compute eigenvalues and eigenvectors of the original symmetric matrix. On entry, Z must contain the orthogonal matrix used to reduce the original matrix to tridiagonal form. = 'I': Compute eigenvalues and eigenvectors of the tridiagonal matrix. Z is initialized to the identity matrix. N (input) INTEGER The order of the matrix. N >= 0. D (input/output) DOUBLE PRECISION array, dimension (N) On entry, the diagonal elements of the tridiagonal matrix. On exit, if INFO = 0, the eigenvalues in ascending order. E (input/output) DOUBLE PRECISION array, dimension (N-1) On entry, the (n-1) subdiagonal elements of the tridiagonal matrix. On exit, E has been destroyed. Z (input/output) DOUBLE PRECISION array, dimension (LDZ, N) On entry, if COMPZ = 'V', then Z contains the orthogonal matrix used in the reduction to tridiagonal form. On exit, if INFO = 0, then if COMPZ = 'V', Z contains the orthonormal eigenvectors of the original symmetric matrix, and if COMPZ = 'I', Z contains the orthonormal eigenvectors of the symmetric tridiagonal matrix. If COMPZ = 'N', then Z is not referenced. LDZ (input) INTEGER The leading dimension of the array Z. LDZ >= 1, and if eigenvectors are desired, then LDZ >= max(1,N). WORK (workspace) DOUBLE PRECISION array, dimension (max(1,2*N-2)) If COMPZ = 'N', then WORK is not referenced. INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value > 0: the algorithm has failed to find all the eigenvalues in a total of 30*N iterations; if INFO = i, then i elements of E have not converged to zero; on exit, D and E contain the elements of a symmetric tridiagonal matrix which is orthogonally similar to the original matrix. ===================================================================== Test the input parameters. */ /* Parameter adjustments */ --d__; --e; z_dim1 = *ldz; z_offset = 1 + z_dim1 * 1; z__ -= z_offset; --work; /* Function Body */ *info = 0; if (lsame_(compz, "N")) { icompz = 0; } else if (lsame_(compz, "V")) { icompz = 1; } else if (lsame_(compz, "I")) { icompz = 2; } else { icompz = -1; } if (icompz < 0) { *info = -1; } else if (*n < 0) { *info = -2; } else if (*ldz < 1 || icompz > 0 && *ldz < max(1,*n)) { *info = -6; } if (*info != 0) { i__1 = -(*info); xerbla_("DSTEQR", &i__1); return 0; } /* Quick return if possible */ if (*n == 0) { return 0; } if (*n == 1) { if (icompz == 2) { z__[z_dim1 + 1] = 1.; } return 0; } /* Determine the unit roundoff and over/underflow thresholds. 
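These quantities (EPS, EPS2, SAFMIN, SAFMAX, SSFMIN, SSFMAX) are used below both to decide when an off-diagonal element is negligible and to decide whether a split submatrix must be rescaled with DLASCL before the QL/QR iteration.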
*/ eps = EPSILON; /* Computing 2nd power */ d__1 = eps; eps2 = d__1 * d__1; safmin = SAFEMINIMUM; safmax = 1. / safmin; ssfmax = sqrt(safmax) / 3.; ssfmin = sqrt(safmin) / eps2; /* Compute the eigenvalues and eigenvectors of the tridiagonal matrix. */ if (icompz == 2) { dlaset_("Full", n, n, &c_b29, &c_b15, &z__[z_offset], ldz); } nmaxit = *n * 30; jtot = 0; /* Determine where the matrix splits and choose QL or QR iteration for each block, according to whether top or bottom diagonal element is smaller. */ l1 = 1; nm1 = *n - 1; L10: if (l1 > *n) { goto L160; } if (l1 > 1) { e[l1 - 1] = 0.; } if (l1 <= nm1) { i__1 = nm1; for (m = l1; m <= i__1; ++m) { tst = (d__1 = e[m], abs(d__1)); if (tst == 0.) { goto L30; } if (tst <= sqrt((d__1 = d__[m], abs(d__1))) * sqrt((d__2 = d__[m + 1], abs(d__2))) * eps) { e[m] = 0.; goto L30; } /* L20: */ } } m = *n; L30: l = l1; lsv = l; lend = m; lendsv = lend; l1 = m + 1; if (lend == l) { goto L10; } /* Scale submatrix in rows and columns L to LEND */ i__1 = lend - l + 1; anorm = dlanst_("I", &i__1, &d__[l], &e[l]); iscale = 0; if (anorm == 0.) { goto L10; } if (anorm > ssfmax) { iscale = 1; i__1 = lend - l + 1; dlascl_("G", &c__0, &c__0, &anorm, &ssfmax, &i__1, &c__1, &d__[l], n, info); i__1 = lend - l; dlascl_("G", &c__0, &c__0, &anorm, &ssfmax, &i__1, &c__1, &e[l], n, info); } else if (anorm < ssfmin) { iscale = 2; i__1 = lend - l + 1; dlascl_("G", &c__0, &c__0, &anorm, &ssfmin, &i__1, &c__1, &d__[l], n, info); i__1 = lend - l; dlascl_("G", &c__0, &c__0, &anorm, &ssfmin, &i__1, &c__1, &e[l], n, info); } /* Choose between QL and QR iteration */ if ((d__1 = d__[lend], abs(d__1)) < (d__2 = d__[l], abs(d__2))) { lend = lsv; l = lendsv; } if (lend > l) { /* QL Iteration Look for small subdiagonal element. */ L40: if (l != lend) { lendm1 = lend - 1; i__1 = lendm1; for (m = l; m <= i__1; ++m) { /* Computing 2nd power */ d__2 = (d__1 = e[m], abs(d__1)); tst = d__2 * d__2; if (tst <= eps2 * (d__1 = d__[m], abs(d__1)) * (d__2 = d__[m + 1], abs(d__2)) + safmin) { goto L60; } /* L50: */ } } m = lend; L60: if (m < lend) { e[m] = 0.; } p = d__[l]; if (m == l) { goto L80; } /* If remaining matrix is 2-by-2, use DLAE2 or SLAEV2 to compute its eigensystem. */ if (m == l + 1) { if (icompz > 0) { dlaev2_(&d__[l], &e[l], &d__[l + 1], &rt1, &rt2, &c__, &s); work[l] = c__; work[*n - 1 + l] = s; dlasr_("R", "V", "B", n, &c__2, &work[l], &work[*n - 1 + l], & z__[l * z_dim1 + 1], ldz); } else { dlae2_(&d__[l], &e[l], &d__[l + 1], &rt1, &rt2); } d__[l] = rt1; d__[l + 1] = rt2; e[l] = 0.; l += 2; if (l <= lend) { goto L40; } goto L140; } if (jtot == nmaxit) { goto L140; } ++jtot; /* Form shift. */ g = (d__[l + 1] - p) / (e[l] * 2.); r__ = dlapy2_(&g, &c_b15); g = d__[m] - p + e[l] / (g + d_sign(&r__, &g)); s = 1.; c__ = 1.; p = 0.; /* Inner loop */ mm1 = m - 1; i__1 = l; for (i__ = mm1; i__ >= i__1; --i__) { f = s * e[i__]; b = c__ * e[i__]; dlartg_(&g, &f, &c__, &s, &r__); if (i__ != m - 1) { e[i__ + 1] = r__; } g = d__[i__ + 1] - p; r__ = (d__[i__] - g) * s + c__ * 2. * b; p = s * r__; d__[i__ + 1] = g + p; g = c__ * r__ - b; /* If eigenvectors are desired, then save rotations. */ if (icompz > 0) { work[i__] = c__; work[*n - 1 + i__] = -s; } /* L70: */ } /* If eigenvectors are desired, then apply saved rotations. */ if (icompz > 0) { mm = m - l + 1; dlasr_("R", "V", "B", n, &mm, &work[l], &work[*n - 1 + l], &z__[l * z_dim1 + 1], ldz); } d__[l] -= p; e[l] = g; goto L40; /* Eigenvalue found. 
*/ L80: d__[l] = p; ++l; if (l <= lend) { goto L40; } goto L140; } else { /* QR Iteration Look for small superdiagonal element. */ L90: if (l != lend) { lendp1 = lend + 1; i__1 = lendp1; for (m = l; m >= i__1; --m) { /* Computing 2nd power */ d__2 = (d__1 = e[m - 1], abs(d__1)); tst = d__2 * d__2; if (tst <= eps2 * (d__1 = d__[m], abs(d__1)) * (d__2 = d__[m - 1], abs(d__2)) + safmin) { goto L110; } /* L100: */ } } m = lend; L110: if (m > lend) { e[m - 1] = 0.; } p = d__[l]; if (m == l) { goto L130; } /* If remaining matrix is 2-by-2, use DLAE2 or SLAEV2 to compute its eigensystem. */ if (m == l - 1) { if (icompz > 0) { dlaev2_(&d__[l - 1], &e[l - 1], &d__[l], &rt1, &rt2, &c__, &s) ; work[m] = c__; work[*n - 1 + m] = s; dlasr_("R", "V", "F", n, &c__2, &work[m], &work[*n - 1 + m], & z__[(l - 1) * z_dim1 + 1], ldz); } else { dlae2_(&d__[l - 1], &e[l - 1], &d__[l], &rt1, &rt2); } d__[l - 1] = rt1; d__[l] = rt2; e[l - 1] = 0.; l += -2; if (l >= lend) { goto L90; } goto L140; } if (jtot == nmaxit) { goto L140; } ++jtot; /* Form shift. */ g = (d__[l - 1] - p) / (e[l - 1] * 2.); r__ = dlapy2_(&g, &c_b15); g = d__[m] - p + e[l - 1] / (g + d_sign(&r__, &g)); s = 1.; c__ = 1.; p = 0.; /* Inner loop */ lm1 = l - 1; i__1 = lm1; for (i__ = m; i__ <= i__1; ++i__) { f = s * e[i__]; b = c__ * e[i__]; dlartg_(&g, &f, &c__, &s, &r__); if (i__ != m) { e[i__ - 1] = r__; } g = d__[i__] - p; r__ = (d__[i__ + 1] - g) * s + c__ * 2. * b; p = s * r__; d__[i__] = g + p; g = c__ * r__ - b; /* If eigenvectors are desired, then save rotations. */ if (icompz > 0) { work[i__] = c__; work[*n - 1 + i__] = s; } /* L120: */ } /* If eigenvectors are desired, then apply saved rotations. */ if (icompz > 0) { mm = l - m + 1; dlasr_("R", "V", "F", n, &mm, &work[m], &work[*n - 1 + m], &z__[m * z_dim1 + 1], ldz); } d__[l] -= p; e[lm1] = g; goto L90; /* Eigenvalue found. */ L130: d__[l] = p; --l; if (l >= lend) { goto L90; } goto L140; } /* Undo scaling if necessary */ L140: if (iscale == 1) { i__1 = lendsv - lsv + 1; dlascl_("G", &c__0, &c__0, &ssfmax, &anorm, &i__1, &c__1, &d__[lsv], n, info); i__1 = lendsv - lsv; dlascl_("G", &c__0, &c__0, &ssfmax, &anorm, &i__1, &c__1, &e[lsv], n, info); } else if (iscale == 2) { i__1 = lendsv - lsv + 1; dlascl_("G", &c__0, &c__0, &ssfmin, &anorm, &i__1, &c__1, &d__[lsv], n, info); i__1 = lendsv - lsv; dlascl_("G", &c__0, &c__0, &ssfmin, &anorm, &i__1, &c__1, &e[lsv], n, info); } /* Check for no convergence to an eigenvalue after a total of N*MAXIT iterations. */ if (jtot < nmaxit) { goto L10; } i__1 = *n - 1; for (i__ = 1; i__ <= i__1; ++i__) { if (e[i__] != 0.) { ++(*info); } /* L150: */ } goto L190; /* Order eigenvalues and eigenvectors. 
*/ L160: if (icompz == 0) { /* Use Quick Sort */ dlasrt_("I", n, &d__[1], info); } else { /* Use Selection Sort to minimize swaps of eigenvectors */ i__1 = *n; for (ii = 2; ii <= i__1; ++ii) { i__ = ii - 1; k = i__; p = d__[i__]; i__2 = *n; for (j = ii; j <= i__2; ++j) { if (d__[j] < p) { k = j; p = d__[j]; } /* L170: */ } if (k != i__) { d__[k] = d__[i__]; d__[i__] = p; dswap_(n, &z__[i__ * z_dim1 + 1], &c__1, &z__[k * z_dim1 + 1], &c__1); } /* L180: */ } } L190: return 0; /* End of DSTEQR */ } /* dsteqr_ */ /* Subroutine */ int dsterf_(integer *n, doublereal *d__, doublereal *e, integer *info) { /* System generated locals */ integer i__1; doublereal d__1, d__2, d__3; /* Builtin functions */ double sqrt(doublereal), d_sign(doublereal *, doublereal *); /* Local variables */ static doublereal oldc; static integer lend, jtot; extern /* Subroutine */ int dlae2_(doublereal *, doublereal *, doublereal *, doublereal *, doublereal *); static doublereal c__; static integer i__, l, m; static doublereal p, gamma, r__, s, alpha, sigma, anorm; static integer l1; extern doublereal dlapy2_(doublereal *, doublereal *); static doublereal bb; static integer iscale; extern /* Subroutine */ int dlascl_(char *, integer *, integer *, doublereal *, doublereal *, integer *, integer *, doublereal *, integer *, integer *); static doublereal oldgam, safmin; extern /* Subroutine */ int xerbla_(char *, integer *); static doublereal safmax; extern doublereal dlanst_(char *, integer *, doublereal *, doublereal *); extern /* Subroutine */ int dlasrt_(char *, integer *, doublereal *, integer *); static integer lendsv; static doublereal ssfmin; static integer nmaxit; static doublereal ssfmax, rt1, rt2, eps, rte; static integer lsv; static doublereal eps2; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DSTERF computes all eigenvalues of a symmetric tridiagonal matrix using the Pal-Walker-Kahan variant of the QL or QR algorithm. Arguments ========= N (input) INTEGER The order of the matrix. N >= 0. D (input/output) DOUBLE PRECISION array, dimension (N) On entry, the n diagonal elements of the tridiagonal matrix. On exit, if INFO = 0, the eigenvalues in ascending order. E (input/output) DOUBLE PRECISION array, dimension (N-1) On entry, the (n-1) subdiagonal elements of the tridiagonal matrix. On exit, E has been destroyed. INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value > 0: the algorithm failed to find all of the eigenvalues in a total of 30*N iterations; if INFO = i, then i elements of E have not converged to zero. ===================================================================== Test the input parameters. */ /* Parameter adjustments */ --e; --d__; /* Function Body */ *info = 0; /* Quick return if possible */ if (*n < 0) { *info = -1; i__1 = -(*info); xerbla_("DSTERF", &i__1); return 0; } if (*n <= 1) { return 0; } /* Determine the unit roundoff for this environment. */ eps = EPSILON; /* Computing 2nd power */ d__1 = eps; eps2 = d__1 * d__1; safmin = SAFEMINIMUM; safmax = 1. / safmin; ssfmax = sqrt(safmax) / 3.; ssfmin = sqrt(safmin) / eps2; /* Compute the eigenvalues of the tridiagonal matrix. */ nmaxit = *n * 30; sigma = 0.; jtot = 0; /* Determine where the matrix splits and choose QL or QR iteration for each block, according to whether top or bottom diagonal element is smaller. 
*/ l1 = 1; L10: if (l1 > *n) { goto L170; } if (l1 > 1) { e[l1 - 1] = 0.; } i__1 = *n - 1; for (m = l1; m <= i__1; ++m) { if ((d__3 = e[m], abs(d__3)) <= sqrt((d__1 = d__[m], abs(d__1))) * sqrt((d__2 = d__[m + 1], abs(d__2))) * eps) { e[m] = 0.; goto L30; } /* L20: */ } m = *n; L30: l = l1; lsv = l; lend = m; lendsv = lend; l1 = m + 1; if (lend == l) { goto L10; } /* Scale submatrix in rows and columns L to LEND */ i__1 = lend - l + 1; anorm = dlanst_("I", &i__1, &d__[l], &e[l]); iscale = 0; if (anorm > ssfmax) { iscale = 1; i__1 = lend - l + 1; dlascl_("G", &c__0, &c__0, &anorm, &ssfmax, &i__1, &c__1, &d__[l], n, info); i__1 = lend - l; dlascl_("G", &c__0, &c__0, &anorm, &ssfmax, &i__1, &c__1, &e[l], n, info); } else if (anorm < ssfmin) { iscale = 2; i__1 = lend - l + 1; dlascl_("G", &c__0, &c__0, &anorm, &ssfmin, &i__1, &c__1, &d__[l], n, info); i__1 = lend - l; dlascl_("G", &c__0, &c__0, &anorm, &ssfmin, &i__1, &c__1, &e[l], n, info); } i__1 = lend - 1; for (i__ = l; i__ <= i__1; ++i__) { /* Computing 2nd power */ d__1 = e[i__]; e[i__] = d__1 * d__1; /* L40: */ } /* Choose between QL and QR iteration */ if ((d__1 = d__[lend], abs(d__1)) < (d__2 = d__[l], abs(d__2))) { lend = lsv; l = lendsv; } if (lend >= l) { /* QL Iteration Look for small subdiagonal element. */ L50: if (l != lend) { i__1 = lend - 1; for (m = l; m <= i__1; ++m) { if ((d__2 = e[m], abs(d__2)) <= eps2 * (d__1 = d__[m] * d__[m + 1], abs(d__1))) { goto L70; } /* L60: */ } } m = lend; L70: if (m < lend) { e[m] = 0.; } p = d__[l]; if (m == l) { goto L90; } /* If remaining matrix is 2 by 2, use DLAE2 to compute its eigenvalues. */ if (m == l + 1) { rte = sqrt(e[l]); dlae2_(&d__[l], &rte, &d__[l + 1], &rt1, &rt2); d__[l] = rt1; d__[l + 1] = rt2; e[l] = 0.; l += 2; if (l <= lend) { goto L50; } goto L150; } if (jtot == nmaxit) { goto L150; } ++jtot; /* Form shift. */ rte = sqrt(e[l]); sigma = (d__[l + 1] - p) / (rte * 2.); r__ = dlapy2_(&sigma, &c_b15); sigma = p - rte / (sigma + d_sign(&r__, &sigma)); c__ = 1.; s = 0.; gamma = d__[m] - sigma; p = gamma * gamma; /* Inner loop */ i__1 = l; for (i__ = m - 1; i__ >= i__1; --i__) { bb = e[i__]; r__ = p + bb; if (i__ != m - 1) { e[i__ + 1] = s * r__; } oldc = c__; c__ = p / r__; s = bb / r__; oldgam = gamma; alpha = d__[i__]; gamma = c__ * (alpha - sigma) - s * oldgam; d__[i__ + 1] = oldgam + (alpha - gamma); if (c__ != 0.) { p = gamma * gamma / c__; } else { p = oldc * bb; } /* L80: */ } e[l] = s * p; d__[l] = sigma + gamma; goto L50; /* Eigenvalue found. */ L90: d__[l] = p; ++l; if (l <= lend) { goto L50; } goto L150; } else { /* QR Iteration Look for small superdiagonal element. */ L100: i__1 = lend + 1; for (m = l; m >= i__1; --m) { if ((d__2 = e[m - 1], abs(d__2)) <= eps2 * (d__1 = d__[m] * d__[m - 1], abs(d__1))) { goto L120; } /* L110: */ } m = lend; L120: if (m > lend) { e[m - 1] = 0.; } p = d__[l]; if (m == l) { goto L140; } /* If remaining matrix is 2 by 2, use DLAE2 to compute its eigenvalues. */ if (m == l - 1) { rte = sqrt(e[l - 1]); dlae2_(&d__[l], &rte, &d__[l - 1], &rt1, &rt2); d__[l] = rt1; d__[l - 1] = rt2; e[l - 1] = 0.; l += -2; if (l >= lend) { goto L100; } goto L150; } if (jtot == nmaxit) { goto L150; } ++jtot; /* Form shift. 
*/ rte = sqrt(e[l - 1]); sigma = (d__[l - 1] - p) / (rte * 2.); r__ = dlapy2_(&sigma, &c_b15); sigma = p - rte / (sigma + d_sign(&r__, &sigma)); c__ = 1.; s = 0.; gamma = d__[m] - sigma; p = gamma * gamma; /* Inner loop */ i__1 = l - 1; for (i__ = m; i__ <= i__1; ++i__) { bb = e[i__]; r__ = p + bb; if (i__ != m) { e[i__ - 1] = s * r__; } oldc = c__; c__ = p / r__; s = bb / r__; oldgam = gamma; alpha = d__[i__ + 1]; gamma = c__ * (alpha - sigma) - s * oldgam; d__[i__] = oldgam + (alpha - gamma); if (c__ != 0.) { p = gamma * gamma / c__; } else { p = oldc * bb; } /* L130: */ } e[l - 1] = s * p; d__[l] = sigma + gamma; goto L100; /* Eigenvalue found. */ L140: d__[l] = p; --l; if (l >= lend) { goto L100; } goto L150; } /* Undo scaling if necessary */ L150: if (iscale == 1) { i__1 = lendsv - lsv + 1; dlascl_("G", &c__0, &c__0, &ssfmax, &anorm, &i__1, &c__1, &d__[lsv], n, info); } if (iscale == 2) { i__1 = lendsv - lsv + 1; dlascl_("G", &c__0, &c__0, &ssfmin, &anorm, &i__1, &c__1, &d__[lsv], n, info); } /* Check for no convergence to an eigenvalue after a total of N*MAXIT iterations. */ if (jtot < nmaxit) { goto L10; } i__1 = *n - 1; for (i__ = 1; i__ <= i__1; ++i__) { if (e[i__] != 0.) { ++(*info); } /* L160: */ } goto L180; /* Sort eigenvalues in increasing order. */ L170: dlasrt_("I", n, &d__[1], info); L180: return 0; /* End of DSTERF */ } /* dsterf_ */ /* Subroutine */ int dsyevd_(char *jobz, char *uplo, integer *n, doublereal * a, integer *lda, doublereal *w, doublereal *work, integer *lwork, integer *iwork, integer *liwork, integer *info) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2, i__3; doublereal d__1; /* Builtin functions */ double sqrt(doublereal); /* Local variables */ static integer inde; static doublereal anrm, rmin, rmax; static integer lopt; extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, integer *); static doublereal sigma; extern logical lsame_(char *, char *); static integer iinfo, lwmin, liopt; static logical lower, wantz; static integer indwk2, llwrk2; static integer iscale; extern /* Subroutine */ int dlascl_(char *, integer *, integer *, doublereal *, doublereal *, integer *, integer *, doublereal *, integer *, integer *), dstedc_(char *, integer *, doublereal *, doublereal *, doublereal *, integer *, doublereal *, integer *, integer *, integer *, integer *), dlacpy_( char *, integer *, integer *, doublereal *, integer *, doublereal *, integer *); static doublereal safmin; extern integer ilaenv_(integer *, char *, char *, integer *, integer *, integer *, integer *, ftnlen, ftnlen); extern /* Subroutine */ int xerbla_(char *, integer *); static doublereal bignum; static integer indtau; extern /* Subroutine */ int dsterf_(integer *, doublereal *, doublereal *, integer *); extern doublereal dlansy_(char *, char *, integer *, doublereal *, integer *, doublereal *); static integer indwrk, liwmin; extern /* Subroutine */ int dormtr_(char *, char *, char *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, integer *), dsytrd_(char *, integer *, doublereal *, integer *, doublereal *, doublereal *, doublereal *, doublereal *, integer *, integer *); static integer llwork; static doublereal smlnum; static logical lquery; static doublereal eps; /* -- LAPACK driver routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DSYEVD computes all eigenvalues and, optionally, eigenvectors of a real symmetric matrix A. 
If eigenvectors are desired, it uses a divide and conquer algorithm. The divide and conquer algorithm makes very mild assumptions about floating point arithmetic. It will work on machines with a guard digit in add/subtract, or on those binary machines without guard digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or Cray-2. It could conceivably fail on hexadecimal or decimal machines without guard digits, but we know of none. Because of large use of BLAS of level 3, DSYEVD needs N**2 more workspace than DSYEVX. Arguments ========= JOBZ (input) CHARACTER*1 = 'N': Compute eigenvalues only; = 'V': Compute eigenvalues and eigenvectors. UPLO (input) CHARACTER*1 = 'U': Upper triangle of A is stored; = 'L': Lower triangle of A is stored. N (input) INTEGER The order of the matrix A. N >= 0. A (input/output) DOUBLE PRECISION array, dimension (LDA, N) On entry, the symmetric matrix A. If UPLO = 'U', the leading N-by-N upper triangular part of A contains the upper triangular part of the matrix A. If UPLO = 'L', the leading N-by-N lower triangular part of A contains the lower triangular part of the matrix A. On exit, if JOBZ = 'V', then if INFO = 0, A contains the orthonormal eigenvectors of the matrix A. If JOBZ = 'N', then on exit the lower triangle (if UPLO='L') or the upper triangle (if UPLO='U') of A, including the diagonal, is destroyed. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,N). W (output) DOUBLE PRECISION array, dimension (N) If INFO = 0, the eigenvalues in ascending order. WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK) On exit, if INFO = 0, WORK(1) returns the optimal LWORK. LWORK (input) INTEGER The dimension of the array WORK. If N <= 1, LWORK must be at least 1. If JOBZ = 'N' and N > 1, LWORK must be at least 2*N+1. If JOBZ = 'V' and N > 1, LWORK must be at least 1 + 6*N + 2*N**2. If LWORK = -1, then a workspace query is assumed; the routine only calculates the optimal sizes of the WORK and IWORK arrays, returns these values as the first entries of the WORK and IWORK arrays, and no error message related to LWORK or LIWORK is issued by XERBLA. IWORK (workspace/output) INTEGER array, dimension (MAX(1,LIWORK)) On exit, if INFO = 0, IWORK(1) returns the optimal LIWORK. LIWORK (input) INTEGER The dimension of the array IWORK. If N <= 1, LIWORK must be at least 1. If JOBZ = 'N' and N > 1, LIWORK must be at least 1. If JOBZ = 'V' and N > 1, LIWORK must be at least 3 + 5*N. If LIWORK = -1, then a workspace query is assumed; the routine only calculates the optimal sizes of the WORK and IWORK arrays, returns these values as the first entries of the WORK and IWORK arrays, and no error message related to LWORK or LIWORK is issued by XERBLA. INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value > 0: if INFO = i and JOBZ = 'N', then the algorithm failed to converge; i off-diagonal elements of an intermediate tridiagonal form did not converge to zero; if INFO = i and JOBZ = 'V', then the algorithm failed to compute an eigenvalue while working on the submatrix lying in rows and columns INFO/(N+1) through mod(INFO,N+1). Further Details =============== Based on contributions by Jeff Rutter, Computer Science Division, University of California at Berkeley, USA Modified by Francoise Tisseur, University of Tennessee. Modified description of INFO. Sven, 16 Feb 05. ===================================================================== Test the input parameters. 
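
   Illustrative usage (editorial addition)
   =======================================

   The sketch below is not part of the original LAPACK documentation; it is
   only a hedged illustration of a typical calling sequence for the f2c
   translation of DSYEVD defined in this file. It assumes the "integer" and
   "doublereal" typedefs provided by this file's f2c headers, <stdlib.h> for
   malloc/free, and Fortran-style column-major storage of the symmetric
   matrix A. As described above, a first call with LWORK = LIWORK = -1 is a
   workspace query; the optimal sizes are returned in WORK(1) and IWORK(1).

       integer n = 3, lda = 3, info, lwork = -1, liwork = -1, iwkopt;
       doublereal a[9] = {2., 1., 0.,  1., 2., 1.,  0., 1., 2.};
       doublereal w[3], wkopt;
       doublereal *work;
       integer *iwork;

       dsyevd_("V", "U", &n, a, &lda, w, &wkopt, &lwork, &iwkopt, &liwork, &info);
       lwork = (integer) wkopt;
       liwork = iwkopt;
       work = (doublereal *) malloc(lwork * sizeof(doublereal));
       iwork = (integer *) malloc(liwork * sizeof(integer));
       dsyevd_("V", "U", &n, a, &lda, w, work, &lwork, iwork, &liwork, &info);
       free(work);
       free(iwork);

   On successful exit (INFO = 0), W holds the eigenvalues in ascending order
   and, because JOBZ = 'V', A is overwritten with the orthonormal
   eigenvectors.
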
*/ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --w; --work; --iwork; /* Function Body */ wantz = lsame_(jobz, "V"); lower = lsame_(uplo, "L"); lquery = *lwork == -1 || *liwork == -1; *info = 0; if (! (wantz || lsame_(jobz, "N"))) { *info = -1; } else if (! (lower || lsame_(uplo, "U"))) { *info = -2; } else if (*n < 0) { *info = -3; } else if (*lda < max(1,*n)) { *info = -5; } if (*info == 0) { if (*n <= 1) { liwmin = 1; lwmin = 1; lopt = lwmin; liopt = liwmin; } else { if (wantz) { liwmin = *n * 5 + 3; /* Computing 2nd power */ i__1 = *n; lwmin = *n * 6 + 1 + (i__1 * i__1 << 1); } else { liwmin = 1; lwmin = (*n << 1) + 1; } /* Computing MAX */ i__1 = lwmin, i__2 = (*n << 1) + ilaenv_(&c__1, "DSYTRD", uplo, n, &c_n1, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1); lopt = max(i__1,i__2); liopt = liwmin; } work[1] = (doublereal) lopt; iwork[1] = liopt; if (*lwork < lwmin && ! lquery) { *info = -8; } else if (*liwork < liwmin && ! lquery) { *info = -10; } } if (*info != 0) { i__1 = -(*info); xerbla_("DSYEVD", &i__1); return 0; } else if (lquery) { return 0; } /* Quick return if possible */ if (*n == 0) { return 0; } if (*n == 1) { w[1] = a[a_dim1 + 1]; if (wantz) { a[a_dim1 + 1] = 1.; } return 0; } /* Get machine constants. */ safmin = SAFEMINIMUM; eps = PRECISION; smlnum = safmin / eps; bignum = 1. / smlnum; rmin = sqrt(smlnum); rmax = sqrt(bignum); /* Scale matrix to allowable range, if necessary. */ anrm = dlansy_("M", uplo, n, &a[a_offset], lda, &work[1]); iscale = 0; if (anrm > 0. && anrm < rmin) { iscale = 1; sigma = rmin / anrm; } else if (anrm > rmax) { iscale = 1; sigma = rmax / anrm; } if (iscale == 1) { dlascl_(uplo, &c__0, &c__0, &c_b15, &sigma, n, n, &a[a_offset], lda, info); } /* Call DSYTRD to reduce symmetric matrix to tridiagonal form. */ inde = 1; indtau = inde + *n; indwrk = indtau + *n; llwork = *lwork - indwrk + 1; indwk2 = indwrk + *n * *n; llwrk2 = *lwork - indwk2 + 1; dsytrd_(uplo, n, &a[a_offset], lda, &w[1], &work[inde], &work[indtau], & work[indwrk], &llwork, &iinfo); lopt = (integer) ((*n << 1) + work[indwrk]); /* For eigenvalues only, call DSTERF. For eigenvectors, first call DSTEDC to generate the eigenvector matrix, WORK(INDWRK), of the tridiagonal matrix, then call DORMTR to multiply it by the Householder transformations stored in A. */ if (! wantz) { dsterf_(n, &w[1], &work[inde], info); } else { dstedc_("I", n, &w[1], &work[inde], &work[indwrk], n, &work[indwk2], & llwrk2, &iwork[1], liwork, info); dormtr_("L", uplo, "N", n, n, &a[a_offset], lda, &work[indtau], &work[ indwrk], n, &work[indwk2], &llwrk2, &iinfo); dlacpy_("A", n, n, &work[indwrk], n, &a[a_offset], lda); /* Computing MAX Computing 2nd power */ i__3 = *n; i__1 = lopt, i__2 = *n * 6 + 1 + (i__3 * i__3 << 1); lopt = max(i__1,i__2); } /* If matrix was scaled, then rescale eigenvalues appropriately. */ if (iscale == 1) { d__1 = 1. 
/ sigma; dscal_(n, &d__1, &w[1], &c__1); } work[1] = (doublereal) lopt; iwork[1] = liopt; return 0; /* End of DSYEVD */ } /* dsyevd_ */ /* Subroutine */ int dsytd2_(char *uplo, integer *n, doublereal *a, integer * lda, doublereal *d__, doublereal *e, doublereal *tau, integer *info) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2, i__3; /* Local variables */ extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *, integer *); static doublereal taui; extern /* Subroutine */ int dsyr2_(char *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, integer *); static integer i__; static doublereal alpha; extern logical lsame_(char *, char *); extern /* Subroutine */ int daxpy_(integer *, doublereal *, doublereal *, integer *, doublereal *, integer *); static logical upper; extern /* Subroutine */ int dsymv_(char *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *), dlarfg_(integer *, doublereal *, doublereal *, integer *, doublereal *), xerbla_(char *, integer * ); /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DSYTD2 reduces a real symmetric matrix A to symmetric tridiagonal form T by an orthogonal similarity transformation: Q' * A * Q = T. Arguments ========= UPLO (input) CHARACTER*1 Specifies whether the upper or lower triangular part of the symmetric matrix A is stored: = 'U': Upper triangular = 'L': Lower triangular N (input) INTEGER The order of the matrix A. N >= 0. A (input/output) DOUBLE PRECISION array, dimension (LDA,N) On entry, the symmetric matrix A. If UPLO = 'U', the leading n-by-n upper triangular part of A contains the upper triangular part of the matrix A, and the strictly lower triangular part of A is not referenced. If UPLO = 'L', the leading n-by-n lower triangular part of A contains the lower triangular part of the matrix A, and the strictly upper triangular part of A is not referenced. On exit, if UPLO = 'U', the diagonal and first superdiagonal of A are overwritten by the corresponding elements of the tridiagonal matrix T, and the elements above the first superdiagonal, with the array TAU, represent the orthogonal matrix Q as a product of elementary reflectors; if UPLO = 'L', the diagonal and first subdiagonal of A are over- written by the corresponding elements of the tridiagonal matrix T, and the elements below the first subdiagonal, with the array TAU, represent the orthogonal matrix Q as a product of elementary reflectors. See Further Details. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,N). D (output) DOUBLE PRECISION array, dimension (N) The diagonal elements of the tridiagonal matrix T: D(i) = A(i,i). E (output) DOUBLE PRECISION array, dimension (N-1) The off-diagonal elements of the tridiagonal matrix T: E(i) = A(i,i+1) if UPLO = 'U', E(i) = A(i+1,i) if UPLO = 'L'. TAU (output) DOUBLE PRECISION array, dimension (N-1) The scalar factors of the elementary reflectors (see Further Details). INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value. Further Details =============== If UPLO = 'U', the matrix Q is represented as a product of elementary reflectors Q = H(n-1) . . . H(2) H(1). Each H(i) has the form H(i) = I - tau * v * v' where tau is a real scalar, and v is a real vector with v(i+1:n) = 0 and v(i) = 1; v(1:i-1) is stored on exit in A(1:i-1,i+1), and tau in TAU(i). 
If UPLO = 'L', the matrix Q is represented as a product of elementary reflectors Q = H(1) H(2) . . . H(n-1). Each H(i) has the form H(i) = I - tau * v * v' where tau is a real scalar, and v is a real vector with v(1:i) = 0 and v(i+1) = 1; v(i+2:n) is stored on exit in A(i+2:n,i), and tau in TAU(i). The contents of A on exit are illustrated by the following examples with n = 5: if UPLO = 'U': if UPLO = 'L': ( d e v2 v3 v4 ) ( d ) ( d e v3 v4 ) ( e d ) ( d e v4 ) ( v1 e d ) ( d e ) ( v1 v2 e d ) ( d ) ( v1 v2 v3 e d ) where d and e denote diagonal and off-diagonal elements of T, and vi denotes an element of the vector defining H(i). ===================================================================== Test the input parameters */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --d__; --e; --tau; /* Function Body */ *info = 0; upper = lsame_(uplo, "U"); if (! upper && ! lsame_(uplo, "L")) { *info = -1; } else if (*n < 0) { *info = -2; } else if (*lda < max(1,*n)) { *info = -4; } if (*info != 0) { i__1 = -(*info); xerbla_("DSYTD2", &i__1); return 0; } /* Quick return if possible */ if (*n <= 0) { return 0; } if (upper) { /* Reduce the upper triangle of A */ for (i__ = *n - 1; i__ >= 1; --i__) { /* Generate elementary reflector H(i) = I - tau * v * v' to annihilate A(1:i-1,i+1) */ dlarfg_(&i__, &a[i__ + (i__ + 1) * a_dim1], &a[(i__ + 1) * a_dim1 + 1], &c__1, &taui); e[i__] = a[i__ + (i__ + 1) * a_dim1]; if (taui != 0.) { /* Apply H(i) from both sides to A(1:i,1:i) */ a[i__ + (i__ + 1) * a_dim1] = 1.; /* Compute x := tau * A * v storing x in TAU(1:i) */ dsymv_(uplo, &i__, &taui, &a[a_offset], lda, &a[(i__ + 1) * a_dim1 + 1], &c__1, &c_b29, &tau[1], &c__1) ; /* Compute w := x - 1/2 * tau * (x'*v) * v */ alpha = taui * -.5 * ddot_(&i__, &tau[1], &c__1, &a[(i__ + 1) * a_dim1 + 1], &c__1); daxpy_(&i__, &alpha, &a[(i__ + 1) * a_dim1 + 1], &c__1, &tau[ 1], &c__1); /* Apply the transformation as a rank-2 update: A := A - v * w' - w * v' */ dsyr2_(uplo, &i__, &c_b151, &a[(i__ + 1) * a_dim1 + 1], &c__1, &tau[1], &c__1, &a[a_offset], lda); a[i__ + (i__ + 1) * a_dim1] = e[i__]; } d__[i__ + 1] = a[i__ + 1 + (i__ + 1) * a_dim1]; tau[i__] = taui; /* L10: */ } d__[1] = a[a_dim1 + 1]; } else { /* Reduce the lower triangle of A */ i__1 = *n - 1; for (i__ = 1; i__ <= i__1; ++i__) { /* Generate elementary reflector H(i) = I - tau * v * v' to annihilate A(i+2:n,i) */ i__2 = *n - i__; /* Computing MIN */ i__3 = i__ + 2; dlarfg_(&i__2, &a[i__ + 1 + i__ * a_dim1], &a[min(i__3,*n) + i__ * a_dim1], &c__1, &taui); e[i__] = a[i__ + 1 + i__ * a_dim1]; if (taui != 0.) 
{ /* Apply H(i) from both sides to A(i+1:n,i+1:n) */ a[i__ + 1 + i__ * a_dim1] = 1.; /* Compute x := tau * A * v storing y in TAU(i:n-1) */ i__2 = *n - i__; dsymv_(uplo, &i__2, &taui, &a[i__ + 1 + (i__ + 1) * a_dim1], lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &c_b29, &tau[ i__], &c__1); /* Compute w := x - 1/2 * tau * (x'*v) * v */ i__2 = *n - i__; alpha = taui * -.5 * ddot_(&i__2, &tau[i__], &c__1, &a[i__ + 1 + i__ * a_dim1], &c__1); i__2 = *n - i__; daxpy_(&i__2, &alpha, &a[i__ + 1 + i__ * a_dim1], &c__1, &tau[ i__], &c__1); /* Apply the transformation as a rank-2 update: A := A - v * w' - w * v' */ i__2 = *n - i__; dsyr2_(uplo, &i__2, &c_b151, &a[i__ + 1 + i__ * a_dim1], & c__1, &tau[i__], &c__1, &a[i__ + 1 + (i__ + 1) * a_dim1], lda); a[i__ + 1 + i__ * a_dim1] = e[i__]; } d__[i__] = a[i__ + i__ * a_dim1]; tau[i__] = taui; /* L20: */ } d__[*n] = a[*n + *n * a_dim1]; } return 0; /* End of DSYTD2 */ } /* dsytd2_ */ /* Subroutine */ int dsytrd_(char *uplo, integer *n, doublereal *a, integer * lda, doublereal *d__, doublereal *e, doublereal *tau, doublereal * work, integer *lwork, integer *info) { /* System generated locals */ integer a_dim1, a_offset, i__1, i__2, i__3; /* Local variables */ static integer i__, j; extern logical lsame_(char *, char *); static integer nbmin, iinfo; static logical upper; extern /* Subroutine */ int dsytd2_(char *, integer *, doublereal *, integer *, doublereal *, doublereal *, doublereal *, integer *), dsyr2k_(char *, char *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *); static integer nb, kk, nx; extern /* Subroutine */ int dlatrd_(char *, integer *, integer *, doublereal *, integer *, doublereal *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *); extern integer ilaenv_(integer *, char *, char *, integer *, integer *, integer *, integer *, ftnlen, ftnlen); static integer ldwork, lwkopt; static logical lquery; static integer iws; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DSYTRD reduces a real symmetric matrix A to real symmetric tridiagonal form T by an orthogonal similarity transformation: Q**T * A * Q = T. Arguments ========= UPLO (input) CHARACTER*1 = 'U': Upper triangle of A is stored; = 'L': Lower triangle of A is stored. N (input) INTEGER The order of the matrix A. N >= 0. A (input/output) DOUBLE PRECISION array, dimension (LDA,N) On entry, the symmetric matrix A. If UPLO = 'U', the leading N-by-N upper triangular part of A contains the upper triangular part of the matrix A, and the strictly lower triangular part of A is not referenced. If UPLO = 'L', the leading N-by-N lower triangular part of A contains the lower triangular part of the matrix A, and the strictly upper triangular part of A is not referenced. On exit, if UPLO = 'U', the diagonal and first superdiagonal of A are overwritten by the corresponding elements of the tridiagonal matrix T, and the elements above the first superdiagonal, with the array TAU, represent the orthogonal matrix Q as a product of elementary reflectors; if UPLO = 'L', the diagonal and first subdiagonal of A are over- written by the corresponding elements of the tridiagonal matrix T, and the elements below the first subdiagonal, with the array TAU, represent the orthogonal matrix Q as a product of elementary reflectors. See Further Details. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,N). 
D (output) DOUBLE PRECISION array, dimension (N) The diagonal elements of the tridiagonal matrix T: D(i) = A(i,i). E (output) DOUBLE PRECISION array, dimension (N-1) The off-diagonal elements of the tridiagonal matrix T: E(i) = A(i,i+1) if UPLO = 'U', E(i) = A(i+1,i) if UPLO = 'L'. TAU (output) DOUBLE PRECISION array, dimension (N-1) The scalar factors of the elementary reflectors (see Further Details). WORK (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK)) On exit, if INFO = 0, WORK(1) returns the optimal LWORK. LWORK (input) INTEGER The dimension of the array WORK. LWORK >= 1. For optimum performance LWORK >= N*NB, where NB is the optimal blocksize. If LWORK = -1, then a workspace query is assumed; the routine only calculates the optimal size of the WORK array, returns this value as the first entry of the WORK array, and no error message related to LWORK is issued by XERBLA. INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value Further Details =============== If UPLO = 'U', the matrix Q is represented as a product of elementary reflectors Q = H(n-1) . . . H(2) H(1). Each H(i) has the form H(i) = I - tau * v * v' where tau is a real scalar, and v is a real vector with v(i+1:n) = 0 and v(i) = 1; v(1:i-1) is stored on exit in A(1:i-1,i+1), and tau in TAU(i). If UPLO = 'L', the matrix Q is represented as a product of elementary reflectors Q = H(1) H(2) . . . H(n-1). Each H(i) has the form H(i) = I - tau * v * v' where tau is a real scalar, and v is a real vector with v(1:i) = 0 and v(i+1) = 1; v(i+2:n) is stored on exit in A(i+2:n,i), and tau in TAU(i). The contents of A on exit are illustrated by the following examples with n = 5: if UPLO = 'U': if UPLO = 'L': ( d e v2 v3 v4 ) ( d ) ( d e v3 v4 ) ( e d ) ( d e v4 ) ( v1 e d ) ( d e ) ( v1 v2 e d ) ( d ) ( v1 v2 v3 e d ) where d and e denote diagonal and off-diagonal elements of T, and vi denotes an element of the vector defining H(i). ===================================================================== Test the input parameters */ /* Parameter adjustments */ a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; --d__; --e; --tau; --work; /* Function Body */ *info = 0; upper = lsame_(uplo, "U"); lquery = *lwork == -1; if (! upper && ! lsame_(uplo, "L")) { *info = -1; } else if (*n < 0) { *info = -2; } else if (*lda < max(1,*n)) { *info = -4; } else if (*lwork < 1 && ! lquery) { *info = -9; } if (*info == 0) { /* Determine the block size. */ nb = ilaenv_(&c__1, "DSYTRD", uplo, n, &c_n1, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1); lwkopt = *n * nb; work[1] = (doublereal) lwkopt; } if (*info != 0) { i__1 = -(*info); xerbla_("DSYTRD", &i__1); return 0; } else if (lquery) { return 0; } /* Quick return if possible */ if (*n == 0) { work[1] = 1.; return 0; } nx = *n; iws = 1; if (nb > 1 && nb < *n) { /* Determine when to cross over from blocked to unblocked code (last block is always handled by unblocked code). Computing MAX */ i__1 = nb, i__2 = ilaenv_(&c__3, "DSYTRD", uplo, n, &c_n1, &c_n1, & c_n1, (ftnlen)6, (ftnlen)1); nx = max(i__1,i__2); if (nx < *n) { /* Determine if workspace is large enough for blocked code. */ ldwork = *n; iws = ldwork * nb; if (*lwork < iws) { /* Not enough workspace to use optimal NB: determine the minimum value of NB, and reduce NB or force use of unblocked code by setting NX = N. 
Computing MAX */ i__1 = *lwork / ldwork; nb = max(i__1,1); nbmin = ilaenv_(&c__2, "DSYTRD", uplo, n, &c_n1, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1); if (nb < nbmin) { nx = *n; } } } else { nx = *n; } } else { nb = 1; } if (upper) { /* Reduce the upper triangle of A. Columns 1:kk are handled by the unblocked method. */ kk = *n - (*n - nx + nb - 1) / nb * nb; i__1 = kk + 1; i__2 = -nb; for (i__ = *n - nb + 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) { /* Reduce columns i:i+nb-1 to tridiagonal form and form the matrix W which is needed to update the unreduced part of the matrix */ i__3 = i__ + nb - 1; dlatrd_(uplo, &i__3, &nb, &a[a_offset], lda, &e[1], &tau[1], & work[1], &ldwork); /* Update the unreduced submatrix A(1:i-1,1:i-1), using an update of the form: A := A - V*W' - W*V' */ i__3 = i__ - 1; dsyr2k_(uplo, "No transpose", &i__3, &nb, &c_b151, &a[i__ * a_dim1 + 1], lda, &work[1], &ldwork, &c_b15, &a[a_offset], lda); /* Copy superdiagonal elements back into A, and diagonal elements into D */ i__3 = i__ + nb - 1; for (j = i__; j <= i__3; ++j) { a[j - 1 + j * a_dim1] = e[j - 1]; d__[j] = a[j + j * a_dim1]; /* L10: */ } /* L20: */ } /* Use unblocked code to reduce the last or only block */ dsytd2_(uplo, &kk, &a[a_offset], lda, &d__[1], &e[1], &tau[1], &iinfo); } else { /* Reduce the lower triangle of A */ i__2 = *n - nx; i__1 = nb; for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ += i__1) { /* Reduce columns i:i+nb-1 to tridiagonal form and form the matrix W which is needed to update the unreduced part of the matrix */ i__3 = *n - i__ + 1; dlatrd_(uplo, &i__3, &nb, &a[i__ + i__ * a_dim1], lda, &e[i__], & tau[i__], &work[1], &ldwork); /* Update the unreduced submatrix A(i+ib:n,i+ib:n), using an update of the form: A := A - V*W' - W*V' */ i__3 = *n - i__ - nb + 1; dsyr2k_(uplo, "No transpose", &i__3, &nb, &c_b151, &a[i__ + nb + i__ * a_dim1], lda, &work[nb + 1], &ldwork, &c_b15, &a[ i__ + nb + (i__ + nb) * a_dim1], lda); /* Copy subdiagonal elements back into A, and diagonal elements into D */ i__3 = i__ + nb - 1; for (j = i__; j <= i__3; ++j) { a[j + 1 + j * a_dim1] = e[j]; d__[j] = a[j + j * a_dim1]; /* L30: */ } /* L40: */ } /* Use unblocked code to reduce the last or only block */ i__1 = *n - i__ + 1; dsytd2_(uplo, &i__1, &a[i__ + i__ * a_dim1], lda, &d__[i__], &e[i__], &tau[i__], &iinfo); } work[1] = (doublereal) lwkopt; return 0; /* End of DSYTRD */ } /* dsytrd_ */ /* Subroutine */ int dtrevc_(char *side, char *howmny, logical *select, integer *n, doublereal *t, integer *ldt, doublereal *vl, integer * ldvl, doublereal *vr, integer *ldvr, integer *mm, integer *m, doublereal *work, integer *info) { /* System generated locals */ integer t_dim1, t_offset, vl_dim1, vl_offset, vr_dim1, vr_offset, i__1, i__2, i__3; doublereal d__1, d__2, d__3, d__4; /* Builtin functions */ double sqrt(doublereal); /* Local variables */ static doublereal beta, emax; static logical pair; extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *, integer *); static logical allv; static integer ierr; static doublereal unfl, ovfl, smin; static logical over; static doublereal vmax; static integer jnxt, i__, j, k; extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *, integer *); static doublereal scale, x[4] /* was [2][2] */; extern logical lsame_(char *, char *); extern /* Subroutine */ int dgemv_(char *, integer *, integer *, doublereal *, doublereal *, integer *, doublereal *, integer *, doublereal *, doublereal *, integer *); static doublereal remax; extern /* 
Subroutine */ int dcopy_(integer *, doublereal *, integer *, doublereal *, integer *); static logical leftv, bothv; extern /* Subroutine */ int daxpy_(integer *, doublereal *, doublereal *, integer *, doublereal *, integer *); static doublereal vcrit; static logical somev; static integer j1, j2, n2; static doublereal xnorm; extern /* Subroutine */ int dlaln2_(logical *, integer *, integer *, doublereal *, doublereal *, doublereal *, integer *, doublereal *, doublereal *, doublereal *, integer *, doublereal *, doublereal * , doublereal *, integer *, doublereal *, doublereal *, integer *), dlabad_(doublereal *, doublereal *); static integer ii, ki; static integer ip, is; static doublereal wi; extern integer idamax_(integer *, doublereal *, integer *); static doublereal wr; extern /* Subroutine */ int xerbla_(char *, integer *); static doublereal bignum; static logical rightv; static doublereal smlnum, rec, ulp; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DTREVC computes some or all of the right and/or left eigenvectors of a real upper quasi-triangular matrix T. Matrices of this type are produced by the Schur factorization of a real general matrix: A = Q*T*Q**T, as computed by DHSEQR. The right eigenvector x and the left eigenvector y of T corresponding to an eigenvalue w are defined by: T*x = w*x, (y**H)*T = w*(y**H) where y**H denotes the conjugate transpose of y. The eigenvalues are not input to this routine, but are read directly from the diagonal blocks of T. This routine returns the matrices X and/or Y of right and left eigenvectors of T, or the products Q*X and/or Q*Y, where Q is an input matrix. If Q is the orthogonal factor that reduces a matrix A to Schur form T, then Q*X and Q*Y are the matrices of right and left eigenvectors of A. Arguments ========= SIDE (input) CHARACTER*1 = 'R': compute right eigenvectors only; = 'L': compute left eigenvectors only; = 'B': compute both right and left eigenvectors. HOWMNY (input) CHARACTER*1 = 'A': compute all right and/or left eigenvectors; = 'B': compute all right and/or left eigenvectors, backtransformed by the matrices in VR and/or VL; = 'S': compute selected right and/or left eigenvectors, as indicated by the logical array SELECT. SELECT (input/output) LOGICAL array, dimension (N) If HOWMNY = 'S', SELECT specifies the eigenvectors to be computed. If w(j) is a real eigenvalue, the corresponding real eigenvector is computed if SELECT(j) is .TRUE.. If w(j) and w(j+1) are the real and imaginary parts of a complex eigenvalue, the corresponding complex eigenvector is computed if either SELECT(j) or SELECT(j+1) is .TRUE., and on exit SELECT(j) is set to .TRUE. and SELECT(j+1) is set to .FALSE.. Not referenced if HOWMNY = 'A' or 'B'. N (input) INTEGER The order of the matrix T. N >= 0. T (input) DOUBLE PRECISION array, dimension (LDT,N) The upper quasi-triangular matrix T in Schur canonical form. LDT (input) INTEGER The leading dimension of the array T. LDT >= max(1,N). VL (input/output) DOUBLE PRECISION array, dimension (LDVL,MM) On entry, if SIDE = 'L' or 'B' and HOWMNY = 'B', VL must contain an N-by-N matrix Q (usually the orthogonal matrix Q of Schur vectors returned by DHSEQR). On exit, if SIDE = 'L' or 'B', VL contains: if HOWMNY = 'A', the matrix Y of left eigenvectors of T; if HOWMNY = 'B', the matrix Q*Y; if HOWMNY = 'S', the left eigenvectors of T specified by SELECT, stored consecutively in the columns of VL, in the same order as their eigenvalues. 
A complex eigenvector corresponding to a complex eigenvalue is stored in two consecutive columns, the first holding the real part, and the second the imaginary part. Not referenced if SIDE = 'R'. LDVL (input) INTEGER The leading dimension of the array VL. LDVL >= 1, and if SIDE = 'L' or 'B', LDVL >= N. VR (input/output) DOUBLE PRECISION array, dimension (LDVR,MM) On entry, if SIDE = 'R' or 'B' and HOWMNY = 'B', VR must contain an N-by-N matrix Q (usually the orthogonal matrix Q of Schur vectors returned by DHSEQR). On exit, if SIDE = 'R' or 'B', VR contains: if HOWMNY = 'A', the matrix X of right eigenvectors of T; if HOWMNY = 'B', the matrix Q*X; if HOWMNY = 'S', the right eigenvectors of T specified by SELECT, stored consecutively in the columns of VR, in the same order as their eigenvalues. A complex eigenvector corresponding to a complex eigenvalue is stored in two consecutive columns, the first holding the real part and the second the imaginary part. Not referenced if SIDE = 'L'. LDVR (input) INTEGER The leading dimension of the array VR. LDVR >= 1, and if SIDE = 'R' or 'B', LDVR >= N. MM (input) INTEGER The number of columns in the arrays VL and/or VR. MM >= M. M (output) INTEGER The number of columns in the arrays VL and/or VR actually used to store the eigenvectors. If HOWMNY = 'A' or 'B', M is set to N. Each selected real eigenvector occupies one column and each selected complex eigenvector occupies two columns. WORK (workspace) DOUBLE PRECISION array, dimension (3*N) INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value Further Details =============== The algorithm used in this program is basically backward (forward) substitution, with scaling to make the the code robust against possible overflow. Each eigenvector is normalized so that the element of largest magnitude has magnitude 1; here the magnitude of a complex number (x,y) is taken to be |x| + |y|. ===================================================================== Decode and test the input parameters */ /* Parameter adjustments */ --select; t_dim1 = *ldt; t_offset = 1 + t_dim1 * 1; t -= t_offset; vl_dim1 = *ldvl; vl_offset = 1 + vl_dim1 * 1; vl -= vl_offset; vr_dim1 = *ldvr; vr_offset = 1 + vr_dim1 * 1; vr -= vr_offset; --work; /* Function Body */ bothv = lsame_(side, "B"); rightv = lsame_(side, "R") || bothv; leftv = lsame_(side, "L") || bothv; allv = lsame_(howmny, "A"); over = lsame_(howmny, "B"); somev = lsame_(howmny, "S"); *info = 0; if (! rightv && ! leftv) { *info = -1; } else if (! allv && ! over && ! somev) { *info = -2; } else if (*n < 0) { *info = -4; } else if (*ldt < max(1,*n)) { *info = -6; } else if (*ldvl < 1 || leftv && *ldvl < *n) { *info = -8; } else if (*ldvr < 1 || rightv && *ldvr < *n) { *info = -10; } else { /* Set M to the number of columns required to store the selected eigenvectors, standardize the array SELECT if necessary, and test MM. */ if (somev) { *m = 0; pair = FALSE_; i__1 = *n; for (j = 1; j <= i__1; ++j) { if (pair) { pair = FALSE_; select[j] = FALSE_; } else { if (j < *n) { if (t[j + 1 + j * t_dim1] == 0.) { if (select[j]) { ++(*m); } } else { pair = TRUE_; if (select[j] || select[j + 1]) { select[j] = TRUE_; *m += 2; } } } else { if (select[*n]) { ++(*m); } } } /* L10: */ } } else { *m = *n; } if (*mm < *m) { *info = -11; } } if (*info != 0) { i__1 = -(*info); xerbla_("DTREVC", &i__1); return 0; } /* Quick return if possible. */ if (*n == 0) { return 0; } /* Set the constants to control overflow. */ unfl = SAFEMINIMUM; ovfl = 1. 
/ unfl; dlabad_(&unfl, &ovfl); ulp = PRECISION; smlnum = unfl * (*n / ulp); bignum = (1. - ulp) / smlnum; /* Compute 1-norm of each column of strictly upper triangular part of T to control overflow in triangular solver. */ work[1] = 0.; i__1 = *n; for (j = 2; j <= i__1; ++j) { work[j] = 0.; i__2 = j - 1; for (i__ = 1; i__ <= i__2; ++i__) { work[j] += (d__1 = t[i__ + j * t_dim1], abs(d__1)); /* L20: */ } /* L30: */ } /* Index IP is used to specify the real or complex eigenvalue: IP = 0, real eigenvalue, 1, first of conjugate complex pair: (wr,wi) -1, second of conjugate complex pair: (wr,wi) */ n2 = *n << 1; if (rightv) { /* Compute right eigenvectors. */ ip = 0; is = *m; for (ki = *n; ki >= 1; --ki) { if (ip == 1) { goto L130; } if (ki == 1) { goto L40; } if (t[ki + (ki - 1) * t_dim1] == 0.) { goto L40; } ip = -1; L40: if (somev) { if (ip == 0) { if (! select[ki]) { goto L130; } } else { if (! select[ki - 1]) { goto L130; } } } /* Compute the KI-th eigenvalue (WR,WI). */ wr = t[ki + ki * t_dim1]; wi = 0.; if (ip != 0) { wi = sqrt((d__1 = t[ki + (ki - 1) * t_dim1], abs(d__1))) * sqrt((d__2 = t[ki - 1 + ki * t_dim1], abs(d__2))); } /* Computing MAX */ d__1 = ulp * (abs(wr) + abs(wi)); smin = max(d__1,smlnum); if (ip == 0) { /* Real right eigenvector */ work[ki + *n] = 1.; /* Form right-hand side */ i__1 = ki - 1; for (k = 1; k <= i__1; ++k) { work[k + *n] = -t[k + ki * t_dim1]; /* L50: */ } /* Solve the upper quasi-triangular system: (T(1:KI-1,1:KI-1) - WR)*X = SCALE*WORK. */ jnxt = ki - 1; for (j = ki - 1; j >= 1; --j) { if (j > jnxt) { goto L60; } j1 = j; j2 = j; jnxt = j - 1; if (j > 1) { if (t[j + (j - 1) * t_dim1] != 0.) { j1 = j - 1; jnxt = j - 2; } } if (j1 == j2) { /* 1-by-1 diagonal block */ dlaln2_(&c_false, &c__1, &c__1, &smin, &c_b15, &t[j + j * t_dim1], ldt, &c_b15, &c_b15, &work[j + * n], n, &wr, &c_b29, x, &c__2, &scale, &xnorm, &ierr); /* Scale X(1,1) to avoid overflow when updating the right-hand side. */ if (xnorm > 1.) { if (work[j] > bignum / xnorm) { x[0] /= xnorm; scale /= xnorm; } } /* Scale if necessary */ if (scale != 1.) { dscal_(&ki, &scale, &work[*n + 1], &c__1); } work[j + *n] = x[0]; /* Update right-hand side */ i__1 = j - 1; d__1 = -x[0]; daxpy_(&i__1, &d__1, &t[j * t_dim1 + 1], &c__1, &work[ *n + 1], &c__1); } else { /* 2-by-2 diagonal block */ dlaln2_(&c_false, &c__2, &c__1, &smin, &c_b15, &t[j - 1 + (j - 1) * t_dim1], ldt, &c_b15, &c_b15, & work[j - 1 + *n], n, &wr, &c_b29, x, &c__2, & scale, &xnorm, &ierr); /* Scale X(1,1) and X(2,1) to avoid overflow when updating the right-hand side. */ if (xnorm > 1.) { /* Computing MAX */ d__1 = work[j - 1], d__2 = work[j]; beta = max(d__1,d__2); if (beta > bignum / xnorm) { x[0] /= xnorm; x[1] /= xnorm; scale /= xnorm; } } /* Scale if necessary */ if (scale != 1.) { dscal_(&ki, &scale, &work[*n + 1], &c__1); } work[j - 1 + *n] = x[0]; work[j + *n] = x[1]; /* Update right-hand side */ i__1 = j - 2; d__1 = -x[0]; daxpy_(&i__1, &d__1, &t[(j - 1) * t_dim1 + 1], &c__1, &work[*n + 1], &c__1); i__1 = j - 2; d__1 = -x[1]; daxpy_(&i__1, &d__1, &t[j * t_dim1 + 1], &c__1, &work[ *n + 1], &c__1); } L60: ; } /* Copy the vector x or Q*x to VR and normalize. */ if (! over) { dcopy_(&ki, &work[*n + 1], &c__1, &vr[is * vr_dim1 + 1], & c__1); ii = idamax_(&ki, &vr[is * vr_dim1 + 1], &c__1); remax = 1. 
/ (d__1 = vr[ii + is * vr_dim1], abs(d__1)); dscal_(&ki, &remax, &vr[is * vr_dim1 + 1], &c__1); i__1 = *n; for (k = ki + 1; k <= i__1; ++k) { vr[k + is * vr_dim1] = 0.; /* L70: */ } } else { if (ki > 1) { i__1 = ki - 1; dgemv_("N", n, &i__1, &c_b15, &vr[vr_offset], ldvr, & work[*n + 1], &c__1, &work[ki + *n], &vr[ki * vr_dim1 + 1], &c__1); } ii = idamax_(n, &vr[ki * vr_dim1 + 1], &c__1); remax = 1. / (d__1 = vr[ii + ki * vr_dim1], abs(d__1)); dscal_(n, &remax, &vr[ki * vr_dim1 + 1], &c__1); } } else { /* Complex right eigenvector. Initial solve [ (T(KI-1,KI-1) T(KI-1,KI) ) - (WR + I* WI)]*X = 0. [ (T(KI,KI-1) T(KI,KI) ) ] */ if ((d__1 = t[ki - 1 + ki * t_dim1], abs(d__1)) >= (d__2 = t[ ki + (ki - 1) * t_dim1], abs(d__2))) { work[ki - 1 + *n] = 1.; work[ki + n2] = wi / t[ki - 1 + ki * t_dim1]; } else { work[ki - 1 + *n] = -wi / t[ki + (ki - 1) * t_dim1]; work[ki + n2] = 1.; } work[ki + *n] = 0.; work[ki - 1 + n2] = 0.; /* Form right-hand side */ i__1 = ki - 2; for (k = 1; k <= i__1; ++k) { work[k + *n] = -work[ki - 1 + *n] * t[k + (ki - 1) * t_dim1]; work[k + n2] = -work[ki + n2] * t[k + ki * t_dim1]; /* L80: */ } /* Solve upper quasi-triangular system: (T(1:KI-2,1:KI-2) - (WR+i*WI))*X = SCALE*(WORK+i*WORK2) */ jnxt = ki - 2; for (j = ki - 2; j >= 1; --j) { if (j > jnxt) { goto L90; } j1 = j; j2 = j; jnxt = j - 1; if (j > 1) { if (t[j + (j - 1) * t_dim1] != 0.) { j1 = j - 1; jnxt = j - 2; } } if (j1 == j2) { /* 1-by-1 diagonal block */ dlaln2_(&c_false, &c__1, &c__2, &smin, &c_b15, &t[j + j * t_dim1], ldt, &c_b15, &c_b15, &work[j + * n], n, &wr, &wi, x, &c__2, &scale, &xnorm, & ierr); /* Scale X(1,1) and X(1,2) to avoid overflow when updating the right-hand side. */ if (xnorm > 1.) { if (work[j] > bignum / xnorm) { x[0] /= xnorm; x[2] /= xnorm; scale /= xnorm; } } /* Scale if necessary */ if (scale != 1.) { dscal_(&ki, &scale, &work[*n + 1], &c__1); dscal_(&ki, &scale, &work[n2 + 1], &c__1); } work[j + *n] = x[0]; work[j + n2] = x[2]; /* Update the right-hand side */ i__1 = j - 1; d__1 = -x[0]; daxpy_(&i__1, &d__1, &t[j * t_dim1 + 1], &c__1, &work[ *n + 1], &c__1); i__1 = j - 1; d__1 = -x[2]; daxpy_(&i__1, &d__1, &t[j * t_dim1 + 1], &c__1, &work[ n2 + 1], &c__1); } else { /* 2-by-2 diagonal block */ dlaln2_(&c_false, &c__2, &c__2, &smin, &c_b15, &t[j - 1 + (j - 1) * t_dim1], ldt, &c_b15, &c_b15, & work[j - 1 + *n], n, &wr, &wi, x, &c__2, & scale, &xnorm, &ierr); /* Scale X to avoid overflow when updating the right-hand side. */ if (xnorm > 1.) { /* Computing MAX */ d__1 = work[j - 1], d__2 = work[j]; beta = max(d__1,d__2); if (beta > bignum / xnorm) { rec = 1. / xnorm; x[0] *= rec; x[2] *= rec; x[1] *= rec; x[3] *= rec; scale *= rec; } } /* Scale if necessary */ if (scale != 1.) { dscal_(&ki, &scale, &work[*n + 1], &c__1); dscal_(&ki, &scale, &work[n2 + 1], &c__1); } work[j - 1 + *n] = x[0]; work[j + *n] = x[1]; work[j - 1 + n2] = x[2]; work[j + n2] = x[3]; /* Update the right-hand side */ i__1 = j - 2; d__1 = -x[0]; daxpy_(&i__1, &d__1, &t[(j - 1) * t_dim1 + 1], &c__1, &work[*n + 1], &c__1); i__1 = j - 2; d__1 = -x[1]; daxpy_(&i__1, &d__1, &t[j * t_dim1 + 1], &c__1, &work[ *n + 1], &c__1); i__1 = j - 2; d__1 = -x[2]; daxpy_(&i__1, &d__1, &t[(j - 1) * t_dim1 + 1], &c__1, &work[n2 + 1], &c__1); i__1 = j - 2; d__1 = -x[3]; daxpy_(&i__1, &d__1, &t[j * t_dim1 + 1], &c__1, &work[ n2 + 1], &c__1); } L90: ; } /* Copy the vector x or Q*x to VR and normalize. */ if (! 
over) { dcopy_(&ki, &work[*n + 1], &c__1, &vr[(is - 1) * vr_dim1 + 1], &c__1); dcopy_(&ki, &work[n2 + 1], &c__1, &vr[is * vr_dim1 + 1], & c__1); emax = 0.; i__1 = ki; for (k = 1; k <= i__1; ++k) { /* Computing MAX */ d__3 = emax, d__4 = (d__1 = vr[k + (is - 1) * vr_dim1] , abs(d__1)) + (d__2 = vr[k + is * vr_dim1], abs(d__2)); emax = max(d__3,d__4); /* L100: */ } remax = 1. / emax; dscal_(&ki, &remax, &vr[(is - 1) * vr_dim1 + 1], &c__1); dscal_(&ki, &remax, &vr[is * vr_dim1 + 1], &c__1); i__1 = *n; for (k = ki + 1; k <= i__1; ++k) { vr[k + (is - 1) * vr_dim1] = 0.; vr[k + is * vr_dim1] = 0.; /* L110: */ } } else { if (ki > 2) { i__1 = ki - 2; dgemv_("N", n, &i__1, &c_b15, &vr[vr_offset], ldvr, & work[*n + 1], &c__1, &work[ki - 1 + *n], &vr[( ki - 1) * vr_dim1 + 1], &c__1); i__1 = ki - 2; dgemv_("N", n, &i__1, &c_b15, &vr[vr_offset], ldvr, & work[n2 + 1], &c__1, &work[ki + n2], &vr[ki * vr_dim1 + 1], &c__1); } else { dscal_(n, &work[ki - 1 + *n], &vr[(ki - 1) * vr_dim1 + 1], &c__1); dscal_(n, &work[ki + n2], &vr[ki * vr_dim1 + 1], & c__1); } emax = 0.; i__1 = *n; for (k = 1; k <= i__1; ++k) { /* Computing MAX */ d__3 = emax, d__4 = (d__1 = vr[k + (ki - 1) * vr_dim1] , abs(d__1)) + (d__2 = vr[k + ki * vr_dim1], abs(d__2)); emax = max(d__3,d__4); /* L120: */ } remax = 1. / emax; dscal_(n, &remax, &vr[(ki - 1) * vr_dim1 + 1], &c__1); dscal_(n, &remax, &vr[ki * vr_dim1 + 1], &c__1); } } --is; if (ip != 0) { --is; } L130: if (ip == 1) { ip = 0; } if (ip == -1) { ip = 1; } /* L140: */ } } if (leftv) { /* Compute left eigenvectors. */ ip = 0; is = 1; i__1 = *n; for (ki = 1; ki <= i__1; ++ki) { if (ip == -1) { goto L250; } if (ki == *n) { goto L150; } if (t[ki + 1 + ki * t_dim1] == 0.) { goto L150; } ip = 1; L150: if (somev) { if (! select[ki]) { goto L250; } } /* Compute the KI-th eigenvalue (WR,WI). */ wr = t[ki + ki * t_dim1]; wi = 0.; if (ip != 0) { wi = sqrt((d__1 = t[ki + (ki + 1) * t_dim1], abs(d__1))) * sqrt((d__2 = t[ki + 1 + ki * t_dim1], abs(d__2))); } /* Computing MAX */ d__1 = ulp * (abs(wr) + abs(wi)); smin = max(d__1,smlnum); if (ip == 0) { /* Real left eigenvector. */ work[ki + *n] = 1.; /* Form right-hand side */ i__2 = *n; for (k = ki + 1; k <= i__2; ++k) { work[k + *n] = -t[ki + k * t_dim1]; /* L160: */ } /* Solve the quasi-triangular system: (T(KI+1:N,KI+1:N) - WR)'*X = SCALE*WORK */ vmax = 1.; vcrit = bignum; jnxt = ki + 1; i__2 = *n; for (j = ki + 1; j <= i__2; ++j) { if (j < jnxt) { goto L170; } j1 = j; j2 = j; jnxt = j + 1; if (j < *n) { if (t[j + 1 + j * t_dim1] != 0.) { j2 = j + 1; jnxt = j + 2; } } if (j1 == j2) { /* 1-by-1 diagonal block Scale if necessary to avoid overflow when forming the right-hand side. */ if (work[j] > vcrit) { rec = 1. / vmax; i__3 = *n - ki + 1; dscal_(&i__3, &rec, &work[ki + *n], &c__1); vmax = 1.; vcrit = bignum; } i__3 = j - ki - 1; work[j + *n] -= ddot_(&i__3, &t[ki + 1 + j * t_dim1], &c__1, &work[ki + 1 + *n], &c__1); /* Solve (T(J,J)-WR)'*X = WORK */ dlaln2_(&c_false, &c__1, &c__1, &smin, &c_b15, &t[j + j * t_dim1], ldt, &c_b15, &c_b15, &work[j + * n], n, &wr, &c_b29, x, &c__2, &scale, &xnorm, &ierr); /* Scale if necessary */ if (scale != 1.) { i__3 = *n - ki + 1; dscal_(&i__3, &scale, &work[ki + *n], &c__1); } work[j + *n] = x[0]; /* Computing MAX */ d__2 = (d__1 = work[j + *n], abs(d__1)); vmax = max(d__2,vmax); vcrit = bignum / vmax; } else { /* 2-by-2 diagonal block Scale if necessary to avoid overflow when forming the right-hand side. 
Computing MAX */ d__1 = work[j], d__2 = work[j + 1]; beta = max(d__1,d__2); if (beta > vcrit) { rec = 1. / vmax; i__3 = *n - ki + 1; dscal_(&i__3, &rec, &work[ki + *n], &c__1); vmax = 1.; vcrit = bignum; } i__3 = j - ki - 1; work[j + *n] -= ddot_(&i__3, &t[ki + 1 + j * t_dim1], &c__1, &work[ki + 1 + *n], &c__1); i__3 = j - ki - 1; work[j + 1 + *n] -= ddot_(&i__3, &t[ki + 1 + (j + 1) * t_dim1], &c__1, &work[ki + 1 + *n], &c__1); /* Solve [T(J,J)-WR T(J,J+1) ]'* X = SCALE*( WORK1 ) [T(J+1,J) T(J+1,J+1)-WR] ( WORK2 ) */ dlaln2_(&c_true, &c__2, &c__1, &smin, &c_b15, &t[j + j * t_dim1], ldt, &c_b15, &c_b15, &work[j + * n], n, &wr, &c_b29, x, &c__2, &scale, &xnorm, &ierr); /* Scale if necessary */ if (scale != 1.) { i__3 = *n - ki + 1; dscal_(&i__3, &scale, &work[ki + *n], &c__1); } work[j + *n] = x[0]; work[j + 1 + *n] = x[1]; /* Computing MAX */ d__3 = (d__1 = work[j + *n], abs(d__1)), d__4 = (d__2 = work[j + 1 + *n], abs(d__2)), d__3 = max( d__3,d__4); vmax = max(d__3,vmax); vcrit = bignum / vmax; } L170: ; } /* Copy the vector x or Q*x to VL and normalize. */ if (! over) { i__2 = *n - ki + 1; dcopy_(&i__2, &work[ki + *n], &c__1, &vl[ki + is * vl_dim1], &c__1); i__2 = *n - ki + 1; ii = idamax_(&i__2, &vl[ki + is * vl_dim1], &c__1) + ki - 1; remax = 1. / (d__1 = vl[ii + is * vl_dim1], abs(d__1)); i__2 = *n - ki + 1; dscal_(&i__2, &remax, &vl[ki + is * vl_dim1], &c__1); i__2 = ki - 1; for (k = 1; k <= i__2; ++k) { vl[k + is * vl_dim1] = 0.; /* L180: */ } } else { if (ki < *n) { i__2 = *n - ki; dgemv_("N", n, &i__2, &c_b15, &vl[(ki + 1) * vl_dim1 + 1], ldvl, &work[ki + 1 + *n], &c__1, &work[ ki + *n], &vl[ki * vl_dim1 + 1], &c__1); } ii = idamax_(n, &vl[ki * vl_dim1 + 1], &c__1); remax = 1. / (d__1 = vl[ii + ki * vl_dim1], abs(d__1)); dscal_(n, &remax, &vl[ki * vl_dim1 + 1], &c__1); } } else { /* Complex left eigenvector. Initial solve: ((T(KI,KI) T(KI,KI+1) )' - (WR - I* WI))*X = 0. ((T(KI+1,KI) T(KI+1,KI+1)) ) */ if ((d__1 = t[ki + (ki + 1) * t_dim1], abs(d__1)) >= (d__2 = t[ki + 1 + ki * t_dim1], abs(d__2))) { work[ki + *n] = wi / t[ki + (ki + 1) * t_dim1]; work[ki + 1 + n2] = 1.; } else { work[ki + *n] = 1.; work[ki + 1 + n2] = -wi / t[ki + 1 + ki * t_dim1]; } work[ki + 1 + *n] = 0.; work[ki + n2] = 0.; /* Form right-hand side */ i__2 = *n; for (k = ki + 2; k <= i__2; ++k) { work[k + *n] = -work[ki + *n] * t[ki + k * t_dim1]; work[k + n2] = -work[ki + 1 + n2] * t[ki + 1 + k * t_dim1] ; /* L190: */ } /* Solve complex quasi-triangular system: ( T(KI+2,N:KI+2,N) - (WR-i*WI) )*X = WORK1+i*WORK2 */ vmax = 1.; vcrit = bignum; jnxt = ki + 2; i__2 = *n; for (j = ki + 2; j <= i__2; ++j) { if (j < jnxt) { goto L200; } j1 = j; j2 = j; jnxt = j + 1; if (j < *n) { if (t[j + 1 + j * t_dim1] != 0.) { j2 = j + 1; jnxt = j + 2; } } if (j1 == j2) { /* 1-by-1 diagonal block Scale if necessary to avoid overflow when forming the right-hand side elements. */ if (work[j] > vcrit) { rec = 1. 
/ vmax; i__3 = *n - ki + 1; dscal_(&i__3, &rec, &work[ki + *n], &c__1); i__3 = *n - ki + 1; dscal_(&i__3, &rec, &work[ki + n2], &c__1); vmax = 1.; vcrit = bignum; } i__3 = j - ki - 2; work[j + *n] -= ddot_(&i__3, &t[ki + 2 + j * t_dim1], &c__1, &work[ki + 2 + *n], &c__1); i__3 = j - ki - 2; work[j + n2] -= ddot_(&i__3, &t[ki + 2 + j * t_dim1], &c__1, &work[ki + 2 + n2], &c__1); /* Solve (T(J,J)-(WR-i*WI))*(X11+i*X12)= WK+I*WK2 */ d__1 = -wi; dlaln2_(&c_false, &c__1, &c__2, &smin, &c_b15, &t[j + j * t_dim1], ldt, &c_b15, &c_b15, &work[j + * n], n, &wr, &d__1, x, &c__2, &scale, &xnorm, & ierr); /* Scale if necessary */ if (scale != 1.) { i__3 = *n - ki + 1; dscal_(&i__3, &scale, &work[ki + *n], &c__1); i__3 = *n - ki + 1; dscal_(&i__3, &scale, &work[ki + n2], &c__1); } work[j + *n] = x[0]; work[j + n2] = x[2]; /* Computing MAX */ d__3 = (d__1 = work[j + *n], abs(d__1)), d__4 = (d__2 = work[j + n2], abs(d__2)), d__3 = max(d__3, d__4); vmax = max(d__3,vmax); vcrit = bignum / vmax; } else { /* 2-by-2 diagonal block Scale if necessary to avoid overflow when forming the right-hand side elements. Computing MAX */ d__1 = work[j], d__2 = work[j + 1]; beta = max(d__1,d__2); if (beta > vcrit) { rec = 1. / vmax; i__3 = *n - ki + 1; dscal_(&i__3, &rec, &work[ki + *n], &c__1); i__3 = *n - ki + 1; dscal_(&i__3, &rec, &work[ki + n2], &c__1); vmax = 1.; vcrit = bignum; } i__3 = j - ki - 2; work[j + *n] -= ddot_(&i__3, &t[ki + 2 + j * t_dim1], &c__1, &work[ki + 2 + *n], &c__1); i__3 = j - ki - 2; work[j + n2] -= ddot_(&i__3, &t[ki + 2 + j * t_dim1], &c__1, &work[ki + 2 + n2], &c__1); i__3 = j - ki - 2; work[j + 1 + *n] -= ddot_(&i__3, &t[ki + 2 + (j + 1) * t_dim1], &c__1, &work[ki + 2 + *n], &c__1); i__3 = j - ki - 2; work[j + 1 + n2] -= ddot_(&i__3, &t[ki + 2 + (j + 1) * t_dim1], &c__1, &work[ki + 2 + n2], &c__1); /* Solve 2-by-2 complex linear equation ([T(j,j) T(j,j+1) ]'-(wr-i*wi)*I)*X = SCALE*B ([T(j+1,j) T(j+1,j+1)] ) */ d__1 = -wi; dlaln2_(&c_true, &c__2, &c__2, &smin, &c_b15, &t[j + j * t_dim1], ldt, &c_b15, &c_b15, &work[j + * n], n, &wr, &d__1, x, &c__2, &scale, &xnorm, & ierr); /* Scale if necessary */ if (scale != 1.) { i__3 = *n - ki + 1; dscal_(&i__3, &scale, &work[ki + *n], &c__1); i__3 = *n - ki + 1; dscal_(&i__3, &scale, &work[ki + n2], &c__1); } work[j + *n] = x[0]; work[j + n2] = x[2]; work[j + 1 + *n] = x[1]; work[j + 1 + n2] = x[3]; /* Computing MAX */ d__1 = abs(x[0]), d__2 = abs(x[2]), d__1 = max(d__1, d__2), d__2 = abs(x[1]), d__1 = max(d__1,d__2) , d__2 = abs(x[3]), d__1 = max(d__1,d__2); vmax = max(d__1,vmax); vcrit = bignum / vmax; } L200: ; } /* Copy the vector x or Q*x to VL and normalize. */ if (! over) { i__2 = *n - ki + 1; dcopy_(&i__2, &work[ki + *n], &c__1, &vl[ki + is * vl_dim1], &c__1); i__2 = *n - ki + 1; dcopy_(&i__2, &work[ki + n2], &c__1, &vl[ki + (is + 1) * vl_dim1], &c__1); emax = 0.; i__2 = *n; for (k = ki; k <= i__2; ++k) { /* Computing MAX */ d__3 = emax, d__4 = (d__1 = vl[k + is * vl_dim1], abs( d__1)) + (d__2 = vl[k + (is + 1) * vl_dim1], abs(d__2)); emax = max(d__3,d__4); /* L220: */ } remax = 1. 
/ emax; i__2 = *n - ki + 1; dscal_(&i__2, &remax, &vl[ki + is * vl_dim1], &c__1); i__2 = *n - ki + 1; dscal_(&i__2, &remax, &vl[ki + (is + 1) * vl_dim1], &c__1) ; i__2 = ki - 1; for (k = 1; k <= i__2; ++k) { vl[k + is * vl_dim1] = 0.; vl[k + (is + 1) * vl_dim1] = 0.; /* L230: */ } } else { if (ki < *n - 1) { i__2 = *n - ki - 1; dgemv_("N", n, &i__2, &c_b15, &vl[(ki + 2) * vl_dim1 + 1], ldvl, &work[ki + 2 + *n], &c__1, &work[ ki + *n], &vl[ki * vl_dim1 + 1], &c__1); i__2 = *n - ki - 1; dgemv_("N", n, &i__2, &c_b15, &vl[(ki + 2) * vl_dim1 + 1], ldvl, &work[ki + 2 + n2], &c__1, &work[ ki + 1 + n2], &vl[(ki + 1) * vl_dim1 + 1], & c__1); } else { dscal_(n, &work[ki + *n], &vl[ki * vl_dim1 + 1], & c__1); dscal_(n, &work[ki + 1 + n2], &vl[(ki + 1) * vl_dim1 + 1], &c__1); } emax = 0.; i__2 = *n; for (k = 1; k <= i__2; ++k) { /* Computing MAX */ d__3 = emax, d__4 = (d__1 = vl[k + ki * vl_dim1], abs( d__1)) + (d__2 = vl[k + (ki + 1) * vl_dim1], abs(d__2)); emax = max(d__3,d__4); /* L240: */ } remax = 1. / emax; dscal_(n, &remax, &vl[ki * vl_dim1 + 1], &c__1); dscal_(n, &remax, &vl[(ki + 1) * vl_dim1 + 1], &c__1); } } ++is; if (ip != 0) { ++is; } L250: if (ip == -1) { ip = 0; } if (ip == 1) { ip = -1; } /* L260: */ } } return 0; /* End of DTREVC */ } /* dtrevc_ */ /* Subroutine */ int dtrexc_(char *compq, integer *n, doublereal *t, integer * ldt, doublereal *q, integer *ldq, integer *ifst, integer *ilst, doublereal *work, integer *info) { /* System generated locals */ integer q_dim1, q_offset, t_dim1, t_offset, i__1; /* Local variables */ static integer here; extern logical lsame_(char *, char *); static logical wantq; extern /* Subroutine */ int dlaexc_(logical *, integer *, doublereal *, integer *, doublereal *, integer *, integer *, integer *, integer *, doublereal *, integer *), xerbla_(char *, integer *); static integer nbnext, nbf, nbl; /* -- LAPACK routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= DTREXC reorders the real Schur factorization of a real matrix A = Q*T*Q**T, so that the diagonal block of T with row index IFST is moved to row ILST. The real Schur form T is reordered by an orthogonal similarity transformation Z**T*T*Z, and optionally the matrix Q of Schur vectors is updated by postmultiplying it with Z. T must be in Schur canonical form (as returned by DHSEQR), that is, block upper triangular with 1-by-1 and 2-by-2 diagonal blocks; each 2-by-2 diagonal block has its diagonal elements equal and its off-diagonal elements of opposite sign. Arguments ========= COMPQ (input) CHARACTER*1 = 'V': update the matrix Q of Schur vectors; = 'N': do not update Q. N (input) INTEGER The order of the matrix T. N >= 0. T (input/output) DOUBLE PRECISION array, dimension (LDT,N) On entry, the upper quasi-triangular matrix T, in Schur Schur canonical form. On exit, the reordered upper quasi-triangular matrix, again in Schur canonical form. LDT (input) INTEGER The leading dimension of the array T. LDT >= max(1,N). Q (input/output) DOUBLE PRECISION array, dimension (LDQ,N) On entry, if COMPQ = 'V', the matrix Q of Schur vectors. On exit, if COMPQ = 'V', Q has been postmultiplied by the orthogonal transformation matrix Z which reorders T. If COMPQ = 'N', Q is not referenced. LDQ (input) INTEGER The leading dimension of the array Q. LDQ >= max(1,N). IFST (input/output) INTEGER ILST (input/output) INTEGER Specify the reordering of the diagonal blocks of T. 
The block with row index IFST is moved to row ILST, by a sequence of transpositions between adjacent blocks. On exit, if IFST pointed on entry to the second row of a 2-by-2 block, it is changed to point to the first row; ILST always points to the first row of the block in its final position (which may differ from its input value by +1 or -1). 1 <= IFST <= N; 1 <= ILST <= N. WORK (workspace) DOUBLE PRECISION array, dimension (N) INFO (output) INTEGER = 0: successful exit < 0: if INFO = -i, the i-th argument had an illegal value = 1: two adjacent blocks were too close to swap (the problem is very ill-conditioned); T may have been partially reordered, and ILST points to the first row of the current position of the block being moved. ===================================================================== Decode and test the input arguments. */ /* Parameter adjustments */ t_dim1 = *ldt; t_offset = 1 + t_dim1 * 1; t -= t_offset; q_dim1 = *ldq; q_offset = 1 + q_dim1 * 1; q -= q_offset; --work; /* Function Body */ *info = 0; wantq = lsame_(compq, "V"); if (! wantq && ! lsame_(compq, "N")) { *info = -1; } else if (*n < 0) { *info = -2; } else if (*ldt < max(1,*n)) { *info = -4; } else if (*ldq < 1 || wantq && *ldq < max(1,*n)) { *info = -6; } else if (*ifst < 1 || *ifst > *n) { *info = -7; } else if (*ilst < 1 || *ilst > *n) { *info = -8; } if (*info != 0) { i__1 = -(*info); xerbla_("DTREXC", &i__1); return 0; } /* Quick return if possible */ if (*n <= 1) { return 0; } /* Determine the first row of specified block and find out it is 1 by 1 or 2 by 2. */ if (*ifst > 1) { if (t[*ifst + (*ifst - 1) * t_dim1] != 0.) { --(*ifst); } } nbf = 1; if (*ifst < *n) { if (t[*ifst + 1 + *ifst * t_dim1] != 0.) { nbf = 2; } } /* Determine the first row of the final block and find out it is 1 by 1 or 2 by 2. */ if (*ilst > 1) { if (t[*ilst + (*ilst - 1) * t_dim1] != 0.) { --(*ilst); } } nbl = 1; if (*ilst < *n) { if (t[*ilst + 1 + *ilst * t_dim1] != 0.) { nbl = 2; } } if (*ifst == *ilst) { return 0; } if (*ifst < *ilst) { /* Update ILST */ if (nbf == 2 && nbl == 1) { --(*ilst); } if (nbf == 1 && nbl == 2) { ++(*ilst); } here = *ifst; L10: /* Swap block with next one below */ if (nbf == 1 || nbf == 2) { /* Current block either 1 by 1 or 2 by 2 */ nbnext = 1; if (here + nbf + 1 <= *n) { if (t[here + nbf + 1 + (here + nbf) * t_dim1] != 0.) { nbnext = 2; } } dlaexc_(&wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, &here, & nbf, &nbnext, &work[1], info); if (*info != 0) { *ilst = here; return 0; } here += nbnext; /* Test if 2 by 2 block breaks into two 1 by 1 blocks */ if (nbf == 2) { if (t[here + 1 + here * t_dim1] == 0.) { nbf = 3; } } } else { /* Current block consists of two 1 by 1 blocks each of which must be swapped individually */ nbnext = 1; if (here + 3 <= *n) { if (t[here + 3 + (here + 2) * t_dim1] != 0.) { nbnext = 2; } } i__1 = here + 1; dlaexc_(&wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, &i__1, & c__1, &nbnext, &work[1], info); if (*info != 0) { *ilst = here; return 0; } if (nbnext == 1) { /* Swap two 1 by 1 blocks, no problems possible */ dlaexc_(&wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, & here, &c__1, &nbnext, &work[1], info); ++here; } else { /* Recompute NBNEXT in case 2 by 2 split */ if (t[here + 2 + (here + 1) * t_dim1] == 0.) 
{ nbnext = 1; } if (nbnext == 2) { /* 2 by 2 Block did not split */ dlaexc_(&wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, & here, &c__1, &nbnext, &work[1], info); if (*info != 0) { *ilst = here; return 0; } here += 2; } else { /* 2 by 2 Block did split */ dlaexc_(&wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, & here, &c__1, &c__1, &work[1], info); i__1 = here + 1; dlaexc_(&wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, & i__1, &c__1, &c__1, &work[1], info); here += 2; } } } if (here < *ilst) { goto L10; } } else { here = *ifst; L20: /* Swap block with next one above */ if (nbf == 1 || nbf == 2) { /* Current block either 1 by 1 or 2 by 2 */ nbnext = 1; if (here >= 3) { if (t[here - 1 + (here - 2) * t_dim1] != 0.) { nbnext = 2; } } i__1 = here - nbnext; dlaexc_(&wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, &i__1, & nbnext, &nbf, &work[1], info); if (*info != 0) { *ilst = here; return 0; } here -= nbnext; /* Test if 2 by 2 block breaks into two 1 by 1 blocks */ if (nbf == 2) { if (t[here + 1 + here * t_dim1] == 0.) { nbf = 3; } } } else { /* Current block consists of two 1 by 1 blocks each of which must be swapped individually */ nbnext = 1; if (here >= 3) { if (t[here - 1 + (here - 2) * t_dim1] != 0.) { nbnext = 2; } } i__1 = here - nbnext; dlaexc_(&wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, &i__1, & nbnext, &c__1, &work[1], info); if (*info != 0) { *ilst = here; return 0; } if (nbnext == 1) { /* Swap two 1 by 1 blocks, no problems possible */ dlaexc_(&wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, & here, &nbnext, &c__1, &work[1], info); --here; } else { /* Recompute NBNEXT in case 2 by 2 split */ if (t[here + (here - 1) * t_dim1] == 0.) { nbnext = 1; } if (nbnext == 2) { /* 2 by 2 Block did not split */ i__1 = here - 1; dlaexc_(&wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, & i__1, &c__2, &c__1, &work[1], info); if (*info != 0) { *ilst = here; return 0; } here += -2; } else { /* 2 by 2 Block did split */ dlaexc_(&wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, & here, &c__1, &c__1, &work[1], info); i__1 = here - 1; dlaexc_(&wantq, n, &t[t_offset], ldt, &q[q_offset], ldq, & i__1, &c__1, &c__1, &work[1], info); here += -2; } } } if (here > *ilst) { goto L20; } } *ilst = here; return 0; /* End of DTREXC */ } /* dtrexc_ */ integer ieeeck_(integer *ispec, real *zero, real *one) { /* System generated locals */ integer ret_val; /* Local variables */ static real neginf, posinf, negzro, newzro, nan1, nan2, nan3, nan4, nan5, nan6; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= IEEECK is called from the ILAENV to verify that Infinity and possibly NaN arithmetic is safe (i.e. will not trap). Arguments ========= ISPEC (input) INTEGER Specifies whether to test just for inifinity arithmetic or whether to test for infinity and NaN arithmetic. = 0: Verify infinity arithmetic only. = 1: Verify infinity and NaN arithmetic. ZERO (input) REAL Must contain the value 0.0 This is passed to prevent the compiler from optimizing away this code. ONE (input) REAL Must contain the value 1.0 This is passed to prevent the compiler from optimizing away this code. 
RETURN VALUE: INTEGER = 0: Arithmetic failed to produce the correct answers = 1: Arithmetic produced the correct answers */ ret_val = 1; posinf = *one / *zero; if (posinf <= *one) { ret_val = 0; return ret_val; } neginf = -(*one) / *zero; if (neginf >= *zero) { ret_val = 0; return ret_val; } negzro = *one / (neginf + *one); if (negzro != *zero) { ret_val = 0; return ret_val; } neginf = *one / negzro; if (neginf >= *zero) { ret_val = 0; return ret_val; } newzro = negzro + *zero; if (newzro != *zero) { ret_val = 0; return ret_val; } posinf = *one / newzro; if (posinf <= *one) { ret_val = 0; return ret_val; } neginf *= posinf; if (neginf >= *zero) { ret_val = 0; return ret_val; } posinf *= posinf; if (posinf <= *one) { ret_val = 0; return ret_val; } /* Return if we were only asked to check infinity arithmetic */ if (*ispec == 0) { return ret_val; } nan1 = posinf + neginf; nan2 = posinf / neginf; nan3 = posinf / posinf; nan4 = posinf * *zero; nan5 = neginf * negzro; nan6 = nan5 * 0.f; if (nan1 == nan1) { ret_val = 0; return ret_val; } if (nan2 == nan2) { ret_val = 0; return ret_val; } if (nan3 == nan3) { ret_val = 0; return ret_val; } if (nan4 == nan4) { ret_val = 0; return ret_val; } if (nan5 == nan5) { ret_val = 0; return ret_val; } if (nan6 == nan6) { ret_val = 0; return ret_val; } return ret_val; } /* ieeeck_ */ integer ilaenv_(integer *ispec, char *name__, char *opts, integer *n1, integer *n2, integer *n3, integer *n4, ftnlen name_len, ftnlen opts_len) { /* System generated locals */ integer ret_val; /* Builtin functions */ /* Subroutine */ int s_copy(char *, char *, ftnlen, ftnlen); integer s_cmp(char *, char *, ftnlen, ftnlen); /* Local variables */ static integer i__; static logical cname; static integer nbmin; static logical sname; static char c1[1], c2[2], c3[3], c4[2]; static integer ic, nb; extern integer ieeeck_(integer *, real *, real *); static integer iz, nx; static char subnam[6]; extern integer iparmq_(integer *, char *, char *, integer *, integer *, integer *, integer *); /* -- LAPACK auxiliary routine (version 3.1.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. January 2007 Purpose ======= ILAENV is called from the LAPACK routines to choose problem-dependent parameters for the local environment. See ISPEC for a description of the parameters. ILAENV returns an INTEGER if ILAENV >= 0: ILAENV returns the value of the parameter specified by ISPEC if ILAENV < 0: if ILAENV = -k, the k-th argument had an illegal value. This version provides a set of parameters which should give good, but not optimal, performance on many of the currently available computers. Users are encouraged to modify this subroutine to set the tuning parameters for their particular machine using the option and problem size information in the arguments. This routine will not function correctly if it is converted to all lower case. Converting it to all upper case is allowed. Arguments ========= ISPEC (input) INTEGER Specifies the parameter to be returned as the value of ILAENV. = 1: the optimal blocksize; if this value is 1, an unblocked algorithm will give the best performance. = 2: the minimum block size for which the block routine should be used; if the usable block size is less than this value, an unblocked routine should be used. 
= 3: the crossover point (in a block routine, for N less than this value, an unblocked routine should be used) = 4: the number of shifts, used in the nonsymmetric eigenvalue routines (DEPRECATED) = 5: the minimum column dimension for blocking to be used; rectangular blocks must have dimension at least k by m, where k is given by ILAENV(2,...) and m by ILAENV(5,...) = 6: the crossover point for the SVD (when reducing an m by n matrix to bidiagonal form, if max(m,n)/min(m,n) exceeds this value, a QR factorization is used first to reduce the matrix to a triangular form.) = 7: the number of processors = 8: the crossover point for the multishift QR method for nonsymmetric eigenvalue problems (DEPRECATED) = 9: maximum size of the subproblems at the bottom of the computation tree in the divide-and-conquer algorithm (used by xGELSD and xGESDD) =10: ieee NaN arithmetic can be trusted not to trap =11: infinity arithmetic can be trusted not to trap 12 <= ISPEC <= 16: xHSEQR or one of its subroutines, see IPARMQ for detailed explanation NAME (input) CHARACTER*(*) The name of the calling subroutine, in either upper case or lower case. OPTS (input) CHARACTER*(*) The character options to the subroutine NAME, concatenated into a single character string. For example, UPLO = 'U', TRANS = 'T', and DIAG = 'N' for a triangular routine would be specified as OPTS = 'UTN'. N1 (input) INTEGER N2 (input) INTEGER N3 (input) INTEGER N4 (input) INTEGER Problem dimensions for the subroutine NAME; these may not all be required. Further Details =============== The following conventions have been used when calling ILAENV from the LAPACK routines: 1) OPTS is a concatenation of all of the character options to subroutine NAME, in the same order that they appear in the argument list for NAME, even if they are not used in determining the value of the parameter specified by ISPEC. 2) The problem dimensions N1, N2, N3, N4 are specified in the order that they appear in the argument list for NAME. N1 is used first, N2 second, and so on, and unused problem dimensions are passed a value of -1. 3) The parameter value returned by ILAENV is checked for validity in the calling subroutine. For example, ILAENV is used to retrieve the optimal blocksize for STRTRI as follows: NB = ILAENV( 1, 'STRTRI', UPLO // DIAG, N, -1, -1, -1 ) IF( NB.LE.1 ) NB = MAX( 1, N ) ===================================================================== */ switch (*ispec) { case 1: goto L10; case 2: goto L10; case 3: goto L10; case 4: goto L80; case 5: goto L90; case 6: goto L100; case 7: goto L110; case 8: goto L120; case 9: goto L130; case 10: goto L140; case 11: goto L150; case 12: goto L160; case 13: goto L160; case 14: goto L160; case 15: goto L160; case 16: goto L160; } /* Invalid value for ISPEC */ ret_val = -1; return ret_val; L10: /* Convert NAME to upper case if the first character is lower case. 
*/ ret_val = 1; s_copy(subnam, name__, (ftnlen)6, name_len); ic = *(unsigned char *)subnam; iz = 'Z'; if (iz == 90 || iz == 122) { /* ASCII character set */ if (ic >= 97 && ic <= 122) { *(unsigned char *)subnam = (char) (ic - 32); for (i__ = 2; i__ <= 6; ++i__) { ic = *(unsigned char *)&subnam[i__ - 1]; if (ic >= 97 && ic <= 122) { *(unsigned char *)&subnam[i__ - 1] = (char) (ic - 32); } /* L20: */ } } } else if (iz == 233 || iz == 169) { /* EBCDIC character set */ if (ic >= 129 && ic <= 137 || ic >= 145 && ic <= 153 || ic >= 162 && ic <= 169) { *(unsigned char *)subnam = (char) (ic + 64); for (i__ = 2; i__ <= 6; ++i__) { ic = *(unsigned char *)&subnam[i__ - 1]; if (ic >= 129 && ic <= 137 || ic >= 145 && ic <= 153 || ic >= 162 && ic <= 169) { *(unsigned char *)&subnam[i__ - 1] = (char) (ic + 64); } /* L30: */ } } } else if (iz == 218 || iz == 250) { /* Prime machines: ASCII+128 */ if (ic >= 225 && ic <= 250) { *(unsigned char *)subnam = (char) (ic - 32); for (i__ = 2; i__ <= 6; ++i__) { ic = *(unsigned char *)&subnam[i__ - 1]; if (ic >= 225 && ic <= 250) { *(unsigned char *)&subnam[i__ - 1] = (char) (ic - 32); } /* L40: */ } } } *(unsigned char *)c1 = *(unsigned char *)subnam; sname = *(unsigned char *)c1 == 'S' || *(unsigned char *)c1 == 'D'; cname = *(unsigned char *)c1 == 'C' || *(unsigned char *)c1 == 'Z'; if (! (cname || sname)) { return ret_val; } s_copy(c2, subnam + 1, (ftnlen)2, (ftnlen)2); s_copy(c3, subnam + 3, (ftnlen)3, (ftnlen)3); s_copy(c4, c3 + 1, (ftnlen)2, (ftnlen)2); switch (*ispec) { case 1: goto L50; case 2: goto L60; case 3: goto L70; } L50: /* ISPEC = 1: block size In these examples, separate code is provided for setting NB for real and complex. We assume that NB will take the same value in single or double precision. */ nb = 1; if (s_cmp(c2, "GE", (ftnlen)2, (ftnlen)2) == 0) { if (s_cmp(c3, "TRF", (ftnlen)3, (ftnlen)3) == 0) { if (sname) { nb = 64; } else { nb = 64; } } else if (s_cmp(c3, "QRF", (ftnlen)3, (ftnlen)3) == 0 || s_cmp(c3, "RQF", (ftnlen)3, (ftnlen)3) == 0 || s_cmp(c3, "LQF", (ftnlen) 3, (ftnlen)3) == 0 || s_cmp(c3, "QLF", (ftnlen)3, (ftnlen)3) == 0) { if (sname) { nb = 32; } else { nb = 32; } } else if (s_cmp(c3, "HRD", (ftnlen)3, (ftnlen)3) == 0) { if (sname) { nb = 32; } else { nb = 32; } } else if (s_cmp(c3, "BRD", (ftnlen)3, (ftnlen)3) == 0) { if (sname) { nb = 32; } else { nb = 32; } } else if (s_cmp(c3, "TRI", (ftnlen)3, (ftnlen)3) == 0) { if (sname) { nb = 64; } else { nb = 64; } } } else if (s_cmp(c2, "PO", (ftnlen)2, (ftnlen)2) == 0) { if (s_cmp(c3, "TRF", (ftnlen)3, (ftnlen)3) == 0) { if (sname) { nb = 64; } else { nb = 64; } } } else if (s_cmp(c2, "SY", (ftnlen)2, (ftnlen)2) == 0) { if (s_cmp(c3, "TRF", (ftnlen)3, (ftnlen)3) == 0) { if (sname) { nb = 64; } else { nb = 64; } } else if (sname && s_cmp(c3, "TRD", (ftnlen)3, (ftnlen)3) == 0) { nb = 32; } else if (sname && s_cmp(c3, "GST", (ftnlen)3, (ftnlen)3) == 0) { nb = 64; } } else if (cname && s_cmp(c2, "HE", (ftnlen)2, (ftnlen)2) == 0) { if (s_cmp(c3, "TRF", (ftnlen)3, (ftnlen)3) == 0) { nb = 64; } else if (s_cmp(c3, "TRD", (ftnlen)3, (ftnlen)3) == 0) { nb = 32; } else if (s_cmp(c3, "GST", (ftnlen)3, (ftnlen)3) == 0) { nb = 64; } } else if (sname && s_cmp(c2, "OR", (ftnlen)2, (ftnlen)2) == 0) { if (*(unsigned char *)c3 == 'G') { if (s_cmp(c4, "QR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "RQ", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "LQ", (ftnlen)2, ( ftnlen)2) == 0 || s_cmp(c4, "QL", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "HR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp( c4, "TR", (ftnlen)2, 
(ftnlen)2) == 0 || s_cmp(c4, "BR", ( ftnlen)2, (ftnlen)2) == 0) { nb = 32; } } else if (*(unsigned char *)c3 == 'M') { if (s_cmp(c4, "QR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "RQ", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "LQ", (ftnlen)2, ( ftnlen)2) == 0 || s_cmp(c4, "QL", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "HR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp( c4, "TR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "BR", ( ftnlen)2, (ftnlen)2) == 0) { nb = 32; } } } else if (cname && s_cmp(c2, "UN", (ftnlen)2, (ftnlen)2) == 0) { if (*(unsigned char *)c3 == 'G') { if (s_cmp(c4, "QR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "RQ", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "LQ", (ftnlen)2, ( ftnlen)2) == 0 || s_cmp(c4, "QL", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "HR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp( c4, "TR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "BR", ( ftnlen)2, (ftnlen)2) == 0) { nb = 32; } } else if (*(unsigned char *)c3 == 'M') { if (s_cmp(c4, "QR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "RQ", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "LQ", (ftnlen)2, ( ftnlen)2) == 0 || s_cmp(c4, "QL", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "HR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp( c4, "TR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "BR", ( ftnlen)2, (ftnlen)2) == 0) { nb = 32; } } } else if (s_cmp(c2, "GB", (ftnlen)2, (ftnlen)2) == 0) { if (s_cmp(c3, "TRF", (ftnlen)3, (ftnlen)3) == 0) { if (sname) { if (*n4 <= 64) { nb = 1; } else { nb = 32; } } else { if (*n4 <= 64) { nb = 1; } else { nb = 32; } } } } else if (s_cmp(c2, "PB", (ftnlen)2, (ftnlen)2) == 0) { if (s_cmp(c3, "TRF", (ftnlen)3, (ftnlen)3) == 0) { if (sname) { if (*n2 <= 64) { nb = 1; } else { nb = 32; } } else { if (*n2 <= 64) { nb = 1; } else { nb = 32; } } } } else if (s_cmp(c2, "TR", (ftnlen)2, (ftnlen)2) == 0) { if (s_cmp(c3, "TRI", (ftnlen)3, (ftnlen)3) == 0) { if (sname) { nb = 64; } else { nb = 64; } } } else if (s_cmp(c2, "LA", (ftnlen)2, (ftnlen)2) == 0) { if (s_cmp(c3, "UUM", (ftnlen)3, (ftnlen)3) == 0) { if (sname) { nb = 64; } else { nb = 64; } } } else if (sname && s_cmp(c2, "ST", (ftnlen)2, (ftnlen)2) == 0) { if (s_cmp(c3, "EBZ", (ftnlen)3, (ftnlen)3) == 0) { nb = 1; } } ret_val = nb; return ret_val; L60: /* ISPEC = 2: minimum block size */ nbmin = 2; if (s_cmp(c2, "GE", (ftnlen)2, (ftnlen)2) == 0) { if (s_cmp(c3, "QRF", (ftnlen)3, (ftnlen)3) == 0 || s_cmp(c3, "RQF", ( ftnlen)3, (ftnlen)3) == 0 || s_cmp(c3, "LQF", (ftnlen)3, ( ftnlen)3) == 0 || s_cmp(c3, "QLF", (ftnlen)3, (ftnlen)3) == 0) { if (sname) { nbmin = 2; } else { nbmin = 2; } } else if (s_cmp(c3, "HRD", (ftnlen)3, (ftnlen)3) == 0) { if (sname) { nbmin = 2; } else { nbmin = 2; } } else if (s_cmp(c3, "BRD", (ftnlen)3, (ftnlen)3) == 0) { if (sname) { nbmin = 2; } else { nbmin = 2; } } else if (s_cmp(c3, "TRI", (ftnlen)3, (ftnlen)3) == 0) { if (sname) { nbmin = 2; } else { nbmin = 2; } } } else if (s_cmp(c2, "SY", (ftnlen)2, (ftnlen)2) == 0) { if (s_cmp(c3, "TRF", (ftnlen)3, (ftnlen)3) == 0) { if (sname) { nbmin = 8; } else { nbmin = 8; } } else if (sname && s_cmp(c3, "TRD", (ftnlen)3, (ftnlen)3) == 0) { nbmin = 2; } } else if (cname && s_cmp(c2, "HE", (ftnlen)2, (ftnlen)2) == 0) { if (s_cmp(c3, "TRD", (ftnlen)3, (ftnlen)3) == 0) { nbmin = 2; } } else if (sname && s_cmp(c2, "OR", (ftnlen)2, (ftnlen)2) == 0) { if (*(unsigned char *)c3 == 'G') { if (s_cmp(c4, "QR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "RQ", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "LQ", (ftnlen)2, ( ftnlen)2) == 0 || s_cmp(c4, "QL", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "HR", (ftnlen)2, (ftnlen)2) == 0 || 
s_cmp( c4, "TR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "BR", ( ftnlen)2, (ftnlen)2) == 0) { nbmin = 2; } } else if (*(unsigned char *)c3 == 'M') { if (s_cmp(c4, "QR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "RQ", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "LQ", (ftnlen)2, ( ftnlen)2) == 0 || s_cmp(c4, "QL", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "HR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp( c4, "TR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "BR", ( ftnlen)2, (ftnlen)2) == 0) { nbmin = 2; } } } else if (cname && s_cmp(c2, "UN", (ftnlen)2, (ftnlen)2) == 0) { if (*(unsigned char *)c3 == 'G') { if (s_cmp(c4, "QR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "RQ", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "LQ", (ftnlen)2, ( ftnlen)2) == 0 || s_cmp(c4, "QL", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "HR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp( c4, "TR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "BR", ( ftnlen)2, (ftnlen)2) == 0) { nbmin = 2; } } else if (*(unsigned char *)c3 == 'M') { if (s_cmp(c4, "QR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "RQ", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "LQ", (ftnlen)2, ( ftnlen)2) == 0 || s_cmp(c4, "QL", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "HR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp( c4, "TR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "BR", ( ftnlen)2, (ftnlen)2) == 0) { nbmin = 2; } } } ret_val = nbmin; return ret_val; L70: /* ISPEC = 3: crossover point */ nx = 0; if (s_cmp(c2, "GE", (ftnlen)2, (ftnlen)2) == 0) { if (s_cmp(c3, "QRF", (ftnlen)3, (ftnlen)3) == 0 || s_cmp(c3, "RQF", ( ftnlen)3, (ftnlen)3) == 0 || s_cmp(c3, "LQF", (ftnlen)3, ( ftnlen)3) == 0 || s_cmp(c3, "QLF", (ftnlen)3, (ftnlen)3) == 0) { if (sname) { nx = 128; } else { nx = 128; } } else if (s_cmp(c3, "HRD", (ftnlen)3, (ftnlen)3) == 0) { if (sname) { nx = 128; } else { nx = 128; } } else if (s_cmp(c3, "BRD", (ftnlen)3, (ftnlen)3) == 0) { if (sname) { nx = 128; } else { nx = 128; } } } else if (s_cmp(c2, "SY", (ftnlen)2, (ftnlen)2) == 0) { if (sname && s_cmp(c3, "TRD", (ftnlen)3, (ftnlen)3) == 0) { nx = 32; } } else if (cname && s_cmp(c2, "HE", (ftnlen)2, (ftnlen)2) == 0) { if (s_cmp(c3, "TRD", (ftnlen)3, (ftnlen)3) == 0) { nx = 32; } } else if (sname && s_cmp(c2, "OR", (ftnlen)2, (ftnlen)2) == 0) { if (*(unsigned char *)c3 == 'G') { if (s_cmp(c4, "QR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "RQ", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "LQ", (ftnlen)2, ( ftnlen)2) == 0 || s_cmp(c4, "QL", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "HR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp( c4, "TR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "BR", ( ftnlen)2, (ftnlen)2) == 0) { nx = 128; } } } else if (cname && s_cmp(c2, "UN", (ftnlen)2, (ftnlen)2) == 0) { if (*(unsigned char *)c3 == 'G') { if (s_cmp(c4, "QR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "RQ", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "LQ", (ftnlen)2, ( ftnlen)2) == 0 || s_cmp(c4, "QL", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "HR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp( c4, "TR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "BR", ( ftnlen)2, (ftnlen)2) == 0) { nx = 128; } } } ret_val = nx; return ret_val; L80: /* ISPEC = 4: number of shifts (used by xHSEQR) */ ret_val = 6; return ret_val; L90: /* ISPEC = 5: minimum column dimension (not used) */ ret_val = 2; return ret_val; L100: /* ISPEC = 6: crossover point for SVD (used by xGELSS and xGESVD) */ ret_val = (integer) ((real) min(*n1,*n2) * 1.6f); return ret_val; L110: /* ISPEC = 7: number of processors (not used) */ ret_val = 1; return ret_val; L120: /* ISPEC = 8: crossover point for multishift (used by xHSEQR) */ ret_val = 50; 
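/* (ISPEC = 8 is documented above as DEPRECATED; current xHSEQR tuning is
       obtained through IPARMQ for 12 <= ISPEC <= 16 below.) */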
return ret_val; L130: /* ISPEC = 9: maximum size of the subproblems at the bottom of the computation tree in the divide-and-conquer algorithm (used by xGELSD and xGESDD) */ ret_val = 25; return ret_val; L140: /* ISPEC = 10: ieee NaN arithmetic can be trusted not to trap ILAENV = 0 */ ret_val = 1; if (ret_val == 1) { ret_val = ieeeck_(&c__0, &c_b4270, &c_b4271); } return ret_val; L150: /* ISPEC = 11: infinity arithmetic can be trusted not to trap ILAENV = 0 */ ret_val = 1; if (ret_val == 1) { ret_val = ieeeck_(&c__1, &c_b4270, &c_b4271); } return ret_val; L160: /* 12 <= ISPEC <= 16: xHSEQR or one of its subroutines. */ /*** FFF MODIF ***/ /*** f2c generated code ***/ /* ret_val = iparmq_(ispec, name__, opts, n1, n2, n3, n4, name_len, opts_len) ;*/ ret_val = iparmq_(ispec, name__, opts, n1, n2, n3, n4); return ret_val; /* End of ILAENV */ } /* ilaenv_ */ integer iparmq_(integer *ispec, char *name__, char *opts, integer *n, integer *ilo, integer *ihi, integer *lwork) { /* System generated locals */ integer ret_val, i__1, i__2; real r__1; /* Builtin functions */ double log(doublereal); integer i_nint(real *); /* Local variables */ static integer nh, ns; /* -- LAPACK auxiliary routine (version 3.1) -- Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. November 2006 Purpose ======= This program sets problem and machine dependent parameters useful for xHSEQR and its subroutines. It is called whenever ILAENV is called with 12 <= ISPEC <= 16 Arguments ========= ISPEC (input) integer scalar ISPEC specifies which tunable parameter IPARMQ should return. ISPEC=12: (INMIN) Matrices of order nmin or less are sent directly to xLAHQR, the implicit double shift QR algorithm. NMIN must be at least 11. ISPEC=13: (INWIN) Size of the deflation window. This is best set greater than or equal to the number of simultaneous shifts NS. Larger matrices benefit from larger deflation windows. ISPEC=14: (INIBL) Determines when to stop nibbling and invest in an (expensive) multi-shift QR sweep. If the aggressive early deflation subroutine finds LD converged eigenvalues from an order NW deflation window and LD.GT.(NW*NIBBLE)/100, then the next QR sweep is skipped and early deflation is applied immediately to the remaining active diagonal block. Setting IPARMQ(ISPEC=14) = 0 causes TTQRE to skip a multi-shift QR sweep whenever early deflation finds a converged eigenvalue. Setting IPARMQ(ISPEC=14) greater than or equal to 100 prevents TTQRE from skipping a multi-shift QR sweep. ISPEC=15: (NSHFTS) The number of simultaneous shifts in a multi-shift QR iteration. ISPEC=16: (IACC22) IPARMQ is set to 0, 1 or 2 with the following meanings. 0: During the multi-shift QR sweep, xLAQR5 does not accumulate reflections and does not use matrix-matrix multiply to update the far-from-diagonal matrix entries. 1: During the multi-shift QR sweep, xLAQR5 and/or xLAQRaccumulates reflections and uses matrix-matrix multiply to update the far-from-diagonal matrix entries. 2: During the multi-shift QR sweep. xLAQR5 accumulates reflections and takes advantage of 2-by-2 block structure during matrix-matrix multiplies. (If xTRMM is slower than xGEMM, then IPARMQ(ISPEC=16)=1 may be more efficient than IPARMQ(ISPEC=16)=2 despite the greater level of arithmetic work implied by the latter choice.) NAME (input) character string Name of the calling subroutine OPTS (input) character string This is a concatenation of the string arguments to TTQRE. N (input) integer scalar N is the order of the Hessenberg matrix H. 
ILO (input) INTEGER IHI (input) INTEGER It is assumed that H is already upper triangular in rows and columns 1:ILO-1 and IHI+1:N. LWORK (input) integer scalar The amount of workspace available. Further Details =============== Little is known about how best to choose these parameters. It is possible to use different values of the parameters for each of CHSEQR, DHSEQR, SHSEQR and ZHSEQR. It is probably best to choose different parameters for different matrices and different parameters at different times during the iteration, but this has not been implemented --- yet. The best choices of most of the parameters depend in an ill-understood way on the relative execution rate of xLAQR3 and xLAQR5 and on the nature of each particular eigenvalue problem. Experiment may be the only practical way to determine which choices are most effective. Following is a list of default values supplied by IPARMQ. These defaults may be adjusted in order to attain better performance in any particular computational environment. IPARMQ(ISPEC=12) The xLAHQR vs xLAQR0 crossover point. Default: 75. (Must be at least 11.) IPARMQ(ISPEC=13) Recommended deflation window size. This depends on ILO, IHI and NS, the number of simultaneous shifts returned by IPARMQ(ISPEC=15). The default for (IHI-ILO+1).LE.500 is NS. The default for (IHI-ILO+1).GT.500 is 3*NS/2. IPARMQ(ISPEC=14) Nibble crossover point. Default: 14. IPARMQ(ISPEC=15) Number of simultaneous shifts, NS. a multi-shift QR iteration. If IHI-ILO+1 is ... greater than ...but less ... the or equal to ... than default is 0 30 NS = 2+ 30 60 NS = 4+ 60 150 NS = 10 150 590 NS = ** 590 3000 NS = 64 3000 6000 NS = 128 6000 infinity NS = 256 (+) By default matrices of this order are passed to the implicit double shift routine xLAHQR. See IPARMQ(ISPEC=12) above. These values of NS are used only in case of a rare xLAHQR failure. (**) The asterisks (**) indicate an ad-hoc function increasing from 10 to 64. IPARMQ(ISPEC=16) Select structured matrix multiply. (See ISPEC=16 above for details.) Default: 3. ================================================================ */ if (*ispec == 15 || *ispec == 13 || *ispec == 16) { /* ==== Set the number simultaneous shifts ==== */ nh = *ihi - *ilo + 1; ns = 2; if (nh >= 30) { ns = 4; } if (nh >= 60) { ns = 10; } if (nh >= 150) { /* Computing MAX */ r__1 = log((real) nh) / log(2.f); i__1 = 10, i__2 = nh / i_nint(&r__1); ns = max(i__1,i__2); } if (nh >= 590) { ns = 64; } if (nh >= 3000) { ns = 128; } if (nh >= 6000) { ns = 256; } /* Computing MAX */ i__1 = 2, i__2 = ns - ns % 2; ns = max(i__1,i__2); } if (*ispec == 12) { /* ===== Matrices of order smaller than NMIN get sent . to xLAHQR, the classic double shift algorithm. . This must be at least 11. ==== */ ret_val = 75; } else if (*ispec == 14) { /* ==== INIBL: skip a multi-shift qr iteration and . whenever aggressive early deflation finds . at least (NIBBLE*(window size)/100) deflations. ==== */ ret_val = 14; } else if (*ispec == 15) { /* ==== NSHFTS: The number of simultaneous shifts ===== */ ret_val = ns; } else if (*ispec == 13) { /* ==== NW: deflation window size. ==== */ if (nh <= 500) { ret_val = ns; } else { ret_val = ns * 3 / 2; } } else if (*ispec == 16) { /* ==== IACC22: Whether to accumulate reflections . before updating the far-from-diagonal elements . and whether to use 2-by-2 block structure while . doing it. A small amount of work could be saved . by making this choice dependent also upon the . NH=IHI-ILO+1. 
*/ ret_val = 0; if (ns >= 14) { ret_val = 1; } if (ns >= 14) { ret_val = 2; } } else { /* ===== invalid value of ispec ===== */ ret_val = -1; } /* ==== End of IPARMQ ==== */ return ret_val; } /* iparmq_ */ nipy-0.3.0/libcstat/lapack_lite/f2c.h000066400000000000000000000102641210344137400173730ustar00rootroot00000000000000/* f2c.h -- Standard Fortran to C header file */ /** barf [ba:rf] 2. "He suggested using FORTRAN, and everybody barfed." - From The Shogakukan DICTIONARY OF NEW ENGLISH (Second edition) */ #ifndef F2C_INCLUDE #define F2C_INCLUDE typedef int integer; typedef char *address; typedef short int shortint; typedef float real; typedef double doublereal; typedef struct { real r, i; } complex; typedef struct { doublereal r, i; } doublecomplex; typedef int logical; typedef short int shortlogical; typedef char logical1; typedef char integer1; #define TRUE_ (1) #define FALSE_ (0) /* Extern is for use with -E */ #ifndef Extern #define Extern extern #endif /* I/O stuff */ #ifdef f2c_i2 /* for -i2 */ typedef short flag; typedef short ftnlen; typedef short ftnint; #else typedef int flag; typedef int ftnlen; typedef int ftnint; #endif /*external read, write*/ typedef struct { flag cierr; ftnint ciunit; flag ciend; char *cifmt; ftnint cirec; } cilist; /*internal read, write*/ typedef struct { flag icierr; char *iciunit; flag iciend; char *icifmt; ftnint icirlen; ftnint icirnum; } icilist; /*open*/ typedef struct { flag oerr; ftnint ounit; char *ofnm; ftnlen ofnmlen; char *osta; char *oacc; char *ofm; ftnint orl; char *oblnk; } olist; /*close*/ typedef struct { flag cerr; ftnint cunit; char *csta; } cllist; /*rewind, backspace, endfile*/ typedef struct { flag aerr; ftnint aunit; } alist; /* inquire */ typedef struct { flag inerr; ftnint inunit; char *infile; ftnlen infilen; ftnint *inex; /*parameters in standard's order*/ ftnint *inopen; ftnint *innum; ftnint *innamed; char *inname; ftnlen innamlen; char *inacc; ftnlen inacclen; char *inseq; ftnlen inseqlen; char *indir; ftnlen indirlen; char *infmt; ftnlen infmtlen; char *inform; ftnint informlen; char *inunf; ftnlen inunflen; ftnint *inrecl; ftnint *innrec; char *inblank; ftnlen inblanklen; } inlist; #define VOID void union Multitype { /* for multiple entry points */ shortint h; integer i; real r; doublereal d; complex c; doublecomplex z; }; typedef union Multitype Multitype; typedef long Long; /* No longer used; formerly in Namelist */ struct Vardesc { /* for Namelist */ char *name; char *addr; ftnlen *dims; int type; }; typedef struct Vardesc Vardesc; struct Namelist { char *name; Vardesc **vars; int nvars; }; typedef struct Namelist Namelist; #ifndef abs #define abs(x) ((x) >= 0 ? (x) : -(x)) #endif #define dabs(x) (doublereal)abs(x) #ifndef min #define min(a,b) ((a) <= (b) ? (a) : (b)) #endif #ifndef max #define max(a,b) ((a) >= (b) ? 
(a) : (b)) #endif #define dmin(a,b) (doublereal)min(a,b) #define dmax(a,b) (doublereal)max(a,b) /* procedure parameter types for -A and -C++ */ #define F2C_proc_par_types 1 #ifdef __cplusplus typedef int /* Unknown procedure type */ (*U_fp)(...); typedef shortint (*J_fp)(...); typedef integer (*I_fp)(...); typedef real (*R_fp)(...); typedef doublereal (*D_fp)(...), (*E_fp)(...); typedef /* Complex */ VOID (*C_fp)(...); typedef /* Double Complex */ VOID (*Z_fp)(...); typedef logical (*L_fp)(...); typedef shortlogical (*K_fp)(...); typedef /* Character */ VOID (*H_fp)(...); typedef /* Subroutine */ int (*S_fp)(...); #else typedef int /* Unknown procedure type */ (*U_fp)(void); typedef shortint (*J_fp)(void); typedef integer (*I_fp)(void); typedef real (*R_fp)(void); typedef doublereal (*D_fp)(void), (*E_fp)(void); typedef /* Complex */ VOID (*C_fp)(void); typedef /* Double Complex */ VOID (*Z_fp)(void); typedef logical (*L_fp)(void); typedef shortlogical (*K_fp)(void); typedef /* Character */ VOID (*H_fp)(void); typedef /* Subroutine */ int (*S_fp)(void); #endif /* E_fp is for real functions when -R is not specified */ typedef VOID C_f; /* complex function */ typedef VOID H_f; /* character function */ typedef VOID Z_f; /* double complex function */ typedef doublereal E_f; /* real function with -R not specified */ /* undef any lower-case symbols that your C compiler predefines, e.g.: */ #ifndef Skip_f2c_Undefs #undef cray #undef gcos #undef mc68010 #undef mc68020 #undef mips #undef pdp11 #undef sgi #undef sparc #undef sun #undef sun2 #undef sun3 #undef sun4 #undef u370 #undef u3b #undef u3b2 #undef u3b5 #undef unix #undef vax #endif #endif nipy-0.3.0/libcstat/lapack_lite/f2c_lite.c000066400000000000000000000200751210344137400204040ustar00rootroot00000000000000#include #include #include #include #include "f2c.h" extern void s_wsfe(cilist *f) {;} extern void e_wsfe(void) {;} extern void do_fio(integer *c, char *s, ftnlen l) {;} /* You'll want this if you redo the *_lite.c files with the -C option * to f2c for checking array subscripts. (It's not suggested you do that * for production use, of course.) */ extern int s_rnge(char *var, int index, char *routine, int lineno) { fprintf(stderr, "array index out-of-bounds for %s[%d] in routine %s:%d\n", var, index, routine, lineno); fflush(stderr); abort(); } #ifdef KR_headers extern double sqrt(); double f__cabs(real, imag) double real, imag; #else #undef abs double f__cabs(double real, double imag) #endif { double temp; if(real < 0) real = -real; if(imag < 0) imag = -imag; if(imag > real){ temp = real; real = imag; imag = temp; } if((imag+real) == real) return((double)real); temp = imag/real; temp = real*sqrt(1.0 + temp*temp); /*overflow!!*/ return(temp); } VOID #ifdef KR_headers d_cnjg(r, z) doublecomplex *r, *z; #else d_cnjg(doublecomplex *r, doublecomplex *z) #endif { r->r = z->r; r->i = - z->i; } #ifdef KR_headers double d_imag(z) doublecomplex *z; #else double d_imag(doublecomplex *z) #endif { return(z->i); } #define log10e 0.43429448190325182765 #ifdef KR_headers double log(); double d_lg10(x) doublereal *x; #else #undef abs double d_lg10(doublereal *x) #endif { return( log10e * log(*x) ); } #ifdef KR_headers double d_sign(a,b) doublereal *a, *b; #else double d_sign(doublereal *a, doublereal *b) #endif { double x; x = (*a >= 0 ? *a : - *a); return( *b >= 0 ? x : -x); } #ifdef KR_headers double floor(); integer i_dnnt(x) doublereal *x; #else #undef abs integer i_dnnt(doublereal *x) #endif { return( (*x)>=0 ? 
floor(*x + .5) : -floor(.5 - *x) ); } /* Additions to the original numpy code for compliency with Lapack 3-1-1 */ #ifdef KR_headers double floor(); double d_nint(x) doublereal *x; #else #undef abs double d_nint(doublereal *x) #endif { return( (*x)>=0 ? floor(*x + .5) : -floor(.5 - *x) ); } #ifdef KR_headers double floor(); integer i_nint(x) real *x; #else #undef abs integer i_nint(real *x) #endif { return (integer)(*x >= 0 ? floor(*x + .5) : -floor(.5 - *x)); } /* End of additions */ #ifdef KR_headers double pow(); double pow_dd(ap, bp) doublereal *ap, *bp; #else #undef abs double pow_dd(doublereal *ap, doublereal *bp) #endif { return(pow(*ap, *bp) ); } #ifdef KR_headers double pow_di(ap, bp) doublereal *ap; integer *bp; #else double pow_di(doublereal *ap, integer *bp) #endif { double pow, x; integer n; unsigned long u; pow = 1; x = *ap; n = *bp; if(n != 0) { if(n < 0) { n = -n; x = 1/x; } for(u = n; ; ) { if(u & 01) pow *= x; if(u >>= 1) x *= x; else break; } } return(pow); } /* Unless compiled with -DNO_OVERWRITE, this variant of s_cat allows the * target of a concatenation to appear on its right-hand side (contrary * to the Fortran 77 Standard, but in accordance with Fortran 90). */ #define NO_OVERWRITE #ifndef NO_OVERWRITE #undef abs #ifdef KR_headers extern char *F77_aloc(); extern void free(); extern void exit_(); #else extern char *F77_aloc(ftnlen, char*); #endif #endif /* NO_OVERWRITE */ VOID #ifdef KR_headers s_cat(lp, rpp, rnp, np, ll) char *lp, *rpp[]; ftnlen rnp[], *np, ll; #else s_cat(char *lp, char *rpp[], ftnlen rnp[], ftnlen *np, ftnlen ll) #endif { ftnlen i, nc; char *rp; ftnlen n = *np; #ifndef NO_OVERWRITE ftnlen L, m; char *lp0, *lp1; lp0 = 0; lp1 = lp; L = ll; i = 0; while(i < n) { rp = rpp[i]; m = rnp[i++]; if (rp >= lp1 || rp + m <= lp) { if ((L -= m) <= 0) { n = i; break; } lp1 += m; continue; } lp0 = lp; lp = lp1 = F77_aloc(L = ll, "s_cat"); break; } lp1 = lp; #endif /* NO_OVERWRITE */ for(i = 0 ; i < n ; ++i) { nc = ll; if(rnp[i] < nc) nc = rnp[i]; ll -= nc; rp = rpp[i]; while(--nc >= 0) *lp++ = *rp++; } while(--ll >= 0) *lp++ = ' '; #ifndef NO_OVERWRITE if (lp0) { memmove(lp0, lp1, L); free(lp1); } #endif } /* compare two strings */ #ifdef KR_headers integer s_cmp(a0, b0, la, lb) char *a0, *b0; ftnlen la, lb; #else integer s_cmp(char *a0, char *b0, ftnlen la, ftnlen lb) #endif { register unsigned char *a, *aend, *b, *bend; a = (unsigned char *)a0; b = (unsigned char *)b0; aend = a + la; bend = b + lb; if(la <= lb) { while(a < aend) if(*a != *b) return( *a - *b ); else { ++a; ++b; } while(b < bend) if(*b != ' ') return( ' ' - *b ); else ++b; } else { while(b < bend) if(*a == *b) { ++a; ++b; } else return( *a - *b ); while(a < aend) if(*a != ' ') return(*a - ' '); else ++a; } return(0); } /* Unless compiled with -DNO_OVERWRITE, this variant of s_copy allows the * target of an assignment to appear on its right-hand side (contrary * to the Fortran 77 Standard, but in accordance with Fortran 90), * as in a(2:5) = a(4:7) . 
*/ /* assign strings: a = b */ #ifdef KR_headers VOID s_copy(a, b, la, lb) register char *a, *b; ftnlen la, lb; #else void s_copy(register char *a, register char *b, ftnlen la, ftnlen lb) #endif { register char *aend, *bend; aend = a + la; if(la <= lb) #ifndef NO_OVERWRITE if (a <= b || a >= b + la) #endif while(a < aend) *a++ = *b++; #ifndef NO_OVERWRITE else for(b += la; a < aend; ) *--aend = *--b; #endif else { bend = b + lb; #ifndef NO_OVERWRITE if (a <= b || a >= bend) #endif while(b < bend) *a++ = *b++; #ifndef NO_OVERWRITE else { a += lb; while(b < bend) *--a = *--bend; a += lb; } #endif while(a < aend) *a++ = ' '; } } #ifdef KR_headers double f__cabs(); double z_abs(z) doublecomplex *z; #else double f__cabs(double, double); double z_abs(doublecomplex *z) #endif { return( f__cabs( z->r, z->i ) ); } #ifdef KR_headers extern void sig_die(); VOID z_div(c, a, b) doublecomplex *a, *b, *c; #else extern void sig_die(char*, int); void z_div(doublecomplex *c, doublecomplex *a, doublecomplex *b) #endif { double ratio, den; double abr, abi; if( (abr = b->r) < 0.) abr = - abr; if( (abi = b->i) < 0.) abi = - abi; if( abr <= abi ) { /*Let IEEE Infinties handle this ;( */ /*if(abi == 0) sig_die("complex division by zero", 1);*/ ratio = b->r / b->i ; den = b->i * (1 + ratio*ratio); c->r = (a->r*ratio + a->i) / den; c->i = (a->i*ratio - a->r) / den; } else { ratio = b->i / b->r ; den = b->r * (1 + ratio*ratio); c->r = (a->r + a->i*ratio) / den; c->i = (a->i - a->r*ratio) / den; } } #ifdef KR_headers double sqrt(), f__cabs(); VOID z_sqrt(r, z) doublecomplex *r, *z; #else #undef abs extern double f__cabs(double, double); void z_sqrt(doublecomplex *r, doublecomplex *z) #endif { double mag; if( (mag = f__cabs(z->r, z->i)) == 0.) r->r = r->i = 0.; else if(z->r > 0) { r->r = sqrt(0.5 * (mag + z->r) ); r->i = z->i / r->r / 2; } else { r->i = sqrt(0.5 * (mag - z->r) ); if(z->i < 0) r->i = - r->i; r->r = z->i / r->i / 2; } } #ifdef __cplusplus extern "C" { #endif #ifdef KR_headers integer pow_ii(ap, bp) integer *ap, *bp; #else integer pow_ii(integer *ap, integer *bp) #endif { integer pow, x, n; unsigned long u; x = *ap; n = *bp; if (n <= 0) { if (n == 0 || x == 1) return 1; if (x != -1) return x == 0 ? 1/x : 0; n = -n; } u = n; for(pow = 1; ; ) { if(u & 01) pow *= x; if(u >>= 1) x *= x; else break; } return(pow); } #ifdef __cplusplus } #endif #ifdef KR_headers extern void f_exit(); VOID s_stop(s, n) char *s; ftnlen n; #else #undef abs #undef min #undef max #ifdef __cplusplus extern "C" { #endif #ifdef __cplusplus extern "C" { #endif void f_exit(void); int s_stop(char *s, ftnlen n) #endif { int i; if(n > 0) { fprintf(stderr, "STOP "); for(i = 0; i #include #define COPY_BUFFERS_USING_NUMPY 1 /* This function must be called before the module can work because PyArray_API is defined static, in order not to share that symbol within the dso. 
(import_array() asks the pointer value to the python process) */ void fffpy_import_array(void) { import_array(); return; } /* Static functions */ static npy_intp _PyArray_main_axis(const PyArrayObject* x, int* ok); static fff_vector* _fff_vector_new_from_buffer(const char* data, npy_intp dim, npy_intp stride, int type, int itemsize); static fff_vector* _fff_vector_new_from_PyArrayIter(const PyArrayIterObject* it, npy_intp axis); static void _fff_vector_sync_with_PyArrayIter(fff_vector* y, const PyArrayIterObject* it, npy_intp axis); /* Routines for copying 1d arrays into contiguous double arrays */ #if COPY_BUFFERS_USING_NUMPY # define COPY_BUFFER(y, data, stride, type, itemsize) \ fff_vector_fetch_using_NumPy(y, data, stride, type, itemsize); #else # define COPY_BUFFER(y, data, stride, type, itemsize) \ fff_vector_fetch(y, (void*)data, fff_datatype_fromNumPy(type), stride/itemsize) #endif /* Copy a buffer using numpy. Copy buffer x into y assuming that y is contiguous. */ void fff_vector_fetch_using_NumPy(fff_vector* y, const char* x, npy_intp stride, int type, int itemsize) { npy_intp dim[1] = {(npy_intp)y->size}; npy_intp strides[1] = {stride}; PyArrayObject* X = (PyArrayObject*) PyArray_New(&PyArray_Type, 1, dim, type, strides, (void*)x, itemsize, NPY_BEHAVED, NULL); PyArrayObject* Y = (PyArrayObject*) PyArray_SimpleNewFromData(1, dim, NPY_DOUBLE, (void*)y->data); PyArray_CastTo(Y, X); Py_XDECREF(Y); Py_XDECREF(X); return; } /* Create a fff_vector from an already allocated buffer. This function acts as a fff_vector constructor that is compatible with fff_vector_delete. */ static fff_vector* _fff_vector_new_from_buffer(const char* data, npy_intp dim, npy_intp stride, int type, int itemsize) { fff_vector* y; size_t sizeof_double = sizeof(double); /* If the input array is double and is aligned, just wrap without copying */ if ((type == NPY_DOUBLE) && (itemsize==sizeof_double)) { y = (fff_vector*)malloc(sizeof(fff_vector)); y->size = (size_t)dim; y->stride = (size_t)stride/sizeof_double; y->data = (double*)data; y->owner = 0; } /* Otherwise, output a owner contiguous vector with copied data */ else { y = fff_vector_new((size_t)dim); COPY_BUFFER(y, data, stride, type, itemsize); } return y; } /* Find the axis with largest dimension */ npy_intp _PyArray_main_axis(const PyArrayObject* x, int* ok) { npy_intp axis, count, i, dim, ndim = PyArray_NDIM(x); *ok = 1; axis = 0; count = 0; for(i=0; i 1) { count ++; axis = i; } } if (count > 1) *ok = 0; return axis; } fff_vector* fff_vector_fromPyArray(const PyArrayObject* x) { fff_vector* y; int ok; npy_intp axis = _PyArray_main_axis(x, &ok); if (!ok) { FFF_ERROR("Input array is not a vector", EINVAL); return NULL; } y = _fff_vector_new_from_buffer(PyArray_DATA(x), PyArray_DIM(x, axis), PyArray_STRIDE(x, axis), PyArray_TYPE(x), PyArray_ITEMSIZE(x)); return y; } /* Export a fff_vector to a PyArray, and delete it. This function is a fff_vector destructor compatible with any either fff_vector_new or _fff_vector_new_from_buffer. 
*/ PyArrayObject* fff_vector_toPyArray(fff_vector* y) { PyArrayObject* x; size_t size; npy_intp dims[1]; if (y == NULL) return NULL; size = y->size; dims[0] = (npy_intp) size; /* If the fff_vector is owner (hence contiguous), just pass the buffer to Python and transfer ownership */ if (y->owner) { x = (PyArrayObject*) PyArray_SimpleNewFromData(1, dims, NPY_DOUBLE, (void*)y->data); x->flags = (x->flags) | NPY_OWNDATA; } /* Otherwise, create Python array from scratch */ else x = fff_vector_const_toPyArray(y); /* Ciao bella */ free(y); return x; } /* Export without deleting */ PyArrayObject* fff_vector_const_toPyArray(const fff_vector* y) { PyArrayObject* x; size_t i, size = y->size, stride = y->stride; double* data = (double*) malloc(size*sizeof(double)); double* bufX = data; double* bufY = y->data; npy_intp dims[1]; dims[0] = (npy_intp) size; for (i=0; iflags = (x->flags) | NPY_OWNDATA; return x; } /* Get a fff_matrix from an input PyArray. This function acts as a fff_vector constructor that is compatible with fff_vector_delete. */ fff_matrix* fff_matrix_fromPyArray(const PyArrayObject* x) { fff_matrix* y; npy_intp dim[2]; PyArrayObject* xd; /* Check that the input object is a two-dimensional array */ if (PyArray_NDIM(x) != 2) { FFF_ERROR("Input array is not a matrix", EINVAL); return NULL; } /* If the PyArray is double, contiguous and aligned just wrap without copying */ if ((PyArray_TYPE(x) == NPY_DOUBLE) && (PyArray_ISCONTIGUOUS(x)) && (PyArray_ISALIGNED(x))) { y = (fff_matrix*) malloc(sizeof(fff_matrix)); y->size1 = (size_t) PyArray_DIM(x,0); y->size2 = (size_t) PyArray_DIM(x,1); y->tda = y->size2; y->data = (double*) PyArray_DATA(x); y->owner = 0; } /* Otherwise, output a owner (contiguous) matrix with copied data */ else { size_t dim0 = PyArray_DIM(x,0), dim1 = PyArray_DIM(x,1); y = fff_matrix_new((size_t)dim0, (size_t)dim1); dim[0] = dim0; dim[1] = dim1; xd = (PyArrayObject*) PyArray_SimpleNewFromData(2, dim, NPY_DOUBLE, (void*)y->data); PyArray_CastTo(xd, (PyArrayObject*)x); Py_XDECREF(xd); } return y; } /* Export a fff_matrix to a PyArray, and delete it. This function is a fff_matrix destructor compatible with any of the following constructors: fff_matrix_new and fff_matrix_fromPyArray. */ PyArrayObject* fff_matrix_toPyArray(fff_matrix* y) { PyArrayObject* x; size_t size1; size_t size2; size_t tda; npy_intp dims[2]; if (y == NULL) return NULL; size1 = y->size1; size2 = y->size2; tda = y->tda; dims[0] = (npy_intp) size1; dims[1] = (npy_intp) size2; /* If the fff_matrix is contiguous and owner, just pass the buffer to Python and transfer ownership */ if ((tda == size2) && (y->owner)) { x = (PyArrayObject*) PyArray_SimpleNewFromData(2, dims, NPY_DOUBLE, (void*)y->data); x->flags = (x->flags) | NPY_OWNDATA; } /* Otherwise, create PyArray from scratch. Note, the input fff_matrix is necessarily in row-major order. 
*/ else x = fff_matrix_const_toPyArray(y); /* Ciao bella */ free(y); return x; } /* Export without deleting */ PyArrayObject* fff_matrix_const_toPyArray(const fff_matrix* y) { PyArrayObject* x; size_t size1 = y->size1, size2 = y->size2, tda = y->tda; size_t i, j, pos; double* data = (double*) malloc(size1*size2*sizeof(double)); double* bufX = data; double* bufY = y->data; npy_intp dims[2]; dims[0] = (npy_intp) size1; dims[1] = (npy_intp) size2; for (i=0; iflags = (x->flags) | NPY_OWNDATA; return x; } /** Static routines **/ /**** Data type conversions *****/ fff_datatype fff_datatype_fromNumPy(int npy_type) { fff_datatype fff_type; switch (npy_type) { case NPY_UBYTE: fff_type = FFF_UCHAR; break; case NPY_BYTE: fff_type = FFF_SCHAR; break; case NPY_USHORT: fff_type = FFF_USHORT; break; case NPY_SHORT: fff_type = FFF_SSHORT; break; case NPY_UINT: fff_type = FFF_UINT; break; case NPY_INT: fff_type = FFF_INT; break; case NPY_ULONG: fff_type = FFF_ULONG; break; case NPY_LONG: fff_type = FFF_LONG; break; case NPY_FLOAT: fff_type = FFF_FLOAT; break; case NPY_DOUBLE: fff_type = FFF_DOUBLE; break; default: fff_type = FFF_UNKNOWN_TYPE; break; } /* Return the datatype */ return fff_type; } int fff_datatype_toNumPy(fff_datatype fff_type) { int npy_type; switch(fff_type) { case FFF_UCHAR: npy_type = NPY_UBYTE; break; case FFF_SCHAR: npy_type = NPY_BYTE; break; case FFF_USHORT: npy_type = NPY_USHORT; break; case FFF_SSHORT: npy_type = NPY_SHORT; break; case FFF_UINT: npy_type = NPY_UINT; break; case FFF_INT: npy_type = NPY_INT; break; case FFF_ULONG: npy_type = NPY_ULONG; break; case FFF_LONG: npy_type = NPY_LONG; break; case FFF_FLOAT: npy_type = NPY_FLOAT; break; case FFF_DOUBLE: npy_type = NPY_DOUBLE; break; default: npy_type = NPY_NOTYPE; break; } return npy_type; } /**** fff_array interface ****/ fff_array* fff_array_fromPyArray(const PyArrayObject* x) { fff_array* y; fff_datatype datatype; unsigned int nbytes; size_t dimX = 1, dimY = 1, dimZ = 1, dimT = 1; size_t offX = 0, offY = 0, offZ = 0, offT = 0; size_t ndims = (size_t)PyArray_NDIM(x); /* Check that the input array has less than four dimensions */ if (ndims > 4) { FFF_ERROR("Input array has more than four dimensions", EINVAL); return NULL; } /* Check that the input array is aligned */ if (! 
PyArray_ISALIGNED(x)) { FFF_ERROR("Input array is not aligned", EINVAL); return NULL; } /* Match the data type */ datatype = fff_datatype_fromNumPy(PyArray_TYPE(x)); if (datatype == FFF_UNKNOWN_TYPE) { FFF_ERROR("Unrecognized data type", EINVAL); return NULL; } /* Dimensions and offsets */ nbytes = fff_nbytes(datatype); dimX = PyArray_DIM(x, 0); offX = PyArray_STRIDE(x, 0)/nbytes; if (ndims > 1) { dimY = PyArray_DIM(x, 1); offY = PyArray_STRIDE(x, 1)/nbytes; if (ndims > 2) { dimZ = PyArray_DIM(x, 2); offZ = PyArray_STRIDE(x, 2)/nbytes; if (ndims > 3) { dimT = PyArray_DIM(x, 3); offT = PyArray_STRIDE(x, 3)/nbytes; } } } /* Create array (not owner) */ y = (fff_array*)malloc(sizeof(fff_array)); *y = fff_array_view(datatype, (void*) PyArray_DATA(x), dimX, dimY, dimZ, dimT, offX, offY, offZ, offT); return y; } PyArrayObject* fff_array_toPyArray(fff_array* y) { PyArrayObject* x; npy_intp dims[4]; int datatype; fff_array* yy; if (y == NULL) return NULL; dims[0] = y->dimX; dims[1] = y->dimY; dims[2] = y->dimZ; dims[3] = y->dimT; /* Match data type */ datatype = fff_datatype_toNumPy(y->datatype); if (datatype == NPY_NOTYPE) { FFF_ERROR("Unrecognized data type", EINVAL); return NULL; } /* Make sure the fff array owns its data, which may require a copy */ if (y->owner) yy = y; else { yy = fff_array_new(y->datatype, y->dimX, y->dimY, y->dimZ, y->dimT); fff_array_copy(yy, y); } /* Create a Python array from the array data (which is contiguous since it is owner). We can use PyArray_SimpleNewFromData given that yy is C-contiguous by fff_array_new. */ x = (PyArrayObject*) PyArray_SimpleNewFromData(yy->ndims, dims, datatype, (void*)yy->data); /* Transfer ownership to Python */ x->flags = (x->flags) | NPY_OWNDATA; /* Dealloc memory if needed */ if (! y->owner) free(yy); /* Delete array */ free(y); return x; } /******************************************************************** Multi-iterator object. ********************************************************************/ static int _PyArray_BroadcastAllButAxis (PyArrayMultiIterObject* mit, int axis); /* Create a fff multi iterator object. Involves creating a PyArrayMultiArrayIter instance that lets us iterate simultaneously on an arbitrary number of numpy arrays EXCEPT in one common axis. There does not seem to exist a built-in PyArrayMultiArrayIter constructor for this usage. If it pops up one day, part of the following code should be replaced. Similarly to the default PyArrayMultiArrayIter constructor, we need to set up broadcasting rules. For now, we simply impose that all arrays have exactly the same number of dimensions and that all dimensions be equal except along the "non-iterated" axis. FIXME: The following code does not perform any checking, and will surely crash if the arrays do not fulfill the conditions. */ fffpy_multi_iterator* fffpy_multi_iterator_new(int narr, int axis, ...) 
{ fffpy_multi_iterator* thisone; va_list va; fff_vector** vector; PyArrayMultiIterObject *multi; PyObject *current, *arr; int i, err=0; /* Create new instance */ thisone = (fffpy_multi_iterator*)malloc(sizeof(fffpy_multi_iterator)); multi = PyArray_malloc(sizeof(PyArrayMultiIterObject)); vector = (fff_vector**)malloc(narr*sizeof(fff_vector*)); /* Initialize the PyArrayMultiIterObject instance from the variadic arguments */ PyObject_Init((PyObject *)multi, &PyArrayMultiIter_Type); for (i=0; iiters[i] = NULL; multi->numiter = narr; multi->index = 0; va_start(va, axis); for (i=0; iiters[i] = (PyArrayIterObject *)PyArray_IterAllButAxis(arr, &axis); Py_DECREF(arr); } } va_end(va); /* Test */ if (!err && _PyArray_BroadcastAllButAxis(multi, axis) < 0) err=1; if (err) { FFF_ERROR("Cannot create broadcast object", ENOMEM); free(thisone); free(vector); Py_DECREF(multi); return NULL; } /* Initialize the multi iterator */ PyArray_MultiIter_RESET(multi); /* Create the fff vectors (views or copies) */ for(i=0; iiters[i], axis); /* Instantiate fiels */ thisone->narr = narr; thisone->axis = axis; thisone->vector = vector; thisone->multi = multi; thisone->index = thisone->multi->index; thisone->size = thisone->multi->size; return thisone; } void fffpy_multi_iterator_delete(fffpy_multi_iterator* thisone) { unsigned int i; Py_DECREF(thisone->multi); for(i=0; inarr; i++) fff_vector_delete(thisone->vector[i]); free(thisone->vector); free(thisone); return; } void fffpy_multi_iterator_update(fffpy_multi_iterator* thisone) { unsigned int i; PyArray_MultiIter_NEXT(thisone->multi); for(i=0; inarr; i++) _fff_vector_sync_with_PyArrayIter(thisone->vector[i], (const PyArrayIterObject*)thisone->multi->iters[i], thisone->axis); thisone->index = thisone->multi->index; return; } void fffpy_multi_iterator_reset(fffpy_multi_iterator* thisone) { unsigned int i; PyArray_MultiIter_RESET(thisone->multi); for(i=0; inarr; i++) _fff_vector_sync_with_PyArrayIter(thisone->vector[i], (const PyArrayIterObject*)thisone->multi->iters[i], thisone->axis); thisone->index = thisone->multi->index; return; } static int _PyArray_BroadcastAllButAxis (PyArrayMultiIterObject* mit, int axis) { int i, nd; npy_intp size, tmp; PyArrayIterObject *it; /* Not very robust */ it = mit->iters[0]; /* Set the dimensions */ nd = it->ao->nd; mit->nd = nd; for(i=0, size=1; iao->dimensions[i]; mit->dimensions[i] = tmp; if (i!=axis) size *= tmp; } mit->size = size; /* Not very robust either */ return 0; } /* Create an fff_vector from a PyArrayIter object */ fff_vector* _fff_vector_new_from_PyArrayIter(const PyArrayIterObject* it, npy_intp axis) { fff_vector* y; char* data = PyArray_ITER_DATA(it); PyArrayObject* ao = (PyArrayObject*) it->ao; npy_intp dim = PyArray_DIM(ao, axis); npy_intp stride = PyArray_STRIDE(ao, axis); int type = PyArray_TYPE(ao); int itemsize = PyArray_ITEMSIZE(ao); y = _fff_vector_new_from_buffer(data, dim, stride, type, itemsize); return y; } /* Fetch vector data from an iterator (view or copy) */ void _fff_vector_sync_with_PyArrayIter(fff_vector* y, const PyArrayIterObject* it, npy_intp axis) { if (y->owner) { PyArrayObject* ao = (PyArrayObject*) it->ao; COPY_BUFFER(y, PyArray_ITER_DATA(it), PyArray_STRIDE(ao, axis), PyArray_TYPE(ao), PyArray_ITEMSIZE(ao)); } else y->data = (double*) PyArray_ITER_DATA(it); return; } nipy-0.3.0/libcstat/wrapper/fffpy.h000066400000000000000000000121061210344137400172400ustar00rootroot00000000000000#include #include #include #include #include /*! 
\file fffpy.h \brief Python interface to \a fff \author Alexis Roche, Benjamin Thyreau, Bertrand Thirion \date 2006-2009 */ #ifndef NPY_VERSION #define npy_intp intp #define NPY_OWNDATA OWNDATA #define NPY_CONTIGUOUS CONTIGUOUS #define NPY_BEHAVED BEHAVED_FLAGS #endif #define fffpyZeroLONG() (PyArrayObject*)PyArray_SimpleNew(1,(npy_intp*)"\0\0\0\0", PyArray_LONG); /*! \brief Import numpy C API Any Python module written in C, and using the fffpy interface, must call this function to work, because \c PyArray_API is defined static, in order not to share that symbol within the dso. (import_array() asks the pointer value to the python process) */ extern void fffpy_import_array(void); /*! \brief Convert \c PyArrayObject to \c fff_vector \param x input numpy array This function may be seen as a \c fff_vector constructor compatible with \c fff_vector_delete. If the input has type \c PyArray_DOUBLE, whether or not it is contiguous, the new \c fff_vector is not self-owned and borrows a reference to the PyArrayObject's data. Otherwise, data are copied and the \c fff_vector is self-owned (hence contiguous) just like when created from scratch. Notice, the function returns \c NULL if the input array has more than one dimension. */ extern fff_vector* fff_vector_fromPyArray(const PyArrayObject* x); /*! \brief Convert \c fff_vector to \c PyArrayObject \param y input vector Conversely to \c fff_vector_fromPyArray, this function acts as a \c fff_vector destructor compatible with \c fff_vector_new, returning a new PyArrayObject reference. If the input vector is contiguous and self-owned, array ownership is simply transferred to Python; otherwise, the data array is copied. */ extern PyArrayObject* fff_vector_toPyArray(fff_vector* y); /*! \brief Convert \c fff_vector to \c PyArrayObject, without destruction \param y input const vector Unlike \c fff_vector_toPyArray, this function does not delete the input fff_vector. It always forces a copy of the data array. This function is useful when exporting to Python a fff_vector that belongs to a local structure having its own destruction method. */ extern PyArrayObject* fff_vector_const_toPyArray(const fff_vector* y); /*! \brief Convert \c PyArrayObject to \c fff_matrix \param x input numpy array This function may be seen as a \c fff_matrix constructor compatible with \c fff_matrix_free. If the input has type \c PyArray_DOUBLE and is contiguous, the new \c fff_matrix is not self-owned and borrows a reference to the PyArrayObject's data. Otherwise, data are copied and the \c fff_matrix is self-owned (hence contiguous) just like when created from scratch. \c NULL is returned if the input array does not have exactly two dimensions. Remarks: 1) non-contiguity provokes a copy because the \c fff_matrix structure does not support strides; 2) matrices in column-major order (Fortran convention) always get copied using this function. */ extern fff_matrix* fff_matrix_fromPyArray(const PyArrayObject* x); /*! \brief Convert \c fff_matrix to \c PyArrayObject \param y input matrix Conversely to \c fff_matrix_fromPyArray, this function acts as a \c fff_matrix destructor compatible with \c fff_matrix_new, returning a new PyArrayObject reference. If the input matrix is contiguous and self-owned, array ownership is simply transferred to Python; otherwise, the data array is copied. */ extern PyArrayObject* fff_matrix_toPyArray(fff_matrix* y); /*! 
\brief Convert \c fff_matrix to \c PyArrayObject, without destruction \param y input const matrix Unlike \c fff_matrix_toPyArray, this function does not delete the input fff_matrix. It always forces a copy of the data array. This function is useful when exporting to Python a fff_matrix that belongs to a local structure having its own destruction method. */ extern PyArrayObject* fff_matrix_const_toPyArray(const fff_matrix* y); /*! \brief Maps a numpy array to an fff_array \param x input array This function instantiates an fff_array that borrows data from the numpy array. Delete using \c fff_array_delete. */ extern fff_array* fff_array_fromPyArray(const PyArrayObject* x); extern PyArrayObject* fff_array_toPyArray(fff_array* y); extern fff_datatype fff_datatype_fromNumPy(int npy_type); extern int fff_datatype_toNumPy(fff_datatype fff_type); extern void fff_vector_fetch_using_NumPy(fff_vector* y, const char* data, npy_intp stride, int type, int itemsize); /* Multi-iterator object. */ typedef struct { int narr; int axis; fff_vector** vector; size_t index; size_t size; PyArrayMultiIterObject *multi; } fffpy_multi_iterator; extern fffpy_multi_iterator* fffpy_multi_iterator_new(int narr, int axis, ...); extern void fffpy_multi_iterator_delete(fffpy_multi_iterator* thisone); extern void fffpy_multi_iterator_update(fffpy_multi_iterator* thisone); extern void fffpy_multi_iterator_reset(fffpy_multi_iterator* thisone); nipy-0.3.0/nipy/000077500000000000000000000000001210344137400134475ustar00rootroot00000000000000nipy-0.3.0/nipy/COMMIT_INFO.txt000066400000000000000000000004141210344137400160120ustar00rootroot00000000000000# This is an ini file that may contain information about the code state [commit hash] # The line below may contain a valid hash if it has been substituted during 'git archive' archive_subst_hash=afed8d8 # This line may be modified by the install process install_hash= nipy-0.3.0/nipy/__init__.py000066400000000000000000000023571210344137400155670ustar00rootroot00000000000000# emacs: -*- coding: utf-8; mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set fileencoding=utf-8 ft=python sts=4 ts=4 sw=4 et: import os from .info import (LONG_DESCRIPTION as __doc__, URL as __url__, STATUS as __status__, __version__) from nipy.testing import Tester test = Tester().test bench = Tester().bench def _test_local_install(): """ Warn the user that running with nipy being imported locally is a bad idea. """ import os if os.getcwd() == os.sep.join( os.path.abspath(__file__).split(os.sep)[:-2]): import warnings warnings.warn('Running the tests from the install directory may ' 'trigger some failures') _test_local_install() # Add to top-level namespace from nipy.io.api import load_image, save_image, as_image from nipy.core.api import is_image # Set up package information function from .pkg_info import get_pkg_info as _get_pkg_info get_info = lambda : _get_pkg_info(os.path.dirname(__file__)) # Cleanup namespace del _test_local_install # If this file is exec after being imported, the following lines will # fail try: del version del Tester except: pass nipy-0.3.0/nipy/algorithms/000077500000000000000000000000001210344137400156205ustar00rootroot00000000000000nipy-0.3.0/nipy/algorithms/__init__.py000066400000000000000000000006151210344137400177330ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Package containing generic algorithms such as registration, statistics, simulation, etc. 
""" __docformat__ = 'restructuredtext' import statistics import fwhm, interpolation, kernel_smooth, diagnostics from nipy.testing import Tester test = Tester().test bench = Tester().bench nipy-0.3.0/nipy/algorithms/clustering/000077500000000000000000000000001210344137400177775ustar00rootroot00000000000000nipy-0.3.0/nipy/algorithms/clustering/__init__.py000066400000000000000000000005321210344137400221100ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This sub-package contains functions for clustering. It might be removed in the future, and replaced by an optional dependence on scikit learn. """ from nipy.testing import Tester test = Tester().test bench = Tester().bench nipy-0.3.0/nipy/algorithms/clustering/bgmm.py000066400000000000000000001075231210344137400213030ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Bayesian Gaussian Mixture Model Classes: contains the basic fields and methods of Bayesian GMMs the high level functions are/should be binded in C The base class BGMM relies on an implementation that perfoms Gibbs sampling A derived class VBGMM uses Variational Bayes inference instead A third class is introduces to take advnatge of the old C-bindings, but it is limited to diagonal covariance models Author : Bertrand Thirion, 2008-2011 """ import numpy as np import numpy.random as nr from scipy.linalg import inv, cholesky, eigvalsh from scipy.special import gammaln import math from .utils import kmeans from gmm import GMM ################################################################## # ancillary functions ############################################ ################################################################## def detsh(H): """ Routine for the computation of determinants of symmetric positive matrices Parameters ---------- H array of shape(n,n) the input matrix, assumed symmmetric and positive Returns ------- dh: float, the determinant """ return np.prod(eigvalsh(H)) def dirichlet_eval(w, alpha): """ Evaluate the probability of a certain discrete draw w from the Dirichlet density with parameters alpha Parameters ---------- w: array of shape (n) alpha: array of shape (n) """ if np.shape(w) != np.shape(alpha): raise ValueError("incompatible dimensions") loge = np.sum((alpha-1) * np.log(w)) logb = np.sum(gammaln(alpha)) - gammaln(alpha.sum()) loge -= logb return np.exp(loge) def generate_normals(m, P): """ Generate a Gaussian sample with mean m and precision P Parameters ---------- m array of shape n: the mean vector P array of shape (n,n): the precision matrix Returns ------- ng : array of shape(n): a draw from the gaussian density """ icp = inv(cholesky(P)) ng = nr.randn(m.shape[0]) ng = np.dot(ng, icp) ng += m return ng def generate_Wishart(n, V): """ Generate a sample from Wishart density Parameters ---------- n: float, the number of degrees of freedom of the Wishart density V: array of shape (n,n) the scale matrix of the Wishart density Returns ------- W: array of shape (n,n) the draw from Wishart density """ icv = cholesky(V) p = V.shape[0] A = nr.randn(p, p) for i in range(p): A[i, i:] = 0 A[i, i] = np.sqrt(nr.chisquare(n - i)) R = np.dot(icv, A) W = np.dot(R, R.T) return W def wishart_eval(n, V, W, dV=None, dW=None, piV=None): """Evaluation of the probability of W under Wishart(n,V) Parameters ---------- n: float, the number of degrees of freedom (dofs) V: array of shape (n,n) 
the scale matrix of the Wishart density W: array of shape (n,n) the sample to be evaluated dV: float, optional, determinant of V dW: float, optional, determinant of W piV: array of shape (n,n), optional inverse of V Returns ------- (float) the density """ # check that shape(V)==shape(W) p = V.shape[0] if dV == None: dV = detsh(V) if dW == None: dW = detsh(W) if piV == None: piV = inv(V) ldW = math.log(dW) * (n - p - 1) / 2 ltr = - np.trace(np.dot(piV, W)) / 2 la = (n * p * math.log(2) + math.log(dV) * n) / 2 lg = math.log(math.pi) * p * (p - 1) / 4 lg += gammaln(np.arange(n - p + 1, n + 1).astype(np.float) / 2).sum() lt = ldW + ltr - la - lg return math.exp(lt) def normal_eval(mu, P, x, dP=None): """ Probability of x under normal(mu, inv(P)) Parameters ---------- mu: array of shape (n), the mean parameter P: array of shape (n, n), the precision matrix x: array of shape (n), the data to be evaluated Returns ------- (float) the density """ dim = P.shape[0] if dP == None: dP = detsh(P) w0 = math.log(dP) - dim * math.log(2 * math.pi) w0 /= 2 dx = mu - x q = np.dot(np.dot(P, dx), dx) w = w0 - q / 2 like = math.exp(w) return like def generate_perm(k, nperm=100): """ returns an array of shape(nbperm, k) representing the permutations of k elements Parameters ---------- k, int the number of elements to be permuted nperm=100 the maximal number of permutations if gamma(k+1)>nperm: only nperm random draws are generated Returns ------- p: array of shape(nperm,k): each row is permutation of k """ from scipy.special import gamma if k == 1: return np.reshape(np.array([0]), (1, 1)).astype(np.int) if gamma(k + 1) < nperm: # exhaustive permutations aux = generate_perm(k - 1) n = aux.shape[0] perm = np.zeros((n * k, k)).astype(np.int) for i in range(k): perm[i * n:(i + 1) * n, :i] = aux[:, :i] perm[i * n:(i + 1) * n, i] = k-1 perm[i * n:(i + 1) * n, i + 1:] = aux[:, i:] else: from numpy.random import rand perm = np.zeros((nperm, k)).astype(np.int) for i in range(nperm): p = np.argsort(rand(k)) perm[i] = p return perm def multinomial(probabilities): """ Generate samples form a miltivariate distribution Parameters ---------- probabilities: array of shape (nelements, nclasses): likelihood of each element belongin to each class each row is assumedt to sum to 1 One sample is draw from each row, resulting in Returns ------- z array of shape (nelements): the draws, that take values in [0..nclasses-1] """ nvox = probabilities.shape[0] nclasses = probabilities.shape[1] cuml = np.zeros((nvox, nclasses + 1)) cuml[:, 1:] = np.cumsum(probabilities, 1) aux = np.random.rand(nvox, 1) z = np.argmax(aux < cuml, 1)-1 return z def dkl_gaussian(m1, P1, m2, P2): """ Returns the KL divergence between gausians densities Parameters ---------- m1: array of shape (n), the mean parameter of the first density P1: array of shape(n,n), the precision parameters of the first density m2: array of shape (n), the mean parameter of the second density P2: array of shape(n,n), the precision parameters of the second density """ tiny = 1.e-15 dim = np.size(m1) if m1.shape != m2.shape: raise ValueError("incompatible dimensions for m1 and m2") if P1.shape != P2.shape: raise ValueError("incompatible dimensions for P1 and P2") if P1.shape[0] != dim: raise ValueError("incompatible dimensions for m1 and P1") d1 = max(detsh(P1), tiny) d2 = max(detsh(P2), tiny) dkl = np.log(d1 / d2) + np.trace(np.dot(P2, inv(P1))) - dim dkl += np.dot(np.dot((m1 - m2).T, P2), (m1 - m2)) dkl /= 2 return dkl def dkl_wishart(a1, B1, a2, B2): """ returns the KL divergence 
bteween two Wishart distribution of parameters (a1,B1) and (a2,B2), Parameters ---------- a1: Float, degrees of freedom of the first density B1: array of shape(n,n), scale matrix of the first density a2: Float, degrees of freedom of the second density B2: array of shape(n,n), scale matrix of the second density Returns ------- dkl: float, the Kullback-Leibler divergence """ from scipy.special import psi, gammaln tiny = 1.e-15 if B1.shape != B2.shape: raise ValueError("incompatible dimensions for B1 and B2") dim = B1.shape[0] d1 = max(detsh(B1), tiny) d2 = max(detsh(B2), tiny) lgc = dim * (dim - 1) * math.log(np.pi) / 4 lg1 = lgc lg2 = lgc lw1 = - math.log(d1) + dim * math.log(2) lw2 = - math.log(d2) + dim * math.log(2) for i in range(dim): lg1 += gammaln((a1 - i) / 2) lg2 += gammaln((a2 - i) / 2) lw1 += psi((a1 - i) / 2) lw2 += psi((a2 - i) / 2) lz1 = 0.5 * a1 * dim * math.log(2) - 0.5 * a1 * math.log(d1) + lg1 lz2 = 0.5 * a2 * dim * math.log(2) - 0.5 * a2 * math.log(d2) + lg2 dkl = (a1 - dim - 1) * lw1 - (a2 - dim - 1) * lw2 - a1 * dim dkl += a1 * np.trace(np.dot(B2, inv(B1))) dkl /= 2 dkl += (lz2 - lz1) return dkl def dkl_dirichlet(w1, w2): """ Returns the KL divergence between two dirichlet distribution Parameters ---------- w1: array of shape(n), the parameters of the first dirichlet density w2: array of shape(n), the parameters of the second dirichlet density """ if w1.shape != w2.shape: raise ValueError("incompatible dimensions for w1 and w2") dkl = 0 from scipy.special import gammaln, psi dkl = np.sum(gammaln(w2)) - np.sum(gammaln(w1)) dkl += gammaln(np.sum(w1)) - gammaln(np.sum(w2)) dkl += np.sum((w1 - w2) * (psi(w1) - psi(np.sum(w1)))) return dkl ####################################################################### # main GMM class ##################################################### ####################################################################### class BGMM(GMM): """ This class implements Bayesian GMMs this class contains the follwing fields k: int, the number of components in the mixture dim: int, the dimension of the data means: array of shape (k, dim) all the means of the components precisions: array of shape (k, dim, dim) the precisions of the componenets weights: array of shape (k): weights of the mixture shrinkage: array of shape (k): scaling factor of the posterior precisions on the mean dof: array of shape (k) the degrees of freedom of the components prior_means: array of shape (k, dim): the prior on the components means prior_scale: array of shape (k, dim): the prior on the components precisions prior_dof: array of shape (k): the prior on the dof (should be at least equal to dim) prior_shrinkage: array of shape (k): scaling factor of the prior precisions on the mean prior_weights: array of shape (k) the prior on the components weights shrinkage: array of shape (k): scaling factor of the posterior precisions on the mean dof : array of shape (k): the posterior dofs fixme ----- only 'full' precision is supported """ def __init__(self, k=1, dim=1, means=None, precisions=None, weights=None, shrinkage=None, dof=None): """ Initialize the structure with the dimensions of the problem Eventually provide different terms """ GMM.__init__(self, k, dim, 'full', means, precisions, weights) self.shrinkage = shrinkage self.dof = dof if self.shrinkage == None: self.shrinkage = np.ones(self.k) if self.dof == None: self.dof = np.ones(self.k) if self.precisions != None: self._detp = [detsh(self.precisions[k]) for k in range(self.k)] def check(self): """ Checking the shape of 
different matrices involved in the model """ GMM.check(self) if self.prior_means.shape[0] != self.k: raise ValueError("Incorrect dimension for self.prior_means") if self.prior_means.shape[1] != self.dim: raise ValueError("Incorrect dimension for self.prior_means") if self.prior_scale.shape[0] != self.k: raise ValueError("Incorrect dimension for self.prior_scale") if self.prior_scale.shape[1] != self.dim: raise ValueError("Incorrect dimension for self.prior_scale") if self.prior_dof.shape[0] != self.k: raise ValueError("Incorrect dimension for self.prior_dof") if self.prior_weights.shape[0] != self.k: raise ValueError("Incorrect dimension for self.prior_weights") def set_priors(self, prior_means, prior_weights, prior_scale, prior_dof, prior_shrinkage): """ Set the prior of the BGMM Parameters ---------- prior_means: array of shape (self.k,self.dim) prior_weights: array of shape (self.k) prior_scale: array of shape (self.k,self.dim,self.dim) prior_dof: array of shape (self.k) prior_shrinkage: array of shape (self.k) """ self.prior_means = prior_means self.prior_weights = prior_weights self.prior_scale = prior_scale self.prior_dof = prior_dof self.prior_shrinkage = prior_shrinkage # cache some pre-computations self._dets = [detsh(self.prior_scale[k]) for k in range(self.k)] self._inv_prior_scale = np.array([inv(self.prior_scale[k]) for k in range(self.k)]) self.check() def guess_priors(self, x, nocheck=0): """ Set the priors so that they are weakly informative; this follows Fraley and Raftery; Journal of Classification 24:155-181 (2007) Parameters ---------- x, array of shape (nb_samples,self.dim) the data used in the estimation process nocheck: boolean, optional, if nocheck==True, the consistency check is skipped """ # a few parameters small = 0.01 elshape = (1, self.dim, self.dim) mx = np.reshape(x.mean(0), (1, self.dim)) dx = x - mx vx = np.dot(dx.T, dx) / x.shape[0] px = np.reshape(np.diag(1.0 / np.diag(vx)), elshape) px *= np.exp(2.0 / self.dim * math.log(self.k)) # set the priors self.prior_means = np.repeat(mx, self.k, 0) self.prior_weights = np.ones(self.k) self.prior_scale = np.repeat(px, self.k, 0) self.prior_dof = np.ones(self.k) * (self.dim + 2) self.prior_shrinkage = np.ones(self.k) * small # cache some pre-computations self._dets = np.ones(self.k) * detsh(px[0]) self._inv_prior_scale = np.repeat( np.reshape(inv(px[0]), elshape), self.k, 0) # check that everything is OK, unless explicitly skipped if not nocheck: self.check() def initialize(self, x): """ initialize z using a k-means algorithm, then update the parameters Parameters ---------- x: array of shape (nb_samples,self.dim) the data used in the estimation process """ if self.k > 1: cent, z, J = kmeans(x, self.k) else: z = np.zeros(x.shape[0]).astype(np.int) self.update(x, z) def pop(self, z): """ compute the population, i.e.
the statistics of allocation Parameters ---------- z array of shape (nb_samples), type = np.int the allocation variable Returns ------- hist : array shape (self.k) count variable """ hist = np.array([np.sum(z == k) for k in range(self.k)]) return hist def update_weights(self, z): """ Given the allocation vector z, resample the weights parameter Parameters ---------- z array of shape (nb_samples), type = np.int the allocation variable """ pop = self.pop(z) weights = pop + self.prior_weights self.weights = np.random.dirichlet(weights) def update_means(self, x, z): """ Given the allocation vector z, and the corresponding data x, resample the mean Parameters ---------- x: array of shape (nb_samples,self.dim) the data used in the estimation process z: array of shape (nb_samples), type = np.int the corresponding classification """ pop = self.pop(z) self.shrinkage = self.prior_shrinkage + pop empmeans = np.zeros(np.shape(self.means)) prior_shrinkage = np.reshape(self.prior_shrinkage, (self.k, 1)) shrinkage = np.reshape(self.shrinkage, (self.k, 1)) for k in range(self.k): empmeans[k] = np.sum(x[z == k], 0) means = empmeans + self.prior_means * prior_shrinkage means /= shrinkage for k in range(self.k): self.means[k] = generate_normals(\ means[k], self.precisions[k] * self.shrinkage[k]) def update_precisions(self, x, z): """ Given the allocation vector z, and the corresponding data x, resample the precisions Parameters ---------- x array of shape (nb_samples,self.dim) the data used in the estimation process z array of shape (nb_samples), type = np.int the corresponding classification """ pop = self.pop(z) self.dof = self.prior_dof + pop + 1 rpop = pop + (pop == 0) self._detp = np.zeros(self.k) for k in range(self.k): # empirical means empmeans = np.sum(x[z == k], 0) / rpop[k] dm = np.reshape(empmeans - self.prior_means[k], (1, self.dim)) # scatter dx = np.reshape(x[z == k] - empmeans, (pop[k], self.dim)) scatter = np.dot(dx.T, dx) # bias addcov = np.dot(dm.T, dm) * self.prior_shrinkage[k] # covariance = prior term + scatter + bias covariance = self._inv_prior_scale[k] + scatter + addcov #precision scale = inv(covariance) self.precisions[k] = generate_Wishart(self.dof[k], scale) self._detp[k] = detsh(self.precisions[k]) def update(self, x, z): """ update function (draw a sample of the GMM parameters) Parameters ---------- x array of shape (nb_samples,self.dim) the data used in the estimation process z array of shape (nb_samples), type = np.int the corresponding classification """ self.update_weights(z) self.update_precisions(x, z) self.update_means(x, z) def sample_indicator(self, like): """ sample the indicator from the likelihood Parameters ---------- like: array of shape (nb_samples,self.k) component-wise likelihood Returns ------- z: array of shape(nb_samples): a draw of the membership variable """ tiny = 1 + 1.e-15 like = (like.T / like.sum(1)).T like /= tiny z = multinomial(like) return z def sample(self, x, niter=1, mem=0, verbose=0): """ sample the indicator and parameters Parameters ---------- x array of shape (nb_samples,self.dim) the data used in the estimation process niter=1 : the number of iterations to perform mem=0: if mem, the best values of the parameters are computed verbose=0: verbosity mode Returns ------- best_weights: array of shape (self.k) best_means: array of shape (self.k, self.dim) best_precisions: array of shape (self.k, self.dim, self.dim) possibleZ: array of shape (nb_samples, niter) the z that give the highest posterior to the data is returned first """ self.check_x(x) 
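        # Gibbs sampling sweep (implemented below): at each iteration,
        # (1) compute the component-wise likelihood of the data,
        # (2) score the current parameter draw by its mean log-likelihood plus
        #     the log-probability of the parameters under the prior, keeping
        #     track of the best-scoring draw,
        # (3) sample the membership variable z from the normalized
        #     component-wise likelihoods,
        # (4) resample weights, precisions and means conditional on z.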
if mem: possibleZ = - np.ones((x.shape[0], niter)).astype(np.int) score = - np.inf bpz = - np.inf for i in range(niter): like = self.likelihood(x) sll = np.mean(np.log(np.sum(like, 1))) sll += np.log(self.probability_under_prior()) if sll > score: score = sll best_weights = self.weights.copy() best_means = self.means.copy() best_precisions = self.precisions.copy() z = self.sample_indicator(like) if mem: possibleZ[:, i] = z puz = sll # to save time self.update(x, z) if puz > bpz: ibz = i bpz = puz if mem: aux = possibleZ[:, 0].copy() possibleZ[:, 0] = possibleZ[:, ibz].copy() possibleZ[:, ibz] = aux return best_weights, best_means, best_precisions, possibleZ def sample_and_average(self, x, niter=1, verbose=0): """ sample the indicator and parameters the average values for weights,means, precisions are returned Parameters ---------- x = array of shape (nb_samples,dim) the data from which bic is computed niter=1: number of iterations Returns ------- weights: array of shape (self.k) means: array of shape (self.k,self.dim) precisions: array of shape (self.k,self.dim,self.dim) or (self.k, self.dim) these are the average parameters across samplings Notes ----- All this makes sense only if no label switching as occurred so this is wrong in general (asymptotically). fix: implement a permutation procedure for components identification """ aprec = np.zeros(np.shape(self.precisions)) aweights = np.zeros(np.shape(self.weights)) ameans = np.zeros(np.shape(self.means)) for i in range(niter): like = self.likelihood(x) z = self.sample_indicator(like) self.update(x, z) aprec += self.precisions aweights += self.weights ameans += self.means aprec /= niter ameans /= niter aweights /= niter return aweights, ameans, aprec def probability_under_prior(self): """ Compute the probability of the current parameters of self given the priors """ p0 = 1 p0 = dirichlet_eval(self.weights, self.prior_weights) for k in range(self.k): mp = np.reshape(self.precisions[k] * self.prior_shrinkage[k], (self.dim, self.dim)) p0 *= normal_eval(self.prior_means[k], mp, self.means[k]) p0 *= wishart_eval(self.prior_dof[k], self.prior_scale[k], self.precisions[k], dV=self._dets[k], dW=self._detp[k], piV=self._inv_prior_scale[k]) return p0 def conditional_posterior_proba(self, x, z, perm=None): """ Compute the probability of the current parameters of self given x and z Parameters ---------- x: array of shape (nb_samples, dim), the data from which bic is computed z: array of shape (nb_samples), type = np.int, the corresponding classification perm: array ok shape(nperm, self.k),typ=np.int, optional all permutation of z under which things will be recomputed By default, no permutation is performed """ pop = self.pop(z) rpop = (pop + (pop == 0)).astype(np.float) dof = self.prior_dof + pop + 1 shrinkage = self.prior_shrinkage + pop weights = pop + self.prior_weights # initialize the porsterior proba if perm == None: pp = dirichlet_eval(self.weights, weights) else: pp = np.array([dirichlet_eval(self.weights[pj], weights) for pj in perm]) for k in range(self.k): m1 = np.sum(x[z == k], 0) #0. Compute the empirical means empmeans = m1 / rpop[k] #1. the precisions dx = np.reshape(x[z == k] - empmeans, (pop[k], self.dim)) dm = np.reshape(empmeans - self.prior_means[k], (1, self.dim)) addcov = np.dot(dm.T, dm) * self.prior_shrinkage[k] covariance = self._inv_prior_scale[k] + np.dot(dx.T, dx) + addcov scale = inv(covariance) _dets = detsh(scale) #2. the means means = m1 + self.prior_means[k] * self.prior_shrinkage[k] means /= shrinkage[k] #4. 
update the posteriors if perm == None: pp *= wishart_eval( dof[k], scale, self.precisions[k], dV=_dets, dW=self._detp[k], piV=covariance) else: for j, pj in enumerate(perm): pp[j] *= wishart_eval( dof[k], scale, self.precisions[pj[k]], dV=_dets, dW=self._detp[pj[k]], piV=covariance) mp = scale * shrinkage[k] _dP = _dets * shrinkage[k] ** self.dim if perm == None: pp *= normal_eval(means, mp, self.means[k], dP=_dP) else: for j, pj in enumerate(perm): pp[j] *= normal_eval( means, mp, self.means[pj[k]], dP=_dP) return pp def evidence(self, x, z, nperm=0, verbose=0): """ See bayes_factor(self, x, z, nperm=0, verbose=0) """ return self.bayes_factor(x, z, nperm, verbose) def bayes_factor(self, x, z, nperm=0, verbose=0): """ Evaluate the Bayes Factor of the current model using Chib's method Parameters ---------- x: array of shape (nb_samples,dim) the data from which the Bayes factor is computed z: array of shape (nb_samples), type = np.int the corresponding classification nperm=0: int the number of permutations to sample to model the label switching issue in the computation of the Bayes Factor By default, exhaustive permutations are used verbose=0: verbosity mode Returns ------- bf (float) the computed evidence (Bayes factor) Notes ----- See: Marginal Likelihood from the Gibbs Output Journal article by Siddhartha Chib; Journal of the American Statistical Association, Vol. 90, 1995 """ niter = z.shape[1] p = [] perm = generate_perm(self.k) if nperm > perm.shape[0]: nperm = perm.shape[0] for i in range(niter): if nperm == 0: temp = self.conditional_posterior_proba(x, z[:, i], perm) p.append(temp.mean()) else: drand = np.argsort(np.random.rand(perm.shape[0]))[:nperm] temp = self.conditional_posterior_proba(x, z[:, i], perm[drand]) p.append(temp.mean()) p = np.array(p) mp = np.mean(p) p0 = self.probability_under_prior() like = self.likelihood(x) bf = np.log(p0) + np.sum(np.log(np.sum(like, 1))) - np.log(mp) if verbose: print np.log(p0), np.sum(np.log(np.sum(like, 1))), np.log(mp) return bf # --------------------------------------------------------- # --- Variational Bayes inference ------------------------- # --------------------------------------------------------- class VBGMM(BGMM): """ Subclass of Bayesian GMMs (BGMM) that implements Variational Bayes estimation of the parameters """ def __init__(self, k=1, dim=1, means=None, precisions=None, weights=None, shrinkage=None, dof=None): BGMM.__init__(self, k, dim, means, precisions, weights, shrinkage, dof) self.scale = self.precisions.copy() def _Estep(self, x): """VB-E step Parameters ---------- x array of shape (nb_samples,dim) the data used in the estimation process Returns ------- like: array of shape(nb_samples,self.k), component-wise likelihood """ n = x.shape[0] like = np.zeros((n, self.k)) from scipy.special import psi spsi = psi(np.sum(self.weights)) for k in range(self.k): # compute the data-independent factor first w0 = psi(self.weights[k]) - spsi w0 += 0.5 * np.log(detsh(self.scale[k])) w0 -= self.dim * 0.5 / self.shrinkage[k] w0 += 0.5 * np.log(2) * self.dim for i in range(self.dim): w0 += 0.5 * psi((self.dof[k] - i) / 2) m = np.reshape(self.means[k], (1, self.dim)) b = self.dof[k] * self.scale[k] q = np.sum(np.dot(m - x, b) * (m - x), 1) w = w0 - q / 2 w -= 0.5 * np.log(2 * np.pi) * self.dim like[:, k] = np.exp(w) if like.min() < 0: raise ValueError('Likelihood cannot be negative') return like def evidence(self, x, like=None, verbose=0): """computation of evidence bound aka free energy Parameters ---------- x array of shape (nb_samples,dim)
the data from which evidence is computed like=None: array of shape (nb_samples, self.k), optional component-wise likelihood If None, it is recomputed verbose=0: verbosity model Returns ------- ev (float) the computed evidence """ from scipy.special import psi from numpy.linalg import inv tiny = 1.e-15 if like == None: like = self._Estep(x) like = (like.T / np.maximum(like.sum(1), tiny)).T pop = like.sum(0)[:self.k] pop = np.reshape(pop, (self.k, 1)) spsi = psi(np.sum(self.weights)) empmeans = np.dot(like.T[:self.k], x) / np.maximum(pop, tiny) F = 0 # start with the average likelihood term for k in range(self.k): # compute the data-independent factor first Lav = psi(self.weights[k]) - spsi Lav -= np.sum(like[:, k] * np.log(np.maximum(like[:, k], tiny))) \ / pop[k] Lav -= 0.5 * self.dim * np.log(2 * np.pi) Lav += 0.5 * np.log(detsh(self.scale[k])) Lav += 0.5 * np.log(2) * self.dim for i in range(self.dim): Lav += 0.5 * psi((self.dof[k] - i) / 2) Lav -= self.dim * 0.5 / self.shrinkage[k] Lav *= pop[k] empcov = np.zeros((self.dim, self.dim)) dx = x - empmeans[k] empcov = np.dot(dx.T, like[:, k:k + 1] * dx) Lav -= 0.5 * np.trace(np.dot(empcov, self.scale[k] * self.dof[k])) F += Lav #then the KL divergences prior_covariance = np.array(self._inv_prior_scale) covariance = np.array([inv(self.scale[k]) for k in range(self.k)]) Dklw = 0 Dklg = 0 Dkld = dkl_dirichlet(self.weights, self.prior_weights) for k in range(self.k): Dklw += dkl_wishart(self.dof[k], covariance[k], self.prior_dof[k], prior_covariance[k]) nc = self.scale[k] * (self.dof[k] * self.shrinkage[k]) nc0 = self.scale[k] * (self.dof[k] * self.prior_shrinkage[k]) Dklg += dkl_gaussian(self.means[k], nc, self.prior_means[k], nc0) Dkl = Dkld + Dklg + Dklw if verbose: print 'Lav', F, 'Dkl', Dkld, Dklg, Dklw F -= Dkl return F def _Mstep(self, x, like): """VB-M step Parameters ---------- x: array of shape(nb_samples, self.dim) the data from which the model is estimated like: array of shape(nb_samples, self.k) the likelihood of the data under each class """ from numpy.linalg import inv tiny = 1.e-15 pop = like.sum(0) # shrinkage, weights,dof self.weights = self.prior_weights + pop pop = pop[0:self.k] like = like[:, :self.k] self.shrinkage = self.prior_shrinkage + pop self.dof = self.prior_dof + pop #reshape pop = np.reshape(pop, (self.k, 1)) prior_shrinkage = np.reshape(self.prior_shrinkage, (self.k, 1)) shrinkage = np.reshape(self.shrinkage, (self.k, 1)) # means means = np.dot(like.T, x) + self.prior_means * prior_shrinkage self.means = means / shrinkage #precisions empmeans = np.dot(like.T, x) / np.maximum(pop, tiny) empcov = np.zeros(np.shape(self.prior_scale)) for k in range(self.k): dx = x - empmeans[k] empcov[k] = np.dot(dx.T, like[:, k:k + 1] * dx) covariance = np.array(self._inv_prior_scale) + empcov dx = np.reshape(empmeans - self.prior_means, (self.k, self.dim, 1)) addcov = np.array([np.dot(dx[k], dx[k].T) for k in range(self.k)]) apms = np.reshape(prior_shrinkage * pop / shrinkage, (self.k, 1, 1)) covariance += addcov * apms # update scale self.scale = np.array([inv(covariance[k]) for k in range(self.k)]) def initialize(self, x): """ initialize z using a k-means algorithm, then upate the parameters Parameters ---------- x: array of shape (nb_samples,self.dim) the data used in the estimation process """ n = x.shape[0] if self.k > 1: cent, z, J = kmeans(x, self.k) else: z = np.zeros(x.shape[0]).astype(np.int) l = np.zeros((n, self.k)) l[np.arange(n), z] = 1 self._Mstep(x, l) def map_label(self, x, like=None): """ return the MAP labelling 
of x Parameters ---------- x array of shape (nb_samples,dim) the data under study like=None array of shape(nb_samples,self.k) component-wise likelihood if like==None, it is recomputed Returns ------- z: array of shape(nb_samples): the resulting MAP labelling of the rows of x """ if like == None: like = self.likelihood(x) z = np.argmax(like, 1) return z def estimate(self, x, niter=100, delta=1.e-4, verbose=0): """estimation of self given x Parameters ---------- x array of shape (nb_samples,dim) the data from which the model is estimated z = None: array of shape (nb_samples) a prior labelling of the data to initialize the computation niter=100: maximal number of iterations in the estimation process delta = 1.e-4: increment of data likelihood at which convergence is declared verbose=0: verbosity mode """ # alternation of E/M step until convergence tiny = 1.e-15 av_ll_old = - np.inf for i in range(niter): like = self._Estep(x) av_ll = np.mean(np.log(np.maximum(np.sum(like, 1), tiny))) if av_ll < av_ll_old + delta: if verbose: print 'iteration:', i, 'log-likelihood:', av_ll,\ 'old value:', av_ll_old break else: av_ll_old = av_ll if verbose: print i, av_ll, self.bic(like) like = (like.T / np.maximum(like.sum(1), tiny)).T self._Mstep(x, like) def likelihood(self, x): """ return the likelihood of the model for the data x the values are weighted by the components weights Parameters ---------- x: array of shape (nb_samples, self.dim) the data used in the estimation process Returns ------- like: array of shape(nb_samples, self.k) component-wise likelihood """ x = self.check_x(x) return self._Estep(x) def pop(self, like, tiny=1.e-15): """ compute the population, i.e. the statistics of allocation Parameters ---------- like array of shape (nb_samples, self.k): the likelihood of each item being in each class """ slike = np.maximum(tiny, np.sum(like, 1)) nlike = (like.T / slike).T return np.sum(nlike, 0) nipy-0.3.0/nipy/algorithms/clustering/ggmixture.py000066400000000000000000000504101210344137400223640ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ One-dimensional Gamma-Gaussian mixture density classes : Given a set of points the algo provides approcumate maximum likelihood estimates of the mixture distribution using an EM algorithm. Author: Bertrand Thirion and Merlin Keller 2005-2008 """ import numpy as np import scipy.stats as st import scipy.special as sp ############################################################################# # Auxiliary functions ####################################################### ############################################################################# def _dichopsi_log(u, v, y, eps=0.00001): """ Implements the dichotomic part of the solution of psi(c)-log(c)=y """ if u > v: u, v = v, u t = (u + v) / 2 if np.absolute(u - v) < eps: return t else: if sp.psi(t) - np.log(t) > y: return _dichopsi_log(u, t, y, eps) else: return _dichopsi_log(t, v, y, eps) def _psi_solve(y, eps=0.00001): """ Solve psi(c)-log(c)=y by dichotomy """ if y > 0: print "y", y raise ValueError("y>0, the problem cannot be solved") u = 1. 
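    # Bracket the solution of psi(u) - log(u) = y: starting from u = 1, double
    # (or halve) u until the interval [u, 2 * u] encloses the solution, then
    # refine it by bisection in _dichopsi_log.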
if y > sp.psi(u) - np.log(u): while sp.psi(u) - np.log(u) < y: u *= 2 u /= 2 else: while sp.psi(u) - np.log(u) > y: u /= 2 return _dichopsi_log(u, 2 * u, y, eps) def _compute_c(x, z, eps=0.00001): """ this function returns the mle of the shape parameter of a 1D gamma density """ eps = 1.e-7 y = np.dot(z, np.log(x)) / np.sum(z) - np.log(np.dot(z, x) / np.sum(z)) if y > - eps: c = 10 else: c = _psi_solve(y, eps=0.00001) return c def _gaus_dens(mean, var, x): """ evaluate the gaussian density (mean,var) at points x """ Q = - (x - mean) ** 2 / (2 * var) return 1. / np.sqrt(2 * np.pi * var) * np.exp(Q) def _gam_dens(shape, scale, x): """evaluate the gamma density (shape,scale) at points x Notes ----- Returns 0 on negative subspace """ ng = np.zeros(np.size(x)) cst = - shape * np.log(scale) - sp.gammaln(shape) i = np.ravel(np.nonzero(x > 0)) if np.size(i) > 0: lz = cst + (shape - 1) * np.log(x[i]) - x[i] / scale ng[i] = np.exp(lz) return ng def _gam_param(x, z): """ Compute the parameters of a gamma density from weighted data points Parameters ---------- x: array of shape(nbitem) the learning points z: array of shape(nbitem), their membership within the class Notes ----- if no point is positive then the couple (1, 1) is returned """ eps = 1.e-5 i = np.ravel(np.nonzero(x > 0)) szi = np.sum(z[i]) if szi > 0: shape = _compute_c(x[i], z[i], eps) scale = np.dot(x[i], z[i]) / (szi * shape) else: shape = 1 scale = 1 return shape, scale ############################################################################## # class `Gamma` ############################################################################## class Gamma(object): """ Basic one dimensional Gamma density estimation class. The Gamma distribution is defined only on positive values; its parameters are estimated by maximum likelihood. 2 parameters are used: - shape: gamma shape parameter - scale: gamma scale parameter """ def __init__(self, shape=1, scale=1): self.shape = shape self.scale = scale def parameters(self): print "shape: ", self.shape, "scale: ", self.scale def check(self, x): if (x.min() < 0): raise ValueError("negative values in input") def estimate(self, x, eps=1.e-7): """ ML estimation of the Gamma parameters """ self.check(x) n = np.size(x) y = np.sum(np.log(x)) / n - np.log(np.sum(x) / n) if y > - eps: self.shape = 1 else: self.shape = _psi_solve(y) self.scale = np.sum(x) / (n * self.shape) ############################################################################## # Gamma-Gaussian Mixture class ############################################################################## class GGM(object): """ This is the basic one dimensional Gaussian-Gamma Mixture estimation class Note that it can work with positive or negative values, as long as there is at least one positive value. NB : The gamma distribution is defined only on positive values.
5 scalar members - mean: gaussian mean - var: gaussian variance (non-negative) - shape: gamma shape (non-negative) - scale: gamma scale (non-negative) - mixt: mixture parameter (non-negative, weight of the gamma) """ def __init__(self, shape=1, scale=1, mean=0, var=1, mixt=0.5): self.shape = shape self.scale = scale self.mean = mean self.var = var self.mixt = mixt def parameters(self): """ print the paramteres of self """ print "Gaussian: mean: ", self.mean, "variance: ", self.var print "Gamma: shape: ", self.shape, "scale: ", self.scale print "Mixture gamma: ", self.mixt, "Gaussian: ", 1 - self.mixt def Mstep(self, x, z): """ Mstep of the model: maximum likelihood estimation of the parameters of the model Parameters ---------- x : array of shape (nbitems,) input data z array of shape(nbitrems, 2) the membership matrix """ # z[0,:] is the likelihood to be generated by the gamma # z[1,:] is the likelihood to be generated by the gaussian tiny = 1.e-15 sz = np.maximum(tiny, np.sum(z, 0)) self.shape, self.scale = _gam_param(x, z[:, 0]) self.mean = np.dot(x, z[:, 1]) / sz[1] self.var = np.dot((x - self.mean) ** 2, z[:, 1]) / sz[1] self.mixt = sz[0] / np.size(x) def Estep(self, x): """ E step of the estimation: Estimation of ata membsership Parameters ---------- x: array of shape (nbitems,) input data Returns ------- z: array of shape (nbitems, 2) the membership matrix """ eps = 1.e-15 z = np.zeros((np.size(x), 2), 'd') z[:, 0] = _gam_dens(self.shape, self.scale, x) z[:, 1] = _gaus_dens(self.mean, self.var, x) z = z * np.array([self.mixt, 1. - self.mixt]) sz = np.maximum(np.sum(z, 1), eps) L = np.sum(np.log(sz)) / np.size(x) z = (z.T / sz).T return z, L def estimate(self, x, niter=10, delta=0.0001, verbose=False): """ Complete EM estimation procedure Parameters ---------- x : array of shape (nbitems,) the data to be processed niter : int, optional max nb of iterations delta : float, optional criterion for convergence verbose : bool, optional If True, print values during iterations Returns ------- LL, float average final log-likelihood """ if x.max() < 0: # all the values are generated by the Gaussian self.mean = np.mean(x) self.var = np.var(x) self.mixt = 0. 
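            # all the data are negative: the gamma component (defined on the
            # positive axis only) gets zero weight and the fit reduces to a
            # single Gaussian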
L = 0.5 * (1 + np.log(2 * np.pi * self.var)) return L # proceed with standard estimate z, L = self.Estep(x) L0 = L - 2 * delta for i in range(niter): self.Mstep(x, z) z, L = self.Estep(x) if verbose: print i, L if (L < L0 + delta): break L0 = L return L def show(self, x): """ Visualization of the mm based on the empirical histogram of x Parameters ---------- x : array of shape (nbitems,) the data to be processed """ step = 3.5 * np.std(x) / np.exp(np.log(np.size(x)) / 3) bins = max(10, int((x.max() - x.min()) / step)) h, c = np.histogram(x, bins) h = h.astype(np.float) / np.size(x) p = self.mixt dc = c[1] - c[0] y = (1 - p) * _gaus_dens(self.mean, self.var, c) * dc z = np.zeros(np.size(c)) z = _gam_dens(self.shape, self.scale, c) * p * dc import matplotlib.pylab as mp mp.figure() mp.plot(0.5 * (c[1:] + c[:-1]), h) mp.plot(c, y, 'r') mp.plot(c, z, 'g') mp.plot(c, z + y, 'k') mp.title('Fit of the density with a Gamma-Gaussians mixture') mp.legend(('data', 'gaussian acomponent', 'gamma component', 'mixture distribution')) def posterior(self, x): """Posterior probability of observing the data x for each component Parameters ---------- x: array of shape (nbitems,) the data to be processed Returns ------- y, pg : arrays of shape (nbitem) the posterior probability """ p = self.mixt pg = p * _gam_dens(self.shape, self.scale, x) y = (1 - p) * _gaus_dens(self.mean, self.var, x) return y / (y + pg), pg / (y + pg) ############################################################################## # double-Gamma-Gaussian Mixture class ############################################################################## class GGGM(object): """ The basic one dimensional Gamma-Gaussian-Gamma Mixture estimation class, where the first gamma has a negative sign, while the second one has a positive sign. 7 parameters are used: - shape_n: negative gamma shape - scale_n: negative gamma scale - mean: gaussian mean - var: gaussian variance - shape_p: positive gamma shape - scale_p: positive gamma scale - mixt: array of mixture parameter (weights of the n-gamma,gaussian and p-gamma) """ def __init__(self, shape_n=1, scale_n=1, mean=0, var=1, shape_p=1, scale_p=1, mixt=np.array([1.0, 1.0, 1.0]) / 3): """ Constructor Parameters ----------- shape_n : float, optional scale_n: float, optional parameters of the nehative gamma; must be positive mean : float, optional var : float, optional parameters of the gaussian ; var must be positive shape_p : float, optional scale_p : float, optional parameters of the positive gamma; must be positive mixt : array of shape (3,), optional the mixing proportions; they should be positive and sum to 1 """ self.shape_n = shape_n self.scale_n = scale_n self.mean = mean self.var = var self.shape_p = shape_p self.scale_p = scale_p self.mixt = mixt def parameters(self): """ Print the parameters """ print "Negative Gamma: shape: ", self.shape_n,\ "scale: ", self.scale_n print "Gaussian: mean: ", self.mean, "variance: ", self.var print "Poitive Gamma: shape: ", self.shape_p, "scale: ",\ self.scale_p mixt = self.mixt print "Mixture neg. gamma: ", mixt[0], "Gaussian: ", mixt[1],\ "pos. gamma: ", mixt[2] def init(self, x, mixt=None): """ initialization of the differnt parameters Parameters ---------- x: array of shape(nbitems) the data to be processed mixt : None or array of shape(3), optional prior mixing proportions. 
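        A minimal, illustrative sketch of typical use (``x`` is assumed to be
        a one-dimensional array of observations; the values below are
        arbitrary)::

            model = GGGM()
            model.init(x)                        # moment-based initialization
            model.init(x, mixt=[0.2, 0.6, 0.2])  # or with given mixing proportions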
If None, the classes have equal weight """ if mixt != None: if np.size(mixt) == 3: self.mixt = np.ravel(mixt) else: raise ValueError('bad size for mixt') # gaussian self.mean = np.mean(x) self.var = np.var(x) # negative gamma i = np.ravel(np.nonzero(x < 0)) if np.size(i) > 0: mn = - np.mean(x[i]) vn = np.var(x[i]) self.scale_n = vn / mn self.shape_n = mn ** 2 / vn else: self.mixt[0] = 0 # positive gamma i = np.ravel(np.nonzero(x > 0)) if np.size(i) > 0: mp = np.mean(x[i]) vp = np.var(x[i]) self.scale_p = vp / mp self.shape_p = mp ** 2 / vp else: self.mixt[2] = 0 # mixing proportions self.mixt = self.mixt / np.sum(self.mixt) def init_fdr(self, x, dof=-1, copy=True): """ Initilization of the class based on a fdr heuristic: the probability to be in the positive component is proportional to the 'positive fdr' of the data. The same holds for the negative part. The point is that the gamma parts should model nothing more that the tails of the distribution. Parameters ---------- x: array of shape(nbitem) the data under consideration dof: integer, optional number of degrees of freedom if x is thought to be a student variate. By default, it is handeled as a normal copy: boolean, optional If True, copy the data. """ # Safeguard ourselves against modifications of x, both by our # code, and by external code. if copy: x = x.copy() # positive gamma i = np.ravel(np.nonzero(x > 0)) from ..statistics.empirical_pvalue import fdr if np.size(i) > 0: if dof < 0: pvals = st.norm.sf(x) else: pvals = st.t.sf(x, dof) q = fdr(pvals) z = 1 - q[i] self.mixt[2] = np.maximum(0.5, z.sum()) / np.size(x) self.shape_p, self.scale_p = _gam_param(x[i], z) else: self.mixt[2] = 0 # negative gamma i = np.ravel(np.nonzero(x < 0)) if np.size(i) > 0: if dof < 0: pvals = st.norm.cdf(x) else: pvals = st.t.cdf(x, dof) q = fdr(pvals) z = 1 - q[i] self.shape_n, self.scale_n = _gam_param( - x[i], z) self.mixt[0] = np.maximum(0.5, z.sum()) / np.size(x) else: self.mixt[0] = 0 self.mixt[1] = 1 - self.mixt[0] - self.mixt[2] def Mstep(self, x, z): """ Mstep of the estimation: Maximum likelihood update the parameters of the three components Parameters ------------ x: array of shape (nbitem,) input data z: array of shape (nbitems,3) probabilistic membership """ tiny = 1.e-15 sz = np.maximum(np.sum(z, 0), tiny) self.mixt = sz / np.sum(sz) # negative gamma self.shape_n, self.scale_n = _gam_param( - x, z[:, 0]) # gaussian self.mean = np.dot(x, z[:, 1]) / sz[1] self.var = np.dot((x - self.mean) ** 2, z[:, 1]) / sz[1] # positive gamma self.shape_p, self.scale_p = _gam_param(x, z[:, 2]) def Estep(self, x): """ Update probabilistic memberships of the three components Parameters ---------- x: array of shape (nbitems,) the input data Returns ------- z: ndarray of shape (nbitems, 3) probabilistic membership Notes ----- z[0,:] is the membership the negative gamma z[1,:] is the membership of the gaussian z[2,:] is the membership of the positive gamma """ tiny = 1.e-15 z = np.array(self.component_likelihood(x)).T * self.mixt sz = np.maximum(tiny, np.sum(z, 1)) L = np.mean(np.log(sz)) z = (z.T / sz).T return z, L def estimate(self, x, niter=100, delta=1.e-4, bias=0, verbose=0, gaussian_mix=0): """ Whole EM estimation procedure: Parameters ---------- x: array of shape (nbitem) input data niter: integer, optional max number of iterations delta: float, optional increment in LL at which convergence is declared bias: float, optional lower bound on the gaussian variance (to avoid shrinkage) gaussian_mix: float, optional if nonzero, lower bound on the gaussian 
mixing weight (to avoid shrinkage) verbose: 0, 1 or 2 verbosity level Returns ------- z: array of shape (nbitem, 3) the membership matrix """ z, L = self.Estep(x) L0 = L - 2 * delta for i in range(niter): self.Mstep(x, z) # Constraint the Gaussian variance if bias > 0: self.var = np.maximum(bias, self.var) # Constraint the Gaussian mixing ratio if gaussian_mix > 0 and self.mixt[1] < gaussian_mix: upper, gaussian, lower = self.mixt upper_to_lower = upper / (lower + upper) gaussian = gaussian_mix upper = (1 - gaussian_mix) * upper_to_lower lower = 1 - gaussian_mix - upper self.mixt = lower, gaussian, upper z, L = self.Estep(x) if verbose: print i, L if (L < L0 + delta): break L0 = L return z def posterior(self, x): """ Compute the posterior probability of the three components given the data Parameters ----------- x: array of shape (nbitem,) the data under evaluation Returns -------- ng,y,pg: three arrays of shape(nbitem) the posteriori of the 3 components given the data Notes ----- ng + y + pg = np.ones(nbitem) """ p = self.mixt ng, y, pg = self.component_likelihood(x) total = ng * p[0] + y * p[1] + pg * p[2] return ng * p[0] / total, y * p[1] / total, pg * p[2] / total def component_likelihood(self, x): """ Compute the likelihood of the data x under the three components negative gamma, gaussina, positive gaussian Parameters ----------- x: array of shape (nbitem,) the data under evaluation Returns -------- ng,y,pg: three arrays of shape(nbitem) The likelihood of the data under the 3 components """ ng = _gam_dens(self.shape_n, self.scale_n, - x) y = _gaus_dens(self.mean, self.var, x) pg = _gam_dens(self.shape_p, self.scale_p, x) return ng, y, pg def show(self, x, mpaxes=None): """ Visualization of mixture shown on the empirical histogram of x Parameters ---------- x: ndarray of shape (nditem,) data mpaxes: matplotlib axes, optional axes handle used for the plot if None, new axes are created. """ step = 3.5 * np.std(x) / np.exp(np.log(np.size(x)) / 3) bins = max(10, int((x.max() - x.min()) / step)) h, c = np.histogram(x, bins) h = h.astype('d') / np.size(x) dc = c[1] - c[0] ng = self.mixt[0] * _gam_dens(self.shape_n, self.scale_n, - c) y = self.mixt[1] * _gaus_dens(self.mean, self.var, c) pg = self.mixt[2] * _gam_dens(self.shape_p, self.scale_p, c) z = y + pg + ng import matplotlib.pylab as mp if mpaxes == None: mp.figure() ax = mp.subplot(1, 1, 1) else: ax = mpaxes ax.plot(0.5 * (c[1:] + c[:-1]), h / dc, linewidth=2, label='data') ax.plot(c, ng, 'c', linewidth=2, label='negative gamma component') ax.plot(c, y, 'r', linewidth=2, label='Gaussian component') ax.plot(c, pg, 'g', linewidth=2, label='positive gamma component') ax.plot(c, z, 'k', linewidth=2, label='mixture distribution') ax.set_title('Fit of the density with a Gamma-Gaussian mixture', fontsize=12) l = ax.legend() for t in l.get_texts(): t.set_fontsize(12) ax.set_xticklabels(ax.get_xticks(), fontsize=12) ax.set_yticklabels(ax.get_yticks(), fontsize=12) nipy-0.3.0/nipy/algorithms/clustering/gmm.py000066400000000000000000000712751210344137400211450ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Gaussian Mixture Model Class: contains the basic fields and methods of GMMs The class GMM _old uses C bindings which are computationally and memory efficient. 
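A minimal usage sketch of the pure-Python classes defined in this module
(illustrative only; the toy data and the choice of ``krange`` are arbitrary)::

    import numpy as np
    x = np.random.randn(1000, 2)                   # toy 2D data
    model = best_fitting_GMM(x, krange=[1, 2, 3])  # model selection, see below
    score = model.evidence(x)                      # evidence of the selected model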
Author : Bertrand Thirion, 2006-2009 """ import numpy as np from scipy.linalg import eigvalsh class GridDescriptor(object): """ A tiny class to handle cartesian grids """ def __init__(self, dim=1, lim=None, n_bins=None): """ Parameters ---------- dim: int, optional, the dimension of the grid lim: list of len(2*self.dim), the limits of the grid as (xmin, xmax, ymin, ymax, ...) n_bins: list of len(self.dim), the number of bins in each direction """ self.dim = dim if lim is not None: self.set(lim, n_bins) if np.size(n_bins) == self.dim: self.n_bins = np.ravel(np.array(n_bins)) def set(self, lim, n_bins=10): """ set the limits of the grid and the number of bins Parameters ---------- lim: list of len(2*self.dim), the limits of the grid as (xmin, xmax, ymin, ymax, ...) n_bins: list of len(self.dim), optional the number of bins in each direction """ if len(lim) == 2 * self.dim: self.lim = lim else: raise ValueError("Wrong dimension for grid definition") if np.size(n_bins) == self.dim: self.n_bins = np.ravel(np.array(n_bins)) else: raise ValueError("Wrong dimension for grid definition") def make_grid(self): """ Compute the grid points Returns ------- grid: array of shape (nb_nodes, self.dim) where nb_nodes is the prod of self.n_bins """ size = np.prod(self.n_bins) grid = np.zeros((size, self.dim)) grange = [] for j in range(self.dim): xm = self.lim[2 * j] xM = self.lim[2 * j + 1] if np.isscalar(self.n_bins): xb = self.n_bins else: xb = self.n_bins[j] gr = xm + float(xM - xm) / (xb - 1) * np.arange(xb).astype('f') grange.append(gr) if self.dim == 1: grid = np.array([[grange[0][i]] for i in range(xb)]) if self.dim == 2: for i in range(self.n_bins[0]): for j in range(self.n_bins[1]): grid[i * self.n_bins[1] + j] = np.array( [grange[0][i], grange[1][j]]) if self.dim == 3: for i in range(self.n_bins[0]): for j in range(self.n_bins[1]): for k in range(self.n_bins[2]): q = (i * self.n_bins[1] + j) * self.n_bins[2] + k grid[q] = np.array([grange[0][i], grange[1][j], grange[2][k]]) if self.dim > 3: raise NotImplementedError( 'only dimensions <4 are currently handled') return grid def best_fitting_GMM(x, krange, prec_type='full', niter=100, delta=1.e-4, ninit=1, verbose=0): """ Given a certain dataset x, find the best-fitting GMM with a number k of classes in a certain range defined by krange Parameters ---------- x: array of shape (n_samples,dim) the data from which the model is estimated krange: list of floats, the range of values to test for k prec_type: string (to be chosen within 'full','diag'), optional, the covariance parameterization niter: int, optional, maximal number of iterations in the estimation process delta: float, optional, increment of data likelihood at which convergence is declared ninit: int number of initialization performed verbose=0: verbosity mode Returns ------- mg : the best-fitting GMM instance """ if np.size(x) == x.shape[0]: x = np.reshape(x, (np.size(x), 1)) dim = x.shape[1] bestbic = - np.inf for k in krange: lgmm = GMM(k, dim, prec_type) gmmk = lgmm.initialize_and_estimate(x, None, niter, delta, ninit, verbose) bic = gmmk.evidence(x) if bic > bestbic: bestbic = bic bgmm = gmmk if verbose: print 'k', k, 'bic', bic return bgmm def plot2D(x, my_gmm, z=None, with_dots=True, log_scale=False, mpaxes=None, verbose=0): """ Given a set of points in a plane and a GMM, plot them Parameters ---------- x: array of shape (npoints, dim=2), sample points my_gmm: GMM instance, whose density has to be ploted z: array of shape (npoints), optional that gives a labelling of the points in x by 
default, it is not taken into account with_dots, bool, optional whether to plot the dots or not log_scale: bool, optional whether to plot the likelihood in log scale or not mpaxes=None, int, optional if not None, axes handle for plotting verbose: verbosity mode, optional Returns ------- gd, GridDescriptor instance, that represents the grid used in the function ax, handle to the figure axes Notes ----- ``my_gmm`` is assumed to have have a 'nixture_likelihood' method that takes an array of points of shape (np, dim) and returns an array of shape (np,my_gmm.k) that represents the likelihood component-wise """ import matplotlib.pyplot as plt if x.shape[1] != my_gmm.dim: raise ValueError('Incompatible dimension between data and model') if x.shape[1] != 2: raise ValueError('this works only for 2D cases') gd1 = GridDescriptor(2) xmin, xmax = x.min(0), x.max(0) xm = 1.1 * xmin[0] - 0.1 * xmax[0] xs = 1.1 * xmax[0] - 0.1 * xmin[0] ym = 1.1 * xmin[1] - 0.1 * xmax[1] ys = 1.1 * xmax[1] - 0.1 * xmin[1] gd1.set([xm, xs, ym, ys], [51, 51]) grid = gd1.make_grid() L = my_gmm.mixture_likelihood(grid) if verbose: intl = L.sum() * (xs - xm) * (ys - ym) / 2500 print 'integral of the density on the domain ', intl if mpaxes == None: plt.figure() ax = plt.subplot(1, 1, 1) else: ax = mpaxes gdx = gd1.n_bins[0] Pdens = np.reshape(L, (gdx, np.size(L) / gdx)) extent = [xm, xs, ym, ys] if log_scale: plt.imshow(np.log(Pdens.T), alpha=2.0, origin='lower', extent=extent) else: plt.imshow(Pdens.T, alpha=2.0, origin='lower', extent=extent) if with_dots: if z == None: plt.plot(x[:, 0], x[:, 1], 'o') else: hsv = plt.cm.hsv(range(256)) col = hsv[range(0, 256, 256 // int(z.max() + 1))] for k in range(z.max() + 1): plt.plot(x[z == k, 0], x[z == k, 1], 'o', color=col[k]) plt.axis(extent) plt.colorbar() return gd1, ax class GMM(object): """Standard GMM. this class contains the following members k (int): the number of components in the mixture dim (int): is the dimension of the data prec_type = 'full' (string) is the parameterization of the precisions/covariance matrices: either 'full' or 'diagonal'. means: array of shape (k,dim): all the means (mean parameters) of the components precisions: array of shape (k,dim,dim): the precisions (inverse covariance matrix) of the components weights: array of shape(k): weights of the mixture fixme ----- no copy method """ def __init__(self, k=1, dim=1, prec_type='full', means=None, precisions=None, weights=None): """ Initialize the structure, at least with the dimensions of the problem Parameters ---------- k (int) the number of classes of the model dim (int) the dimension of the problem prec_type = 'full' : coavriance:precision parameterization (diagonal 'diag' or full 'full'). 
means = None: array of shape (self.k,self.dim) precisions = None: array of shape (self.k,self.dim,self.dim) or (self.k, self.dim) weights=None: array of shape (self.k) By default, means, precision and weights are set as zeros() eye() 1/k ones() with the correct dimensions """ self.k = k self.dim = dim self.prec_type = prec_type self.means = means self.precisions = precisions self.weights = weights if self.means == None: self.means = np.zeros((self.k, self.dim)) if self.precisions == None: if prec_type == 'full': prec = np.reshape(np.eye(self.dim), (1, self.dim, self.dim)) self.precisions = np.repeat(prec, self.k, 0) else: self.precisions = np.ones((self.k, self.dim)) if self.weights == None: self.weights = np.ones(self.k) * 1.0 / self.k def plugin(self, means, precisions, weights): """ Set manually the weights, means and precision of the model Parameters ---------- means: array of shape (self.k,self.dim) precisions: array of shape (self.k,self.dim,self.dim) or (self.k, self.dim) weights: array of shape (self.k) """ self.means = means self.precisions = precisions self.weights = weights self.check() def check(self): """ Checking the shape of different matrices involved in the model """ if self.means.shape[0] != self.k: raise ValueError("self.means does not have correct dimensions") if self.means.shape[1] != self.dim: raise ValueError("self.means does not have correct dimensions") if self.weights.size != self.k: raise ValueError("self.weights does not have correct dimensions") if self.dim != self.precisions.shape[1]: raise ValueError( "self.precisions does not have correct dimensions") if self.prec_type == 'full': if self.dim != self.precisions.shape[2]: raise ValueError( "self.precisions does not have correct dimensions") if self.prec_type == 'diag': if np.shape(self.precisions) != np.shape(self.means): raise ValueError( "self.precisions does not have correct dimensions") if self.precisions.shape[0] != self.k: raise ValueError( "self.precisions does not have correct dimensions") if self.prec_type not in ['full', 'diag']: raise ValueError('unknown precisions type') def check_x(self, x): """ essentially check that x.shape[1]==self.dim x is returned with possibly reshaping """ if np.size(x) == x.shape[0]: x = np.reshape(x, (np.size(x), 1)) if x.shape[1] != self.dim: raise ValueError('incorrect size for x') return x def initialize(self, x): """Initializes self according to a certain dataset x: 1. sets the regularizing hyper-parameters 2. initializes z using a k-means algorithm, then 3. upate the parameters Parameters ---------- x, array of shape (n_samples,self.dim) the data used in the estimation process """ from .utils import kmeans n = x.shape[0] #1. set the priors self.guess_regularizing(x, bcheck=1) # 2. initialize the memberships if self.k > 1: _, z, _ = kmeans(x, self.k) else: z = np.zeros(n).astype(np.int) l = np.zeros((n, self.k)) l[np.arange(n), z] = 1 # 3.update the parameters self.update(x, l) def pop(self, like, tiny=1.e-15): """compute the population, i.e. 
the statistics of allocation Parameters ---------- like: array of shape (n_samples,self.k): the likelihood of each item being in each class """ sl = np.maximum(tiny, np.sum(like, 1)) nl = (like.T / sl).T return np.sum(nl, 0) def update(self, x, l): """ Identical to self._Mstep(x,l) """ self._Mstep(x, l) def likelihood(self, x): """ return the likelihood of the model for the data x the values are weighted by the components weights Parameters ---------- x array of shape (n_samples,self.dim) the data used in the estimation process Returns ------- like, array of shape(n_samples,self.k) component-wise likelihood """ like = self.unweighted_likelihood(x) like *= self.weights return like def unweighted_likelihood_(self, x): """ return the likelihood of each data for each component the values are not weighted by the component weights Parameters ---------- x: array of shape (n_samples,self.dim) the data used in the estimation process Returns ------- like, array of shape(n_samples,self.k) unweighted component-wise likelihood """ n = x.shape[0] like = np.zeros((n, self.k)) for k in range(self.k): # compute the data-independent factor first w = - np.log(2 * np.pi) * self.dim m = np.reshape(self.means[k], (1, self.dim)) b = self.precisions[k] if self.prec_type == 'full': w += np.log(eigvalsh(b)).sum() dx = m - x q = np.sum(np.dot(dx, b) * dx, 1) else: w += np.sum(np.log(b)) q = np.dot((m - x) ** 2, b) w -= q w /= 2 like[:, k] = np.exp(w) return like def unweighted_likelihood(self, x): """ return the likelihood of each data for each component the values are not weighted by the component weights Parameters ---------- x: array of shape (n_samples,self.dim) the data used in the estimation process Returns ------- like, array of shape(n_samples,self.k) unweighted component-wise likelihood Notes ----- Hopefully faster """ xt = x.T.copy() n = x.shape[0] like = np.zeros((n, self.k)) for k in range(self.k): # compute the data-independent factor first w = - np.log(2 * np.pi) * self.dim m = np.reshape(self.means[k], (self.dim, 1)) b = self.precisions[k] if self.prec_type == 'full': w += np.log(eigvalsh(b)).sum() dx = xt - m sqx = dx * np.dot(b, dx) q = np.zeros(n) for d in range(self.dim): q += sqx[d] else: w += np.sum(np.log(b)) q = np.dot(b, (m - xt) ** 2) w -= q w /= 2 like[:, k] = np.exp(w) return like def mixture_likelihood(self, x): """Returns the likelihood of the mixture for x Parameters ---------- x: array of shape (n_samples,self.dim) the data used in the estimation process """ x = self.check_x(x) like = self.likelihood(x) sl = np.sum(like, 1) return sl def average_log_like(self, x, tiny=1.e-15): """returns the averaged log-likelihood of the mode for the dataset x Parameters ---------- x: array of shape (n_samples,self.dim) the data used in the estimation process tiny = 1.e-15: a small constant to avoid numerical singularities """ x = self.check_x(x) like = self.likelihood(x) sl = np.sum(like, 1) sl = np.maximum(sl, tiny) return np.mean(np.log(sl)) def evidence(self, x): """Computation of bic approximation of evidence Parameters ---------- x array of shape (n_samples,dim) the data from which bic is computed Returns ------- the bic value """ x = self.check_x(x) tiny = 1.e-15 like = self.likelihood(x) return self.bic(like, tiny) def bic(self, like, tiny=1.e-15): """Computation of bic approximation of evidence Parameters ---------- like, array of shape (n_samples, self.k) component-wise likelihood tiny=1.e-15, a small constant to avoid numerical singularities Returns ------- the bic value, float """ sl = 
np.sum(like, 1) sl = np.maximum(sl, tiny) bicc = np.sum(np.log(sl)) # number of parameters n = like.shape[0] if self.prec_type == 'full': eta = self.k * (1 + self.dim + (self.dim * self.dim + 1) / 2) - 1 else: eta = self.k * (1 + 2 * self.dim) - 1 bicc = bicc - np.log(n) * eta return bicc def _Estep(self, x): """ E step of the EM algo returns the likelihood per class of each data item Parameters ---------- x array of shape (n_samples,dim) the data used in the estimation process Returns ------- likelihood array of shape(n_samples,self.k) component-wise likelihood """ return self.likelihood(x) def guess_regularizing(self, x, bcheck=1): """ Set the regularizing priors as weakly informative according to Fraley and raftery; Journal of Classification 24:155-181 (2007) Parameters ---------- x array of shape (n_samples,dim) the data used in the estimation process """ small = 0.01 # the mean of the data mx = np.reshape(x.mean(0), (1, self.dim)) dx = x - mx vx = np.dot(dx.T, dx) / x.shape[0] if self.prec_type == 'full': px = np.reshape(np.diag(1.0 / np.diag(vx)), (1, self.dim, self.dim)) else: px = np.reshape(1.0 / np.diag(vx), (1, self.dim)) px *= np.exp(2.0 / self.dim * np.log(self.k)) self.prior_means = np.repeat(mx, self.k, 0) self.prior_weights = np.ones(self.k) / self.k self.prior_scale = np.repeat(px, self.k, 0) self.prior_dof = self.dim + 2 self.prior_shrinkage = small self.weights = np.ones(self.k) * 1.0 / self.k if bcheck: self.check() def _Mstep(self, x, like): """ M step regularized according to the procedure of Fraley et al. 2007 Parameters ---------- x: array of shape(n_samples,self.dim) the data from which the model is estimated like: array of shape(n_samples,self.k) the likelihood of the data under each class """ from numpy.linalg import pinv tiny = 1.e-15 pop = self.pop(like) sl = np.maximum(tiny, np.sum(like, 1)) like = (like.T / sl).T # shrinkage,weights,dof self.weights = self.prior_weights + pop self.weights = self.weights / self.weights.sum() # reshape pop = np.reshape(pop, (self.k, 1)) prior_shrinkage = self.prior_shrinkage shrinkage = pop + prior_shrinkage # means means = np.dot(like.T, x) + self.prior_means * prior_shrinkage self.means = means / shrinkage #precisions empmeans = np.dot(like.T, x) / np.maximum(pop, tiny) empcov = np.zeros(np.shape(self.precisions)) if self.prec_type == 'full': for k in range(self.k): dx = x - empmeans[k] empcov[k] = np.dot(dx.T, like[:, k:k + 1] * dx) #covariance covariance = np.array([pinv(self.prior_scale[k]) for k in range(self.k)]) covariance += empcov dx = np.reshape(empmeans - self.prior_means, (self.k, self.dim, 1)) addcov = np.array([np.dot(dx[k], dx[k].T) for k in range(self.k)]) apms = np.reshape(prior_shrinkage * pop / shrinkage, (self.k, 1, 1)) covariance += (addcov * apms) dof = self.prior_dof + pop + self.dim + 2 covariance /= np.reshape(dof, (self.k, 1, 1)) # precision self.precisions = np.array([pinv(covariance[k]) \ for k in range(self.k)]) else: for k in range(self.k): dx = x - empmeans[k] empcov[k] = np.sum(dx ** 2 * like[:, k:k + 1], 0) # covariance covariance = np.array([1.0 / self.prior_scale[k] for k in range(self.k)]) covariance += empcov dx = np.reshape(empmeans - self.prior_means, (self.k, self.dim, 1)) addcov = np.array([np.sum(dx[k] ** 2, 0) for k in range(self.k)]) apms = np.reshape(prior_shrinkage * pop / shrinkage, (self.k, 1)) covariance += addcov * apms dof = self.prior_dof + pop + self.dim + 2 covariance /= np.reshape(dof, (self.k, 1)) # precision self.precisions = np.array([1.0 / covariance[k] \ for k in 
range(self.k)]) def map_label(self, x, like=None): """return the MAP labelling of x Parameters ---------- x array of shape (n_samples,dim) the data under study like=None array of shape(n_samples,self.k) component-wise likelihood if like==None, it is recomputed Returns ------- z: array of shape(n_samples): the resulting MAP labelling of the rows of x """ if like == None: like = self.likelihood(x) z = np.argmax(like, 1) return z def estimate(self, x, niter=100, delta=1.e-4, verbose=0): """ Estimation of the model given a dataset x Parameters ---------- x array of shape (n_samples,dim) the data from which the model is estimated niter=100: maximal number of iterations in the estimation process delta = 1.e-4: increment of data likelihood at which convergence is declared verbose=0: verbosity mode Returns ------- bic : an asymptotic approximation of model evidence """ # check that the data is OK x = self.check_x(x) # alternation of E/M step until convergence tiny = 1.e-15 av_ll_old = - np.inf for i in range(niter): l = self._Estep(x) av_ll = np.mean(np.log(np.maximum(np.sum(l, 1), tiny))) if av_ll < av_ll_old + delta: if verbose: print 'iteration:', i, 'log-likelihood:', av_ll,\ 'old value:', av_ll_old break else: av_ll_old = av_ll if verbose: print i, av_ll, self.bic(l) self._Mstep(x, l) return self.bic(l) def initialize_and_estimate(self, x, z=None, niter=100, delta=1.e-4,\ ninit=1, verbose=0): """Estimation of self given x Parameters ---------- x array of shape (n_samples,dim) the data from which the model is estimated z = None: array of shape (n_samples) a prior labelling of the data to initialize the computation niter=100: maximal number of iterations in the estimation process delta = 1.e-4: increment of data likelihood at which convergence is declared ninit=1: number of initialization performed to reach a good solution verbose=0: verbosity mode Returns ------- the best model is returned """ bestbic = - np.inf bestgmm = GMM(self.k, self.dim, self.prec_type) bestgmm.initialize(x) for i in range(ninit): # initialization -> Kmeans self.initialize(x) # alternation of E/M step until convergence bic = self.estimate(x, niter=niter, delta=delta, verbose=0) if bic > bestbic: bestbic = bic bestgmm.plugin(self.means, self.precisions, self.weights) return bestgmm def train(self, x, z=None, niter=100, delta=1.e-4, ninit=1, verbose=0): """Idem initialize_and_estimate """ return self.initialize_and_estimate(x, z, niter, delta, ninit, verbose) def test(self, x, tiny=1.e-15): """Returns the log-likelihood of the mixture for x Parameters ---------- x array of shape (n_samples,self.dim) the data used in the estimation process Returns ------- ll: array of shape(n_samples) the log-likelihood of the rows of x """ return np.log(np.maximum(self.mixture_likelihood(x), tiny)) def show_components(self, x, gd, density=None, mpaxes=None): """Function to plot a GMM -- Currently, works only in 1D Parameters ---------- x: array of shape(n_samples, dim) the data under study gd: GridDescriptor instance density: array os shape(prod(gd.n_bins)) density of the model one the discrete grid implied by gd by default, this is recomputed mpaxes: axes handle to make the figure, optional, if None, a new figure is created """ import matplotlib.pyplot as plt if density is None: density = self.mixture_likelihood(gd.make_grid()) if gd.dim > 1: raise NotImplementedError("only implemented in 1D") step = 3.5 * np.std(x) / np.exp(np.log(np.size(x)) / 3) bins = max(10, int((x.max() - x.min()) / step)) xmin = 1.1 * x.min() - 0.1 * x.max() xmax = 
1.1 * x.max() - 0.1 * x.min() h, c = np.histogram(x, bins, [xmin, xmax], normed=True) # Make code robust to new and old behavior of np.histogram c = c[:len(h)] offset = (xmax - xmin) / (2 * bins) c += offset / 2 grid = gd.make_grid() if mpaxes == None: plt.figure() ax = plt.axes() else: ax = mpaxes ax.plot(c + offset, h, linewidth=2) for k in range(self.k): ax.plot(grid, density[:, k], linewidth=2) ax.set_title('Fit of the density with a mixture of Gaussians', fontsize=12) legend = ['data'] for k in range(self.k): legend.append('component %d' % (k + 1)) l = ax.legend(tuple(legend)) for t in l.get_texts(): t.set_fontsize(12) ax.set_xticklabels(ax.get_xticks(), fontsize=12) ax.set_yticklabels(ax.get_yticks(), fontsize=12) def show(self, x, gd, density=None, axes=None): """ Function to plot a GMM, still in progress Currently, works only in 1D and 2D Parameters ---------- x: array of shape(n_samples, dim) the data under study gd: GridDescriptor instance density: array os shape(prod(gd.n_bins)) density of the model one the discrete grid implied by gd by default, this is recomputed """ import matplotlib.pyplot as plt # recompute the density if necessary if density is None: density = self.mixture_likelihood(gd, x) if axes is None: axes = plt.figure() if gd.dim == 1: from ..statistics.empirical_pvalue import \ smoothed_histogram_from_samples h, c = smoothed_histogram_from_samples(x, normalized=True) offset = (c.max() - c.min()) / (2 * c.size) grid = gd.make_grid() h /= h.sum() h /= (2 * offset) plt.plot(c[: -1] + offset, h) plt.plot(grid, density) if gd.dim == 2: plt.figure() xm, xM, ym, yM = gd.lim[0:3] gd0 = gd.n_bins[0] Pdens = np.reshape(density, (gd0, np.size(density) / gd0)) axes.imshow(Pdens.T, None, None, None, 'nearest', 1.0, None, None, 'lower', [xm, xM, ym, yM]) axes.plot(x[:, 0], x[:, 1], '.k') axes.axis([xm, xM, ym, yM]) return axes nipy-0.3.0/nipy/algorithms/clustering/hierarchical_clustering.py000066400000000000000000000711551210344137400252370ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ These routines perform some hierrachical agglomerative clustering of some input data. The following alternatives are proposed: - Distance based average-link - Similarity-based average-link - Distance based maximum-link - Ward's algorithm under graph constraints - Ward's algorithm without graph constraints In this latest version, the results are returned in a 'WeightedForest' structure, which gives access to the clustering hierarchy, facilitates the plot of the result etc. For back-compatibility, *_segment versions of the algorithms have been appended, with the old API (except the qmax parameter, which now represents the number of wanted clusters) Author : Bertrand Thirion,Pamela Guevara, 2006-2009 """ #--------------------------------------------------------------------------- # ------ Routines for Agglomerative Hierarchical Clustering ---------------- # -------------------------------------------------------------------------- import numpy as np from ..graph.graph import WeightedGraph from ..graph.forest import Forest class WeightedForest(Forest): """ This is a weighted Forest structure, i.e. 
a tree - each node has one parent and children (hierarchical structure) - some of the nodes can be viewed as leaves, other as roots - the edges within a tree are associated with a weight: +1 from child to parent -1 from parent to child - additionally, the nodes have a value, which is called 'height', especially useful from dendrograms members ------- V : (int, >0) the number of vertices E : (int) the number of edges parents: array of shape (self.V) the parent array edges: array of shape (self.E,2) reprensenting pairwise neighbors weights, array of shape (self.E), +1/-1 for scending/descending links children: list of arrays that represents the childs of any node height: array of shape(self.V) """ def __init__(self, V, parents=None, height=None): """ Parameters ---------- V: the number of edges of the graph parents=None: array of shape (V) the parents of the graph by default, the parents are set to range(V), i.e. each node is its own parent, and each node is a tree height=None: array of shape(V) the height of the nodes """ V = int(V) if V < 1: raise ValueError('cannot create graphs with no vertex') self.V = int(V) # define the parents if parents == None: self.parents = np.arange(self.V) else: if np.size(parents) != V: raise ValueError('Incorrect size for parents') if parents.max() > self.V: raise ValueError('Incorrect value for parents') self.parents = np.reshape(parents, self.V) self.define_graph_attributes() if self.check() == 0: raise ValueError('The proposed structure is not a forest') self.children = [] if height == None: height = np.zeros(self.V) else: if np.size(height) != V: raise ValueError('Incorrect size for height') self.height = np.reshape(height, self.V) def set_height(self, height=None): """Set the height array """ if height == None: height = np.zeros(self.V) if np.size(height) != self.V: raise ValueError('Incorrect size for height') self.height = np.reshape(height, self.V) def get_height(self): """Get the height array """ return self.height def check_compatible_height(self): """Check that height[parents[i]]>=height[i] for all nodes """ OK = True for i in range(self.V): if self.height[self.parents[i]] < self.height[i]: OK = False return OK def plot(self, ax=None): """Plot the dendrogram associated with self the rank of the data in the dendogram is returned Parameters ---------- ax: axis handle, optional Returns ------- ax, the axis handle """ import matplotlib.pylab as mp if self.check_compatible_height() == False: raise ValueError('cannot plot myself in my current state') n = np.sum(self.isleaf()) # 1. find a permutation of the leaves that makes it nice aux = _label(self.parents) temp = np.zeros(self.V) rank = np.arange(self.V) temp[:n] = np.argsort(aux[:n]) for i in range(n): rank[temp[i]] = i # 2. derive the abscissa in the dendrogram idx = np.zeros(self.V) temp = np.argsort(rank[:n]) for i in range(n): idx[temp[i]] = i for i in range(n, self.V): j = np.nonzero(self.parents == i)[0] idx[i] = np.mean(idx[j]) # 3. 
plot if ax == None: mp.figure() ax = mp.subplot(1, 1, 1) for i in range(self.V): h1 = self.height[i] h2 = self.height[self.parents[i]] mp.plot([idx[i], idx[i]], [h1, h2], 'k') ch = self.get_children() for i in range(self.V): if np.size(ch[i]) > 0: lidx = idx[ch[i]] m = lidx.min() M = lidx.max() h = self.height[i] mp.plot([m, M], [h, h], 'k') cM = 1.05 * self.height.max() - 0.05 * self.height.min() cm = 1.05 * self.height.min() - 0.05 * self.height.max() mp.axis([-1, idx.max() + 1, cm, cM]) return ax def partition(self, threshold): """ Partition the tree according to a cut criterion """ valid = self.height < threshold f = self.subforest(valid) u = f.cc() return u[f.isleaf()] def split(self, k): """ idem as partition, but a number of components are supplied instead """ k = int(k) if k > self.V: k = self.V nbcc = self.cc().max() + 1 if k <= nbcc: u = self.cc() return u[self.isleaf()] sh = np.sort(self.height) th = sh[nbcc - k] u = self.partition(th) return u def plot_height(self): """Plot the height of the non-leaves nodes """ import matplotlib.pylab as mp mp.figure() sh = np.sort(self.height[self.isleaf() == False]) n = np.sum(self.isleaf() == False) mp.bar(np.arange(n), sh) def list_of_subtrees(self): """ returns the list of all non-trivial subtrees in the graph Caveat: theis function assumes that the vertices are sorted in a way such that parent[i]>i forall i Only the leaves are listeed, not the subtrees themselves """ lst = [] n = np.sum(self.isleaf()) for i in range(self.V): lst.append(np.array([], np.int)) for i in range(n): lst[i] = np.array([i], np.int) for i in range(self.V - 1): j = self.parents[i] lst[j] = np.hstack((lst[i], lst[j])) return lst[n:self.V] #-------------------------------------------------------------------------- #------------- Average link clustering on a graph ------------------------- # ------------------------------------------------------------------------- def fusion(K, pop, i, j, k): """ Modifies the graph K to merge nodes i and j into nodes k The similarity values are weighted averaged, where pop[i] and pop[j] yield the relative weights. 
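    Concretely, writing w(a, b) for the similarity attached to edge (a, b):
    when both i and j were linked to a third node l, the merged edge (k, l)
    receives the population-weighted average
    (pop[i] * w(i, l) + pop[j] * w(j, l)) / (pop[i] + pop[j]);
    if only one of them was linked to l, its single contribution is kept
    with the same weighting. (This restates the code below for reference.)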
this is used in average_link_slow (deprecated) """ # fi = float(pop[i]) / (pop[k]) fj = 1.0 - fi # # replace i ny k # idxi = np.nonzero(K.edges[:, 0] == i) K.weights[idxi] = K.weights[idxi] * fi K.edges[idxi, 0] = k idxi = np.nonzero(K.edges[:, 1] == i) K.weights[idxi] = K.weights[idxi] * fi K.edges[idxi, 1] = k # # replace j by k # idxj = np.nonzero(K.edges[:, 0] == j) K.weights[idxj] = K.weights[idxj] * fj K.edges[idxj, 0] = k idxj = np.nonzero(K.edges[:, 1] == j) K.weights[idxj] = K.weights[idxj] * fj K.edges[idxj, 1] = k # #sum/remove double edges # #left side idxk = np.nonzero(K.edges[:, 0] == k)[0] corr = K.edges[idxk, 1] scorr = np.sort(corr) acorr = np.argsort(corr) for a in range(np.size(scorr) - 1): if scorr[a] == scorr[a + 1]: i1 = idxk[acorr[a]] i2 = idxk[acorr[a + 1]] K.weights[i1] = K.weights[i1] + K.weights[i2] K.weights[i2] = - np.inf K.edges[i2] = -1 #right side idxk = np.nonzero(K.edges[:, 1] == k)[0] corr = K.edges[idxk, 0] scorr = np.sort(corr) acorr = np.argsort(corr) for a in range(np.size(scorr) - 1): if scorr[a] == scorr[a + 1]: i1 = idxk[acorr[a]] i2 = idxk[acorr[a + 1]] K.weights[i1] = K.weights[i1] + K.weights[i2] K.weights[i2] = - np.inf K.edges[i2] = - 1 def average_link_graph(G): """ Agglomerative function based on a (hopefully sparse) similarity graph Parameters ---------- G the input graph Returns ------- t a weightForest structure that represents the dendrogram of the data CAVEAT ------ In that case, the homogeneity is associated with high similarity (as opposed to low cost as in most clustering procedures, e.g. distance-based procedures). Thus the tree is created with negated affinity values, in roder to respect the traditional ordering of cluster potentials. individual points have the potential (-np.inf). This problem is handled transparently inthe associated segment functionp. """ # prepare a graph with twice the number of vertices n = G.V nbcc = G.cc().max() + 1 K = WeightedGraph(2 * G.V) K.E = G.E K.edges = G.edges.copy() K.weights = G.weights.copy() parent = np.arange(2 * n - nbcc, dtype=np.int) pop = np.ones(2 * n - nbcc, np.int) height = np.inf * np.ones(2 * n - nbcc) # iteratively merge clusters for q in range(n - nbcc): # 1. find the heaviest edge m = (K.weights).argmax() cost = K.weights[m] k = q + n height[k] = cost i = K.edges[m, 0] j = K.edges[m, 1] # 2. remove the current edge K.edges[m] = -1 K.weights[m] = - np.inf m = np.nonzero((K.edges[:, 0] == j) * (K.edges[:, 1] == i))[0] K.edges[m] = - 1 K.weights[m] = - np.inf # 3. 
merge the edges with third part edges parent[i] = k parent[j] = k pop[k] = pop[i] + pop[j] fusion(K, pop, i, j, k) height[height < 0] = 0 height[np.isinf(height)] = height[n] + 1 t = WeightedForest(2 * n - nbcc, parent, - height) return t def average_link_graph_segment(G, stop=0, qmax=1, verbose=False): """Agglomerative function based on a (hopefully sparse) similarity graph Parameters ---------- G the input graph stop: float the stopping criterion qmax: int, optional the number of desired clusters (in the limit of the stopping criterion) verbose : bool, optional If True, print diagnostic information Returns ------- u: array of shape (G.V) a labelling of the graph vertices according to the criterion cost: array of shape (G.V (?)) the cost of each merge step during the clustering procedure """ # prepare a graph with twice the number of vertices n = G.V if qmax == - 1: qmax = n qmax = int(np.minimum(qmax, n)) t = average_link_graph(G) if verbose: t.plot() u1 = np.zeros(n, np.int) u2 = np.zeros(n, np.int) if stop >= 0: u1 = t.partition( - stop) if qmax > 0: u2 = t.split(qmax) if u1.max() < u2.max(): u = u2 else: u = u1 cost = - t.get_height() cost = cost[t.isleaf() == False] return u, cost #-------------------------------------------------------------------------- #------------- Ward's algorithm with graph constraints -------------------- # ------------------------------------------------------------------------- def _inertia_(i, j, Features): """ Compute the variance of the set which is the concatenation of Feature[i] and Features[j] """ if np.size(np.shape(Features[i])) < 2: print i, np.shape(Features[i]), Features[i] if np.size(np.shape(Features[i])) < 2: print j, np.shape(Features[j]), Features[j] if np.shape(Features[i])[1] != np.shape(Features[j])[1]: print i, j, np.shape(Features[i]), np.shape(Features[j]) localset = np.vstack((Features[i], Features[j])) return np.var(localset, 0).sum() def _inertia(i, j, Features): """ Compute the variance of the set which is the concatenation of Feature[i] and Features[j] """ n = Features[0][i] + Features[0][j] s = Features[1][i] + Features[1][j] q = Features[2][i] + Features[2][j] return np.sum(q - (s ** 2 / n)) def _initial_inertia(K, Features, seeds=None): """ Compute the variance associated with each edge-related pair of vertices Thre sult is written in K;weights if seeds if provided (seeds!=None) this is done only for vertices adjacent to the seeds """ if seeds == None: for e in range(K.E): i = K.edges[e, 0] j = K.edges[e, 1] ESS = _inertia(i, j, Features) K.weights[e] = ESS else: aux = np.zeros(K.V).astype('bool') aux[seeds] = 1 for e in range(K.E): i = K.edges[e, 0] j = K.edges[e, 1] if (aux[i] or aux[j]): K.weights[e] = _inertia(i, j, Features) else: K.weights[e] = np.inf def _auxiliary_graph(G, Features): """ prepare a graph with twice the number of vertices this graph will contain the connectivity information along the merges. 
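    More precisely, the auxiliary graph gets 2 * G.V - 1 vertices: the G.V
    original leaves plus room for the (up to) G.V - 1 internal nodes created
    by successive merges; its edge weights are initialized to the candidate
    merge inertias.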
""" K = WeightedGraph(2 * G.V - 1) K.E = G.E K.edges = G.edges.copy() K.weights = np.ones(K.E) K.symmeterize() if K.E > 0: valid = K.edges[:, 0] < K.edges[:, 1] K.remove_edges(valid) # K.remove_trivial_edges() _initial_inertia(K, Features) return K def _remap(K, i, j, k, Features, linc, rinc): """Modifies the graph K to merge nodes i and j into nodes k the graph weights are modified accordingly Parameters ---------- K graph instance: the existing graphical model i,j,k: int indexes of the nodes to be merged and of the parent respectively Features: list of node-per-node features linc: array of shape(K.V) left incidence matrix rinc: array of shape(K.V) right incidencematrix """ # ------- # replace i by k # -------- idxi = np.array(linc[i]).astype(np.int) if np.size(idxi) > 1: for l in idxi: K.weights[l] = _inertia(k, K.edges[l, 1], Features) elif np.size(idxi) == 1: K.weights[idxi] = _inertia(k, K.edges[idxi, 1], Features) if np.size(idxi) > 0: K.edges[idxi, 0] = k idxi = np.array(rinc[i]).astype(np.int) if np.size(idxi) > 1: for l in idxi: K.weights[l] = _inertia(K.edges[l, 0], k, Features) elif np.size(idxi) == 1: K.weights[idxi] = _inertia(K.edges[idxi, 0], k, Features) if np.size(idxi) > 0: K.edges[idxi, 1] = k #------ # replace j by k #------- idxj = np.array(linc[j]).astype(np.int) if np.size(idxj) > 1: for l in idxj: K.weights[l] = _inertia(k, K.edges[l, 1], Features) elif np.size(idxj) == 1: K.weights[idxj] = _inertia(k, K.edges[idxj, 1], Features) if np.size(idxj) > 0: K.edges[idxj, 0] = k idxj = np.array(rinc[j]).astype(np.int) if np.size(idxj) > 1: for l in idxj: K.weights[l] = _inertia(k, K.edges[l, 0], Features) elif np.size(idxj) == 1: K.weights[idxj] = _inertia(k, K.edges[idxj, 0], Features) if np.size(idxj) > 0: K.edges[idxj, 1] = k #------ # update linc,rinc #------ lidxk = list(np.concatenate((linc[j], linc[i]))) for l in lidxk: if K.edges[l, 1] == - 1: lidxk.remove(l) linc[k] = lidxk linc[i] = [] linc[j] = [] ridxk = list(np.concatenate((rinc[j], rinc[i]))) for l in ridxk: if K.edges[l, 0] == - 1: ridxk.remove(l) rinc[k] = ridxk rinc[i] = [] rinc[j] = [] #------ #remove double edges #------ #left side idxk = np.array(linc[k]).astype(np.int) if np.size(idxk) > 0: corr = K.edges[idxk, 1] scorr = np.sort(corr) acorr = np.argsort(corr) for a in range(np.size(scorr) - 1): if scorr[a] == scorr[a + 1]: i2 = idxk[acorr[a + 1]] K.weights[i2] = np.inf rinc[K.edges[i2, 1]].remove(i2) K.edges[i2] = - 1 linc[k].remove(i2) #right side idxk = np.array(rinc[k]).astype(np.int) if np.size(idxk) > 0: corr = K.edges[idxk, 0] scorr = np.sort(corr) acorr = np.argsort(corr) for a in range(np.size(scorr) - 1): if scorr[a] == scorr[a + 1]: i2 = idxk[acorr[a + 1]] K.weights[i2] = np.inf linc[K.edges[i2, 0]].remove(i2) K.edges[i2] = - 1 rinc[k].remove(i2) return linc, rinc def ward_quick(G, feature, verbose=False): """ Agglomerative function based on a topology-defining graph and a feature matrix. 
Parameters ---------- G : graph instance topology-defining graph feature: array of shape (G.V,dim_feature) some vectorial information related to the graph vertices verbose : bool, optional If True, print diagnostic information Returns ------- t: weightForest instance, that represents the dendrogram of the data Notes ---- Hopefully a quicker version A euclidean distance is used in the feature space Caveat : only approximate """ # basic check if feature.ndim == 1: feature = np.reshape(feature, (-1, 1)) if feature.shape[0] != G.V: raise ValueError( "Incompatible dimension for the feature matrix and the graph") Features = [np.ones(2 * G.V), np.zeros((2 * G.V, feature.shape[1])), np.zeros((2 * G.V, feature.shape[1]))] Features[1][:G.V] = feature Features[2][:G.V] = feature ** 2 n = G.V nbcc = G.cc().max() + 1 # prepare a graph with twice the number of vertices K = _auxiliary_graph(G, Features) parent = np.arange(2 * n - nbcc).astype(np.int) height = np.zeros(2 * n - nbcc) linc = K.left_incidence() rinc = K.right_incidence() # iteratively merge clusters q = 0 while (q < n - nbcc): # 1. find the lightest edges aux = np.zeros(2 * n) ape = np.nonzero(K.weights < np.inf) ape = np.reshape(ape, np.size(ape)) idx = np.argsort(K.weights[ape]) for e in range(n - nbcc - q): i, j = K.edges[ape[idx[e]], 0], K.edges[ape[idx[e]], 1] if (aux[i] == 1) or (aux[j] == 1): break aux[i] = 1 aux[j] = 1 emax = np.maximum(e, 1) for e in range(emax): m = ape[idx[e]] cost = K.weights[m] k = q + n i = K.edges[m, 0] j = K.edges[m, 1] height[k] = cost if verbose: print q, i, j, m, cost # 2. remove the current edge K.edges[m] = -1 K.weights[m] = np.inf linc[i].remove(m) rinc[j].remove(m) ml = linc[j] if np.sum(K.edges[ml, 1] == i) > 0: m = ml[np.flatnonzero(K.edges[ml, 1] == i)] K.edges[m] = -1 K.weights[m] = np.inf linc[j].remove(m) rinc[i].remove(m) # 3. merge the edges with third part edges parent[i] = k parent[j] = k for p in range(3): Features[p][k] = Features[p][i] + Features[p][j] linc, rinc = _remap(K, i, j, k, Features, linc, rinc) q += 1 # build a tree to encode the results t = WeightedForest(2 * n - nbcc, parent, height) return t def ward_field_segment(F, stop=-1, qmax=-1, verbose=False): """Agglomerative function based on a field structure Parameters ---------- F the input field (graph+feature) stop: float, optional the stopping crterion. if stop==-1, then no stopping criterion is used qmax: int, optional the maximum number of desired clusters (in the limit of the stopping criterion) verbose : bool, optional If True, print diagnostic information Returns ------- u: array of shape (F.V) labelling of the graph vertices according to the criterion cost array of shape (F.V - 1) the cost of each merge step during the clustering procedure Notes ----- See ward_quick_segment for more information Caveat : only approximate """ u, cost = ward_quick_segment(F, F.field, stop, qmax, verbose) return u, cost def ward_quick_segment(G, feature, stop=-1, qmax=1, verbose=False): """ Agglomerative function based on a topology-defining graph and a feature matrix. 
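    A rough usage sketch (illustrative; how the topology graph G is built,
    e.g. as a k-nearest-neighbour or lattice graph over the samples, is an
    assumption left to the caller)::

        u, cost = ward_quick_segment(G, feature, stop=-1, qmax=10)

    where u labels each vertex of G and cost stores the successive merge
    costs. Alternatively, the full dendrogram can be kept::

        t = ward_quick(G, feature)   # a WeightedForest
        u = t.split(10)              # then cut it into 10 clusters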
Parameters ---------- G: labs.graph.WeightedGraph instance the input graph (a topological graph essentially) feature array of shape (G.V,dim_feature) vectorial information related to the graph vertices stop1 : int or float, optional the stopping crterion if stop==-1, then no stopping criterion is used qmax : int, optional the maximum number of desired clusters (in the limit of the stopping criterion) verbose : bool, optional If True, print diagnostic information Returns ------- u: array of shape (G.V) labelling of the graph vertices according to the criterion cost: array of shape (G.V - 1) the cost of each merge step during the clustering procedure Notes ----- Hopefully a quicker version A euclidean distance is used in the feature space Caveat : only approximate """ # basic check if feature.ndim == 1: feature = np.reshape(feature, (-1, 1)) if feature.shape[0] != G.V: raise ValueError( "Incompatible dimension for the feature matrix and the graph") n = G.V if stop == - 1: stop = np.inf qmax = int(np.minimum(qmax, n - 1)) t = ward_quick(G, feature, verbose) if verbose: t.plot() u1 = np.zeros(n, np.int) u2 = np.zeros(n, np.int) if stop >= 0: u1 = t.partition(stop) if qmax > 0: u2 = t.split(qmax) if u1.max() < u2.max(): u = u2 else: u = u1 cost = t.get_height() cost = cost[t.isleaf() == False] return u, cost def ward_segment(G, feature, stop=-1, qmax=1, verbose=False): """ Agglomerative function based on a topology-defining graph and a feature matrix. Parameters ---------- G : graph object the input graph (a topological graph essentially) feature : array of shape (G.V,dim_feature) some vectorial information related to the graph vertices stop : int or float, optional the stopping crterion. if stop==-1, then no stopping criterion is used qmax : int, optional the maximum number of desired clusters (in the limit of the stopping criterion) verbose : bool, optional If True, print diagnostic information Returns ------- u: array of shape (G.V): a labelling of the graph vertices according to the criterion cost: array of shape (G.V - 1) the cost of each merge step during the clustering procedure Notes ----- A euclidean distance is used in the feature space Caveat : when the number of cc in G (nbcc) is greter than qmax, u contains nbcc values, not qmax ! """ # basic check if feature.ndim == 1: feature = np.reshape(feature, (-1, 1)) if feature.shape[0] != G.V: raise ValueError( "Incompatible dimension for the feature matrix and the graph") # prepare a graph with twice the number of vertices n = G.V if qmax == -1: qmax = n - 1 if stop == -1: stop = np.inf qmax = int(np.minimum(qmax, n - 1)) t = ward(G, feature, verbose) u1 = np.zeros(n, np.int) u2 = np.zeros(n, np.int) if stop >= 0: u1 = t.partition(stop) if qmax > 0: u2 = t.split(qmax) if u1.max() < u2.max(): u = u2 else: u = u1 cost = t.get_height() cost = cost[t.isleaf() == False] return u, cost def ward(G, feature, verbose=False): """ Agglomerative function based on a topology-defining graph and a feature matrix. Parameters ---------- G : graph the input graph (a topological graph essentially) feature : array of shape (G.V,dim_feature) vectorial information related to the graph vertices verbose : bool, optional If True, print diagnostic information Returns -------- t : ``WeightedForest`` instance structure that represents the dendrogram Notes ----- When G has more than 1 connected component, t is no longer a tree. 
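    (Each connected component of G then grows its own tree, so the returned
    WeightedForest has one root per connected component.)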
This case is handled cleanly now """ # basic check if feature.ndim == 1: feature = np.reshape(feature, (-1, 1)) if feature.shape[0] != G.V: raise ValueError( "Incompatible dimension for the feature matrix and the graph") Features = [np.ones(2 * G.V), np.zeros((2 * G.V, feature.shape[1])), np.zeros((2 * G.V, feature.shape[1]))] Features[1][:G.V] = feature Features[2][:G.V] = feature ** 2 # prepare a graph with twice the number of vertices # this graph will contain the connectivity information # along the merges. n = G.V nbcc = G.cc().max() + 1 K = _auxiliary_graph(G, Features) # prepare some variables that are useful tp speed up the algorithm parent = np.arange(2 * n - nbcc).astype(np.int) height = np.zeros(2 * n - nbcc) linc = K.left_incidence() rinc = K.right_incidence() # iteratively merge clusters for q in range(n - nbcc): # 1. find the lightest edge m = (K.weights).argmin() cost = K.weights[m] k = q + n i = K.edges[m, 0] j = K.edges[m, 1] height[k] = cost if verbose: print q, i, j, m, cost # 2. remove the current edge K.edges[m] = - 1 K.weights[m] = np.inf linc[i].remove(m) rinc[j].remove(m) ml = linc[j] if np.sum(K.edges[ml, 1] == i) > 0: m = ml[np.flatnonzero(K.edges[ml, 1] == i)] K.edges[m] = -1 K.weights[m] = np.inf linc[j].remove(m) rinc[i].remove(m) # 3. merge the edges with third part edges parent[i] = k parent[j] = k for p in range(3): Features[p][k] = Features[p][i] + Features[p][j] linc, rinc = _remap(K, i, j, k, Features, linc, rinc) # build a tree to encode the results t = WeightedForest(2 * n - nbcc, parent, height) return t #-------------------------------------------------------------------------- #----------------------- Visualization ------------------------------------ # ------------------------------------------------------------------------- def _label_(f, parent, left, labelled): temp = np.nonzero(parent == f) if np.size(temp) > 0: i = temp[0][np.nonzero(left[temp[0]] == 1)] j = temp[0][np.nonzero(left[temp[0]] == 0)] labelled = _label_(i, parent, left, labelled) labelled[f] = labelled.max() + 1 labelled = _label_(j, parent, left, labelled) if labelled[f] < 0: labelled[f] = labelled.max() + 1 return labelled def _label(parent): # find the root root = np.nonzero(parent == np.arange(np.size(parent)))[0] # define left left = np.zeros(np.size(parent)) for f in range(np.size(parent)): temp = np.nonzero(parent == f) if np.size(temp) > 0: left[temp[0][0]] = 1 left[root] = .5 # define labelled labelled = - np.ones(np.size(parent)) # compute labelled for j in range(np.size(root)): labelled = _label_(root[j], parent, left, labelled) return labelled nipy-0.3.0/nipy/algorithms/clustering/imm.py000066400000000000000000000531141210344137400211370ustar00rootroot00000000000000""" Infinite mixture model : A generalization of Bayesian mixture models with an unspecified number of classes """ import numpy as np from bgmm import BGMM, detsh from scipy.special import gammaln def co_labelling(z, kmax=None, kmin=None): """ return a sparse co-labelling matrix given the label vector z Parameters ---------- z: array of shape(n_samples), the input labels kmax: int, optional, considers only the labels in the range [0, kmax[ Returns ------- colabel: a sparse coo_matrix, yields the co labelling of the data i.e. 
c[i,j]= 1 if z[i]==z[j], 0 otherwise """ from scipy.sparse import coo_matrix n = z.size colabel = coo_matrix((n, n)) if kmax == None: kmax = z.max() + 1 if kmin == None: kmin = z.min() - 1 for k in np.unique(z): if (k < kmax) & (k > kmin): i = np.array(np.nonzero(z == k)) row = np.repeat(i, i.size) col = np.ravel(np.tile(i, i.size)) data = np.ones((i.size) ** 2) colabel = colabel + coo_matrix((data, (row, col)), shape=(n, n)) return colabel class IMM(BGMM): """ The class implements Infinite Gaussian Mixture model or Dirichlet Proces Mixture Model. This simply a generalization of Bayesian Gaussian Mixture Models with an unknown number of classes. """ def __init__(self, alpha=.5, dim=1): """ Parameters ---------- alpha: float, optional, the parameter for cluster creation dim: int, optional, the dimension of the the data Note: use the function set_priors() to set adapted priors """ self.dim = dim self.alpha = alpha self.k = 0 self.prec_type = 'full' # initialize weights self.weights = [1] def set_priors(self, x): """ Set the priors in order of having them weakly uninformative this is from Fraley and raftery; Journal of Classification 24:155-181 (2007) Parameters ---------- x, array of shape (n_samples,self.dim) the data used in the estimation process """ # a few parameters small = 0.01 elshape = (1, self.dim, self.dim) mx = np.reshape(x.mean(0), (1, self.dim)) dx = x - mx vx = np.maximum(1.e-15, np.dot(dx.T, dx) / x.shape[0]) px = np.reshape(np.diag(1.0 / np.diag(vx)), elshape) # set the priors self._prior_means = mx self.prior_means = mx self.prior_weights = self.alpha self._prior_scale = px self.prior_scale = px self._prior_dof = self.dim + 2 self.prior_dof = [self._prior_dof] self._prior_shrinkage = small self.prior_shrinkage = [self._prior_shrinkage] # cache some pre-computations self._dets_ = detsh(px[0]) self._dets = [self._dets_] self._inv_prior_scale_ = np.reshape(np.linalg.inv(px[0]), elshape) self.prior_dens = None def set_constant_densities(self, prior_dens=None): """Set the null and prior densities as constant (assuming a compact domain) Parameters ---------- prior_dens: float, optional constant for the prior density """ self.prior_dens = prior_dens def sample(self, x, niter=1, sampling_points=None, init=False, kfold=None, verbose=0): """sample the indicator and parameters Parameters ---------- x: array of shape (n_samples, self.dim) the data used in the estimation process niter: int, the number of iterations to perform sampling_points: array of shape(nbpoints, self.dim), optional points where the likelihood will be sampled this defaults to x kfold: int or array, optional, parameter of cross-validation control by default, no cross-validation is used the procedure is faster but less accurate verbose=0: verbosity mode Returns ------- likelihood: array of shape(nbpoints) total likelihood of the model """ self.check_x(x) if sampling_points == None: average_like = np.zeros(x.shape[0]) else: average_like = np.zeros(sampling_points.shape[0]) splike = self.likelihood_under_the_prior(sampling_points) plike = self.likelihood_under_the_prior(x) if init: self.k = 1 z = np.zeros(x.shape[0]) self.update(x, z) like = self.likelihood(x, plike) z = self.sample_indicator(like) for i in range(niter): if kfold == None: like = self.simple_update(x, z, plike) else: like = self.cross_validated_update(x, z, plike, kfold) if sampling_points == None: average_like += like else: average_like += np.sum( self.likelihood(sampling_points, splike), 1) average_like /= niter return average_like def 
simple_update(self, x, z, plike): """ This is a step in the sampling procedure that uses internal corss_validation Parameters ---------- x: array of shape(n_samples, dim), the input data z: array of shape(n_samples), the associated membership variables plike: array of shape(n_samples), the likelihood under the prior Returns ------- like: array od shape(n_samples), the likelihood of the data """ like = self.likelihood(x, plike) # standard + likelihood under the prior # like has shape (x.shape[0], self.k+1) z = self.sample_indicator(like) # almost standard, but many new components can be created self.reduce(z) self.update(x, z) return like.sum(1) def cross_validated_update(self, x, z, plike, kfold=10): """ This is a step in the sampling procedure that uses internal corss_validation Parameters ---------- x: array of shape(n_samples, dim), the input data z: array of shape(n_samples), the associated membership variables plike: array of shape(n_samples), the likelihood under the prior kfold: int, or array of shape(n_samples), optional, folds in the cross-validation loop Returns ------- like: array od shape(n_samples), the (cross-validated) likelihood of the data """ n_samples = x.shape[0] slike = np.zeros(n_samples) if np.isscalar(kfold): aux = np.argsort(np.random.rand(n_samples)) idx = - np.ones(n_samples).astype(np.int) j = np.ceil(n_samples / kfold) kmax = kfold for k in range(kmax): idx[aux[k * j:min(n_samples, j * (k + 1))]] = k else: if np.array(kfold).size != n_samples: raise ValueError('kfold and x do not have the same size') uk = np.unique(kfold) np.random.shuffle(uk) idx = np.zeros(n_samples).astype(np.int) for i, k in enumerate(uk): idx += (i * (kfold == k)) kmax = uk.max() + 1 for k in range(kmax): test = np.zeros(n_samples).astype('bool') test[idx == k] = 1 train = np.logical_not(test) # remove a fraction of the data # and re-estimate the clusters z[train] = self.reduce(z[train]) self.update(x[train], z[train]) # draw the membership for the left-out datas alike = self.likelihood(x[test], plike[test]) slike[test] = alike.sum(1) # standard + likelihood under the prior # like has shape (x.shape[0], self.k+1) z[test] = self.sample_indicator(alike) # almost standard, but many new components can be created return slike def reduce(self, z): """Reduce the assignments by removing empty clusters and update self.k Parameters ---------- z: array of shape(n), a vector of membership variables changed in place Returns ------- z: the remapped values """ uz = np.unique(z[z > - 1]) for i, k in enumerate(uz): z[z == k] = i self.k = z.max() + 1 return z def update(self, x, z): """ Update function (draw a sample of the IMM parameters) Parameters ---------- x array of shape (n_samples,self.dim) the data used in the estimation process z array of shape (n_samples), type = np.int the corresponding classification """ # re-dimension the priors in order to match self.k self.prior_means = np.repeat(self._prior_means, self.k, 0) self.prior_dof = self._prior_dof * np.ones(self.k) self.prior_shrinkage = self._prior_shrinkage * np.ones(self.k) self._dets = self._dets_ * np.ones(self.k) self._inv_prior_scale = np.repeat(self._inv_prior_scale_, self.k, 0) # initialize some variables self.means = np.zeros((self.k, self.dim)) self.precisions = np.zeros((self.k, self.dim, self.dim)) # proceed with the update BGMM.update(self, x, z) def update_weights(self, z): """ Given the allocation vector z, resmaple the weights parameter Parameters ---------- z array of shape (n_samples), type = np.int the allocation variable """ 
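        # Descriptive note on the lines below (no change of behaviour
        # intended): an extra zero is appended to the per-class populations
        # to stand for the yet-unseen component of the Dirichlet process,
        # and the weights are the normalized counts plus the concentration
        # parameter alpha (stored in self.prior_weights by set_priors).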
pop = np.hstack((self.pop(z), 0)) self.weights = pop + self.prior_weights self.weights /= self.weights.sum() def sample_indicator(self, like): """ Sample the indicator from the likelihood Parameters ---------- like: array of shape (nbitem,self.k) component-wise likelihood Returns ------- z: array of shape(nbitem): a draw of the membership variable Notes ----- The behaviour is different from standard bgmm in that z can take arbitrary values """ z = BGMM.sample_indicator(self, like) z[z == self.k] = self.k + np.arange(np.sum(z == self.k)) return z def likelihood_under_the_prior(self, x): """ Computes the likelihood of x under the prior Parameters ---------- x, array of shape (self.n_samples,self.dim) returns ------- w, the likelihood of x under the prior model (unweighted) """ if self.prior_dens is not None: return self.prior_dens * np.ones(x.shape[0]) a = self._prior_dof tau = self._prior_shrinkage tau /= (1 + tau) m = self._prior_means b = self._prior_scale ib = np.linalg.inv(b[0]) ldb = np.log(detsh(b[0])) scalar_w = np.log(tau / np.pi) * self.dim scalar_w += 2 * gammaln((a + 1) / 2) scalar_w -= 2 * gammaln((a - self.dim) / 2) scalar_w -= ldb * a w = scalar_w * np.ones(x.shape[0]) for i in range(x.shape[0]): w[i] -= (a + 1) * np.log(detsh(ib + tau * (m - x[i:i + 1]) * (m - x[i:i + 1]).T)) w /= 2 return np.exp(w) def likelihood(self, x, plike=None): """ return the likelihood of the model for the data x the values are weighted by the components weights Parameters ---------- x: array of shape (n_samples, self.dim), the data used in the estimation process plike: array os shape (n_samples), optional,x the desnity of each point under the prior Returns ------- like, array of shape(nbitem,self.k) component-wise likelihood """ if plike == None: plike = self.likelihood_under_the_prior(x) plike = np.reshape(plike, (x.shape[0], 1)) if self.k > 0: like = self.unweighted_likelihood(x) like = np.hstack((like, plike)) else: like = plike like *= self.weights return like class MixedIMM(IMM): """ Particular IMM with an additional null class. The data is supplied together with a sample-related probability of being under the null. 
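    A minimal sketch of the intended workflow (illustrative only; the toy
    data, the constant null density and the per-sample null probabilities
    below are assumptions of the example)::

        import numpy as np
        x = np.random.randn(100, 2)                # toy data
        null_proba = .5 * np.ones(100)             # prior P(null) per sample
        migmm = MixedIMM(alpha=.5, dim=2)
        migmm.set_priors(x)
        migmm.set_constant_densities(null_dens=1.)
        like, posterior_null = migmm.sample(x, null_proba, niter=100,
                                            init=True)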
""" def __init__(self, alpha=.5, dim=1): """ Parameters ---------- alpha: float, optional, the parameter for cluster creation dim: int, optional, the dimension of the the data Note: use the function set_priors() to set adapted priors """ IMM.__init__(self, alpha, dim) def set_constant_densities(self, null_dens=None, prior_dens=None): """ Set the null and prior densities as constant (over a supposedly compact domain) Parameters ---------- null_dens: float, optional constant for the null density prior_dens: float, optional constant for the prior density """ self.null_dens = null_dens self.prior_dens = prior_dens def sample(self, x, null_class_proba, niter=1, sampling_points=None, init=False, kfold=None, co_clustering=False, verbose=0): """ sample the indicator and parameters Parameters ---------- x: array of shape (n_samples, self.dim), the data used in the estimation process null_class_proba: array of shape(n_samples), the probability to be under the null niter: int, the number of iterations to perform sampling_points: array of shape(nbpoints, self.dim), optional points where the likelihood will be sampled this defaults to x kfold: int, optional, parameter of cross-validation control by default, no cross-validation is used the procedure is faster but less accurate co_clustering: bool, optional if True, return a model of data co-labelling across iterations verbose=0: verbosity mode Returns ------- likelihood: array of shape(nbpoints) total likelihood of the model pproba: array of shape(n_samples), the posterior of being in the null (the posterior of null_class_proba) coclust: only if co_clustering==True, sparse_matrix of shape (n_samples, n_samples), frequency of co-labelling of each sample pairs across iterations """ self.check_x(x) pproba = np.zeros(x.shape[0]) if sampling_points == None: average_like = np.zeros(x.shape[0]) else: average_like = np.zeros(sampling_points.shape[0]) splike = self.likelihood_under_the_prior(sampling_points) plike = self.likelihood_under_the_prior(x) if init: self.k = 1 z = np.zeros(x.shape[0]) self.update(x, z) like = self.likelihood(x, plike) z = self.sample_indicator(like, null_class_proba) if co_clustering: from scipy.sparse import coo_matrix coclust = coo_matrix((x.shape[0], x.shape[0])) for i in range(niter): if kfold == None: like = self.simple_update(x, z, plike, null_class_proba) else: like, z = self.cross_validated_update(x, z, plike, null_class_proba, kfold) llike = self.likelihood(x, plike) z = self.sample_indicator(llike, null_class_proba) pproba += (z == - 1) if co_clustering: coclust = coclust + co_labelling(z, self.k, -1) if sampling_points == None: average_like += like else: average_like += np.sum( self.likelihood(sampling_points, splike), 1) average_like /= niter pproba /= niter if co_clustering: coclust /= niter return average_like, pproba, coclust return average_like, pproba def simple_update(self, x, z, plike, null_class_proba): """ One step in the sampling procedure (one data sweep) Parameters ---------- x: array of shape(n_samples, dim), the input data z: array of shape(n_samples), the associated membership variables plike: array of shape(n_samples), the likelihood under the prior null_class_proba: array of shape(n_samples), prior probability to be under the null Returns ------- like: array od shape(n_samples), the likelihood of the data under the H1 hypothesis """ like = self.likelihood(x, plike) # standard + likelihood under the prior # like has shape (x.shape[0], self.k+1) z = self.sample_indicator(like, null_class_proba) # almost 
standard, but many new components can be created self.reduce(z) self.update(x, z) return like.sum(1) def cross_validated_update(self, x, z, plike, null_class_proba, kfold=10): """ This is a step in the sampling procedure that uses internal corss_validation Parameters ---------- x: array of shape(n_samples, dim), the input data z: array of shape(n_samples), the associated membership variables plike: array of shape(n_samples), the likelihood under the prior kfold: int, optional, or array number of folds in cross-validation loop or set of indexes for the cross-validation procedure null_class_proba: array of shape(n_samples), prior probability to be under the null Returns ------- like: array od shape(n_samples), the (cross-validated) likelihood of the data z: array of shape(n_samples), the associated membership variables Notes ----- When kfold is an array, there is an internal reshuffling to randomize the order of updates """ n_samples = x.shape[0] slike = np.zeros(n_samples) if np.isscalar(kfold): aux = np.argsort(np.random.rand(n_samples)) idx = - np.ones(n_samples).astype(np.int) j = np.ceil(n_samples / kfold) kmax = kfold for k in range(kmax): idx[aux[k * j:min(n_samples, j * (k + 1))]] = k else: if np.array(kfold).size != n_samples: raise ValueError('kfold and x do not have the same size') uk = np.unique(kfold) np.random.shuffle(uk) idx = np.zeros(n_samples).astype(np.int) for i, k in enumerate(uk): idx += (i * (kfold == k)) kmax = uk.max() + 1 for k in range(kmax): # split at iteration k test = np.zeros(n_samples).astype('bool') test[idx == k] = 1 train = np.logical_not(test) # remove a fraction of the data # and re-estimate the clusters z[train] = self.reduce(z[train]) self.update(x[train], z[train]) # draw the membership for the left-out data alike = self.likelihood(x[test], plike[test]) slike[test] = alike.sum(1) # standard + likelihood under the prior # like has shape (x.shape[0], self.k+1) z[test] = self.sample_indicator(alike, null_class_proba[test]) # almost standard, but many new components can be created return slike, z def sample_indicator(self, like, null_class_proba): """ sample the indicator from the likelihood Parameters ---------- like: array of shape (nbitem,self.k) component-wise likelihood null_class_proba: array of shape(n_samples), prior probability to be under the null Returns ------- z: array of shape(nbitem): a draw of the membership variable Notes ----- Here z=-1 encodes for the null class """ n = like.shape[0] conditional_like_1 = ((1 - null_class_proba) * like.T).T conditional_like_0 = np.reshape(null_class_proba * self.null_dens, (n, 1)) conditional_like = np.hstack((conditional_like_0, conditional_like_1)) z = BGMM.sample_indicator(self, conditional_like) - 1 z[z == self.k] = self.k + np.arange(np.sum(z == self.k)) return z def main(): """ Illustrative example of the behaviour of imm """ n = 100 dim = 2 alpha = .5 aff = np.random.randn(dim, dim) x = np.dot(np.random.randn(n, dim), aff) igmm = IMM(alpha, dim) igmm.set_priors(x) # warming igmm.sample(x, niter=100, kfold=10) print 'number of components: ', igmm.k # print 'number of components: ', igmm.k if dim < 3: from gmm import plot2D plot2D(x, igmm, verbose=1) return igmm if __name__ == '__main__': main() nipy-0.3.0/nipy/algorithms/clustering/setup.py000066400000000000000000000017471210344137400215220ustar00rootroot00000000000000#!/usr/bin/env python def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('clustering', parent_package, 
top_path) config.add_subpackage('tests') # We need this because libcstat.a is linked to lapack, which can # be a fortran library, and the linker needs this information. from numpy.distutils.system_info import get_info lapack_info = get_info('lapack_opt',0) if 'libraries' not in lapack_info: # But on OSX that may not give us what we need, so try with 'lapack' # instead. NOTE: scipy.linalg uses lapack_opt, not 'lapack'... lapack_info = get_info('lapack',0) #config.add_extension( # '_clustering', # sources=['clustering.c'], # libraries=['cstat'], # extra_info=lapack_info, # ) return config if __name__ == '__main__': print('This is the wrong setup.py file to run') nipy-0.3.0/nipy/algorithms/clustering/tests/000077500000000000000000000000001210344137400211415ustar00rootroot00000000000000nipy-0.3.0/nipy/algorithms/clustering/tests/__init__.py000066400000000000000000000000001210344137400232400ustar00rootroot00000000000000nipy-0.3.0/nipy/algorithms/clustering/tests/test_bgmm.py000066400000000000000000000116151210344137400235000ustar00rootroot00000000000000""" # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: Test the Bayesian GMM. fixme : some of these tests take too much time at the moment to be real unit tests Author : Bertrand Thirion, 2009 """ import numpy as np import numpy.random as nr from ..bgmm import BGMM, VBGMM, dirichlet_eval, multinomial, dkl_gaussian from nose.tools import assert_true def test_dirichlet_eval(): # check that the Dirichlet evaluation function sums to one on a simple # example alpha = np.array([0.5, 0.5]) sd = 0 for i in range(10000): e = i * 0.0001 + 0.00005 sd += dirichlet_eval(np.array([e, 1 - e]), alpha) assert_true(np.absolute(sd.sum() * 0.0001 - 1) < 0.01) def test_multinomial(): """ test of the generate_multinomial function: check that is sums to 1 in a simple case """ n_samples = 100000 n_classes = 5 aux = np.reshape(np.random.rand(n_classes), (1, n_classes)) aux /= aux.sum() likelihood = np.repeat(aux, n_samples, 0) z = multinomial(likelihood) res = np.array([np.sum(z == k) for k in range(n_classes)]) res = res * 1.0 / n_samples assert_true(np.sum((aux-res) ** 2) < 1.e-4) def test_dkln1(): dim = 3 m1 = np.zeros(dim) P1 = np.eye(dim) m2 = m1 P2 = P1 assert_true(dkl_gaussian(m1, P1, m2, P2) == 0) def test_dkln2(): dim, offset = 3, 4. m1 = np.zeros(dim) P1 = np.eye(dim) m2 = offset * np.ones(dim) P2 = np.eye(dim) assert_true(dkl_gaussian(m1, P1, m2, P2) == .5 * dim * offset ** 2) def test_dkln3(): dim, scale = 3, 4 m1, m2 = np.zeros(dim), np.zeros(dim) P1, P2 = np.eye(dim), scale * np.eye(dim) test1 = .5 * (dim * np.log(scale) + dim * (1. / scale - 1)) test2 = .5 * (-dim * np.log(scale) + dim * (scale - 1)) assert_true(dkl_gaussian(m1, P1, m2, P2) == test2) def test_bgmm_gibbs(): # Perform the estimation of a gmm using Gibbs sampling n_samples, k, dim, niter, offset = 100, 2, 2, 1000, 2. x = nr.randn(n_samples,dim) x[:30] += offset b = BGMM(k,dim) b.guess_priors(x) b.initialize(x) b.sample(x, 1) w, cent, prec, pz = b.sample(x, niter, mem=1) b.plugin(cent, prec, w) z = pz[:, 0] # fixme : find a less trivial test assert_true(z.max() + 1 == b.k) def test_gmm_bf(kmax=4, seed=1): """ Perform a model selection procedure on a gmm with Bayes factor estimations Parameters ---------- kmax : range of values that are tested seed=False: int, optionnal If seed is not False, the random number generator is initialized at a certain value fixme : this one often fails. 
I don't really see why """ n_samples, dim, niter = 30, 2, 1000 if seed: nr = np.random.RandomState([seed]) else: import numpy.random as nr x = nr.randn(n_samples, dim) bbf = -np.inf for k in range(1, kmax): b = BGMM(k, dim) b.guess_priors(x) b.initialize(x) b.sample(x, 100) w, cent, prec, pz = b.sample(x, niter=niter, mem=1) bplugin = BGMM(k, dim, cent, prec, w) bplugin.guess_priors(x) bfk = bplugin.bayes_factor(x, pz.astype(np.int)) if bfk > bbf: bestk = k bbf = bfk assert_true(bestk < 3) def test_vbgmm(): """perform the estimation of a variational gmm """ n_samples, dim, offset, k = 100, 2, 2, 2 x = nr.randn(n_samples, dim) x[:30] += offset b = VBGMM(k,dim) b.guess_priors(x) b.initialize(x) b.estimate(x) z = b.map_label(x) # fixme : find a less trivial test assert_true(z.max() + 1 == b.k) def test_vbgmm_select(kmax=6): """ perform the estimation of a variational gmm + model selection """ nr.seed([0]) n_samples, dim, offset=100, 3, 2 x = nr.randn(n_samples, dim) x[:30] += offset be = - np.inf for k in range(1, kmax): b = VBGMM(k, dim) b.guess_priors(x) b.initialize(x) b.estimate(x) ek = b.evidence(x) if ek > be: be = ek bestk = k assert_true(bestk < 3) def test_evidence(k=1): """ Compare the evidence estimated by Chib's method with the variational evidence (free energy) fixme : this one really takes time """ np.random.seed(0) n_samples, dim, offset = 50, 2, 3 x = nr.randn(n_samples, dim) x[:15] += offset b = VBGMM(k, dim) b.guess_priors(x) b.initialize(x) b.estimate(x) vbe = b.evidence(x) niter = 1000 b = BGMM(k, dim) b.guess_priors(x) b.initialize(x) b.sample(x, 100) w, cent, prec, pz = b.sample(x, niter=niter, mem=1) bplugin = BGMM(k, dim, cent, prec, w) bplugin.guess_priors(x) bfchib = bplugin.bayes_factor(x, pz.astype(np.int), 1) assert_true(bfchib > vbe) if __name__ == '__main__': import nose nose.run(argv=['', __file__]) nipy-0.3.0/nipy/algorithms/clustering/tests/test_clustering.py000066400000000000000000000016261210344137400247360ustar00rootroot00000000000000#!/usr/bin/env python # to run only the simple tests: # python testClustering.py Test_Clustering from ..utils import kmeans import nose import numpy as np import numpy.random as nr from unittest import TestCase class TestClustering(TestCase): def testkmeans1(self): X = nr.randn(10, 2) A = np.concatenate([np.ones((7, 2)),np.zeros((3, 2))]) X = X + 3 * A; L = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) C, L, J = kmeans(X, 2, L) self.assert_(np.mean(L[:7]) < 0.5) def testkmeans2(self): X = nr.randn(10000, 2) A = np.concatenate([np.ones((7000, 2)), np.zeros((3000, 2))]) X = X + 3 * A L = np.concatenate([np.ones(5000), np.zeros(5000)]).astype(np.int) C, L, J = kmeans(X, 2, L) l = L[:7000].astype(np.float) self.assert_(np.mean(l) > 0.9) if __name__ == '__main__': nose.run(argv=['', __file__]) nipy-0.3.0/nipy/algorithms/clustering/tests/test_ggm.py000066400000000000000000000041721210344137400233300ustar00rootroot00000000000000#!/usr/bin/env python import numpy as np import numpy.random as nr from ..ggmixture import GGGM, GGM, Gamma import scipy.stats as st def test_GGM1(verbose=0): shape = 1 scale = 1 mean = 0 var = 1 G = GGM(shape,scale,mean,var) sx = 1000 x = -2.5 + nr.randn(sx) G.estimate(x) b = np.absolute(G.mean+2.5)<0.5 if verbose: #G.parameters() print x.max() assert(b) def test_GGM2(verbose=0): shape = 1 scale = 1 mean = 0 var = 1 G = GGM(shape,scale,mean,var) sx = 1000 x = -2.5 + nr.randn(sx) G.estimate(x) if verbose: G.parameters() b = np.absolute(G.mixt)<0.1 assert(b) def test_GGGM0(verbose=0, seed=1): G = GGGM() sx = 1000 #x 
= np.array([float(st.t.rvs(dof)) for i in range(sx)]) if seed: nr = np.random.RandomState([seed]) else: import numpy.random as nr x = nr.randn(sx) G.init(x) G.estimate(x) if verbose: G.parameters() assert(np.absolute(G.mean)<0.3) def test_GGGM1(verbose=0): G = GGGM() sx = 10000 x = np.array([float(st.t.rvs(5)) for i in range(sx)]) G.init_fdr(x) G.estimate(x) if verbose: G.parameters() assert(np.absolute(G.mean)<0.1) def test_GGGM2(verbose=0): G = GGGM() sx = 10000 x = nr.randn(sx) G.init_fdr(x) G.estimate(x) assert(G.mixt[1]>0.9) def test_GGGM3(verbose=0): G = GGGM() sx = 1000 x = 100 + np.array([float(st.t.rvs(5)) for i in range(sx)]) G.init(x) G.estimate(x) if verbose: G.parameters() assert(np.absolute(G.mixt[0])<1.e-15) def test_gamma_parameters1(verbose=0): import numpy.random as nr n = 1000 X = nr.gamma(11., 3., n) G = Gamma() G.estimate(X) if verbose: G.parameters() assert(np.absolute(G.shape-11)<2.) def test_gamma_parameters2(verbose=0): import numpy.random as nr n = 1000 X = nr.gamma(11., 3., n) G = Gamma() G.estimate(X) if verbose: G.parameters() assert(np.absolute(G.scale-3)<0.5) if __name__ == '__main__': import nose nose.run(argv=['', __file__]) nipy-0.3.0/nipy/algorithms/clustering/tests/test_gmm.py000066400000000000000000000171721210344137400233420ustar00rootroot00000000000000#!/usr/bin/env python # to run only the simple tests: # python testClustering.py Test_Clustering import numpy as np from nose.tools import assert_true from ..gmm import GMM, best_fitting_GMM # seed the random number generator to avoid rare random failures seed = 1 nr = np.random.RandomState([seed]) def test_em_loglike0(): # Test that the likelihood of the GMM is expected on standard data # 1-cluster model dim, k, n = 1, 1, 1000 x = nr.randn(n,dim) lgmm = GMM(k, dim) lgmm.initialize(x) lgmm.estimate(x) ll = lgmm.average_log_like(x) ent = 0.5 * (1 + np.log(2 * np.pi)) assert_true(np.absolute(ll + ent) < 3. / np.sqrt(n)) def test_em_loglike1(): # Test that the likelihood of the GMM is expected on standard data # 3-cluster model dim, k, n = 1, 3, 1000 x = nr.randn(n, dim) lgmm = GMM(k, dim) lgmm.initialize(x) lgmm.estimate(x) ll = lgmm.average_log_like(x) ent = 0.5 * (1 + np.log(2 * np.pi)) assert_true(np.absolute(ll + ent) < 3. / np.sqrt(n)) def test_em_loglike2(): # Test that the likelihood of the GMM is expected on standard data # non-centered data, non-unit variance dim, k, n = 1, 1, 1000 scale, offset = 3., 4. x = offset + scale * nr.randn(n, dim) lgmm = GMM(k, dim) lgmm.initialize(x) lgmm.estimate(x) ll = lgmm.average_log_like(x) ent = 0.5 * (1 + np.log(2 * np.pi * scale ** 2)) assert_true(np.absolute(ll + ent) < 3. / np.sqrt(n)) def test_em_loglike3(): # Test that the likelihood of the GMM is expected on standard data # here dimension = 2 dim, k, n = 2, 1, 1000 scale, offset = 3., 4. x = offset + scale * nr.randn(n,dim) lgmm = GMM(k, dim) lgmm.initialize(x) lgmm.estimate(x) ll = lgmm.average_log_like(x) ent = dim * 0.5 * (1 + np.log(2 * np.pi * scale ** 2)) assert_true(np.absolute(ll + ent) < dim * 3. / np.sqrt(n)) def test_em_loglike4(): # Test that the likelihood of the GMM is expected on standard data # here dim = 5 dim, k, n = 5, 1, 1000 scale, offset = 3., 4. x = offset + scale * nr.randn(n, dim) lgmm = GMM(k,dim) lgmm.initialize(x) lgmm.estimate(x) ll = lgmm.average_log_like(x) ent = dim * 0.5 * (1 + np.log(2 * np.pi * scale ** 2)) assert_true(np.absolute(ll + ent) < dim * 3. 
/ np.sqrt(n)) def test_em_loglike5(): # Test that the likelihood of the GMM is expected on standard data # Here test that this works also on test data generated iid dim, k, n = 2, 1, 1000 scale, offset = 3., 4. x = offset + scale * nr.randn(n, dim) y = offset + scale * nr.randn(n, dim) lgmm = GMM(k, dim) lgmm.initialize(x) lgmm.estimate(x) ll = lgmm.average_log_like(y) ent = dim * 0.5 * (1 + np.log(2 * np.pi * scale ** 2)) assert_true(np.absolute(ll + ent) < dim * 3. / np.sqrt(n)) def test_em_loglike6(): # Test that the likelihood of shifted data is lower # than the likelihood of non-shifted data dim, k, n = 1, 1, 100 offset = 3. x = nr.randn(n, dim) y = offset + nr.randn(n, dim) lgmm = GMM(k, dim) lgmm.initialize(x) lgmm.estimate(x) ll1 = lgmm.average_log_like(x) ll2 = lgmm.average_log_like(y) assert_true(ll2 < ll1) def test_em_selection(): # test that the basic GMM-based model selection tool # returns something sensible # (i.e. the gmm used to represent the data has indeed one or two classes) dim = 2 x = np.concatenate((nr.randn(100, dim), 3 + 2 * nr.randn(100, dim))) krange = range(1, 10) lgmm = best_fitting_GMM(x, krange, prec_type='full', niter=100, delta = 1.e-4, ninit=1) assert_true(lgmm.k < 4) def test_em_gmm_full(): # Computing the BIC value for different configurations # of a GMM with ful diagonal matrices # The BIC should be maximal for a number of classes of 1 or 2 # generate some data dim = 2 x = np.concatenate((nr.randn(100, dim), 3 + 2 * nr.randn(100, dim))) # estimate different GMMs of that data maxiter, delta = 100, 1.e-4 bic = np.zeros(5) for k in range(1,6): lgmm = GMM(k, dim) lgmm.initialize(x) bic[k - 1] = lgmm.estimate(x, maxiter, delta) assert_true(bic[4] < bic[1]) def test_em_gmm_diag(): # Computing the BIC value for GMMs with different number of classes, # with diagonal covariance models # The BIC should maximal for a number of classes of 1 or 2 # generate some data dim = 2 x = np.concatenate((nr.randn(1000, dim), 3 + 2 * nr.randn(1000, dim))) # estimate different GMMs of that data maxiter, delta = 100, 1.e-8 prec_type = 'diag' bic = np.zeros(5) for k in range(1, 6): lgmm = GMM(k, dim, prec_type) lgmm.initialize(x) bic[k - 1] = lgmm.estimate(x, maxiter, delta) z = lgmm.map_label(x) assert_true(z.max() + 1 == lgmm.k) assert_true(bic[4] < bic[1]) def test_em_gmm_multi(): # Playing with various initilizations on the same data # generate some data dim = 2 x = np.concatenate((nr.randn(1000, dim), 3 + 2 * nr.randn(100, dim))) # estimate different GMMs of that data maxiter, delta, ninit, k = 100, 1.e-4, 5, 2 lgmm = GMM(k,dim) bgmm = lgmm.initialize_and_estimate(x, niter=maxiter, delta=delta, ninit=ninit) bic = bgmm.evidence(x) assert_true(np.isfinite(bic)) def test_em_gmm_largedim(): # testing the GMM model in larger dimensions # generate some data dim = 10 x = nr.randn(100, dim) x[:30] += 2 # estimate different GMMs of that data maxiter, delta = 100, 1.e-4 for k in range(2, 3): lgmm = GMM(k,dim) bgmm = lgmm.initialize_and_estimate(x, None, maxiter, delta, ninit=5) z = bgmm.map_label(x) # define the correct labelling u = np.zeros(100) u[:30] = 1 #check the correlation between the true labelling # and the computed one eta = np.absolute(np.dot(z - z.mean(), u - u.mean()) /\ (np.std(z) * np.std(u) * 100)) assert_true(eta > 0.3) def test_em_gmm_heterosc(): # testing the model in very ellipsoidal data: # compute the bic values for several values of k # and check that the maximal one is 1 or 2 # generate some data dim = 2 x = nr.randn(100, dim) x[:50] += 3 # estimate different 
GMMs of that data maxiter, delta = 100, 1.e-4 bic = np.zeros(5) for k in range(1,6): lgmm = GMM(k, dim) lgmm.initialize(x) bic[k - 1] = lgmm.estimate(x, maxiter, delta, 0) assert_true(bic[4] < bic[1]) def test_em_gmm_cv(): # Comparison of different GMMs using cross-validation # generate some data dim = 2 xtrain = np.concatenate((nr.randn(100, dim), 3 + 2 * nr.randn(100, dim))) xtest = np.concatenate((nr.randn(1000, dim), 3 + 2 * nr.randn(1000, dim))) #estimate different GMMs for xtrain, and test it on xtest prec_type = 'full' k, maxiter, delta = 2, 300, 1.e-4 ll = [] # model 1 lgmm = GMM(k,dim,prec_type) lgmm.initialize(xtrain) bic = lgmm.estimate(xtrain,maxiter, delta) ll.append(lgmm.test(xtest).mean()) # model 2 prec_type = 'diag' lgmm = GMM(k, dim, prec_type) lgmm.initialize(xtrain) bic = lgmm.estimate(xtrain, maxiter, delta) ll.append(lgmm.test(xtest).mean()) for k in [1, 3, 10]: lgmm = GMM(k,dim,prec_type) lgmm.initialize(xtrain) ll.append(lgmm.test(xtest).mean()) assert_true(ll[4] < ll[1]) if __name__ == '__main__': import nose nose.run(argv=['', __file__]) nipy-0.3.0/nipy/algorithms/clustering/tests/test_hierarchical_clustering.py000066400000000000000000000105461210344137400274350ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Several basic tests for hierarchical clustering procedures. Should be cast soon in a nicer unitest framework Author : Bertrand Thirion, 2008-2009 """ import numpy as np from numpy.random import randn from ..hierarchical_clustering import (average_link_graph, average_link_graph_segment, ward, ward_quick, ward_segment, ward_field_segment, ward_quick_segment) from nipy.algorithms.graph.graph import WeightedGraph, knn from nipy.algorithms.graph.field import field_from_graph_and_data from nose.tools import assert_true, assert_equal def alg_test_basic(n=100,k=5): # Check that we obtain the correct solution in a simplistic case np.random.seed(0) x = np.random.randn(n, 2) x[:int(0.7*n)] += 3 G = knn(x, k) t = average_link_graph(G) u = t.split(2) v = np.zeros(n) v[:int(0.7*n)]=1 w = np.absolute(u-v) assert_true(np.sum(w*(1-w))==0) def alg_test_2(): # Do we handle case of graph with too many connected components? np.random.seed(0) n = 100 k = 5 x = np.random.randn(n, 2) x[:int(0.3*n)] += 10 x[int(0.8*n):] -= 10 G = knn(x, k) t = average_link_graph(G) u = t.split(2) assert_true(u.max()==2) def alg_test_3(n=100,k=5): # Check that we obtain the correct solution in a simplistic case np.random.seed(0) x = np.random.randn(n, 2) x[:int(0.7*n)] += 3 G = knn(x, k) u, cost = average_link_graph_segment(G, qmax=2) v = np.zeros(n) v[:int(0.7*n)]=1 w = np.absolute(u-v) assert_true(np.sum(w*(1-w))==0) def ward_test_basic(n=100,k=5): # Basic check of ward's algorithm np.random.seed(0) x = np.random.randn(n, 2) x[:int(0.7*n)] += 3 G = knn(x, k) t = ward(G,x) u = t.split(2) v = np.zeros(n) v[:int(0.7*n)]=1 w = np.absolute(u-v) assert_true(np.sum(w*(1-w))==0) def wardq_test_basic(n=100,k=5): # Basic check of ward's algorithm np.random.seed(0) x = np.random.randn(n, 2) x[:int(0.7*n)] += 3 G = knn(x, k) t = ward_quick(G, x) u = t.split(2) v = np.zeros(n) v[:int(0.7*n)]=1 w = np.absolute(u-v) assert_true(np.sum(w*(1-w))==0) def wardq_test_2(): # Do we handle case of graph with too many connected components? 
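# The shifts below are intended to create three well-separated groups, so the
# 5-nearest-neighbour graph typically breaks into three connected components;
# the assertion u.max() == 2 then checks that asking for two clusters still
# returns one label per component.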
np.random.seed(0) n = 100 k = 5 x = np.random.randn(n, 2) x[:int(0.3*n)] += 10 x[int(0.8*n):] -= 10 G = knn(x, k) t = ward_quick(G, x) u = t.split(2) assert_equal(u.max(), 2) def wardf_test(n=100,k=5): np.random.seed(0) x = np.random.randn(n,2) x[:int(0.7*n)] += 3 G = knn(x, 5) F = field_from_graph_and_data(G, x) u, cost = ward_field_segment(F, qmax=2) v = np.zeros(n) v[:int(0.7*n)]=1 w = np.absolute(u-v) assert_equal(np.sum(w*(1-w)), 0) def wards_test_basic(n=100,k=5): # Basic check of ward's segmentation algorithm np.random.seed(0) x = np.random.randn(n, 2) x[:int(0.7*n)] += 3 G = knn(x, k) u,cost = ward_segment(G, x, qmax=2) v = np.zeros(n) v[:int(0.7*n)]=1 w = np.absolute(u-v) assert_equal(np.sum(w*(1-w)), 0) def wards_test_3(): # Check ward_segment np.random.seed(0) n = 100 k = 5 x = np.random.randn(n,2) x[:int(0.3*n)] += 10 x[int(0.8*n):] -= 10 G = knn(x,k) u,cost = ward_segment(G, x, qmax=2) assert_equal(u.max(), 2) def cost_test(n=100, k=5): # check that cost.max() is equal to the data variance np.random.seed(0) x = np.random.randn(n, 2) G = knn(x, k) u, cost = ward_segment(G, x) assert_true(np.abs(cost.max()/(n*np.var(x,0).sum()) - 1) < 1e-6) def ward_test_more(n=100, k=5, verbose=0): # Check that two implementations give the same result np.random.seed(0) X = randn(n,2) X[:np.ceil(n/3)] += 5 G = knn(X, 5) u,c = ward_segment(G, X, stop=-1, qmax=1, verbose=verbose) u1,c = ward_segment(G, X, stop=-1, qmax=k, verbose=verbose) u,c = ward_quick_segment(G, X, stop=-1, qmax=1, verbose=verbose) u2,c = ward_quick_segment(G, X, stop=-1, qmax=k, verbose=verbose) assert_equal(np.sum(u1==u2), n) if __name__ == '__main__': import nose nose.run(argv=['', __file__]) nipy-0.3.0/nipy/algorithms/clustering/tests/test_imm.py000066400000000000000000000144611210344137400233420ustar00rootroot00000000000000""" Test the Infinite GMM. 
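These tests compare the empirical log-likelihood of IMM samples on standard normal data with the theoretical Gaussian value, and exercise MixedIMM under constant, random, zero and unit null-class probabilities.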
Author : Bertrand Thirion, 2010 """ import numpy as np from ..imm import IMM, MixedIMM, co_labelling from nose.tools import assert_true from numpy.testing import assert_array_equal def test_colabel(): # test the co_labelling functionality z = np.array([0,1,1,0,2]) c = co_labelling(z).todense() tc = np.array([[ 1., 0., 0., 1., 0.], [ 0., 1., 1., 0., 0.], [ 0., 1., 1., 0., 0.], [ 1., 0., 0., 1., 0.], [ 0., 0., 0., 0., 1.]]) assert_array_equal(c, tc) def test_imm_loglike_1D(): # Check that the log-likelihood of the data under the infinite gaussian # mixture model is close to the theoretical data likelihood n = 100 dim = 1 alpha = .5 x = np.random.randn(n, dim) igmm = IMM(alpha, dim) igmm.set_priors(x) # warming igmm.sample(x, niter=100) # sampling like = igmm.sample(x, niter=300) theoretical_ll = -dim*.5*(1+np.log(2*np.pi)) empirical_ll = np.log(like).mean() assert_true(np.absolute(theoretical_ll-empirical_ll)<0.25*dim) def test_imm_loglike_known_groups(): # Check that the log-likelihood of the data under IGMM close to theory n = 50 dim = 1 alpha = .5 x = np.random.randn(n, dim) igmm = IMM(alpha, dim) igmm.set_priors(x) kfold = np.floor(np.random.rand(n)*5).astype(np.int) # warming igmm.sample(x, niter=100) # sampling like = igmm.sample(x, niter=300, kfold=kfold) theoretical_ll = -dim*.5*(1+np.log(2*np.pi)) empirical_ll = np.log(like).mean() assert_true(np.absolute(theoretical_ll-empirical_ll)<0.25*dim) def test_imm_loglike_1D_k10(): # Check with k-fold cross validation (k=10) n = 50 dim = 1 alpha = .5 k = 5 x = np.random.randn(n, dim) igmm = IMM(alpha, dim) igmm.set_priors(x) # warming igmm.sample(x, niter=100, kfold=k) # sampling like = igmm.sample(x, niter=300, kfold=k) theoretical_ll = -dim*.5*(1+np.log(2*np.pi)) empirical_ll = np.log(like).mean() assert_true(np.absolute(theoretical_ll-empirical_ll)<0.25*dim) def test_imm_loglike_2D_fast(): # Faster version for log-likelihood imm n = 100 dim = 2 alpha = .5 x = np.random.randn(n, dim) igmm = IMM(alpha, dim) igmm.set_priors(x) # warming igmm.sample(x, niter=100, init=True) # sampling like = igmm.sample(x, niter=300) theoretical_ll = -dim*.5*(1+np.log(2*np.pi)) empirical_ll = np.log(like).mean() assert_true(np.absolute(theoretical_ll-empirical_ll)<0.25*dim) def test_imm_loglike_2D(): # Slower cross-validated logL check n = 50 dim = 2 alpha = .5 k = 5 x = np.random.randn(n, dim) igmm = IMM(alpha, dim) igmm.set_priors(x) # warming igmm.sample(x, niter=100, init=True, kfold=k) # sampling like = igmm.sample(x, niter=300, kfold=k) theoretical_ll = -dim*.5*(1+np.log(2*np.pi)) empirical_ll = np.log(like).mean() assert_true(np.absolute(theoretical_ll-empirical_ll)<0.25*dim) def test_imm_loglike_2D_a0_1(): # Check with alpha=.1 n = 100 dim = 2 alpha = .1 x = np.random.randn(n, dim) igmm = IMM(alpha, dim) igmm.set_priors(x) # warming igmm.sample(x, niter=100, init=True) # sampling like = igmm.sample(x, niter=300) theoretical_ll = -dim*.5*(1+np.log(2*np.pi)) empirical_ll = np.log(like).mean() print theoretical_ll, empirical_ll assert_true(np.absolute(theoretical_ll-empirical_ll)<0.2*dim) def test_imm_wnc(): # Test the basic imm_wnc n = 50 dim = 1 alpha = .5 g0 = 1. 
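# g0 is the constant density of the null class; the simulated data below lie in
# the unit interval, where a uniform density of 1. integrates to one, so this is
# a natural null model. The null-class prior used below gives every sample an
# even (0.5) chance of belonging to the null class.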
x = np.random.rand(n, dim) x[:.3*n] *= .2 x[:.1*n] *= .3 # instantiate migmm = MixedIMM(alpha, dim) migmm.set_priors(x) migmm.set_constant_densities(null_dens=g0) ncp = 0.5*np.ones(n) # warming migmm.sample(x, null_class_proba=ncp, niter=100, init=True) g = np.reshape(np.linspace(0, 1, 101), (101, dim)) # sampling like, pproba = migmm.sample(x, null_class_proba=ncp, niter=300, sampling_points=g) # the density should sum to 1 ds = 0.01*like.sum() assert_true(ds<1) assert_true(ds>.8) assert_true(np.sum(pproba>.5)>1) assert_true(np.sum(pproba<.5)>1) def test_imm_wnc1(): # Test the basic imm_wnc, where the probaility under the null is random n = 50 dim = 1 alpha = .5 g0 = 1. x = np.random.rand(n, dim) x[:.3*n] *= .2 x[:.1*n] *= .3 # instantiate migmm = MixedIMM(alpha, dim) migmm.set_priors(x) migmm.set_constant_densities(null_dens=g0) ncp = np.random.rand(n) # warming migmm.sample(x, null_class_proba=ncp, niter=100, init=True) g = np.reshape(np.linspace(0, 1, 101), (101, dim)) #sampling like, pproba = migmm.sample(x, null_class_proba=ncp, niter=300, sampling_points=g) # the density should sum to 1 ds = 0.01*like.sum() assert_true(ds<1) assert_true(ds>.8) assert_true(np.sum(pproba>.5)>1) assert_true(np.sum(pproba<.5)>1) def test_imm_wnc2(): # Test the basic imm_wnc when null class is shrunk to 0 n = 50 dim = 1 alpha = .5 g0 = 1. x = np.random.rand(n, dim) x[:.3*n] *= .2 x[:.1*n] *= .3 # instantiate migmm = MixedIMM(alpha, dim) migmm.set_priors(x) migmm.set_constant_densities(null_dens=g0) ncp = np.zeros(n) # warming migmm.sample(x, null_class_proba=ncp, niter=100, init=True) # sampling like, pproba = migmm.sample(x, null_class_proba=ncp, niter=300) assert_true(like.min()>.1) assert_true(like.max()<5.) assert_array_equal(pproba, ncp) def test_imm_wnc3(): # Test the basic imm_wnc when null class is of prob 1 (nothing is estimated) n = 50 dim = 1 alpha = .5 g0 = 1. 
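# The null-class prior below is zero for every sample, so nothing can be
# assigned to the null class: the returned posterior should simply equal ncp,
# while the fitted density is only required to stay within loose bounds.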
x = np.random.rand(n, dim) x[:.3*n] *= .2 x[:.1*n] *= .3 # instantiate migmm = MixedIMM(alpha, dim) migmm.set_priors(x) migmm.set_constant_densities(null_dens=g0) ncp = np.ones(n) # warming migmm.sample(x, null_class_proba=ncp, niter=100, init=True) # sampling like, pproba = migmm.sample(x, null_class_proba=ncp, niter=300) assert_array_equal(pproba, ncp) if __name__ == '__main__': import nose nose.run(argv=['', __file__]) nipy-0.3.0/nipy/algorithms/clustering/tests/test_vmm.py000066400000000000000000000042751210344137400233610ustar00rootroot00000000000000""" Test the Von-Mises-Fisher mixture model Author : Bertrand Thirion, 2010 """ import numpy as np from ..von_mises_fisher_mixture import (VonMisesMixture, sphere_density, select_vmm, select_vmm_cv) from nose.tools import assert_true, assert_equal def test_spherical_area(): # test the co_labelling functionality points, area = sphere_density(100) assert_true(np.abs(area.sum()-4*np.pi)<1.e-2) def test_von_mises_fisher_density(): # test that a density is indeed computed on the unit sphere for a # one-component and three-component model (k == 1, 3) x = np.random.randn(100, 3) x = (x.T/np.sqrt(np.sum(x**2, 1))).T s, area = sphere_density(100) for k in (1, 3): for precision in [.1, 1., 10., 100.]: for null_class in (False, True): vmd = VonMisesMixture(k, precision, null_class=null_class) vmd.estimate(x) # check that it sums to 1 assert_true(np.abs((vmd.mixture_density(s)*area).sum() - 1) < 1e-2) def test_dimension_selection_bic(): # Tests whether dimension selection yields correct results x1 = [0.6, 0.48, 0.64] x2 = [-0.8, 0.48, 0.36] x3 = [0.48, 0.64, -0.6] x = np.random.randn(200, 3) * .1 x[:40] += x1 x[40:150] += x2 x[150:] += x3 x = (x.T / np.sqrt(np.sum(x**2, 1))).T precision = 100. my_vmm = select_vmm(range(1,8), precision, False, x) assert_equal(my_vmm.k, 3) def test_dimension_selection_cv(): # Tests the dimension selection using cross validation x1 = [1, 0, 0] x2 = [-1, 0, 0] x = np.random.randn(20, 3)*.1 x[0::2] += x1 x[1::2] += x2 x = (x.T / np.sqrt(np.sum(x**2,1))).T precision = 50. sub = np.repeat(np.arange(10), 2) my_vmm = select_vmm_cv(range(1,8), precision, x, cv_index=sub, null_class=False, ninit=5) z = np.argmax(my_vmm.responsibilities(x), 1) assert_true(len(np.unique(z))>1) assert_true(len(np.unique(z))<4) if __name__ == '__main__': import nose nose.run(argv=['', __file__]) nipy-0.3.0/nipy/algorithms/clustering/utils.py000066400000000000000000000152341210344137400215160ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: #from _clustering import * #from _clustering import __doc__ import numpy as np def kmeans(X, nbclusters=2, Labels=None, maxiter=300, delta=0.0001, verbose=0, ninit=1): """ kmeans clustering algorithm Parameters ---------- X: array of shape (n,p): n = number of items, p = dimension data array nbclusters (int), the number of desired clusters Labels = None array of shape (n) prior Labels. if None or inadequate a random initilization is performed. maxiter=300 (int), the maximum number of iterations before convergence delta: float, optional, the relative increment in the results before declaring convergence. 
verbose: verbosity mode, optionall ninit: int, optional, number of random initalizations Returns ------- Centers: array of shape (nbclusters, p), the centroids of the resulting clusters Labels : array of size n, the discrete labels of the input items J (float): the final value of the inertia criterion """ nbitems = X.shape[0] if nbitems < 1: if verbose: raise ValueError(" I need at least one item to cluster") if np.size(X.shape) > 2: if verbose: raise ValueError("Please enter a two-dimensional array \ for clustering") if np.size(X.shape) == 1: X = np.reshape(X, (nbitems, 1)) X = X.astype('d') nbclusters = int(nbclusters) if nbclusters < 1: if verbose: print " cannot compute less than 1 cluster" nbclusters = 1 if nbclusters > nbitems: if verbose: print " cannot find more clusters than items" nbclusters = nbitems if (ninit < 1) & verbose: print "making at least one iteration" ninit = np.maximum(int(ninit), 1) if Labels != None: if np.size(Labels) == nbitems: Labels = Labels.astype(np.int) OK = (Labels.min() > -1) & (Labels.max() < nbclusters + 1) if OK: maxiter = int(maxiter) if maxiter > 0: delta = float(delta) if delta < 0: if verbose: print "incorrect stopping criterion - ignored" delta = 0.0001 else: pass else: if verbose: print "incorrect number of iterations - ignored" maxiter = 300 else: if verbose: print "incorrect labelling - ignored" else: if verbose: print "incompatible number of labels provided - ignored" Centers, labels, J = _kmeans(X, nbclusters, Labels, maxiter, delta, ninit) return Centers, labels, J def _MStep(x, z, k): """Computation of cluster centers/means Parameters ---------- x array of shape (n,p) where n = number of samples, p = data dimension z, array of shape (x.shape[0]) current assignment k, int, number of desired clusters Returns ------- centers, array of shape (k,p) the resulting centers """ dim = x.shape[1] centers = np.repeat(np.reshape(x.mean(0), (1, dim)), k, 0) for q in range(k): if np.sum(z == q) == 0: pass else: centers[q] = np.mean(x[z == q], 0) return centers def _EStep(x, centers): """ Computation of the input-to-cluster assignment Parameters ---------- x array of shape (n,p) n = number of items, p = data dimension centers, array of shape (k,p) the cluster centers Returns ------- z vector of shape(n), the resulting assignment """ nbitem = x.shape[0] z = - np.ones(nbitem).astype(np.int) mindist = np.inf * np.ones(nbitem) k = centers.shape[0] for q in range(k): dist = np.sum((x - centers[q]) ** 2, 1) z[dist < mindist] = q mindist = np.minimum(dist, mindist) J = mindist.sum() return z, J def voronoi(x, centers): """ Assignment of data items to nearest cluster center Parameters ---------- x array of shape (n,p) n = number of items, p = data dimension centers, array of shape (k, p) the cluster centers Returns ------- z vector of shape(n), the resulting assignment """ if np.size(x) == x.shape[0]: x = np.reshape(x, (np.size(x), 1)) if np.size(centers) == centers.shape[0]: centers = np.reshape(centers, (np.size(centers), 1)) if x.shape[1] != centers.shape[1]: raise ValueError("Inconsistent dimensions for x and centers") return _EStep(x, centers)[0] def _kmeans(X, nbclusters=2, Labels=None, maxiter=300, delta=1.e-4, ninit=1, verbose=0): """ kmeans clustering algorithm Parameters ---------- X: array of shape (n,p): n = number of items, p = dimension data array nbclusters (int), the number of desired clusters Labels: array of shape (n) prior Labels, optional if None or inadequate a random initilization is performed. 
maxiter: int, optional the maximum number of iterations before convergence delta: float, optional the relative increment in the results before declaring convergence. verbose=0: verboseity mode Returns ------- Centers: array of shape (nbclusters, p), the centroids of the resulting clusters Labels: array of size n, the discrete labels of the input items J, float, the final value of the inertia criterion """ # fixme: do the checks nbitem = X.shape[0] vdata = np.mean(np.var(X, 0)) bJ = np.inf for it in range(ninit): # init if Labels == None: seeds = np.argsort(np.random.rand(nbitem))[:nbclusters] centers = X[seeds] else: centers = _MStep(X, Labels, nbclusters) centers_old = centers.copy() # iterations for i in range(maxiter): z, J = _EStep(X, centers) centers = _MStep(X, z, nbclusters) if verbose: print i, J if np.sum((centers_old - centers) ** 2) < delta * vdata: if verbose: print i break centers_old = centers.copy() if J < bJ: bJ = J centers_output = centers.copy() z_output = z.copy() else: centers_output = centers z_output = z return centers_output, z_output, bJ nipy-0.3.0/nipy/algorithms/clustering/von_mises_fisher_mixture.py000066400000000000000000000317531210344137400255010ustar00rootroot00000000000000""" Implementation of Von-Mises-Fisher Mixture models, i.e. the equaivalent of mixture of Gaussian on the sphere. Author: Bertrand Thirion, 2010-2011 """ import numpy as np class VonMisesMixture(object): """ Model for Von Mises mixture distribution with fixed variance on a two-dimensional sphere """ def __init__(self, k, precision, means=None, weights=None, null_class=False): """ Initialize Von Mises mixture Parameters ---------- k: int, number of components precision: float, the fixed precision parameter means: array of shape(self.k, 3), optional input component centers weights: array of shape(self.k), optional input components weights null_class: bool, optional Inclusion of a null class within the model (related to k=0) fixme ----- consistency checks """ self.k = k self.dim = 2 self.em_dim = 3 self.means = means self.precision = precision self.weights = weights self.null_class = null_class def log_density_per_component(self, x): """Compute the per-component density of the data Parameters ---------- x: array fo shape(n,3) should be on the unit sphere Returns ------- like: array of shape(n, self.k), with non-neagtive values the density """ n = x.shape[0] constant = self.precision / (2 * np.pi * (1 - np.exp( - \ 2 * self.precision))) loglike = np.log(constant) + \ (np.dot(x, self.means.T) - 1) * self.precision if self.null_class: loglike = np.hstack((np.log(1. 
/ (4 * np.pi)) * np.ones((n, 1)), loglike)) return loglike def density_per_component(self, x): """ Compute the per-component density of the data Parameters ---------- x: array fo shape(n,3) should be on the unit sphere Returns ------- like: array of shape(n, self.k), with non-neagtive values the density """ return np.exp(self.log_density_per_component(x)) def weighted_density(self, x): """ Return weighted density Parameters ---------- x: array shape(n,3) should be on the unit sphere Returns ------- like: array of shape(n, self.k) """ return(self.density_per_component(x) * self.weights) def log_weighted_density(self, x): """ Return log weighted density Parameters ---------- x: array fo shape(n,3) should be on the unit sphere Returns ------- log_like: array of shape(n, self.k) """ return(self.log_density_per_component(x) + np.log(self.weights)) def mixture_density(self, x): """ Return mixture density Parameters ---------- x: array fo shape(n,3) should be on the unit sphere Returns ------- like: array of shape(n) """ wl = self.weighted_density(x) return np.sum(wl, 1) def responsibilities(self, x): """ Return responsibilities Parameters ---------- x: array fo shape(n,3) should be on the unit sphere Returns ------- resp: array of shape(n, self.k) """ lwl = self.log_weighted_density(x) wl = np.exp(lwl.T - lwl.mean(1)).T swl = np.sum(wl, 1) resp = (wl.T / swl).T return resp def estimate_weights(self, z): """ Calculate and set weights from `z` Parameters ---------- z: array of shape(self.k) """ self.weights = np.sum(z, 0) / z.sum() def estimate_means(self, x, z): """ Calculate and set means from `x` and `z` Parameters ---------- x: array fo shape(n,3) should be on the unit sphere z: array of shape(self.k) """ m = np.dot(z.T, x) self.means = (m.T / np.sqrt(np.sum(m ** 2, 1))).T def estimate(self, x, maxiter=100, miniter=1, bias=None): """ Return average log density across samples Parameters ---------- x: array of shape (n,3) should be on the unit sphere maxiter : int, optional maximum number of iterations of the algorithms miniter : int, optional minimum number of iterations bias : array of shape(n), optional prior probability of being in a non-null class Returns ------- ll : float average (across samples) log-density """ # initialization with random positions and constant weights if self.weights is None: self.weights = np.ones(self.k) / self.k if self.null_class: self.weights = np.ones(self.k + 1) / (self.k + 1) if self.means is None: aux = np.arange(x.shape[0]) np.random.shuffle(aux) self.means = x[aux[:self.k]] # EM algorithm assert not(np.isnan(self.means).any()) pll = - np.inf for i in range(maxiter): ll = np.log(self.mixture_density(x)).mean() z = self.responsibilities(x) assert not(np.isnan(z).any()) # bias z if bias is not None: z[:, 0] *= (1 - bias) z[:, 1:] = ((z[:, 1:].T) * bias).T z = (z.T / np.sum(z, 1)).T self.estimate_weights(z) if self.null_class: self.estimate_means(x, z[:, 1:]) else: self.estimate_means(x, z) assert not(np.isnan(self.means).any()) if (i > miniter) and (ll < pll + 1.e-6): break pll = ll return ll def show(self, x): """ Visualization utility Parameters ---------- x: array fo shape(n,3) should be on the unit sphere """ # label the data z = np.argmax(self.responsibilities(x), 1) import pylab import mpl_toolkits.mplot3d.axes3d as p3 fig = pylab.figure() ax = p3.Axes3D(fig) colors = (['b', 'g', 'r', 'c', 'm', 'y', 'k', 'w'] * \ (1 + (1 + self.k) / 8))[:self.k + 1] if (self.null_class) and (z == 0).any(): ax.plot3D(x[z == 0, 0], x[z == 0, 1], x[z == 0, 2], '.', 
color=colors[0]) for k in range(self.k): if self.null_class: if np.sum(z == (k + 1)) == 0: continue uk = z == (k + 1) ax.plot3D(x[uk, 0], x[uk, 1], x[uk, 2], '.', color=colors[k + 1]) ax.plot3D([self.means[k, 0]], [self.means[k, 1]], [self.means[k, 2]], 'o', color=colors[k + 1]) else: if np.sum(z == k) == 0: continue ax.plot3D(x[z == k, 0], x[z == k, 1], x[z == k, 2], '.', color=colors[k]) ax.plot3D([self.means[k, 0]], [self.means[k, 1]], [self.means[k, 2]], 'o', color=colors[k]) pylab.show() def estimate_robust_vmm(k, precision, null_class, x, ninit=10, bias=None, maxiter=100): """ Return the best von_mises mixture after severla initialization Parameters ---------- k: int, number of classes precision: float, priori precision parameter null class: bool, optional, should a null class be included or not x: array fo shape(n,3) input data, should be on the unit sphere ninit: int, optional, number of iterations bias: array of shape(n), optional prior probability of being in a non-null class maxiter: int, optional, maximum number of iterations after each initialization """ score = - np.inf for i in range(ninit): aux = VonMisesMixture(k, precision, null_class=null_class) ll = aux.estimate(x, bias=bias) if ll > score: best_model = aux score = ll return best_model def select_vmm(krange, precision, null_class, x, ninit=10, bias=None, maxiter=100, verbose=0): """Return the best von_mises mixture after severla initialization Parameters ---------- krange: list of ints, number of classes to consider precision: null class: x: array fo shape(n,3) should be on the unit sphere ninit: int, optional, number of iterations maxiter: int, optional, bias: array of shape(n), a prior probability of not being in the null class verbose: Bool, optional """ score = - np.inf for k in krange: aux = estimate_robust_vmm(k, precision, null_class, x, ninit, bias, maxiter) ll = aux.estimate(x) if null_class: bic = ll - np.log(x.shape[0]) * k * 3 / x.shape[0] else: bic = ll - np.log(x.shape[0]) * (k * 3 - 1) / x.shape[0] if verbose: print k, bic if bic > score: best_model = aux score = bic return best_model def select_vmm_cv(krange, precision, x, null_class, cv_index, ninit=5, maxiter=100, bias=None, verbose=0): """Return the best von_mises mixture after severla initialization Parameters ---------- krange: list of ints, number of classes to consider precision: float, precision parameter of the von-mises densities x: array fo shape(n, 3) should be on the unit sphere null class: bool, whether a null class should be included or not cv_index: set of indices for cross validation ninit: int, optional, number of iterations maxiter: int, optional, bias: array of shape (n), prior """ score = - np.inf mll = [] for k in krange: mll.append( - np.inf) for j in range(1): ll = np.zeros_like(cv_index).astype(np.float) for i in np.unique(cv_index): xl = x[cv_index != i] xt = x[cv_index == i] bias_l = None if bias is not None: bias_l = bias[cv_index != i] aux = estimate_robust_vmm(k, precision, null_class, xl, ninit=ninit, bias=bias_l, maxiter=maxiter) if bias is None: ll[cv_index == i] = np.log(aux.mixture_density(xt)) else: bias_t = bias[cv_index == i] lwd = aux.weighted_density(xt) ll[cv_index == i] = np.log(lwd[:, 0] * (1 - bias_t) + \ lwd[:, 1:].sum(1) * bias_t) if ll.mean() > mll[-1]: mll[-1] = ll.mean() aux = estimate_robust_vmm(k, precision, null_class, x, ninit, bias=bias, maxiter=maxiter) if verbose: print k, mll[ - 1] if mll[ - 1] > score: best_model = aux score = mll[ - 1] return best_model def sphere_density(npoints): """Return the 
points and area of a npoints**2 points sampled on a sphere Returns ------- s : array of shape(npoints ** 2, 3) area: array of shape(npoints) """ u = np.linspace(0, 2 * np.pi, npoints + 1)[:npoints] v = np.linspace(0, np.pi, npoints + 1)[:npoints] s = np.vstack((np.ravel(np.outer(np.cos(u), np.sin(v))), np.ravel(np.outer(np.sin(u), np.sin(v))), np.ravel(np.outer(np.ones(np.size(u)), np.cos(v))))).T area = np.abs(np.ravel(np.outer(np.ones(np.size(u)), np.sin(v)))) * \ np.pi ** 2 * 2 * 1. / (npoints ** 2) return s, area def example_noisy(): x1 = [0.6, 0.48, 0.64] x2 = [-0.8, 0.48, 0.36] x3 = [0.48, 0.64, -0.6] x = np.random.randn(200, 3) * .1 x[:30] += x1 x[40:150] += x2 x[150:] += x3 x = (x.T / np.sqrt(np.sum(x ** 2, 1))).T precision = 100. vmm = select_vmm(range(2, 7), precision, True, x) vmm.show(x) # check that it sums to 1 s, area = sphere_density(100) print (vmm.mixture_density(s) * area).sum() def example_cv_nonoise(): x1 = [0.6, 0.48, 0.64] x2 = [-0.8, 0.48, 0.36] x3 = [0.48, 0.64, -0.6] x = np.random.randn(30, 3) * .1 x[0::3] += x1 x[1::3] += x2 x[2::3] += x3 x = (x.T / np.sqrt(np.sum(x ** 2, 1))).T precision = 50. sub = np.repeat(np.arange(10), 3) vmm = select_vmm_cv(range(1, 8), precision, x, cv_index=sub, null_class=False, ninit=20) vmm.show(x) # check that it sums to 1 s, area = sphere_density(100) return vmm nipy-0.3.0/nipy/algorithms/diagnostics/000077500000000000000000000000001210344137400201275ustar00rootroot00000000000000nipy-0.3.0/nipy/algorithms/diagnostics/__init__.py000066400000000000000000000004601210344137400222400ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: # Initialization for diagnostics package from .timediff import time_slice_diffs from .tsdiffplot import plot_tsdiffs, plot_tsdiffs_image from .screens import screen from ..utils import pca nipy-0.3.0/nipy/algorithms/diagnostics/screens.py000066400000000000000000000107371210344137400221530ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: ''' Diagnostic 4d image screen ''' from os.path import join as pjoin import numpy as np from ...core.api import Image, drop_io_dim, append_io_dim from ...io.api import save_image from ..utils import pca from .timediff import time_slice_diffs from .tsdiffplot import plot_tsdiffs def screen(img4d, ncomp=10): ''' Diagnostic screen for 4d FMRI image Includes PCA, tsdiffana and mean, std, min, max images. Parameters ---------- img4d : ``Image`` 4d image file ncomp : int, optional number of component images to return. 
Default is 10 Returns ------- screen : dict with keys: * mean : mean image (all summaries are over last dimension) * std : standard deviation image * max : image of max * min : min * pca : 4D image of PCA component images * pca_res : dict of results from PCA * ts_res : dict of results from tsdiffana Examples -------- >>> import nipy as ni >>> from nipy.testing import funcfile >>> img = ni.load_image(funcfile) >>> screen_res = screen(img) >>> screen_res['mean'].ndim 3 >>> screen_res['pca'].ndim 4 ''' if img4d.ndim != 4: raise ValueError('Expecting a 4d image') data = img4d.get_data() cmap = img4d.coordmap cmap_3d = drop_io_dim(cmap, 't') screen_res = {} # standard processed images screen_res['mean'] = Image(np.mean(data, axis=-1), cmap_3d) screen_res['std'] = Image(np.std(data, axis=-1), cmap_3d) screen_res['max'] = Image(np.max(data, axis=-1), cmap_3d) screen_res['min'] = Image(np.min(data, axis=-1), cmap_3d) # PCA screen_res['pca_res'] = pca.pca(data, axis=-1, standardize=False, ncomp=ncomp) cmap_4d = append_io_dim(cmap_3d, 'l' , 't') screen_res['pca'] = Image(screen_res['pca_res']['basis_projections'], cmap_4d) # tsdiffana screen_res['ts_res'] = time_slice_diffs(data) return screen_res def write_screen_res(res, out_path, out_root, out_img_ext='.nii', pcnt_var_thresh=0.1): ''' Write results from ``screen`` to disk as images Parameters ---------- res : dict output from ``screen`` function out_path : str directory to which to write output images out_root : str part of filename between image-specific prefix and image-specific extension to use for writing images out_img_ext : str, optional extension (identifying image type) to which to write volume images. Default is '.nii' pcnt_var_thresh : float, optional threshold below which we do not plot percent variance explained by components; default is 0.1. This removes the long tail from percent variance plots. 
Returns ------- None ''' import matplotlib.pyplot as plt # save volume images for key in ('mean', 'min', 'max', 'std', 'pca'): fname = pjoin(out_path, '%s_%s%s' % (key, out_root, out_img_ext)) save_image(res[key], fname) # plot, save component time courses and some tsdiffana stuff ncomp = res['pca'].shape[-1] vectors = res['pca_res']['basis_vectors'] pcnt_var = res['pca_res']['pcnt_var'] np.savez(pjoin(out_path, 'vectors_components_%s.npz' % out_root), basis_vectors=vectors, pcnt_var=pcnt_var, volume_means=res['ts_res']['volume_means'], slice_mean_diff2=res['ts_res']['slice_mean_diff2'], ) plt.figure() for c in range(ncomp): plt.subplot(ncomp, 1, c+1) plt.plot(vectors[:,c]) plt.axis('tight') plt.suptitle(out_root + ': PCA basis vectors') plt.savefig(pjoin(out_path, 'components_%s.png' % out_root)) # plot percent variance plt.figure() plt.plot(pcnt_var[pcnt_var >= pcnt_var_thresh]) plt.axis('tight') plt.suptitle(out_root + ': PCA percent variance') plt.savefig(pjoin(out_path, 'pcnt_var_%s.png' % out_root)) # plot tsdiffana plt.figure() axes = [plt.subplot(4, 1, i+1) for i in range(4)] plot_tsdiffs(res['ts_res'], axes) plt.suptitle(out_root + ': tsdiffana') plt.savefig(pjoin(out_path, 'tsdiff_%s.png' % out_root)) nipy-0.3.0/nipy/algorithms/diagnostics/setup.py000066400000000000000000000007671210344137400216530ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('diagnostics', parent_package, top_path) config.add_subpackage('tests') config.add_data_dir('tests/data') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) nipy-0.3.0/nipy/algorithms/diagnostics/tests/000077500000000000000000000000001210344137400212715ustar00rootroot00000000000000nipy-0.3.0/nipy/algorithms/diagnostics/tests/__init__.py000066400000000000000000000000521210344137400233770ustar00rootroot00000000000000# Making diagnostics tests into a package nipy-0.3.0/nipy/algorithms/diagnostics/tests/data/000077500000000000000000000000001210344137400222025ustar00rootroot00000000000000nipy-0.3.0/nipy/algorithms/diagnostics/tests/data/generate_tsdiff_results.m000066400000000000000000000007611210344137400272760ustar00rootroot00000000000000% matlab script to regenerate tsdiff results % % First copy nipy.testing.functional.nii.gz to current working directory % % gunzip functional.nii.gz % % Make sure ``timediff.m`` in this directory is on your matlab path, as % is SPM >= version 5 P = spm_select('ExtList', pwd, '^functional\.nii', 1:20); [imgdiff g slicediff] = timediff(P); diff2_mean_vol = spm_read_vols(spm_vol('vscmeanfunctional.nii')); slice_diff2_max_vol = spm_read_vols(spm_vol('vsmaxfunctional.nii')); save tsdiff_results nipy-0.3.0/nipy/algorithms/diagnostics/tests/data/timediff.m000066400000000000000000000072651210344137400241610ustar00rootroot00000000000000function [imdiff, g, slicediff] = timediff(imgs, flags) % Analyses slice by slice variance across time series % FORMAT [imdiff, g, slicediff] = timediff(imgs, flags) % % imgs - string or cell or spm_vol list of images % flags - specify options; if contains: % m - create mean var image (vmean*), max slice var image % (vsmax*) and scan to scan variance image (vscmean*) % v - create variance image for between each time point % % imdiff - mean variance between each image in time series % g - mean 
voxel signal intensity for each image % slicediff - slice by slice variance between each image % % Matthew Brett 17/7/00 [imdiff, g, slicediff] = deal([]); if nargin < 1 imgs = []; end if isempty(imgs) imgs = cbu_get_imgs(Inf, 'Select time series images'); end if isempty(imgs), return, end if iscell(imgs) imgs = char(imgs); end if ischar(imgs) imgs = spm_vol(imgs); end if nargin < 2 flags = 'm'; end nimgs = size(imgs,1); if isempty(nimgs) | nimgs < 2 return end V1 = imgs(1); Vr = imgs(2:end); ndimgs = nimgs-1; Hold = 0; if any(flags == 'v') % create variance images for i = 1:ndimgs vVr(i) = makevol(Vr(i),'v',16); % float end end if any(flags == 'm') % mean /max variance mVr = makevol(V1,'vmean',16); sVr = makevol(V1,'vscmean',16); xVr = makevol(V1,'vsmax',16); end [xydim zno] = deal(V1.dim(1:2),V1.dim(3)); p1 = spm_read_vols(V1); slicediff = zeros(ndimgs,zno); g = zeros(ndimgs,1); for z = 1:zno % across slices M = spm_matrix([0 0 z]); pr = p1(:,:,z); % this slice from first volume if any(flags == 'm') [mv sx2 sx mxvs] = deal(zeros(size(pr))); end % SVD is squared voxel difference (usually a slice of same) % MSVD is the mean of this measure across voxels (one value) % DTP is a difference time point (1:T-1) cmax = 0; % counter for which slice has the largest MSVD % note that Vr contains volumes 2:T (not the first) for i = 1:ndimgs % across DTPs c = spm_slice_vol(Vr(i),M,xydim,Hold); % get slice from this time point v = (c - pr).^2; % SVD from this slice to last slicediff(i,z) = mean(v(:)) % MSVD for this slice g(i) = g(i) + mean(c(:)); % simple mean of data if slicediff(i,z)>cmax % if this slice has larger MSVD, keep mxvs = v; cmax = slicediff(i,z); end pr = c; % set current slice data as previous, for next iteration of loop if any(flags == 'v') % write individual SVD slice for DTP vVr(i) = spm_write_plane(vVr(i),v,z); end if any(flags == 'm') mv = mv + v; % sum up SVDs for mean SVD (across time points) sx = sx + c; % sum up data for simple variance calculation sx2 = sx2 + c.^2; % sum up squared data for simple variance % calculation end end if any(flags == 'm') % mean variance etc sVr = spm_write_plane(sVr,mv/(ndimgs),z); % write mean of SVDs % across time xVr = spm_write_plane(xVr,mxvs,z); % write maximum SVD mVr = spm_write_plane(mVr,(sx2-((sx.^2)/ndimgs))./(ndimgs-1),z); % (above) this is the one-pass simple variance formula end end if any(findstr(spm('ver'), '99')) spm_close_vol([vVr sVr xVr mVr]); end g = [mean(p1(:)); g/zno]; imdiff = mean(slicediff')'; return function Vo = makevol(Vi, prefix, datatype) Vo = Vi; fn = Vi.fname; [p f e] = fileparts(fn); Vo.fname = fullfile(p, [prefix f e]); switch spm('ver') case {'SPM5','SPM8','SPM8b'} Vo.dt = [datatype 0]; Vo = spm_create_vol(Vo, 'noopen'); case 'SPM2' Vo.dim(4) = datatype; Vo = spm_create_vol(Vo, 'noopen'); case 'SPM99' Vo.dim(4) = datatype; Vo = spm_create_image(Vo); otherwise error(sprintf('What ees thees version "%s"', spm('ver'))); end return nipy-0.3.0/nipy/algorithms/diagnostics/tests/data/tsdiff_results.mat000066400000000000000000000353571210344137400257620ustar00rootroot00000000000000MATLAB 5.0 MAT-file, Platform: GLNX86, Created on: Wed Dec 16 13:02:32 2009 IMxmI@@qEXpdn` aZM<Dz`\שT:=T%ݎ"棆)_64NfWϠ@ Tw Xո^H$gwo57pxc``l@AbF(f̴4N {_]V-A'f_[dR{Yr9[t7:,`ӬuMe bW8Maa6/ox֯tXs`-+{\W:TN#s0mtmcAGwJ:d9twjZVGxc``l@AbF0fdHҜ@x qx6H=%t 9kD9Uqػa\5/ *qZD+87DfqR^aks SkDBCѶ-=h٢)𽂧>[z@^tFÀ+?\[o`>Sy1:JDŽYr<79"I ƊLL%VK<$͉ݮn#Qsb  a,?ȵ+<鼳ŧĄe^pK:c:s=}~yKG\uLDW`jVN7+Jfp F&T21`myM`L/D{Ĺl kwum|)q<ߴ  .Y_e)ӓN}\3s}4~̓]<bN]o y4Ko$==B_.{uκJ&xכ]L 
{JWN!}4XH>&p.$P8!b߷e?(jD~6U֛ }l^u=U:qދ| F_3\z9l7Kn*tob݆ȣֹԷ6:2}WÍ&cyyޛwrNJB'i"ox%$Hzgp]+L7Zg,{d-+x'9J麪/s8yÍCzbз$g"ajA_Lyi&aNI| q*_Nq>`LL7t;g$iy8Y9"6ֲ_1_-tgc~[Kț]^-f2T N"<{AvnJ_ i 89 v-8EǼgoQT~->{W>RWc?E?f=p(S};t^GSEzCp?9b:?CY7˼,_WA(~5]hl 7h@WӠ@@9c@@Ye@Ijjw@N@Q5@ݏ~@5@qҗ@^ "@nđ@`H^A@j?@rE»I@0g*@Y@$4 m@ 6@kV@&Ț@v@ AcS@h\sq@R@bSN@>I@}@diɚ@BGg@GgF@&ƣ@Eɷǟ@[&kf@83Ւ@::@}@ć&@@\=@kM2@ 0jߠ@@lǫ@0CR?@p(u@ @hb@S|@<27@^q@v͟@#-@v@ 'o@Zoޜ@@Z@Ny@i]knipy-0.3.0/nipy/algorithms/diagnostics/tests/test_screen.py000066400000000000000000000033021210344137400241570ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Testing diagnostic screen """ import numpy as np import nipy as ni from ..screens import screen from ..timediff import time_slice_diffs from ...utils.pca import pca from ...utils.tests.test_pca import res2pos1 from nose.tools import (assert_true, assert_false, assert_equal, assert_raises) from numpy.testing import (assert_array_equal, assert_array_almost_equal, assert_almost_equal) from nipy.testing import funcfile, anatfile def test_screen(): img = ni.load_image(funcfile) res = screen(img) assert_equal(res['mean'].ndim, 3) assert_equal(res['pca'].ndim, 4) assert_equal(sorted(res.keys()), ['max', 'mean', 'min', 'pca', 'pca_res', 'std', 'ts_res']) data = img.get_data() assert_array_equal(np.max(data, axis=-1), res['max'].get_data()) assert_array_equal(np.mean(data, axis=-1), res['mean'].get_data()) assert_array_equal(np.min(data, axis=-1), res['min'].get_data()) assert_array_equal(np.std(data, axis=-1), res['std'].get_data()) pca_res = pca(data, axis=-1, standardize=False, ncomp=10) # On windows, there seems to be some randomness in the PCA output vector # signs; this routine sets the basis vectors to have first value positive, # and therefore standardized the signs pca_res = res2pos1(pca_res) screen_pca_res = res2pos1(res['pca_res']) for key in pca_res: assert_almost_equal(pca_res[key], screen_pca_res[key]) ts_res = time_slice_diffs(data) for key in ts_res: assert_array_equal(ts_res[key], res['ts_res'][key]) nipy-0.3.0/nipy/algorithms/diagnostics/tests/test_time_difference.py000066400000000000000000000065661210344137400260270ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Testing tsdiffana """ from os.path import dirname, join as pjoin import numpy as np import scipy.io as sio from .. 
import timediff as tsd from nose.tools import assert_true, assert_false, \ assert_equal, assert_raises from numpy.testing import assert_array_equal, assert_array_almost_equal from nipy import load_image from nipy.testing import funcfile TEST_DATA_PATH = pjoin(dirname(__file__), 'data') def test_time_slice_diffs(): n_tps = 10 n_slices = 4 slice_shape = (2,3) slice_size = np.prod(slice_shape) vol_shape = slice_shape + (n_slices,) vol_size = np.prod(vol_shape) ts = np.random.normal(size=vol_shape + (n_tps,)) * 100 + 10 expected = {} expected['volume_means'] = ts.reshape((vol_size, -1)).mean(0) # difference over time ^2 diffs2 = np.diff(ts, axis=-1)**2 expected['volume_mean_diff2'] = np.mean( diffs2.reshape((vol_size, -1)), 0) expected['slice_mean_diff2'] = np.zeros((n_tps-1, n_slices)) for s in range(n_slices): v = diffs2[:,:,s,:].reshape((slice_size, -1)) expected['slice_mean_diff2'][:,s] = np.mean(v, 0) expected['diff2_mean_vol'] = np.mean(diffs2, -1) max_diff_is = np.argmax(expected['slice_mean_diff2'], 0) sdmv = np.empty(vol_shape) for si, dti in enumerate(max_diff_is): sdmv[:,:,si] = diffs2[:,:,si,dti] expected['slice_diff2_max_vol'] = sdmv results = tsd.time_slice_diffs(ts) for key in expected: assert_array_almost_equal(results[key], expected[key]) # tranposes, reset axes, get the same result results = tsd.time_slice_diffs(ts.T, 0, 1) results['diff2_mean_vol'] = results['diff2_mean_vol'].T results['slice_diff2_max_vol'] = results['slice_diff2_max_vol'].T for key in expected: assert_array_almost_equal(results[key], expected[key]) ts_t = ts.transpose((1, 3, 0, 2)) results = tsd.time_slice_diffs(ts_t, 1, -1) results['diff2_mean_vol'] = results['diff2_mean_vol'].transpose( ((1,0,2))) results['slice_diff2_max_vol'] = results['slice_diff2_max_vol'].transpose( ((1,0,2))) for key in expected: assert_array_almost_equal(results[key], expected[key]) def test_against_matlab_results(): fimg = load_image(funcfile) results = tsd.time_slice_diffs(fimg.get_data()) # struct as record only to avoid deprecation warning tsd_results = sio.loadmat(pjoin(TEST_DATA_PATH, 'tsdiff_results.mat'), struct_as_record=True, squeeze_me=True) assert_array_almost_equal(results['volume_means'], tsd_results['g']) assert_array_almost_equal(results['volume_mean_diff2'], tsd_results['imgdiff']) assert_array_almost_equal(results['slice_mean_diff2'], tsd_results['slicediff']) # next tests are from saved, reloaded volumes at 16 bit integer # precision, so are not exact, but very close, given that the mean # of this array is around 3200 assert_array_almost_equal(results['diff2_mean_vol'], tsd_results['diff2_mean_vol'], decimal=1) assert_array_almost_equal(results['slice_diff2_max_vol'], tsd_results['slice_diff2_max_vol'], decimal=1) nipy-0.3.0/nipy/algorithms/diagnostics/timediff.py000066400000000000000000000111321210344137400222660ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: ''' Time series diagnostics These started life as ``tsdiffana.m`` - see http://imaging.mrc-cbu.cam.ac.uk/imaging/DataDiagnostics Oliver Josephs (FIL) gave me the idea of time-point to time-point subtraction as a diagnostic for motion and other sudden image changes. ''' import numpy as np def time_slice_diffs(arr, time_axis=-1, slice_axis=-2): ''' Time-point to time-point differences over volumes and slices We think of the passed array as an image. 
The image has a "time" dimension given by `time_axis` and a "slice" dimension, given by `slice_axis`, and one or other dimensions. In the case of imaging there will usually be two more dimensions (the dimensions defining the size of an image slice). A single slice in the time dimension we call a "volume". A single entry in `arr` is a "voxel". For example, if `time_axis` == 0, then ``v = arr[0]`` would be the first volume in the series. The volume ``v`` above has ``v.size`` voxels. If, in addition, `slice_axis` == 1, then for the volume ``v`` (above) ``s = v[0]`` would be a "slice", with ``s.size`` voxels. These are obviously terms from neuroimaging. Parameters ---------- arr : array_like Array over which to calculate time and slice differences. We'll call this array an 'image' in this doc. time_axis : int axis of `arr` that varies over time. slice_axis : int axis of `arr` that varies over image slice. Returns ------- results : dict Here ``T`` is the number of time points (``arr.shape[time_axis]``) and ``S`` is the number of slices (``arr.shape[slice_axis]``), ``v`` is the shape of a volume, and ``d2[t]`` is the volume of squared differences between voxels at time point ``t`` and time point ``t+1`` `results` has keys: * 'volume_mean_diff2' : (T-1,) array array containing the mean (over voxels in volume) of the squared difference from one time point to the next * 'slice_mean_diff2' : (T-1, S) array giving the mean (over voxels in slice) of the difference from one time point to the next, one value per slice, per timepoint * 'volume_means' : (T,) array mean over voxels for each volume ``vol[t] for t in 0:T * 'slice_diff2_max_vol' : v[:] array volume, of same shape as input volumes, where each slice is is the slice from ``d2[t]`` for t in 0:T-1, that has the largest variance across ``t``. Thus each slice in the volume may well result from a different difference time point. * 'diff2_mean_vol`` : v[:] array volume with the mean of ``d2[t]`` across t for t in 0:T-1. 
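    Examples
    --------
    A minimal usage sketch, assuming the default ``time_axis=-1`` and
    ``slice_axis=-2`` (last axis is time, next-to-last is slice); the input
    here is random, so only the output shapes are shown:

    >>> import numpy as np
    >>> from nipy.algorithms.diagnostics.timediff import time_slice_diffs
    >>> arr = np.random.normal(size=(2, 3, 4, 10))  # 4 slices, 10 time points
    >>> results = time_slice_diffs(arr)
    >>> results['volume_mean_diff2'].shape
    (9,)
    >>> results['slice_mean_diff2'].shape
    (9, 4)
    >>> results['diff2_mean_vol'].shape
    (2, 3, 4)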
''' arr = np.asarray(arr) ndim = arr.ndim # roll time axis to 0, slice axis to 1 for convenience if time_axis < 0: time_axis += ndim if slice_axis < 0: slice_axis += ndim arr = np.rollaxis(arr, time_axis) # we may have changed the position of slice_axis if time_axis > slice_axis: slice_axis += 1 arr = np.rollaxis(arr, slice_axis, 1) # shapes of things shape = arr.shape T = shape[0] S = shape[1] vol_shape = shape[1:] # loop over time points to save memory volds = np.empty((T-1,)) sliceds = np.empty((T-1,S)) means = np.empty((T,)) diff_mean_vol = np.zeros(vol_shape) slice_diff_max_vol = np.zeros(vol_shape) slice_diff_maxes = np.zeros(S) last_tp = arr[0] means[0] = last_tp.mean() for dtpi in range(0,T-1): tp = arr[dtpi+1] # shape vol_shape means[dtpi+1] = tp.mean() dtp_diff2 = (tp - last_tp)**2 diff_mean_vol += dtp_diff2 sliceds[dtpi] = dtp_diff2.reshape(S, -1).mean(-1) # check whether we have found a highest-diff slice sdmx_higher = sliceds[dtpi] > slice_diff_maxes if any(sdmx_higher): slice_diff_maxes[sdmx_higher] = sliceds[dtpi][sdmx_higher] slice_diff_max_vol[sdmx_higher] = dtp_diff2[sdmx_higher] last_tp = tp volds = sliceds.mean(1) diff_mean_vol /= (T-1) # roll vol shapes back to match input diff_mean_vol = np.rollaxis(diff_mean_vol, 0, slice_axis) slice_diff_max_vol = np.rollaxis(slice_diff_max_vol, 0, slice_axis) return {'volume_mean_diff2': volds, 'slice_mean_diff2': sliceds, 'volume_means': means, 'diff2_mean_vol': diff_mean_vol, 'slice_diff2_max_vol': slice_diff_max_vol} nipy-0.3.0/nipy/algorithms/diagnostics/tsdiffplot.py000066400000000000000000000064121210344137400226620ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: ''' plot tsdiffana parameters ''' import numpy as np import nipy from .timediff import time_slice_diffs def plot_tsdiffs(results, axes=None): ''' Plotting routine for time series difference metrics Requires matplotlib Parameters ---------- results : dict Results of format returned from :func:`nipy.algorithms.diagnostics.time_slice_diff` ''' import matplotlib.pyplot as plt T = len(results['volume_means']) S = results['slice_mean_diff2'].shape[1] mean_means = np.mean(results['volume_means']) scaled_slice_diff = results['slice_mean_diff2'] / mean_means if axes is None: n_plots = 4 fig = plt.figure() fig.set_size_inches([10,10]) axes = [plt.subplot(n_plots, 1, i+1) for i in range(n_plots)] def xmax_labels(ax, val, xlabel, ylabel): xlims = ax.axis() ax.axis((0, val) + xlims[2:]) ax.set_xlabel(xlabel) ax.set_ylabel(ylabel) # plot of mean volume variance ax = axes[0] ax.plot(results['volume_mean_diff2'] / mean_means) xmax_labels(ax, T-1, 'Difference image number', 'Scaled variance') # plot of diff by slice ax = axes[1] #Set up the color map for the different slices: X, Y = np.meshgrid(np.arange(scaled_slice_diff.shape[0]), np.arange(scaled_slice_diff.shape[1])) # Use HSV in order to code the slices from bottom to top: ax.scatter(X.T.ravel(),scaled_slice_diff.ravel(), c=Y.T.ravel(),cmap=plt.cm.hsv, alpha=0.2) xmax_labels(ax, T-1, 'Difference image number', 'Slice by slice variance') # mean intensity ax = axes[2] ax.plot(results['volume_means'] / mean_means) xmax_labels(ax, T, 'Image number', 'Scaled mean \n voxel intensity') # slice plots min max mean ax = axes[3] ax.hold(True) ax.plot(np.mean(scaled_slice_diff, 0), 'k') ax.plot(np.min(scaled_slice_diff, 0), 'b') ax.plot(np.max(scaled_slice_diff, 0), 'r') ax.hold(False) xmax_labels(ax, S+1, 'Slice number', 'Max/mean/min \n slice 
variation') return axes def plot_tsdiffs_image(img, axes=None, show=True): ''' Plot time series diagnostics for image Parameters ---------- img : image-like or filename str image on which to do diagnostics axes : None or sequence, optional Axes on which to plot the diagnostics. If None, then we create a figure and subplots for the plots. Sequence should have length >=4. show : bool, optional If True, show the figure after plotting it Returns ------- axes : Matplotlib axes Axes on which we have done the plots. Will be same as `axes` input if not None ''' if isinstance(img, basestring): title = img else: title = 'Difference plots' img = nipy.as_image(img) res = time_slice_diffs(img) axes = plot_tsdiffs(res, axes) axes[0].set_title(title) if show: # show the plot import matplotlib.pyplot as plt plt.show() return axes nipy-0.3.0/nipy/algorithms/fwhm.py000066400000000000000000000137431210344137400171430ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This module provides classes and definitions for using full width at half maximum (FWHM) to be used in conjunction with Gaussian Random Field Theory to determine resolution elements (resels). A resolution element (resel) is defined as a block of pixels of the same size as the FWHM of the smoothed image. There are two methods implemented to estimate (3d, or volumewise) FWHM based on a 4d Image: fastFHWM: used if the entire 4d Image is available iterFWHM: used when 4d Image is being filled in by slices of residuals """ __docformat__ = 'restructuredtext' import numpy as np from numpy.linalg import det from nipy.core.api import Image from .utils.matrices import pos_recipr class Resels(object): """The Resels class. """ def __init__(self, coordmap, normalized=False, fwhm=None, resels=None, mask=None, clobber=False, D=3): """ Initialize resels class Parameters ---------- coordmap : ``CoordinateMap`` CoordinateMap over which fwhm and resels are to be estimated. Used in fwhm/resel conversion. fwhm : ``Image`` Optional Image of FWHM. Used to convert FWHM Image to resels if FWHM is not being estimated. resels : ``Image`` Optional Image of resels. Used to compute resels within a mask, for instance, if FWHM has already been estimated. mask : ``Image`` Mask over which to integrate resels. clobber : ``bool`` Clobber output FWHM and resel images? D : ``int`` Can be 2 or 3, the dimension of the final volume. """ self.fwhm = fwhm self.resels = resels self.mask = mask self.clobber = clobber self.coordmap = coordmap self.D = D self.normalized = normalized _transform = self.coordmap.affine self.wedge = np.power(np.fabs(det(_transform)), 1./self.D) def integrate(self, mask=None): """ Integrate resels within `mask` (or use self.mask) Parameters ---------- mask : ``Image`` Optional mask over which to integrate (add) resels. Returns ------- total_resels : the resels contained in the mask FWHM : float an estimate of FWHM based on the average resel per voxel nvoxel: int the number of voxels in the mask """ _resels = self.resels[:] if mask is not None: _mask = mask else: _mask = self.mask if _mask is not None: _mask = _mask[:].astype(np.int32) nvoxel = _mask.sum() else: _mask = 1. 
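            # No mask supplied: weight every voxel equally and count every
            # voxel in the resel image; the mean resels-per-voxel computed
            # below is then converted to an equivalent isotropic FWHM by
            # resel2fwhm.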
nvoxel = _resels.size _resels = (_resels * _mask).sum() _fwhm = self.resel2fwhm(_resels / nvoxel) return _resels, _fwhm, nvoxel def resel2fwhm(self, resels): """ Convert resels as `resels` to isotropic FWHM Parameters ---------- resels : float Convert a resel value to an equivalent isotropic FWHM based on step sizes in self.coordmap. Returns ------- fwhm : float """ return np.sqrt(4*np.log(2.)) * self.wedge * pos_recipr(np.power(resels, 1./self.D)) def fwhm2resel(self, fwhm): """ Convert FWHM `fwhm` to equivalent reseels per voxel Parameters ---------- fwhm : float Convert an FWHM value to an equivalent resels per voxel based on step sizes in self.coordmap. Returns ------- resels : float """ return pos_recipr(np.power(fwhm / np.sqrt(4*np.log(2)) * self.wedge, self.D)) def __iter__(self): """ Return iterator Returns ------- itor : iterator self """ if not self.fwhm: im = Image(np.zeros(self.resid.shape), coordmap=self.coordmap) else: im = \ Image(self.fwhm, clobber=self.clobber, mode='w', coordmap=self.coordmap) self.fwhm = im if not self.resels: im = Image(np.zeros(self.resid.shape), coordmap=self.coordmap) else: im = \ Image(self.resels, clobber=self.clobber, mode='w', coordmap=self.coordmap) self.resels = im return self class ReselImage(Resels): def __init__(self, resels=None, fwhm=None, **keywords): """ Initialize resel image Parameters ---------- resels : `core.api.Image` Image of resel per voxel values. fwhm : `core.api.Image` Image of FWHM values. keywords : ``dict`` Passed as keywords arguments to `core.api.Image` """ if not resels and not fwhm: raise ValueError, 'need either a resels image or an FWHM image' if fwhm is not None: fwhm = Image(fwhm, **keywords) Resels.__init__(self, fwhm, resels=resels, fwhm=fwhm) if resels is not None: resels = Image(resels, **keywords) Resels.__init__(self, resels, resels=resels, fwhm=fwhm) if not self.fwhm: self.fwhm = Image(self.resel2fwhm(self.resels[:]), coordmap=self.coordmap, **keywords) if not self.resels: self.resels = Image(self.fwhm2resel(self.fwhm[:]), coordmap=self.coordmap, **keywords) def __iter__(self): """ Return iterator Returns ------- itor : iterator ``self`` """ return self def _calc_detlam(xx, yy, zz, yx, zx, zy): """ Calculate determinant of symmetric 3x3 matrix [[xx,yx,xz], [yx,yy,zy], [zx,zy,zz]] """ return zz * (yy*xx - yx**2) - \ zy * (zy*xx - zx*yx) + \ zx * (zy*yx - zx*yy) nipy-0.3.0/nipy/algorithms/graph/000077500000000000000000000000001210344137400167215ustar00rootroot00000000000000nipy-0.3.0/nipy/algorithms/graph/__init__.py000066400000000000000000000006661210344137400210420ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from .graph import (Graph, WeightedGraph, wgraph_from_coo_matrix, wgraph_from_adjacency, complete_graph, mst, knn, eps_nn, lil_cc, graph_3d_grid, wgraph_from_3d_grid, concatenate_graphs) from nipy.testing import Tester test = Tester().test bench = Tester().bench nipy-0.3.0/nipy/algorithms/graph/_graph.c000066400000000000000000007101651210344137400203370ustar00rootroot00000000000000/* Generated by Cython 0.17.4 on Sat Jan 12 17:27:26 2013 */ #define PY_SSIZE_T_CLEAN #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02040000 #error Cython requires Python 2.4+. 
#else #include /* For offsetof */ #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #endif #if PY_VERSION_HEX < 0x02050000 typedef int Py_ssize_t; #define PY_SSIZE_T_MAX INT_MAX #define PY_SSIZE_T_MIN INT_MIN #define PY_FORMAT_SIZE_T "" #define CYTHON_FORMAT_SSIZE_T "" #define PyInt_FromSsize_t(z) PyInt_FromLong(z) #define PyInt_AsSsize_t(o) __Pyx_PyInt_AsInt(o) #define PyNumber_Index(o) ((PyNumber_Check(o) && !PyFloat_Check(o)) ? PyNumber_Int(o) : \ (PyErr_Format(PyExc_TypeError, \ "expected index value, got %.200s", Py_TYPE(o)->tp_name), \ (PyObject*)0)) #define __Pyx_PyIndex_Check(o) (PyNumber_Check(o) && !PyFloat_Check(o) && \ !PyComplex_Check(o)) #define PyIndex_Check __Pyx_PyIndex_Check #define PyErr_WarnEx(category, message, stacklevel) PyErr_Warn(category, message) #define __PYX_BUILD_PY_SSIZE_T "i" #else #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #define __Pyx_PyIndex_Check PyIndex_Check #endif #if PY_VERSION_HEX < 0x02060000 #define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt) #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type) #define Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size) #define PyVarObject_HEAD_INIT(type, size) \ PyObject_HEAD_INIT(type) size, #define PyType_Modified(t) typedef struct { void *buf; PyObject *obj; Py_ssize_t len; Py_ssize_t itemsize; int readonly; int ndim; char *format; Py_ssize_t *shape; Py_ssize_t *strides; Py_ssize_t *suboffsets; void *internal; } Py_buffer; #define PyBUF_SIMPLE 0 #define PyBUF_WRITABLE 0x0001 #define PyBUF_FORMAT 0x0004 #define PyBUF_ND 0x0008 #define PyBUF_STRIDES (0x0010 | PyBUF_ND) #define PyBUF_C_CONTIGUOUS (0x0020 | PyBUF_STRIDES) #define PyBUF_F_CONTIGUOUS (0x0040 | PyBUF_STRIDES) #define PyBUF_ANY_CONTIGUOUS (0x0080 | PyBUF_STRIDES) #define PyBUF_INDIRECT (0x0100 | PyBUF_STRIDES) #define PyBUF_RECORDS (PyBUF_STRIDES | PyBUF_FORMAT | PyBUF_WRITABLE) #define PyBUF_FULL (PyBUF_INDIRECT | PyBUF_FORMAT | PyBUF_WRITABLE) typedef int (*getbufferproc)(PyObject *, Py_buffer *, int); typedef void (*releasebufferproc)(PyObject *, Py_buffer *); #endif #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \ PyCode_New(a, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #endif #if PY_MAJOR_VERSION < 3 && PY_MINOR_VERSION < 6 #define PyUnicode_FromString(s) PyUnicode_Decode(s, strlen(s), "UTF-8", "strict") #endif #if PY_MAJOR_VERSION >= 3 #define Py_TPFLAGS_CHECKTYPES 0 #define Py_TPFLAGS_HAVE_INDEX 0 #endif #if (PY_VERSION_HEX < 0x02060000) || (PY_MAJOR_VERSION >= 3) #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 
#define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ? \ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #else #define CYTHON_PEP393_ENABLED 0 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_READ(k, d, i) ((k=k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #endif #if PY_VERSION_HEX < 0x02060000 #define PyBytesObject PyStringObject #define PyBytes_Type PyString_Type #define PyBytes_Check PyString_Check #define PyBytes_CheckExact PyString_CheckExact #define PyBytes_FromString PyString_FromString #define PyBytes_FromStringAndSize PyString_FromStringAndSize #define PyBytes_FromFormat PyString_FromFormat #define PyBytes_DecodeEscape PyString_DecodeEscape #define PyBytes_AsString PyString_AsString #define PyBytes_AsStringAndSize PyString_AsStringAndSize #define PyBytes_Size PyString_Size #define PyBytes_AS_STRING PyString_AS_STRING #define PyBytes_GET_SIZE PyString_GET_SIZE #define PyBytes_Repr PyString_Repr #define PyBytes_Concat PyString_Concat #define PyBytes_ConcatAndDel PyString_ConcatAndDel #endif #if PY_VERSION_HEX < 0x02060000 #define PySet_Check(obj) PyObject_TypeCheck(obj, &PySet_Type) #define PyFrozenSet_Check(obj) PyObject_TypeCheck(obj, &PyFrozenSet_Type) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_VERSION_HEX < 0x03020000 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if (PY_MAJOR_VERSION < 3) || (PY_VERSION_HEX >= 0x03010300) #define __Pyx_PySequence_GetSlice(obj, a, b) PySequence_GetSlice(obj, a, b) #define __Pyx_PySequence_SetSlice(obj, a, b, value) PySequence_SetSlice(obj, a, b, value) #define __Pyx_PySequence_DelSlice(obj, a, b) PySequence_DelSlice(obj, a, b) #else #define __Pyx_PySequence_GetSlice(obj, a, b) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), (PyObject*)0) : \ (likely((obj)->ob_type->tp_as_mapping) ? 
(PySequence_GetSlice(obj, a, b)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object is unsliceable", (obj)->ob_type->tp_name), (PyObject*)0))) #define __Pyx_PySequence_SetSlice(obj, a, b, value) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_SetSlice(obj, a, b, value)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice assignment", (obj)->ob_type->tp_name), -1))) #define __Pyx_PySequence_DelSlice(obj, a, b) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_DelSlice(obj, a, b)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice deletion", (obj)->ob_type->tp_name), -1))) #endif #if PY_MAJOR_VERSION >= 3 #define PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func)) #endif #if PY_VERSION_HEX < 0x02050000 #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),((char *)(n))) #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),((char *)(n)),(a)) #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),((char *)(n))) #else #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),(n)) #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),(n),(a)) #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),(n)) #endif #if PY_VERSION_HEX < 0x02050000 #define __Pyx_NAMESTR(n) ((char *)(n)) #define __Pyx_DOCSTR(n) ((char *)(n)) #else #define __Pyx_NAMESTR(n) (n) #define __Pyx_DOCSTR(n) (n) #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include #define __PYX_HAVE__nipy__algorithms__graph___graph #define __PYX_HAVE_API__nipy__algorithms__graph___graph #include "stdio.h" #include "stdlib.h" #include "numpy/arrayobject.h" #include "numpy/ufuncobject.h" #ifdef _OPENMP #include #endif /* _OPENMP */ #ifdef PYREX_WITHOUT_ASSERTIONS #define CYTHON_WITHOUT_ASSERTIONS #endif /* inline attribute */ #ifndef CYTHON_INLINE #if defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif /* unused attribute */ #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif typedef struct {PyObject **p; char *s; const long n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; /*proto*/ /* Type Conversion Predeclarations */ #define __Pyx_PyBytes_FromUString(s) PyBytes_FromString((char*)s) #define __Pyx_PyBytes_AsUString(s) ((unsigned char*) PyBytes_AsString(s)) #define 
__Pyx_Owned_Py_None(b) (Py_INCREF(Py_None), Py_None) #define __Pyx_PyBool_FromLong(b) ((b) ? (Py_INCREF(Py_True), Py_True) : (Py_INCREF(Py_False), Py_False)) static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x); static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject*); #if CYTHON_COMPILING_IN_CPYTHON #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #ifdef __GNUC__ /* Test for GCC > 2.95 */ #if __GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* __GNUC__ > 2 ... */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ > 2 ... */ #else /* __GNUC__ */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static PyObject *__pyx_m; static PyObject *__pyx_b; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= __FILE__; static const char *__pyx_filename; #if !defined(CYTHON_CCOMPLEX) #if defined(__cplusplus) #define CYTHON_CCOMPLEX 1 #elif defined(_Complex_I) #define CYTHON_CCOMPLEX 1 #else #define CYTHON_CCOMPLEX 0 #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus #include #else #include #endif #endif #if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__) #undef _Complex_I #define _Complex_I 1.0fj #endif static const char *__pyx_f[] = { "_graph.pyx", "numpy.pxd", "type.pxd", }; #define IS_UNSIGNED(type) (((type) -1) > 0) struct __Pyx_StructField_; #define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0) typedef struct { const char* name; /* for error messages only */ struct __Pyx_StructField_* fields; size_t size; /* sizeof(type) */ size_t arraysize[8]; /* length of array in each dimension */ int ndim; char typegroup; /* _R_eal, _C_omplex, Signed _I_nt, _U_nsigned int, _S_truct, _P_ointer, _O_bject, c_H_ar */ char is_unsigned; int flags; } __Pyx_TypeInfo; typedef struct __Pyx_StructField_ { __Pyx_TypeInfo* type; const char* name; size_t offset; } __Pyx_StructField; typedef struct { __Pyx_StructField* field; size_t parent_offset; } __Pyx_BufFmt_StackElem; typedef struct { __Pyx_StructField root; __Pyx_BufFmt_StackElem* head; size_t fmt_offset; size_t new_count, enc_count; size_t struct_alignment; int is_complex; char enc_type; char new_packmode; char enc_packmode; char is_valid_array; } __Pyx_BufFmt_Context; /* "numpy.pxd":723 * # in Cython to enable them only on the right systems. 
* * ctypedef npy_int8 int8_t # <<<<<<<<<<<<<< * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t */ typedef npy_int8 __pyx_t_5numpy_int8_t; /* "numpy.pxd":724 * * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t # <<<<<<<<<<<<<< * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t */ typedef npy_int16 __pyx_t_5numpy_int16_t; /* "numpy.pxd":725 * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t # <<<<<<<<<<<<<< * ctypedef npy_int64 int64_t * #ctypedef npy_int96 int96_t */ typedef npy_int32 __pyx_t_5numpy_int32_t; /* "numpy.pxd":726 * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t # <<<<<<<<<<<<<< * #ctypedef npy_int96 int96_t * #ctypedef npy_int128 int128_t */ typedef npy_int64 __pyx_t_5numpy_int64_t; /* "numpy.pxd":730 * #ctypedef npy_int128 int128_t * * ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<< * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t */ typedef npy_uint8 __pyx_t_5numpy_uint8_t; /* "numpy.pxd":731 * * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<< * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t */ typedef npy_uint16 __pyx_t_5numpy_uint16_t; /* "numpy.pxd":732 * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<< * ctypedef npy_uint64 uint64_t * #ctypedef npy_uint96 uint96_t */ typedef npy_uint32 __pyx_t_5numpy_uint32_t; /* "numpy.pxd":733 * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<< * #ctypedef npy_uint96 uint96_t * #ctypedef npy_uint128 uint128_t */ typedef npy_uint64 __pyx_t_5numpy_uint64_t; /* "numpy.pxd":737 * #ctypedef npy_uint128 uint128_t * * ctypedef npy_float32 float32_t # <<<<<<<<<<<<<< * ctypedef npy_float64 float64_t * #ctypedef npy_float80 float80_t */ typedef npy_float32 __pyx_t_5numpy_float32_t; /* "numpy.pxd":738 * * ctypedef npy_float32 float32_t * ctypedef npy_float64 float64_t # <<<<<<<<<<<<<< * #ctypedef npy_float80 float80_t * #ctypedef npy_float128 float128_t */ typedef npy_float64 __pyx_t_5numpy_float64_t; /* "numpy.pxd":747 * # The int types are mapped a bit surprising -- * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t # <<<<<<<<<<<<<< * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t */ typedef npy_long __pyx_t_5numpy_int_t; /* "numpy.pxd":748 * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t * ctypedef npy_longlong long_t # <<<<<<<<<<<<<< * ctypedef npy_longlong longlong_t * */ typedef npy_longlong __pyx_t_5numpy_long_t; /* "numpy.pxd":749 * ctypedef npy_long int_t * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<< * * ctypedef npy_ulong uint_t */ typedef npy_longlong __pyx_t_5numpy_longlong_t; /* "numpy.pxd":751 * ctypedef npy_longlong longlong_t * * ctypedef npy_ulong uint_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t */ typedef npy_ulong __pyx_t_5numpy_uint_t; /* "numpy.pxd":752 * * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulonglong_t * */ typedef npy_ulonglong __pyx_t_5numpy_ulong_t; /* "numpy.pxd":753 * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<< * * ctypedef npy_intp intp_t */ typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t; /* "numpy.pxd":755 * ctypedef npy_ulonglong ulonglong_t * * ctypedef npy_intp intp_t # 
<<<<<<<<<<<<<< * ctypedef npy_uintp uintp_t * */ typedef npy_intp __pyx_t_5numpy_intp_t; /* "numpy.pxd":756 * * ctypedef npy_intp intp_t * ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<< * * ctypedef npy_double float_t */ typedef npy_uintp __pyx_t_5numpy_uintp_t; /* "numpy.pxd":758 * ctypedef npy_uintp uintp_t * * ctypedef npy_double float_t # <<<<<<<<<<<<<< * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t */ typedef npy_double __pyx_t_5numpy_float_t; /* "numpy.pxd":759 * * ctypedef npy_double float_t * ctypedef npy_double double_t # <<<<<<<<<<<<<< * ctypedef npy_longdouble longdouble_t * */ typedef npy_double __pyx_t_5numpy_double_t; /* "numpy.pxd":760 * ctypedef npy_double float_t * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cfloat cfloat_t */ typedef npy_longdouble __pyx_t_5numpy_longdouble_t; /* "nipy/algorithms/graph/_graph.pyx":4 * cimport numpy as np * cimport cython * ctypedef np.float64_t DOUBLE # <<<<<<<<<<<<<< * ctypedef np.int_t INT * */ typedef __pyx_t_5numpy_float64_t __pyx_t_4nipy_10algorithms_5graph_6_graph_DOUBLE; /* "nipy/algorithms/graph/_graph.pyx":5 * cimport cython * ctypedef np.float64_t DOUBLE * ctypedef np.int_t INT # <<<<<<<<<<<<<< * * */ typedef __pyx_t_5numpy_int_t __pyx_t_4nipy_10algorithms_5graph_6_graph_INT; #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< float > __pyx_t_float_complex; #else typedef float _Complex __pyx_t_float_complex; #endif #else typedef struct { float real, imag; } __pyx_t_float_complex; #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< double > __pyx_t_double_complex; #else typedef double _Complex __pyx_t_double_complex; #endif #else typedef struct { double real, imag; } __pyx_t_double_complex; #endif /*--- Type declarations ---*/ /* "numpy.pxd":762 * ctypedef npy_longdouble longdouble_t * * ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<< * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t */ typedef npy_cfloat __pyx_t_5numpy_cfloat_t; /* "numpy.pxd":763 * * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<< * ctypedef npy_clongdouble clongdouble_t * */ typedef npy_cdouble __pyx_t_5numpy_cdouble_t; /* "numpy.pxd":764 * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cdouble complex_t */ typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t; /* "numpy.pxd":766 * ctypedef npy_clongdouble clongdouble_t * * ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew1(a): */ typedef npy_cdouble __pyx_t_5numpy_complex_t; #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); /*proto*/ #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define __Pyx_RefNannySetupContext(name, acquire_gil) \ if (acquire_gil) { \ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \ PyGILState_Release(__pyx_gilstate_save); \ } else { \ 
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil) \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext() \ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif /* CYTHON_REFNANNY */ #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name); /*proto*/ static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /*proto*/ static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /*proto*/ static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[], \ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, \ const char* function_name); /*proto*/ static int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, const char *name, int exact); /*proto*/ static CYTHON_INLINE int __Pyx_GetBufferAndValidate(Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack); static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info); static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /*proto*/ #define __Pyx_BufPtrStrided2d(type, buf, i0, s0, i1, s1) (type)((char*)buf + i0 * s0 + i1 * s1) #define __Pyx_BufPtrStrided1d(type, buf, i0, s0) (type)((char*)buf + i0 * s0) static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb); /*proto*/ static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb); /*proto*/ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /*proto*/ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); static CYTHON_INLINE int __Pyx_IterFinish(void); /*proto*/ static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected); /*proto*/ typedef struct { Py_ssize_t shape, strides, suboffsets; } __Pyx_Buf_DimInfo; typedef struct { size_t refcount; Py_buffer pybuffer; } __Pyx_Buffer; typedef 
struct { __Pyx_Buffer *rcbuffer; char *data; __Pyx_Buf_DimInfo diminfo[8]; } __Pyx_LocalBuf_ND; #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); static void __Pyx_ReleaseBuffer(Py_buffer *view); #else #define __Pyx_GetBuffer PyObject_GetBuffer #define __Pyx_ReleaseBuffer PyBuffer_Release #endif static Py_ssize_t __Pyx_zeros[] = {0, 0, 0, 0, 0, 0, 0, 0}; static Py_ssize_t __Pyx_minusones[] = {-1, -1, -1, -1, -1, -1, -1, -1}; static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level); /*proto*/ static CYTHON_INLINE PyObject *__Pyx_PyInt_to_py_npy_long(npy_long); #if CYTHON_CCOMPLEX #ifdef __cplusplus #define __Pyx_CREAL(z) ((z).real()) #define __Pyx_CIMAG(z) ((z).imag()) #else #define __Pyx_CREAL(z) (__real__(z)) #define __Pyx_CIMAG(z) (__imag__(z)) #endif #else #define __Pyx_CREAL(z) ((z).real) #define __Pyx_CIMAG(z) ((z).imag) #endif #if defined(_WIN32) && defined(__cplusplus) && CYTHON_CCOMPLEX #define __Pyx_SET_CREAL(z,x) ((z).real(x)) #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) #else #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) #endif static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float); #if CYTHON_CCOMPLEX #define __Pyx_c_eqf(a, b) ((a)==(b)) #define __Pyx_c_sumf(a, b) ((a)+(b)) #define __Pyx_c_difff(a, b) ((a)-(b)) #define __Pyx_c_prodf(a, b) ((a)*(b)) #define __Pyx_c_quotf(a, b) ((a)/(b)) #define __Pyx_c_negf(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zerof(z) ((z)==(float)0) #define __Pyx_c_conjf(z) (::std::conj(z)) #if 1 #define __Pyx_c_absf(z) (::std::abs(z)) #define __Pyx_c_powf(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zerof(z) ((z)==0) #define __Pyx_c_conjf(z) (conjf(z)) #if 1 #define __Pyx_c_absf(z) (cabsf(z)) #define __Pyx_c_powf(a, b) (cpowf(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex); static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex); #if 1 static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex, __pyx_t_float_complex); #endif #endif static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double); #if CYTHON_CCOMPLEX #define __Pyx_c_eq(a, b) ((a)==(b)) #define __Pyx_c_sum(a, b) ((a)+(b)) #define __Pyx_c_diff(a, b) ((a)-(b)) #define __Pyx_c_prod(a, b) ((a)*(b)) #define __Pyx_c_quot(a, b) ((a)/(b)) #define __Pyx_c_neg(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zero(z) ((z)==(double)0) #define __Pyx_c_conj(z) (::std::conj(z)) #if 1 #define __Pyx_c_abs(z) (::std::abs(z)) #define __Pyx_c_pow(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zero(z) ((z)==0) #define __Pyx_c_conj(z) (conj(z)) #if 1 #define __Pyx_c_abs(z) (cabs(z)) #define __Pyx_c_pow(a, b) (cpow(a, b)) #endif #endif #else static CYTHON_INLINE int 
__Pyx_c_eq(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex); static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex); #if 1 static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex, __pyx_t_double_complex); #endif #endif static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject *); static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject *); static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject *); static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject *); static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject *); static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject *); static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject *); static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject *); static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject *); static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject *); static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject *); static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject *); static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject *); static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject *); static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject *); static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject *); static int __Pyx_check_binary_version(void); #if !defined(__Pyx_PyIdentifier_FromString) #if PY_MAJOR_VERSION < 3 #define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s) #else #define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s) #endif #endif static PyObject *__Pyx_ImportModule(const char *name); /*proto*/ static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict); /*proto*/ typedef struct { int code_line; PyCodeObject* code_object; } __Pyx_CodeObjectCacheEntry; struct __Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); /*proto*/ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/ /* Module declarations from 'cpython.buffer' */ /* Module declarations from 'cpython.ref' */ /* Module declarations from 'libc.stdio' */ /* Module declarations from 'cpython.object' */ /* Module declarations from '__builtin__' */ /* Module declarations from 'cpython.type' */ static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0; /* Module declarations from 'libc.stdlib' */ /* Module declarations from 
'numpy' */ /* Module declarations from 'numpy' */ static PyTypeObject *__pyx_ptype_5numpy_dtype = 0; static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0; static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0; static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0; static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0; static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/ /* Module declarations from 'cython' */ /* Module declarations from 'nipy.algorithms.graph._graph' */ static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_4nipy_10algorithms_5graph_6_graph_DOUBLE = { "DOUBLE", NULL, sizeof(__pyx_t_4nipy_10algorithms_5graph_6_graph_DOUBLE), { 0 }, 0, 'R', 0, 0 }; static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_4nipy_10algorithms_5graph_6_graph_INT = { "INT", NULL, sizeof(__pyx_t_4nipy_10algorithms_5graph_6_graph_INT), { 0 }, 0, IS_UNSIGNED(__pyx_t_4nipy_10algorithms_5graph_6_graph_INT) ? 'U' : 'I', IS_UNSIGNED(__pyx_t_4nipy_10algorithms_5graph_6_graph_INT), 0 }; #define __Pyx_MODULE_NAME "nipy.algorithms.graph._graph" int __pyx_module_is_main_nipy__algorithms__graph___graph = 0; /* Implementation of 'nipy.algorithms.graph._graph' */ static PyObject *__pyx_builtin_range; static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_RuntimeError; static PyObject *__pyx_pf_4nipy_10algorithms_5graph_6_graph_dilation(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_field, PyArrayObject *__pyx_v_idx, PyArrayObject *__pyx_v_neighb); /* proto */ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */ static char __pyx_k_3[] = "ndarray is not C contiguous"; static char __pyx_k_5[] = "ndarray is not Fortran contiguous"; static char __pyx_k_7[] = "Non-native byte order not supported"; static char __pyx_k_9[] = "unknown dtype code in numpy.pxd (%d)"; static char __pyx_k_10[] = "Format string allocated too short, see comment in numpy.pxd"; static char __pyx_k_13[] = "Format string allocated too short."; static char __pyx_k_17[] = "/Users/mb312/dev_trees/nipy/nipy/algorithms/graph/_graph.pyx"; static char __pyx_k_18[] = "nipy.algorithms.graph._graph"; static char __pyx_k__B[] = "B"; static char __pyx_k__H[] = "H"; static char __pyx_k__I[] = "I"; static char __pyx_k__L[] = "L"; static char __pyx_k__O[] = "O"; static char __pyx_k__Q[] = "Q"; static char __pyx_k__b[] = "b"; static char __pyx_k__d[] = "d"; static char __pyx_k__f[] = "f"; static char __pyx_k__g[] = "g"; static char __pyx_k__h[] = "h"; static char __pyx_k__i[] = "i"; static char __pyx_k__j[] = "j"; static char __pyx_k__l[] = "l"; static char __pyx_k__q[] = "q"; static char __pyx_k__Zd[] = "Zd"; static char __pyx_k__Zf[] = "Zf"; static char __pyx_k__Zg[] = "Zg"; static char __pyx_k__np[] = "np"; static char __pyx_k__dim[] = "dim"; static char __pyx_k__idx[] = "idx"; static char __pyx_k__res[] = "res"; static char __pyx_k__fmax[] = "fmax"; static char __pyx_k__field[] = "field"; static char __pyx_k__numpy[] = "numpy"; static char __pyx_k__range[] = "range"; static char __pyx_k__neighb[] = "neighb"; static char __pyx_k____main__[] = "__main__"; static char __pyx_k____test__[] = "__test__"; static char __pyx_k__dilation[] = "dilation"; static char __pyx_k__size_max[] = "size_max"; static char __pyx_k__ValueError[] = "ValueError"; static char __pyx_k__RuntimeError[] = 
"RuntimeError"; static PyObject *__pyx_kp_u_10; static PyObject *__pyx_kp_u_13; static PyObject *__pyx_kp_s_17; static PyObject *__pyx_n_s_18; static PyObject *__pyx_kp_u_3; static PyObject *__pyx_kp_u_5; static PyObject *__pyx_kp_u_7; static PyObject *__pyx_kp_u_9; static PyObject *__pyx_n_s__RuntimeError; static PyObject *__pyx_n_s__ValueError; static PyObject *__pyx_n_s____main__; static PyObject *__pyx_n_s____test__; static PyObject *__pyx_n_s__d; static PyObject *__pyx_n_s__dilation; static PyObject *__pyx_n_s__dim; static PyObject *__pyx_n_s__field; static PyObject *__pyx_n_s__fmax; static PyObject *__pyx_n_s__i; static PyObject *__pyx_n_s__idx; static PyObject *__pyx_n_s__j; static PyObject *__pyx_n_s__neighb; static PyObject *__pyx_n_s__np; static PyObject *__pyx_n_s__numpy; static PyObject *__pyx_n_s__range; static PyObject *__pyx_n_s__res; static PyObject *__pyx_n_s__size_max; static PyObject *__pyx_int_0; static PyObject *__pyx_int_15; static PyObject *__pyx_k_slice_1; static PyObject *__pyx_k_tuple_2; static PyObject *__pyx_k_tuple_4; static PyObject *__pyx_k_tuple_6; static PyObject *__pyx_k_tuple_8; static PyObject *__pyx_k_tuple_11; static PyObject *__pyx_k_tuple_12; static PyObject *__pyx_k_tuple_14; static PyObject *__pyx_k_tuple_15; static PyObject *__pyx_k_codeobj_16; /* Python wrapper */ static PyObject *__pyx_pw_4nipy_10algorithms_5graph_6_graph_1dilation(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_4nipy_10algorithms_5graph_6_graph_1dilation = {__Pyx_NAMESTR("dilation"), (PyCFunction)__pyx_pw_4nipy_10algorithms_5graph_6_graph_1dilation, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(0)}; static PyObject *__pyx_pw_4nipy_10algorithms_5graph_6_graph_1dilation(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_field = 0; PyArrayObject *__pyx_v_idx = 0; PyArrayObject *__pyx_v_neighb = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("dilation (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__field,&__pyx_n_s__idx,&__pyx_n_s__neighb,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__field)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__idx)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("dilation", 1, 3, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__neighb)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("dilation", 1, 3, 3, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "dilation") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { values[0] = 
PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v_field = ((PyArrayObject *)values[0]); __pyx_v_idx = ((PyArrayObject *)values[1]); __pyx_v_neighb = ((PyArrayObject *)values[2]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("dilation", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.algorithms.graph._graph.dilation", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_field), __pyx_ptype_5numpy_ndarray, 1, "field", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_idx), __pyx_ptype_5numpy_ndarray, 1, "idx", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 13; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_neighb), __pyx_ptype_5numpy_ndarray, 1, "neighb", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_4nipy_10algorithms_5graph_6_graph_dilation(__pyx_self, __pyx_v_field, __pyx_v_idx, __pyx_v_neighb); goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/algorithms/graph/_graph.pyx":12 * @cython.cdivision(True) * * def dilation(np.ndarray[DOUBLE, ndim=2] field,\ # <<<<<<<<<<<<<< * np.ndarray[INT, ndim=1] idx,\ * np.ndarray[INT, ndim=1] neighb): */ static PyObject *__pyx_pf_4nipy_10algorithms_5graph_6_graph_dilation(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_field, PyArrayObject *__pyx_v_idx, PyArrayObject *__pyx_v_neighb) { int __pyx_v_size_max; int __pyx_v_dim; int __pyx_v_i; int __pyx_v_j; int __pyx_v_d; __pyx_t_4nipy_10algorithms_5graph_6_graph_DOUBLE __pyx_v_fmax; PyArrayObject *__pyx_v_res = 0; __Pyx_LocalBuf_ND __pyx_pybuffernd_field; __Pyx_Buffer __pyx_pybuffer_field; __Pyx_LocalBuf_ND __pyx_pybuffernd_idx; __Pyx_Buffer __pyx_pybuffer_idx; __Pyx_LocalBuf_ND __pyx_pybuffernd_neighb; __Pyx_Buffer __pyx_pybuffer_neighb; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyArrayObject *__pyx_t_3 = NULL; int __pyx_t_4; int __pyx_t_5; int __pyx_t_6; int __pyx_t_7; int __pyx_t_8; int __pyx_t_9; long __pyx_t_10; __pyx_t_4nipy_10algorithms_5graph_6_graph_INT __pyx_t_11; int __pyx_t_12; int __pyx_t_13; int __pyx_t_14; __pyx_t_4nipy_10algorithms_5graph_6_graph_INT __pyx_t_15; int __pyx_t_16; int __pyx_t_17; int __pyx_t_18; __pyx_t_4nipy_10algorithms_5graph_6_graph_INT __pyx_t_19; int __pyx_t_20; int __pyx_t_21; int __pyx_t_22; int __pyx_t_23; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("dilation", 0); __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; __pyx_pybuffer_field.pybuffer.buf = NULL; __pyx_pybuffer_field.refcount = 0; __pyx_pybuffernd_field.data = NULL; __pyx_pybuffernd_field.rcbuffer = &__pyx_pybuffer_field; __pyx_pybuffer_idx.pybuffer.buf = NULL; __pyx_pybuffer_idx.refcount = 0; 
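  /* The assignments around this point zero-initialise the Py_buffer
   * bookkeeping structs for the typed ndarrays used by dilation()
   * (`field` as 2-D DOUBLE, `idx` and `neighb` as 1-D INT, plus the
   * local `res`); the __Pyx_GetBufferAndValidate() calls that follow
   * acquire and type-check the NumPy buffers for the three array
   * arguments before the computation runs. */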
__pyx_pybuffernd_idx.data = NULL; __pyx_pybuffernd_idx.rcbuffer = &__pyx_pybuffer_idx; __pyx_pybuffer_neighb.pybuffer.buf = NULL; __pyx_pybuffer_neighb.refcount = 0; __pyx_pybuffernd_neighb.data = NULL; __pyx_pybuffernd_neighb.rcbuffer = &__pyx_pybuffer_neighb; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_field.rcbuffer->pybuffer, (PyObject*)__pyx_v_field, &__Pyx_TypeInfo_nn___pyx_t_4nipy_10algorithms_5graph_6_graph_DOUBLE, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_field.diminfo[0].strides = __pyx_pybuffernd_field.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_field.diminfo[0].shape = __pyx_pybuffernd_field.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_field.diminfo[1].strides = __pyx_pybuffernd_field.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_field.diminfo[1].shape = __pyx_pybuffernd_field.rcbuffer->pybuffer.shape[1]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_idx.rcbuffer->pybuffer, (PyObject*)__pyx_v_idx, &__Pyx_TypeInfo_nn___pyx_t_4nipy_10algorithms_5graph_6_graph_INT, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_idx.diminfo[0].strides = __pyx_pybuffernd_idx.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_idx.diminfo[0].shape = __pyx_pybuffernd_idx.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_neighb.rcbuffer->pybuffer, (PyObject*)__pyx_v_neighb, &__Pyx_TypeInfo_nn___pyx_t_4nipy_10algorithms_5graph_6_graph_INT, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_neighb.diminfo[0].strides = __pyx_pybuffernd_neighb.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_neighb.diminfo[0].shape = __pyx_pybuffernd_neighb.rcbuffer->pybuffer.shape[0]; /* "nipy/algorithms/graph/_graph.pyx":15 * np.ndarray[INT, ndim=1] idx,\ * np.ndarray[INT, ndim=1] neighb): * cdef int size_max = field.shape[0] # <<<<<<<<<<<<<< * cdef int dim = field.shape[1] * cdef int i, j, d */ __pyx_v_size_max = (__pyx_v_field->dimensions[0]); /* "nipy/algorithms/graph/_graph.pyx":16 * np.ndarray[INT, ndim=1] neighb): * cdef int size_max = field.shape[0] * cdef int dim = field.shape[1] # <<<<<<<<<<<<<< * cdef int i, j, d * cdef DOUBLE fmax */ __pyx_v_dim = (__pyx_v_field->dimensions[1]); /* "nipy/algorithms/graph/_graph.pyx":19 * cdef int i, j, d * cdef DOUBLE fmax * cdef np.ndarray[DOUBLE, ndim=1] res = 0 * field[:, 0] # <<<<<<<<<<<<<< * for d in range(dim): * for i in range(size_max): */ __pyx_t_1 = PyObject_GetItem(((PyObject *)__pyx_v_field), ((PyObject *)__pyx_k_tuple_2)); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 19; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyNumber_Multiply(__pyx_int_0, __pyx_t_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 19; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 19; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
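/* Illustrative sketch (not generated code): the loops that follow implement a
 * per-column graph dilation for dilation() in nipy.algorithms.graph._graph.
 * `idx` and `neighb` are assumed to encode neighbours in compressed form, so
 * that the neighbours of node i are neighb[idx[i]:idx[i + 1]]; each column of
 * `field` is replaced by the maximum over the node itself and its neighbours.
 * A minimal NumPy equivalent (names such as `dilation_sketch` are ours):
 *
 *     import numpy as np
 *
 *     def dilation_sketch(field, idx, neighb):
 *         # field: (n_nodes, n_features) float64 array, updated in place
 *         # idx: (n_nodes + 1,) ints, neighb: (n_edges,) ints
 *         res = np.empty(field.shape[0])
 *         for d in range(field.shape[1]):
 *             for i in range(field.shape[0]):
 *                 fmax = field[i, d]
 *                 for j in range(idx[i], idx[i + 1]):
 *                     fmax = max(fmax, field[neighb[j], d])
 *                 res[i] = fmax
 *             field[:, d] = res
 *         return res
 *
 * It is called the same way as the compiled dilation(field, idx, neighb).
 */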
__pyx_t_3 = ((PyArrayObject *)__pyx_t_2); { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject*)__pyx_t_3, &__Pyx_TypeInfo_nn___pyx_t_4nipy_10algorithms_5graph_6_graph_DOUBLE, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) { __pyx_v_res = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_res.rcbuffer->pybuffer.buf = NULL; {__pyx_filename = __pyx_f[0]; __pyx_lineno = 19; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } else {__pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; } } __pyx_t_3 = 0; __pyx_v_res = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "nipy/algorithms/graph/_graph.pyx":20 * cdef DOUBLE fmax * cdef np.ndarray[DOUBLE, ndim=1] res = 0 * field[:, 0] * for d in range(dim): # <<<<<<<<<<<<<< * for i in range(size_max): * fmax = field[i, d] */ __pyx_t_4 = __pyx_v_dim; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { __pyx_v_d = __pyx_t_5; /* "nipy/algorithms/graph/_graph.pyx":21 * cdef np.ndarray[DOUBLE, ndim=1] res = 0 * field[:, 0] * for d in range(dim): * for i in range(size_max): # <<<<<<<<<<<<<< * fmax = field[i, d] * for j in range(idx[i], idx[i + 1]): */ __pyx_t_6 = __pyx_v_size_max; for (__pyx_t_7 = 0; __pyx_t_7 < __pyx_t_6; __pyx_t_7+=1) { __pyx_v_i = __pyx_t_7; /* "nipy/algorithms/graph/_graph.pyx":22 * for d in range(dim): * for i in range(size_max): * fmax = field[i, d] # <<<<<<<<<<<<<< * for j in range(idx[i], idx[i + 1]): * if field[neighb[j], d] > fmax: */ __pyx_t_8 = __pyx_v_i; __pyx_t_9 = __pyx_v_d; __pyx_v_fmax = (*__Pyx_BufPtrStrided2d(__pyx_t_4nipy_10algorithms_5graph_6_graph_DOUBLE *, __pyx_pybuffernd_field.rcbuffer->pybuffer.buf, __pyx_t_8, __pyx_pybuffernd_field.diminfo[0].strides, __pyx_t_9, __pyx_pybuffernd_field.diminfo[1].strides)); /* "nipy/algorithms/graph/_graph.pyx":23 * for i in range(size_max): * fmax = field[i, d] * for j in range(idx[i], idx[i + 1]): # <<<<<<<<<<<<<< * if field[neighb[j], d] > fmax: * fmax = field[neighb[j], d] */ __pyx_t_10 = (__pyx_v_i + 1); __pyx_t_11 = (*__Pyx_BufPtrStrided1d(__pyx_t_4nipy_10algorithms_5graph_6_graph_INT *, __pyx_pybuffernd_idx.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_idx.diminfo[0].strides)); __pyx_t_12 = __pyx_v_i; for (__pyx_t_13 = (*__Pyx_BufPtrStrided1d(__pyx_t_4nipy_10algorithms_5graph_6_graph_INT *, __pyx_pybuffernd_idx.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_idx.diminfo[0].strides)); __pyx_t_13 < __pyx_t_11; __pyx_t_13+=1) { __pyx_v_j = __pyx_t_13; /* "nipy/algorithms/graph/_graph.pyx":24 * fmax = field[i, d] * for j in range(idx[i], idx[i + 1]): * if field[neighb[j], d] > fmax: # <<<<<<<<<<<<<< * fmax = field[neighb[j], d] * res[i] = fmax */ __pyx_t_14 = __pyx_v_j; __pyx_t_15 = (*__Pyx_BufPtrStrided1d(__pyx_t_4nipy_10algorithms_5graph_6_graph_INT *, __pyx_pybuffernd_neighb.rcbuffer->pybuffer.buf, __pyx_t_14, __pyx_pybuffernd_neighb.diminfo[0].strides)); __pyx_t_16 = __pyx_v_d; __pyx_t_17 = ((*__Pyx_BufPtrStrided2d(__pyx_t_4nipy_10algorithms_5graph_6_graph_DOUBLE *, __pyx_pybuffernd_field.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_field.diminfo[0].strides, __pyx_t_16, __pyx_pybuffernd_field.diminfo[1].strides)) > __pyx_v_fmax); if (__pyx_t_17) { /* "nipy/algorithms/graph/_graph.pyx":25 * for j in range(idx[i], idx[i + 1]): * if field[neighb[j], d] > fmax: * fmax = field[neighb[j], d] # <<<<<<<<<<<<<< * 
res[i] = fmax * for i in range(size_max): */ __pyx_t_18 = __pyx_v_j; __pyx_t_19 = (*__Pyx_BufPtrStrided1d(__pyx_t_4nipy_10algorithms_5graph_6_graph_INT *, __pyx_pybuffernd_neighb.rcbuffer->pybuffer.buf, __pyx_t_18, __pyx_pybuffernd_neighb.diminfo[0].strides)); __pyx_t_20 = __pyx_v_d; __pyx_v_fmax = (*__Pyx_BufPtrStrided2d(__pyx_t_4nipy_10algorithms_5graph_6_graph_DOUBLE *, __pyx_pybuffernd_field.rcbuffer->pybuffer.buf, __pyx_t_19, __pyx_pybuffernd_field.diminfo[0].strides, __pyx_t_20, __pyx_pybuffernd_field.diminfo[1].strides)); goto __pyx_L9; } __pyx_L9:; } /* "nipy/algorithms/graph/_graph.pyx":26 * if field[neighb[j], d] > fmax: * fmax = field[neighb[j], d] * res[i] = fmax # <<<<<<<<<<<<<< * for i in range(size_max): * field[i, d] = res[i] */ __pyx_t_13 = __pyx_v_i; *__Pyx_BufPtrStrided1d(__pyx_t_4nipy_10algorithms_5graph_6_graph_DOUBLE *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_13, __pyx_pybuffernd_res.diminfo[0].strides) = __pyx_v_fmax; } /* "nipy/algorithms/graph/_graph.pyx":27 * fmax = field[neighb[j], d] * res[i] = fmax * for i in range(size_max): # <<<<<<<<<<<<<< * field[i, d] = res[i] * return res */ __pyx_t_6 = __pyx_v_size_max; for (__pyx_t_7 = 0; __pyx_t_7 < __pyx_t_6; __pyx_t_7+=1) { __pyx_v_i = __pyx_t_7; /* "nipy/algorithms/graph/_graph.pyx":28 * res[i] = fmax * for i in range(size_max): * field[i, d] = res[i] # <<<<<<<<<<<<<< * return res */ __pyx_t_21 = __pyx_v_i; __pyx_t_22 = __pyx_v_i; __pyx_t_23 = __pyx_v_d; *__Pyx_BufPtrStrided2d(__pyx_t_4nipy_10algorithms_5graph_6_graph_DOUBLE *, __pyx_pybuffernd_field.rcbuffer->pybuffer.buf, __pyx_t_22, __pyx_pybuffernd_field.diminfo[0].strides, __pyx_t_23, __pyx_pybuffernd_field.diminfo[1].strides) = (*__Pyx_BufPtrStrided1d(__pyx_t_4nipy_10algorithms_5graph_6_graph_DOUBLE *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_21, __pyx_pybuffernd_res.diminfo[0].strides)); } } /* "nipy/algorithms/graph/_graph.pyx":29 * for i in range(size_max): * field[i, d] = res[i] * return res # <<<<<<<<<<<<<< */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_res)); __pyx_r = ((PyObject *)__pyx_v_res); goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_field.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_idx.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_neighb.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("nipy.algorithms.graph._graph.dilation", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_field.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_idx.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_neighb.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_res); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; 
__Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_pf_5numpy_7ndarray___getbuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":194 * # experimental exception made for __getbuffer__ and __releasebuffer__ * # -- the details of this may change. * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< * # This implementation of getbuffer is geared towards Cython * # requirements, and does not yet fullfill the PEP. */ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_v_copy_shape; int __pyx_v_i; int __pyx_v_ndim; int __pyx_v_endian_detector; int __pyx_v_little_endian; int __pyx_v_t; char *__pyx_v_f; PyArray_Descr *__pyx_v_descr = 0; int __pyx_v_offset; int __pyx_v_hasfields; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; char *__pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getbuffer__", 0); if (__pyx_v_info != NULL) { __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); } /* "numpy.pxd":200 * # of flags * * if info == NULL: return # <<<<<<<<<<<<<< * * cdef int copy_shape, i, ndim */ __pyx_t_1 = (__pyx_v_info == NULL); if (__pyx_t_1) { __pyx_r = 0; goto __pyx_L0; goto __pyx_L3; } __pyx_L3:; /* "numpy.pxd":203 * * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((&endian_detector)[0] != 0) * */ __pyx_v_endian_detector = 1; /* "numpy.pxd":204 * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * * ndim = PyArray_NDIM(self) */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "numpy.pxd":206 * cdef bint little_endian = ((&endian_detector)[0] != 0) * * ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<< * * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_v_ndim = PyArray_NDIM(__pyx_v_self); /* "numpy.pxd":208 * ndim = PyArray_NDIM(self) * * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * copy_shape = 1 * else: */ __pyx_t_1 = ((sizeof(npy_intp)) != (sizeof(Py_ssize_t))); if (__pyx_t_1) { /* "numpy.pxd":209 * * if sizeof(npy_intp) != sizeof(Py_ssize_t): * copy_shape = 1 # <<<<<<<<<<<<<< * else: * copy_shape = 0 */ __pyx_v_copy_shape = 1; goto __pyx_L4; } /*else*/ { /* "numpy.pxd":211 * copy_shape = 1 * else: * copy_shape = 0 # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) */ __pyx_v_copy_shape = 0; } __pyx_L4:; /* "numpy.pxd":213 * copy_shape = 0 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS); if (__pyx_t_1) { /* "numpy.pxd":214 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not C contiguous") * */ __pyx_t_2 = (!PyArray_CHKFLAGS(__pyx_v_self, NPY_C_CONTIGUOUS)); __pyx_t_3 = __pyx_t_2; } else { __pyx_t_3 = __pyx_t_1; } if (__pyx_t_3) { /* 
"numpy.pxd":215 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_4), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L5; } __pyx_L5:; /* "numpy.pxd":217 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ __pyx_t_3 = ((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS); if (__pyx_t_3) { /* "numpy.pxd":218 * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not Fortran contiguous") * */ __pyx_t_1 = (!PyArray_CHKFLAGS(__pyx_v_self, NPY_F_CONTIGUOUS)); __pyx_t_2 = __pyx_t_1; } else { __pyx_t_2 = __pyx_t_3; } if (__pyx_t_2) { /* "numpy.pxd":219 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_6), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L6; } __pyx_L6:; /* "numpy.pxd":221 * raise ValueError(u"ndarray is not Fortran contiguous") * * info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<< * info.ndim = ndim * if copy_shape: */ __pyx_v_info->buf = PyArray_DATA(__pyx_v_self); /* "numpy.pxd":222 * * info.buf = PyArray_DATA(self) * info.ndim = ndim # <<<<<<<<<<<<<< * if copy_shape: * # Allocate new buffer for strides and shape info. */ __pyx_v_info->ndim = __pyx_v_ndim; /* "numpy.pxd":223 * info.buf = PyArray_DATA(self) * info.ndim = ndim * if copy_shape: # <<<<<<<<<<<<<< * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. */ if (__pyx_v_copy_shape) { /* "numpy.pxd":226 * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. * info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) # <<<<<<<<<<<<<< * info.shape = info.strides + ndim * for i in range(ndim): */ __pyx_v_info->strides = ((Py_ssize_t *)malloc((((sizeof(Py_ssize_t)) * ((size_t)__pyx_v_ndim)) * 2))); /* "numpy.pxd":227 * # This is allocated as one block, strides first. 
* info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) * info.shape = info.strides + ndim # <<<<<<<<<<<<<< * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] */ __pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim); /* "numpy.pxd":228 * info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) * info.shape = info.strides + ndim * for i in range(ndim): # <<<<<<<<<<<<<< * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] */ __pyx_t_5 = __pyx_v_ndim; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "numpy.pxd":229 * info.shape = info.strides + ndim * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<< * info.shape[i] = PyArray_DIMS(self)[i] * else: */ (__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]); /* "numpy.pxd":230 * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<< * else: * info.strides = PyArray_STRIDES(self) */ (__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]); } goto __pyx_L7; } /*else*/ { /* "numpy.pxd":232 * info.shape[i] = PyArray_DIMS(self)[i] * else: * info.strides = PyArray_STRIDES(self) # <<<<<<<<<<<<<< * info.shape = PyArray_DIMS(self) * info.suboffsets = NULL */ __pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self)); /* "numpy.pxd":233 * else: * info.strides = PyArray_STRIDES(self) * info.shape = PyArray_DIMS(self) # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) */ __pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(__pyx_v_self)); } __pyx_L7:; /* "numpy.pxd":234 * info.strides = PyArray_STRIDES(self) * info.shape = PyArray_DIMS(self) * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) */ __pyx_v_info->suboffsets = NULL; /* "numpy.pxd":235 * info.shape = PyArray_DIMS(self) * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<< * info.readonly = not PyArray_ISWRITEABLE(self) * */ __pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self); /* "numpy.pxd":236 * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<< * * cdef int t */ __pyx_v_info->readonly = (!PyArray_ISWRITEABLE(__pyx_v_self)); /* "numpy.pxd":239 * * cdef int t * cdef char* f = NULL # <<<<<<<<<<<<<< * cdef dtype descr = self.descr * cdef list stack */ __pyx_v_f = NULL; /* "numpy.pxd":240 * cdef int t * cdef char* f = NULL * cdef dtype descr = self.descr # <<<<<<<<<<<<<< * cdef list stack * cdef int offset */ __pyx_t_4 = ((PyObject *)__pyx_v_self->descr); __Pyx_INCREF(__pyx_t_4); __pyx_v_descr = ((PyArray_Descr *)__pyx_t_4); __pyx_t_4 = 0; /* "numpy.pxd":244 * cdef int offset * * cdef bint hasfields = PyDataType_HASFIELDS(descr) # <<<<<<<<<<<<<< * * if not hasfields and not copy_shape: */ __pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr); /* "numpy.pxd":246 * cdef bint hasfields = PyDataType_HASFIELDS(descr) * * if not hasfields and not copy_shape: # <<<<<<<<<<<<<< * # do not call releasebuffer * info.obj = None */ __pyx_t_2 = (!__pyx_v_hasfields); if (__pyx_t_2) { __pyx_t_3 = (!__pyx_v_copy_shape); __pyx_t_1 = __pyx_t_3; } else { __pyx_t_1 = __pyx_t_2; } if (__pyx_t_1) { /* "numpy.pxd":248 * if not hasfields and not copy_shape: * # do not call releasebuffer * info.obj = None # <<<<<<<<<<<<<< * else: * # need to call 
releasebuffer */ __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = Py_None; goto __pyx_L10; } /*else*/ { /* "numpy.pxd":251 * else: * # need to call releasebuffer * info.obj = self # <<<<<<<<<<<<<< * * if not hasfields: */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); } __pyx_L10:; /* "numpy.pxd":253 * info.obj = self * * if not hasfields: # <<<<<<<<<<<<<< * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or */ __pyx_t_1 = (!__pyx_v_hasfields); if (__pyx_t_1) { /* "numpy.pxd":254 * * if not hasfields: * t = descr.type_num # <<<<<<<<<<<<<< * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): */ __pyx_t_5 = __pyx_v_descr->type_num; __pyx_v_t = __pyx_t_5; /* "numpy.pxd":255 * if not hasfields: * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_1 = (__pyx_v_descr->byteorder == '>'); if (__pyx_t_1) { __pyx_t_2 = __pyx_v_little_endian; } else { __pyx_t_2 = __pyx_t_1; } if (!__pyx_t_2) { /* "numpy.pxd":256 * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" */ __pyx_t_1 = (__pyx_v_descr->byteorder == '<'); if (__pyx_t_1) { __pyx_t_3 = (!__pyx_v_little_endian); __pyx_t_7 = __pyx_t_3; } else { __pyx_t_7 = __pyx_t_1; } __pyx_t_1 = __pyx_t_7; } else { __pyx_t_1 = __pyx_t_2; } if (__pyx_t_1) { /* "numpy.pxd":257 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_8), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L12; } __pyx_L12:; /* "numpy.pxd":258 * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" */ __pyx_t_1 = (__pyx_v_t == NPY_BYTE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__b; goto __pyx_L13; } /* "numpy.pxd":259 * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" */ __pyx_t_1 = (__pyx_v_t == NPY_UBYTE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__B; goto __pyx_L13; } /* "numpy.pxd":260 * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" */ __pyx_t_1 = (__pyx_v_t == NPY_SHORT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__h; goto __pyx_L13; } /* "numpy.pxd":261 * elif t == NPY_UBYTE: f = "B" * elif t == 
NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" */ __pyx_t_1 = (__pyx_v_t == NPY_USHORT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__H; goto __pyx_L13; } /* "numpy.pxd":262 * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" */ __pyx_t_1 = (__pyx_v_t == NPY_INT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__i; goto __pyx_L13; } /* "numpy.pxd":263 * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" */ __pyx_t_1 = (__pyx_v_t == NPY_UINT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__I; goto __pyx_L13; } /* "numpy.pxd":264 * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" */ __pyx_t_1 = (__pyx_v_t == NPY_LONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__l; goto __pyx_L13; } /* "numpy.pxd":265 * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" */ __pyx_t_1 = (__pyx_v_t == NPY_ULONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__L; goto __pyx_L13; } /* "numpy.pxd":266 * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" */ __pyx_t_1 = (__pyx_v_t == NPY_LONGLONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__q; goto __pyx_L13; } /* "numpy.pxd":267 * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" */ __pyx_t_1 = (__pyx_v_t == NPY_ULONGLONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Q; goto __pyx_L13; } /* "numpy.pxd":268 * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" */ __pyx_t_1 = (__pyx_v_t == NPY_FLOAT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__f; goto __pyx_L13; } /* "numpy.pxd":269 * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" */ __pyx_t_1 = (__pyx_v_t == NPY_DOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__d; goto __pyx_L13; } /* "numpy.pxd":270 * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" */ __pyx_t_1 = (__pyx_v_t == NPY_LONGDOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__g; goto __pyx_L13; } /* "numpy.pxd":271 * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" */ __pyx_t_1 = (__pyx_v_t == NPY_CFLOAT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Zf; goto __pyx_L13; } /* "numpy.pxd":272 * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" */ __pyx_t_1 = (__pyx_v_t == NPY_CDOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Zd; goto __pyx_L13; } /* "numpy.pxd":273 * elif t 
== NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f = "O" * else: */ __pyx_t_1 = (__pyx_v_t == NPY_CLONGDOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Zg; goto __pyx_L13; } /* "numpy.pxd":274 * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ __pyx_t_1 = (__pyx_v_t == NPY_OBJECT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__O; goto __pyx_L13; } /*else*/ { /* "numpy.pxd":276 * elif t == NPY_OBJECT: f = "O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * info.format = f * return */ __pyx_t_4 = PyInt_FromLong(__pyx_v_t); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_8 = PyNumber_Remainder(((PyObject *)__pyx_kp_u_9), __pyx_t_4); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_8)); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_t_8)); __Pyx_GIVEREF(((PyObject *)__pyx_t_8)); __pyx_t_8 = 0; __pyx_t_8 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_8, 0, 0, 0); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L13:; /* "numpy.pxd":277 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f # <<<<<<<<<<<<<< * return * else: */ __pyx_v_info->format = __pyx_v_f; /* "numpy.pxd":278 * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f * return # <<<<<<<<<<<<<< * else: * info.format = stdlib.malloc(_buffer_format_string_len) */ __pyx_r = 0; goto __pyx_L0; goto __pyx_L11; } /*else*/ { /* "numpy.pxd":280 * return * else: * info.format = stdlib.malloc(_buffer_format_string_len) # <<<<<<<<<<<<<< * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 */ __pyx_v_info->format = ((char *)malloc(255)); /* "numpy.pxd":281 * else: * info.format = stdlib.malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment # <<<<<<<<<<<<<< * offset = 0 * f = _util_dtypestring(descr, info.format + 1, */ (__pyx_v_info->format[0]) = '^'; /* "numpy.pxd":282 * info.format = stdlib.malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 # <<<<<<<<<<<<<< * f = _util_dtypestring(descr, info.format + 1, * info.format + _buffer_format_string_len, */ __pyx_v_offset = 0; /* "numpy.pxd":285 * f = _util_dtypestring(descr, info.format + 1, * info.format + _buffer_format_string_len, * &offset) # <<<<<<<<<<<<<< * f[0] = c'\0' # Terminate format string * */ __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 255), (&__pyx_v_offset)); if (unlikely(__pyx_t_9 == NULL)) 
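/* For orientation (illustrative snippet, not generated code): for simple
 * native-endian dtypes the branch above exports a single struct-module format
 * character ("b", "B", "h", ..., "Zf", "Zd", "O"); structured dtypes fall
 * through to _util_dtypestring() below. The exported character is visible
 * from Python through the buffer protocol:
 *
 *     import numpy as np
 *
 *     mv = memoryview(np.zeros(3, dtype=np.float64))
 *     print(mv.format)    # 'd', the code chosen above for NPY_DOUBLE
 *     print(mv.itemsize)  # 8
 *     print(memoryview(np.zeros(3, dtype=np.int16)).format)  # 'h'
 */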
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 283; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_f = __pyx_t_9; /* "numpy.pxd":286 * info.format + _buffer_format_string_len, * &offset) * f[0] = c'\0' # Terminate format string # <<<<<<<<<<<<<< * * def __releasebuffer__(ndarray self, Py_buffer* info): */ (__pyx_v_f[0]) = '\x00'; } __pyx_L11:; __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { __Pyx_GOTREF(Py_None); __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; } __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_descr); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__releasebuffer__ (wrapper)", 0); __pyx_pf_5numpy_7ndarray_2__releasebuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info)); __Pyx_RefNannyFinishContext(); } /* "numpy.pxd":288 * f[0] = c'\0' # Terminate format string * * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< * if PyArray_HASFIELDS(self): * stdlib.free(info.format) */ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__releasebuffer__", 0); /* "numpy.pxd":289 * * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_t_1 = PyArray_HASFIELDS(__pyx_v_self); if (__pyx_t_1) { /* "numpy.pxd":290 * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): * stdlib.free(info.format) # <<<<<<<<<<<<<< * if sizeof(npy_intp) != sizeof(Py_ssize_t): * stdlib.free(info.strides) */ free(__pyx_v_info->format); goto __pyx_L3; } __pyx_L3:; /* "numpy.pxd":291 * if PyArray_HASFIELDS(self): * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * stdlib.free(info.strides) * # info.shape was stored after info.strides in the same block */ __pyx_t_1 = ((sizeof(npy_intp)) != (sizeof(Py_ssize_t))); if (__pyx_t_1) { /* "numpy.pxd":292 * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): * stdlib.free(info.strides) # <<<<<<<<<<<<<< * # info.shape was stored after info.strides in the same block * */ free(__pyx_v_info->strides); goto __pyx_L4; } __pyx_L4:; __Pyx_RefNannyFinishContext(); } /* "numpy.pxd":768 * ctypedef npy_cdouble complex_t * * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(1, a) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0); /* "numpy.pxd":769 * * cdef inline object PyArray_MultiIterNew1(a): * 
return PyArray_MultiIterNew(1, a) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew2(a, b): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 769; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":771 * return PyArray_MultiIterNew(1, a) * * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(2, a, b) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0); /* "numpy.pxd":772 * * cdef inline object PyArray_MultiIterNew2(a, b): * return PyArray_MultiIterNew(2, a, b) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew3(a, b, c): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 772; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":774 * return PyArray_MultiIterNew(2, a, b) * * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(3, a, b, c) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0); /* "numpy.pxd":775 * * cdef inline object PyArray_MultiIterNew3(a, b, c): * return PyArray_MultiIterNew(3, a, b, c) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 775; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":777 * return PyArray_MultiIterNew(3, a, b, c) * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(4, a, b, c, d) * */ static CYTHON_INLINE PyObject 
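/* The PyArray_MultiIterNewN() inlines here wrap numpy's C-level broadcasting
 * iterator. A rough Python-level counterpart (illustrative only) is
 * np.broadcast, which combines the shapes of its arguments the same way:
 *
 *     import numpy as np
 *
 *     b = np.broadcast(np.arange(3), np.arange(4)[:, np.newaxis])
 *     print(b.shape)  # (4, 3)
 *     print(b.nd)     # 2
 */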
*__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0); /* "numpy.pxd":778 * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): * return PyArray_MultiIterNew(4, a, b, c, d) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 778; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":780 * return PyArray_MultiIterNew(4, a, b, c, d) * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(5, a, b, c, d, e) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0); /* "numpy.pxd":781 * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): * return PyArray_MultiIterNew(5, a, b, c, d, e) # <<<<<<<<<<<<<< * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 781; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":783 * return PyArray_MultiIterNew(5, a, b, c, d, e) * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< * # Recursive utility function used in __getbuffer__ to get format * # string. The new location in the format string is returned. 
*/ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) { PyArray_Descr *__pyx_v_child = 0; int __pyx_v_endian_detector; int __pyx_v_little_endian; PyObject *__pyx_v_fields = 0; PyObject *__pyx_v_childname = NULL; PyObject *__pyx_v_new_offset = NULL; PyObject *__pyx_v_t = NULL; char *__pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *(*__pyx_t_6)(PyObject *); int __pyx_t_7; int __pyx_t_8; int __pyx_t_9; int __pyx_t_10; long __pyx_t_11; char *__pyx_t_12; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_util_dtypestring", 0); /* "numpy.pxd":790 * cdef int delta_offset * cdef tuple i * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((&endian_detector)[0] != 0) * cdef tuple fields */ __pyx_v_endian_detector = 1; /* "numpy.pxd":791 * cdef tuple i * cdef int endian_detector = 1 * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * cdef tuple fields * */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "numpy.pxd":794 * cdef tuple fields * * for childname in descr.names: # <<<<<<<<<<<<<< * fields = descr.fields[childname] * child, new_offset = fields */ if (unlikely(((PyObject *)__pyx_v_descr->names) == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_1 = ((PyObject *)__pyx_v_descr->names); __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; for (;;) { if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif __Pyx_XDECREF(__pyx_v_childname); __pyx_v_childname = __pyx_t_3; __pyx_t_3 = 0; /* "numpy.pxd":795 * * for childname in descr.names: * fields = descr.fields[childname] # <<<<<<<<<<<<<< * child, new_offset = fields * */ __pyx_t_3 = PyObject_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (!__pyx_t_3) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected tuple, got %.200s", Py_TYPE(__pyx_t_3)->tp_name), 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_XDECREF(((PyObject *)__pyx_v_fields)); __pyx_v_fields = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "numpy.pxd":796 * for childname in descr.names: * fields = descr.fields[childname] * child, new_offset = fields # <<<<<<<<<<<<<< * * if (end - f) - (new_offset - offset[0]) < 15: */ if (likely(PyTuple_CheckExact(((PyObject *)__pyx_v_fields)))) { PyObject* sequence = ((PyObject *)__pyx_v_fields); #if CYTHON_COMPILING_IN_CPYTHON Py_ssize_t size = Py_SIZE(sequence); #else Py_ssize_t size = PySequence_Size(sequence); #endif if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else 
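/* The surrounding loop walks descr.names and unpacks each descr.fields[name]
 * entry into a (child dtype, byte offset) pair. The same structures are
 * visible from Python (illustrative snippet, not generated code):
 *
 *     import numpy as np
 *
 *     dt = np.dtype([('a', np.int32), ('b', np.float64)])
 *     print(dt.names)        # ('a', 'b')
 *     print(dt.fields['b'])  # (dtype('float64'), 4): child dtype, byte offset
 */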
if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); #else __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else if (1) { __Pyx_RaiseNoneNotIterableError(); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } else { Py_ssize_t index = -1; __pyx_t_5 = PyObject_GetIter(((PyObject *)__pyx_v_fields)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = Py_TYPE(__pyx_t_5)->tp_iternext; index = 0; __pyx_t_3 = __pyx_t_6(__pyx_t_5); if (unlikely(!__pyx_t_3)) goto __pyx_L5_unpacking_failed; __Pyx_GOTREF(__pyx_t_3); index = 1; __pyx_t_4 = __pyx_t_6(__pyx_t_5); if (unlikely(!__pyx_t_4)) goto __pyx_L5_unpacking_failed; __Pyx_GOTREF(__pyx_t_4); if (__Pyx_IternextUnpackEndCheck(__pyx_t_6(__pyx_t_5), 2) < 0) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_6 = NULL; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L6_unpacking_done; __pyx_L5_unpacking_failed:; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_6 = NULL; if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_L6_unpacking_done:; } if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_XDECREF(((PyObject *)__pyx_v_child)); __pyx_v_child = ((PyArray_Descr *)__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_v_new_offset); __pyx_v_new_offset = __pyx_t_4; __pyx_t_4 = 0; /* "numpy.pxd":798 * child, new_offset = fields * * if (end - f) - (new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * */ __pyx_t_4 = PyInt_FromLong((__pyx_v_end - __pyx_v_f)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyInt_FromLong((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_3); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyNumber_Subtract(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyObject_RichCompare(__pyx_t_3, __pyx_int_15, Py_LT); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = 
__pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { /* "numpy.pxd":799 * * if (end - f) - (new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_t_5 = PyObject_Call(__pyx_builtin_RuntimeError, ((PyObject *)__pyx_k_tuple_11), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L7; } __pyx_L7:; /* "numpy.pxd":801 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_7 = (__pyx_v_child->byteorder == '>'); if (__pyx_t_7) { __pyx_t_8 = __pyx_v_little_endian; } else { __pyx_t_8 = __pyx_t_7; } if (!__pyx_t_8) { /* "numpy.pxd":802 * * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * # One could encode it in the format string and have Cython */ __pyx_t_7 = (__pyx_v_child->byteorder == '<'); if (__pyx_t_7) { __pyx_t_9 = (!__pyx_v_little_endian); __pyx_t_10 = __pyx_t_9; } else { __pyx_t_10 = __pyx_t_7; } __pyx_t_7 = __pyx_t_10; } else { __pyx_t_7 = __pyx_t_8; } if (__pyx_t_7) { /* "numpy.pxd":803 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_12), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L8; } __pyx_L8:; /* "numpy.pxd":813 * * # Output padding bytes * while offset[0] < new_offset: # <<<<<<<<<<<<<< * f[0] = 120 # "x"; pad byte * f += 1 */ while (1) { __pyx_t_5 = PyInt_FromLong((__pyx_v_offset[0])); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_t_5, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if 
(!__pyx_t_7) break; /* "numpy.pxd":814 * # Output padding bytes * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<< * f += 1 * offset[0] += 1 */ (__pyx_v_f[0]) = 120; /* "numpy.pxd":815 * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte * f += 1 # <<<<<<<<<<<<<< * offset[0] += 1 * */ __pyx_v_f = (__pyx_v_f + 1); /* "numpy.pxd":816 * f[0] = 120 # "x"; pad byte * f += 1 * offset[0] += 1 # <<<<<<<<<<<<<< * * offset[0] += child.itemsize */ __pyx_t_11 = 0; (__pyx_v_offset[__pyx_t_11]) = ((__pyx_v_offset[__pyx_t_11]) + 1); } /* "numpy.pxd":818 * offset[0] += 1 * * offset[0] += child.itemsize # <<<<<<<<<<<<<< * * if not PyDataType_HASFIELDS(child): */ __pyx_t_11 = 0; (__pyx_v_offset[__pyx_t_11]) = ((__pyx_v_offset[__pyx_t_11]) + __pyx_v_child->elsize); /* "numpy.pxd":820 * offset[0] += child.itemsize * * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< * t = child.type_num * if end - f < 5: */ __pyx_t_7 = (!PyDataType_HASFIELDS(__pyx_v_child)); if (__pyx_t_7) { /* "numpy.pxd":821 * * if not PyDataType_HASFIELDS(child): * t = child.type_num # <<<<<<<<<<<<<< * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") */ __pyx_t_3 = PyInt_FromLong(__pyx_v_child->type_num); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 821; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_XDECREF(__pyx_v_t); __pyx_v_t = __pyx_t_3; __pyx_t_3 = 0; /* "numpy.pxd":822 * if not PyDataType_HASFIELDS(child): * t = child.type_num * if end - f < 5: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short.") * */ __pyx_t_7 = ((__pyx_v_end - __pyx_v_f) < 5); if (__pyx_t_7) { /* "numpy.pxd":823 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_t_3 = PyObject_Call(__pyx_builtin_RuntimeError, ((PyObject *)__pyx_k_tuple_14), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L12; } __pyx_L12:; /* "numpy.pxd":826 * * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" */ __pyx_t_3 = PyInt_FromLong(NPY_BYTE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 98; goto __pyx_L13; } /* "numpy.pxd":827 * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" */ __pyx_t_5 = PyInt_FromLong(NPY_UBYTE); 
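/* The integer byte values written in this chain are simply the ASCII codes of
 * the struct-style format characters used earlier (and 120 is the 'x' pad
 * byte emitted for gaps in structured dtypes). A quick illustrative check:
 *
 *     codes = {98: 'b', 66: 'B', 104: 'h', 72: 'H', 105: 'i', 73: 'I',
 *              108: 'l', 76: 'L', 113: 'q', 81: 'Q', 102: 'f', 100: 'd',
 *              103: 'g', 120: 'x'}
 *     assert all(ord(ch) == code for code, ch in codes.items())
 */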
if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 66; goto __pyx_L13; } /* "numpy.pxd":828 * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" */ __pyx_t_3 = PyInt_FromLong(NPY_SHORT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 104; goto __pyx_L13; } /* "numpy.pxd":829 * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" */ __pyx_t_5 = PyInt_FromLong(NPY_USHORT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 72; goto __pyx_L13; } /* "numpy.pxd":830 * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" */ __pyx_t_3 = PyInt_FromLong(NPY_INT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 105; goto __pyx_L13; } /* "numpy.pxd":831 * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t 
== NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" */ __pyx_t_5 = PyInt_FromLong(NPY_UINT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 73; goto __pyx_L13; } /* "numpy.pxd":832 * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" */ __pyx_t_3 = PyInt_FromLong(NPY_LONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 108; goto __pyx_L13; } /* "numpy.pxd":833 * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" */ __pyx_t_5 = PyInt_FromLong(NPY_ULONG); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 76; goto __pyx_L13; } /* "numpy.pxd":834 * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" */ __pyx_t_3 = PyInt_FromLong(NPY_LONGLONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; 
__pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 113; goto __pyx_L13; } /* "numpy.pxd":835 * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" */ __pyx_t_5 = PyInt_FromLong(NPY_ULONGLONG); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 81; goto __pyx_L13; } /* "numpy.pxd":836 * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" */ __pyx_t_3 = PyInt_FromLong(NPY_FLOAT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 102; goto __pyx_L13; } /* "numpy.pxd":837 * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf */ __pyx_t_5 = PyInt_FromLong(NPY_DOUBLE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 100; goto __pyx_L13; } /* "numpy.pxd":838 * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd */ __pyx_t_3 = PyInt_FromLong(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = 
PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 103; goto __pyx_L13; } /* "numpy.pxd":839 * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg */ __pyx_t_5 = PyInt_FromLong(NPY_CFLOAT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 102; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L13; } /* "numpy.pxd":840 * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" */ __pyx_t_3 = PyInt_FromLong(NPY_CDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 100; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L13; } /* "numpy.pxd":841 * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: */ __pyx_t_5 = PyInt_FromLong(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
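/* The if/else-if ladder above is Cython's expansion of numpy.pxd's
   _util_dtypestring: each NumPy type number is compared against the current
   field's type and, on a match, the corresponding struct-module format
   character is written into the buffer format string f, as the interleaved
   source comments show (e.g. NPY_INT -> 105 'i', NPY_DOUBLE -> 100 'd').
   Complex types emit a two-character "Z"-prefixed code, so NPY_CFLOAT writes
   "Zf" and advances f by one extra byte. Any other type number falls through
   to the ValueError raised below. */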
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 103; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L13; } /* "numpy.pxd":842 * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ __pyx_t_3 = PyInt_FromLong(NPY_OBJECT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 79; goto __pyx_L13; } /*else*/ { /* "numpy.pxd":844 * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * f += 1 * else: */ __pyx_t_5 = PyNumber_Remainder(((PyObject *)__pyx_kp_u_9), __pyx_v_t); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_5)); __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_t_5)); __Pyx_GIVEREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L13:; /* "numpy.pxd":845 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * f += 1 # <<<<<<<<<<<<<< * else: * # Cython ignores struct boundary information ("T{...}"), */ __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L11; } /*else*/ { /* "numpy.pxd":849 * # Cython ignores struct boundary information ("T{...}"), * # so don't output it * f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<< * return f * */ __pyx_t_12 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_12 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 849; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_f = __pyx_t_12; } __pyx_L11:; } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "numpy.pxd":850 * # so don't output it * f = _util_dtypestring(child, f, end, offset) * return f # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_f; goto __pyx_L0; __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_child); __Pyx_XDECREF(__pyx_v_fields); 
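/* Cleanup epilogue of _util_dtypestring: on success the advanced format
   pointer f is returned; on error __pyx_r is NULL and a traceback frame named
   "numpy._util_dtypestring" has been recorded. On either path the owned
   temporaries (child, fields, childname, new_offset, t) are released in the
   surrounding __Pyx_XDECREF calls before the function returns. */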
__Pyx_XDECREF(__pyx_v_childname); __Pyx_XDECREF(__pyx_v_new_offset); __Pyx_XDECREF(__pyx_v_t); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":965 * * * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< * cdef PyObject* baseptr * if base is None: */ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) { PyObject *__pyx_v_baseptr; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("set_array_base", 0); /* "numpy.pxd":967 * cdef inline void set_array_base(ndarray arr, object base): * cdef PyObject* baseptr * if base is None: # <<<<<<<<<<<<<< * baseptr = NULL * else: */ __pyx_t_1 = (__pyx_v_base == Py_None); if (__pyx_t_1) { /* "numpy.pxd":968 * cdef PyObject* baseptr * if base is None: * baseptr = NULL # <<<<<<<<<<<<<< * else: * Py_INCREF(base) # important to do this before decref below! */ __pyx_v_baseptr = NULL; goto __pyx_L3; } /*else*/ { /* "numpy.pxd":970 * baseptr = NULL * else: * Py_INCREF(base) # important to do this before decref below! # <<<<<<<<<<<<<< * baseptr = base * Py_XDECREF(arr.base) */ Py_INCREF(__pyx_v_base); /* "numpy.pxd":971 * else: * Py_INCREF(base) # important to do this before decref below! * baseptr = base # <<<<<<<<<<<<<< * Py_XDECREF(arr.base) * arr.base = baseptr */ __pyx_v_baseptr = ((PyObject *)__pyx_v_base); } __pyx_L3:; /* "numpy.pxd":972 * Py_INCREF(base) # important to do this before decref below! * baseptr = base * Py_XDECREF(arr.base) # <<<<<<<<<<<<<< * arr.base = baseptr * */ Py_XDECREF(__pyx_v_arr->base); /* "numpy.pxd":973 * baseptr = base * Py_XDECREF(arr.base) * arr.base = baseptr # <<<<<<<<<<<<<< * * cdef inline object get_array_base(ndarray arr): */ __pyx_v_arr->base = __pyx_v_baseptr; __Pyx_RefNannyFinishContext(); } /* "numpy.pxd":975 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("get_array_base", 0); /* "numpy.pxd":976 * * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: # <<<<<<<<<<<<<< * return None * else: */ __pyx_t_1 = (__pyx_v_arr->base == NULL); if (__pyx_t_1) { /* "numpy.pxd":977 * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: * return None # <<<<<<<<<<<<<< * else: * return arr.base */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_None); __pyx_r = Py_None; goto __pyx_L0; goto __pyx_L3; } /*else*/ { /* "numpy.pxd":979 * return None * else: * return arr.base # <<<<<<<<<<<<<< */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_arr->base)); __pyx_r = ((PyObject *)__pyx_v_arr->base); goto __pyx_L0; } __pyx_L3:; __pyx_r = Py_None; __Pyx_INCREF(Py_None); __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyMethodDef __pyx_methods[] = { {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 static struct PyModuleDef __pyx_moduledef = { PyModuleDef_HEAD_INIT, __Pyx_NAMESTR("_graph"), 0, /* m_doc */ -1, /* m_size */ __pyx_methods /* m_methods */, NULL, /* m_reload */ NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_kp_u_10, __pyx_k_10, sizeof(__pyx_k_10), 0, 1, 0, 0}, {&__pyx_kp_u_13, __pyx_k_13, sizeof(__pyx_k_13), 0, 1, 0, 0}, {&__pyx_kp_s_17, __pyx_k_17, sizeof(__pyx_k_17), 0, 0, 1, 0}, {&__pyx_n_s_18, 
__pyx_k_18, sizeof(__pyx_k_18), 0, 0, 1, 1}, {&__pyx_kp_u_3, __pyx_k_3, sizeof(__pyx_k_3), 0, 1, 0, 0}, {&__pyx_kp_u_5, __pyx_k_5, sizeof(__pyx_k_5), 0, 1, 0, 0}, {&__pyx_kp_u_7, __pyx_k_7, sizeof(__pyx_k_7), 0, 1, 0, 0}, {&__pyx_kp_u_9, __pyx_k_9, sizeof(__pyx_k_9), 0, 1, 0, 0}, {&__pyx_n_s__RuntimeError, __pyx_k__RuntimeError, sizeof(__pyx_k__RuntimeError), 0, 0, 1, 1}, {&__pyx_n_s__ValueError, __pyx_k__ValueError, sizeof(__pyx_k__ValueError), 0, 0, 1, 1}, {&__pyx_n_s____main__, __pyx_k____main__, sizeof(__pyx_k____main__), 0, 0, 1, 1}, {&__pyx_n_s____test__, __pyx_k____test__, sizeof(__pyx_k____test__), 0, 0, 1, 1}, {&__pyx_n_s__d, __pyx_k__d, sizeof(__pyx_k__d), 0, 0, 1, 1}, {&__pyx_n_s__dilation, __pyx_k__dilation, sizeof(__pyx_k__dilation), 0, 0, 1, 1}, {&__pyx_n_s__dim, __pyx_k__dim, sizeof(__pyx_k__dim), 0, 0, 1, 1}, {&__pyx_n_s__field, __pyx_k__field, sizeof(__pyx_k__field), 0, 0, 1, 1}, {&__pyx_n_s__fmax, __pyx_k__fmax, sizeof(__pyx_k__fmax), 0, 0, 1, 1}, {&__pyx_n_s__i, __pyx_k__i, sizeof(__pyx_k__i), 0, 0, 1, 1}, {&__pyx_n_s__idx, __pyx_k__idx, sizeof(__pyx_k__idx), 0, 0, 1, 1}, {&__pyx_n_s__j, __pyx_k__j, sizeof(__pyx_k__j), 0, 0, 1, 1}, {&__pyx_n_s__neighb, __pyx_k__neighb, sizeof(__pyx_k__neighb), 0, 0, 1, 1}, {&__pyx_n_s__np, __pyx_k__np, sizeof(__pyx_k__np), 0, 0, 1, 1}, {&__pyx_n_s__numpy, __pyx_k__numpy, sizeof(__pyx_k__numpy), 0, 0, 1, 1}, {&__pyx_n_s__range, __pyx_k__range, sizeof(__pyx_k__range), 0, 0, 1, 1}, {&__pyx_n_s__res, __pyx_k__res, sizeof(__pyx_k__res), 0, 0, 1, 1}, {&__pyx_n_s__size_max, __pyx_k__size_max, sizeof(__pyx_k__size_max), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_range = __Pyx_GetName(__pyx_b, __pyx_n_s__range); if (!__pyx_builtin_range) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_ValueError = __Pyx_GetName(__pyx_b, __pyx_n_s__ValueError); if (!__pyx_builtin_ValueError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_RuntimeError = __Pyx_GetName(__pyx_b, __pyx_n_s__RuntimeError); if (!__pyx_builtin_RuntimeError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} return 0; __pyx_L1_error:; return -1; } static int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "nipy/algorithms/graph/_graph.pyx":19 * cdef int i, j, d * cdef DOUBLE fmax * cdef np.ndarray[DOUBLE, ndim=1] res = 0 * field[:, 0] # <<<<<<<<<<<<<< * for d in range(dim): * for i in range(size_max): */ __pyx_k_slice_1 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_k_slice_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 19; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_slice_1); __Pyx_GIVEREF(__pyx_k_slice_1); __pyx_k_tuple_2 = PyTuple_New(2); if (unlikely(!__pyx_k_tuple_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 19; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_2); __Pyx_INCREF(__pyx_k_slice_1); PyTuple_SET_ITEM(__pyx_k_tuple_2, 0, __pyx_k_slice_1); __Pyx_GIVEREF(__pyx_k_slice_1); __Pyx_INCREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_k_tuple_2, 1, __pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_2)); /* "numpy.pxd":215 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C 
contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_k_tuple_4 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_4); __Pyx_INCREF(((PyObject *)__pyx_kp_u_3)); PyTuple_SET_ITEM(__pyx_k_tuple_4, 0, ((PyObject *)__pyx_kp_u_3)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_3)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_4)); /* "numpy.pxd":219 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_k_tuple_6 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_6); __Pyx_INCREF(((PyObject *)__pyx_kp_u_5)); PyTuple_SET_ITEM(__pyx_k_tuple_6, 0, ((PyObject *)__pyx_kp_u_5)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_5)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_6)); /* "numpy.pxd":257 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_k_tuple_8 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_8); __Pyx_INCREF(((PyObject *)__pyx_kp_u_7)); PyTuple_SET_ITEM(__pyx_k_tuple_8, 0, ((PyObject *)__pyx_kp_u_7)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_7)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_8)); /* "numpy.pxd":799 * * if (end - f) - (new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_k_tuple_11 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_11)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_11); __Pyx_INCREF(((PyObject *)__pyx_kp_u_10)); PyTuple_SET_ITEM(__pyx_k_tuple_11, 0, ((PyObject *)__pyx_kp_u_10)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_10)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_11)); /* "numpy.pxd":803 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_k_tuple_12 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_12)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_12); __Pyx_INCREF(((PyObject *)__pyx_kp_u_7)); PyTuple_SET_ITEM(__pyx_k_tuple_12, 0, ((PyObject *)__pyx_kp_u_7)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_7)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_12)); /* "numpy.pxd":823 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_k_tuple_14 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_14)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_14); 
__Pyx_INCREF(((PyObject *)__pyx_kp_u_13)); PyTuple_SET_ITEM(__pyx_k_tuple_14, 0, ((PyObject *)__pyx_kp_u_13)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_13)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_14)); /* "nipy/algorithms/graph/_graph.pyx":12 * @cython.cdivision(True) * * def dilation(np.ndarray[DOUBLE, ndim=2] field,\ # <<<<<<<<<<<<<< * np.ndarray[INT, ndim=1] idx,\ * np.ndarray[INT, ndim=1] neighb): */ __pyx_k_tuple_15 = PyTuple_New(10); if (unlikely(!__pyx_k_tuple_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_15); __Pyx_INCREF(((PyObject *)__pyx_n_s__field)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 0, ((PyObject *)__pyx_n_s__field)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__field)); __Pyx_INCREF(((PyObject *)__pyx_n_s__idx)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 1, ((PyObject *)__pyx_n_s__idx)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__idx)); __Pyx_INCREF(((PyObject *)__pyx_n_s__neighb)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 2, ((PyObject *)__pyx_n_s__neighb)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__neighb)); __Pyx_INCREF(((PyObject *)__pyx_n_s__size_max)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 3, ((PyObject *)__pyx_n_s__size_max)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__size_max)); __Pyx_INCREF(((PyObject *)__pyx_n_s__dim)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 4, ((PyObject *)__pyx_n_s__dim)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__dim)); __Pyx_INCREF(((PyObject *)__pyx_n_s__i)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 5, ((PyObject *)__pyx_n_s__i)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__i)); __Pyx_INCREF(((PyObject *)__pyx_n_s__j)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 6, ((PyObject *)__pyx_n_s__j)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__j)); __Pyx_INCREF(((PyObject *)__pyx_n_s__d)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 7, ((PyObject *)__pyx_n_s__d)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__d)); __Pyx_INCREF(((PyObject *)__pyx_n_s__fmax)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 8, ((PyObject *)__pyx_n_s__fmax)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__fmax)); __Pyx_INCREF(((PyObject *)__pyx_n_s__res)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 9, ((PyObject *)__pyx_n_s__res)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__res)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_15)); __pyx_k_codeobj_16 = (PyObject*)__Pyx_PyCode_New(3, 0, 10, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_15, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_17, __pyx_n_s__dilation, 12, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_16)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_InitGlobals(void) { if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __pyx_int_15 = PyInt_FromLong(15); if (unlikely(!__pyx_int_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; return 0; __pyx_L1_error:; return -1; } #if PY_MAJOR_VERSION < 3 PyMODINIT_FUNC init_graph(void); /*proto*/ PyMODINIT_FUNC init_graph(void) #else PyMODINIT_FUNC PyInit__graph(void); /*proto*/ PyMODINIT_FUNC PyInit__graph(void) #endif { PyObject *__pyx_t_1 = NULL; __Pyx_RefNannyDeclarations #if CYTHON_REFNANNY __Pyx_RefNanny = 
__Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit__graph(void)", 0); if ( __Pyx_check_binary_version() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #ifdef __Pyx_CyFunction_USED if (__Pyx_CyFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? */ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4(__Pyx_NAMESTR("_graph"), __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (!PyDict_GetItemString(modules, "nipy.algorithms.graph._graph")) { if (unlikely(PyDict_SetItemString(modules, "nipy.algorithms.graph._graph", __pyx_m) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } } #endif __pyx_b = PyImport_AddModule(__Pyx_NAMESTR(__Pyx_BUILTIN_MODULE_NAME)); if (unlikely(!__pyx_b)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #if CYTHON_COMPILING_IN_PYPY Py_INCREF(__pyx_b); #endif if (__Pyx_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; /*--- Initialize various global constants etc. 
---*/ if (unlikely(__Pyx_InitGlobals() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (__pyx_module_is_main_nipy__algorithms__graph___graph) { if (__Pyx_SetAttrString(__pyx_m, "__name__", __pyx_n_s____main__) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; } /*--- Builtin init code ---*/ if (unlikely(__Pyx_InitCachedBuiltins() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Constants init code ---*/ if (unlikely(__Pyx_InitCachedConstants() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Global init code ---*/ /*--- Variable export code ---*/ /*--- Function export code ---*/ /*--- Type init code ---*/ /*--- Type import code ---*/ __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "type", #if CYTHON_COMPILING_IN_PYPY sizeof(PyTypeObject), #else sizeof(PyHeapTypeObject), #endif 0); if (unlikely(!__pyx_ptype_7cpython_4type_type)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_5numpy_dtype)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 155; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 169; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 178; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 861; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Variable import code ---*/ /*--- Function import code ---*/ /*--- Execution code ---*/ /* "nipy/algorithms/graph/_graph.pyx":1 * import numpy as np # <<<<<<<<<<<<<< * cimport numpy as np * cimport cython */ __pyx_t_1 = __Pyx_Import(((PyObject *)__pyx_n_s__numpy), 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__np, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/graph/_graph.pyx":12 * @cython.cdivision(True) * * def dilation(np.ndarray[DOUBLE, ndim=2] field,\ # <<<<<<<<<<<<<< * np.ndarray[INT, ndim=1] idx,\ * np.ndarray[INT, ndim=1] neighb): */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_10algorithms_5graph_6_graph_1dilation, NULL, __pyx_n_s_18); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__dilation, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; 
__pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/graph/_graph.pyx":1 * import numpy as np # <<<<<<<<<<<<<< * cimport numpy as np * cimport cython */ __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_1)); if (PyObject_SetAttr(__pyx_m, __pyx_n_s____test__, ((PyObject *)__pyx_t_1)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; /* "numpy.pxd":975 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); if (__pyx_m) { __Pyx_AddTraceback("init nipy.algorithms.graph._graph", __pyx_clineno, __pyx_lineno, __pyx_filename); Py_DECREF(__pyx_m); __pyx_m = 0; } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init nipy.algorithms.graph._graph"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if PY_MAJOR_VERSION < 3 return; #else return __pyx_m; #endif } /* Runtime support code */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule((char *)modname); if (!m) goto end; p = PyObject_GetAttrString(m, (char *)"RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* CYTHON_REFNANNY */ static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name) { PyObject *result; result = PyObject_GetAttr(dict, name); if (!result) { if (dict != __pyx_b) { PyErr_Clear(); result = PyObject_GetAttr(__pyx_b, name); } if (!result) { PyErr_SetObject(PyExc_NameError, name); } } return result; } static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%s() takes %s %" CYTHON_FORMAT_SSIZE_T "d positional argument%s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? 
"" : "s", num_found); } static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 
1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%s() got an unexpected keyword argument '%s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } static int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, const char *name, int exact) { if (!type) { PyErr_Format(PyExc_SystemError, "Missing type object"); return 0; } if (none_allowed && obj == Py_None) return 1; else if (exact) { if (Py_TYPE(obj) == type) return 1; } else { if (PyObject_TypeCheck(obj, type)) return 1; } PyErr_Format(PyExc_TypeError, "Argument '%s' has incorrect type (expected %s, got %s)", name, type->tp_name, Py_TYPE(obj)->tp_name); return 0; } static CYTHON_INLINE int __Pyx_IsLittleEndian(void) { unsigned int n = 1; return *(unsigned char*)(&n) != 0; } static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type) { stack[0].field = &ctx->root; stack[0].parent_offset = 0; ctx->root.type = type; ctx->root.name = "buffer dtype"; ctx->root.offset = 0; ctx->head = stack; ctx->head->field = &ctx->root; ctx->fmt_offset = 0; ctx->head->parent_offset = 0; ctx->new_packmode = '@'; ctx->enc_packmode = '@'; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->is_complex = 0; ctx->is_valid_array = 0; ctx->struct_alignment = 0; while (type->typegroup == 'S') { ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = 0; type = type->fields->type; } } static int __Pyx_BufFmt_ParseNumber(const char** ts) { int count; const char* t = *ts; if (*t < '0' || *t > '9') { return -1; } else { count = *t++ - '0'; while (*t >= '0' && *t < '9') { count *= 10; count += *t++ - '0'; } } *ts = t; return count; } static int __Pyx_BufFmt_ExpectNumber(const char **ts) { int number = __Pyx_BufFmt_ParseNumber(ts); if (number == -1) /* First char was not a digit */ PyErr_Format(PyExc_ValueError,\ "Does not understand character buffer dtype format string ('%c')", **ts); return number; } static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { PyErr_Format(PyExc_ValueError, "Unexpected format string character: '%c'", ch); } static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { switch (ch) { case 'c': return "'char'"; case 'b': return "'signed char'"; case 'B': return "'unsigned char'"; case 'h': return "'short'"; case 'H': return "'unsigned short'"; case 'i': return "'int'"; case 'I': return "'unsigned int'"; case 'l': return "'long'"; case 'L': return "'unsigned long'"; case 'q': return "'long long'"; case 'Q': return "'unsigned long long'"; case 'f': return (is_complex ? "'complex float'" : "'float'"); case 'd': return (is_complex ? "'complex double'" : "'double'"); case 'g': return (is_complex ? 
"'complex long double'" : "'long double'"); case 'T': return "a struct"; case 'O': return "Python object"; case 'P': return "a pointer"; case 's': case 'p': return "a string"; case 0: return "end"; default: return "unparseable format string"; } } static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return 2; case 'i': case 'I': case 'l': case 'L': return 4; case 'q': case 'Q': return 8; case 'f': return (is_complex ? 8 : 4); case 'd': return (is_complex ? 16 : 8); case 'g': { PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); return 0; } case 'O': case 'P': return sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { switch (ch) { case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(short); case 'i': case 'I': return sizeof(int); case 'l': case 'L': return sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(float) * (is_complex ? 2 : 1); case 'd': return sizeof(double) * (is_complex ? 2 : 1); case 'g': return sizeof(long double) * (is_complex ? 2 : 1); case 'O': case 'P': return sizeof(void*); default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } typedef struct { char c; short x; } __Pyx_st_short; typedef struct { char c; int x; } __Pyx_st_int; typedef struct { char c; long x; } __Pyx_st_long; typedef struct { char c; float x; } __Pyx_st_float; typedef struct { char c; double x; } __Pyx_st_double; typedef struct { char c; long double x; } __Pyx_st_longdouble; typedef struct { char c; void *x; } __Pyx_st_void_p; #ifdef HAVE_LONG_LONG typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_st_float) - sizeof(float); case 'd': return sizeof(__Pyx_st_double) - sizeof(double); case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } /* These are for computing the padding at the end of the struct to align on the first member of the struct. This will probably the same as above, but we don't have any guarantees. 
*/ typedef struct { short x; char c; } __Pyx_pad_short; typedef struct { int x; char c; } __Pyx_pad_int; typedef struct { long x; char c; } __Pyx_pad_long; typedef struct { float x; char c; } __Pyx_pad_float; typedef struct { double x; char c; } __Pyx_pad_double; typedef struct { long double x; char c; } __Pyx_pad_longdouble; typedef struct { void *x; char c; } __Pyx_pad_void_p; #ifdef HAVE_LONG_LONG typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { switch (ch) { case 'c': return 'H'; case 'b': case 'h': case 'i': case 'l': case 'q': case 's': case 'p': return 'I'; case 'B': case 'H': case 'I': case 'L': case 'Q': return 'U'; case 'f': case 'd': case 'g': return (is_complex ? 'C' : 'R'); case 'O': return 'O'; case 'P': return 'P'; default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { if (ctx->head == NULL || ctx->head->field == &ctx->root) { const char* expected; const char* quote; if (ctx->head == NULL) { expected = "end"; quote = ""; } else { expected = ctx->head->field->type->name; quote = "'"; } PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected %s%s%s but got %s", quote, expected, quote, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); } else { __Pyx_StructField* field = ctx->head->field; __Pyx_StructField* parent = (ctx->head - 1)->field; PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), parent->type->name, field->name); } } static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { char group; size_t size, offset, arraysize = 1; if (ctx->enc_type == 0) return 0; if (ctx->head->field->type->arraysize[0]) { int i, ndim = 0; if (ctx->enc_type == 's' || ctx->enc_type == 'p') { ctx->is_valid_array = ctx->head->field->type->ndim == 1; ndim = 1; if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %zu", ctx->head->field->type->arraysize[0], ctx->enc_count); return -1; } } if (!ctx->is_valid_array) { PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", ctx->head->field->type->ndim, ndim); return -1; } for (i = 0; i < ctx->head->field->type->ndim; i++) { arraysize *= ctx->head->field->type->arraysize[i]; } ctx->is_valid_array = 0; ctx->enc_count = 1; } group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); do { __Pyx_StructField* field = ctx->head->field; __Pyx_TypeInfo* type = field->type; if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { size = 
__Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); } else { size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); } if (ctx->enc_packmode == '@') { size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); size_t align_mod_offset; if (align_at == 0) return -1; align_mod_offset = ctx->fmt_offset % align_at; if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; if (ctx->struct_alignment == 0) ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, ctx->is_complex); } if (type->size != size || type->typegroup != group) { if (type->typegroup == 'C' && type->fields != NULL) { size_t parent_offset = ctx->head->parent_offset + field->offset; ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = parent_offset; continue; } if ((type->typegroup == 'H' || group == 'H') && type->size == size) { } else { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } } offset = ctx->head->parent_offset + field->offset; if (ctx->fmt_offset != offset) { PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); return -1; } ctx->fmt_offset += size; if (arraysize) ctx->fmt_offset += (arraysize - 1) * size; --ctx->enc_count; /* Consume from buffer string */ while (1) { if (field == &ctx->root) { ctx->head = NULL; if (ctx->enc_count != 0) { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } break; /* breaks both loops as ctx->enc_count == 0 */ } ctx->head->field = ++field; if (field->type == NULL) { --ctx->head; field = ctx->head->field; continue; } else if (field->type->typegroup == 'S') { size_t parent_offset = ctx->head->parent_offset + field->offset; if (field->type->fields->type == NULL) continue; /* empty struct */ field = field->type->fields; ++ctx->head; ctx->head->field = field; ctx->head->parent_offset = parent_offset; break; } else { break; } } } while (ctx->enc_count); ctx->enc_type = 0; ctx->is_complex = 0; return 0; } static CYTHON_INLINE PyObject * __pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) { const char *ts = *tsp; int i = 0, number; int ndim = ctx->head->field->type->ndim; ; ++ts; if (ctx->new_count != 1) { PyErr_SetString(PyExc_ValueError, "Cannot handle repeated arrays in format string"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; while (*ts && *ts != ')') { if (isspace(*ts)) continue; number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i]) return PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %d", ctx->head->field->type->arraysize[i], number); if (*ts != ',' && *ts != ')') return PyErr_Format(PyExc_ValueError, "Expected a comma in format string, got '%c'", *ts); if (*ts == ',') ts++; i++; } if (i != ndim) return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d", ctx->head->field->type->ndim, i); if (!*ts) { PyErr_SetString(PyExc_ValueError, "Unexpected end of format string, expected ')'"); return NULL; } ctx->is_valid_array = 1; ctx->new_count = 1; *tsp = ++ts; return Py_None; } static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { int got_Z = 0; while (1) { switch(*ts) { case 0: if (ctx->enc_type != 0 && ctx->head == NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; if 
(ctx->head != NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } return ts; case ' ': case 10: case 13: ++ts; break; case '<': if (!__Pyx_IsLittleEndian()) { PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '>': case '!': if (__Pyx_IsLittleEndian()) { PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '=': case '@': case '^': ctx->new_packmode = *ts++; break; case 'T': /* substruct */ { const char* ts_after_sub; size_t i, struct_count = ctx->new_count; size_t struct_alignment = ctx->struct_alignment; ctx->new_count = 1; ++ts; if (*ts != '{') { PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; /* Erase processed last struct element */ ctx->enc_count = 0; ctx->struct_alignment = 0; ++ts; ts_after_sub = ts; for (i = 0; i != struct_count; ++i) { ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); if (!ts_after_sub) return NULL; } ts = ts_after_sub; if (struct_alignment) ctx->struct_alignment = struct_alignment; } break; case '}': /* end of substruct; either repeat or move on */ { size_t alignment = ctx->struct_alignment; ++ts; if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; /* Erase processed last struct element */ if (alignment && ctx->fmt_offset % alignment) { ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); } } return ts; case 'x': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->fmt_offset += ctx->new_count; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->enc_packmode = ctx->new_packmode; ++ts; break; case 'Z': got_Z = 1; ++ts; if (*ts != 'f' && *ts != 'd' && *ts != 'g') { __Pyx_BufFmt_RaiseUnexpectedChar('Z'); return NULL; } /* fall through */ case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': case 'l': case 'L': case 'q': case 'Q': case 'f': case 'd': case 'g': case 'O': case 's': case 'p': if (ctx->enc_type == *ts && got_Z == ctx->is_complex && ctx->enc_packmode == ctx->new_packmode) { ctx->enc_count += ctx->new_count; } else { if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_count = ctx->new_count; ctx->enc_packmode = ctx->new_packmode; ctx->enc_type = *ts; ctx->is_complex = got_Z; } ++ts; ctx->new_count = 1; got_Z = 0; break; case ':': ++ts; while(*ts != ':') ++ts; ++ts; break; case '(': if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; break; default: { int number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; ctx->new_count = (size_t)number; } } } } static CYTHON_INLINE void __Pyx_ZeroBuffer(Py_buffer* buf) { buf->buf = NULL; buf->obj = NULL; buf->strides = __Pyx_zeros; buf->shape = __Pyx_zeros; buf->suboffsets = __Pyx_minusones; } static CYTHON_INLINE int __Pyx_GetBufferAndValidate( Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack) { if (obj == Py_None || obj == NULL) { __Pyx_ZeroBuffer(buf); return 0; } buf->buf = NULL; if (__Pyx_GetBuffer(obj, buf, flags) == -1) goto fail; if (buf->ndim != nd) { PyErr_Format(PyExc_ValueError, "Buffer has wrong number of dimensions (expected %d, got %d)", nd, buf->ndim); goto fail; } if (!cast) { __Pyx_BufFmt_Context ctx; __Pyx_BufFmt_Init(&ctx, stack, dtype); if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; } if 
((unsigned)buf->itemsize != dtype->size) { PyErr_Format(PyExc_ValueError, "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "d byte%s) does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "d byte%s)", buf->itemsize, (buf->itemsize > 1) ? "s" : "", dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? "s" : ""); goto fail; } if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones; return 0; fail:; __Pyx_ZeroBuffer(buf); return -1; } static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) { if (info->buf == NULL) return; if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL; __Pyx_ReleaseBuffer(info); } static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { if (unlikely(!type)) { PyErr_Format(PyExc_SystemError, "Missing type object"); return 0; } if (likely(PyObject_TypeCheck(obj, type))) return 1; PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", Py_TYPE(obj)->tp_name, type->tp_name); return 0; } static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb) { #if CYTHON_COMPILING_IN_CPYTHON PyObject *tmp_type, *tmp_value, *tmp_tb; PyThreadState *tstate = PyThreadState_GET(); tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_Restore(type, value, tb); #endif } static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb) { #if CYTHON_COMPILING_IN_CPYTHON PyThreadState *tstate = PyThreadState_GET(); *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(type, value, tb); #endif } #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } #if PY_VERSION_HEX < 0x02050000 if (PyClass_Check(type)) { #else if (PyType_Check(type)) { #endif #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; #if PY_VERSION_HEX < 0x02050000 if (PyInstance_Check(type)) { type = (PyObject*) ((PyInstanceObject*)type)->in_class; Py_INCREF(type); } else { type = 0; PyErr_SetString(PyExc_TypeError, "raise: exception must be an old-style class or instance"); goto raise_error; } #else type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } #endif } __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else /* Python 3+ */ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { 
PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyEval_CallObject(type, args); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } if (cause && cause != Py_None) { PyObject *fixed_cause; if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { PyThreadState *tstate = PyThreadState_GET(); PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } } bad: Py_XDECREF(owned_instance); return; } #endif static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); } static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more than %" CYTHON_FORMAT_SSIZE_T "d value%s to unpack", index, (index == 1) ? 
"" : "s"); } static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); } static CYTHON_INLINE int __Pyx_IterFinish(void) { #if CYTHON_COMPILING_IN_CPYTHON PyThreadState *tstate = PyThreadState_GET(); PyObject* exc_type = tstate->curexc_type; if (unlikely(exc_type)) { if (likely(exc_type == PyExc_StopIteration) || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration)) { PyObject *exc_value, *exc_tb; exc_value = tstate->curexc_value; exc_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; Py_DECREF(exc_type); Py_XDECREF(exc_value); Py_XDECREF(exc_tb); return 0; } else { return -1; } } return 0; #else if (unlikely(PyErr_Occurred())) { if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) { PyErr_Clear(); return 0; } else { return -1; } } return 0; #endif } static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected) { if (unlikely(retval)) { Py_DECREF(retval); __Pyx_RaiseTooManyValuesError(expected); return -1; } else { return __Pyx_IterFinish(); } return 0; } #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { CYTHON_UNUSED PyObject *getbuffer_cobj; #if PY_VERSION_HEX >= 0x02060000 if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); #endif if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) return __pyx_pw_5numpy_7ndarray_1__getbuffer__(obj, view, flags); #if PY_VERSION_HEX < 0x02060000 if (obj->ob_type->tp_dict && (getbuffer_cobj = PyMapping_GetItemString(obj->ob_type->tp_dict, "__pyx_getbuffer"))) { getbufferproc func; #if PY_VERSION_HEX >= 0x02070000 && !(PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION == 0) func = (getbufferproc) PyCapsule_GetPointer(getbuffer_cobj, "getbuffer(obj, view, flags)"); #else func = (getbufferproc) PyCObject_AsVoidPtr(getbuffer_cobj); #endif Py_DECREF(getbuffer_cobj); if (!func) goto fail; return func(obj, view, flags); } else { PyErr_Clear(); } #endif PyErr_Format(PyExc_TypeError, "'%100s' does not have the buffer interface", Py_TYPE(obj)->tp_name); #if PY_VERSION_HEX < 0x02060000 fail: #endif return -1; } static void __Pyx_ReleaseBuffer(Py_buffer *view) { PyObject *obj = view->obj; CYTHON_UNUSED PyObject *releasebuffer_cobj; if (!obj) return; #if PY_VERSION_HEX >= 0x02060000 if (PyObject_CheckBuffer(obj)) { PyBuffer_Release(view); return; } #endif if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) { __pyx_pw_5numpy_7ndarray_3__releasebuffer__(obj, view); return; } #if PY_VERSION_HEX < 0x02060000 if (obj->ob_type->tp_dict && (releasebuffer_cobj = PyMapping_GetItemString(obj->ob_type->tp_dict, "__pyx_releasebuffer"))) { releasebufferproc func; #if PY_VERSION_HEX >= 0x02070000 && !(PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION == 0) func = (releasebufferproc) PyCapsule_GetPointer(releasebuffer_cobj, "releasebuffer(obj, view)"); #else func = (releasebufferproc) PyCObject_AsVoidPtr(releasebuffer_cobj); #endif Py_DECREF(releasebuffer_cobj); if (!func) goto fail; func(obj, view); return; } else { PyErr_Clear(); } #endif goto nofail; #if PY_VERSION_HEX < 0x02060000 fail: #endif PyErr_WriteUnraisable(obj); nofail: Py_DECREF(obj); view->obj = NULL; } #endif /* PY_MAJOR_VERSION < 3 */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level) { PyObject *py_import = 0; PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; py_import = 
__Pyx_GetAttrString(__pyx_b, "__import__"); if (!py_import) goto bad; if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; #if PY_VERSION_HEX >= 0x02050000 { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if (strchr(__Pyx_MODULE_NAME, '.')) { /* try package relative import first */ PyObject *py_level = PyInt_FromLong(1); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; /* try absolute import on failure */ } #endif if (!module) { PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); } } #else if (level>0) { PyErr_SetString(PyExc_RuntimeError, "Relative import is not supported for Python <=2.4."); goto bad; } module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, NULL); #endif bad: Py_XDECREF(empty_list); Py_XDECREF(py_import); Py_XDECREF(empty_dict); return module; } static CYTHON_INLINE PyObject *__Pyx_PyInt_to_py_npy_long(npy_long val) { const npy_long neg_one = (npy_long)-1, const_zero = (npy_long)0; const int is_unsigned = const_zero < neg_one; if ((sizeof(npy_long) == sizeof(char)) || (sizeof(npy_long) == sizeof(short))) { return PyInt_FromLong((long)val); } else if ((sizeof(npy_long) == sizeof(int)) || (sizeof(npy_long) == sizeof(long))) { if (is_unsigned) return PyLong_FromUnsignedLong((unsigned long)val); else return PyInt_FromLong((long)val); } else if (sizeof(npy_long) == sizeof(PY_LONG_LONG)) { if (is_unsigned) return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG)val); else return PyLong_FromLongLong((PY_LONG_LONG)val); } else { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; return _PyLong_FromByteArray(bytes, sizeof(npy_long), little, !is_unsigned); } } #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return ::std::complex< float >(x, y); } #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return x + y*(__pyx_t_float_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { __pyx_t_float_complex z; z.real = x; z.imag = y; return z; } #endif #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex a, __pyx_t_float_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } static CYTHON_INLINE __pyx_t_float_complex 
__Pyx_c_quotf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float denom = b.real * b.real + b.imag * b.imag; z.real = (a.real * b.real + a.imag * b.imag) / denom; z.imag = (a.imag * b.real - a.real * b.imag) / denom; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrtf(z.real*z.real + z.imag*z.imag); #else return hypotf(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { float denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(a, a); case 3: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(z, a); case 4: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } r = a.real; theta = 0; } else { r = __Pyx_c_absf(a); theta = atan2f(a.imag, a.real); } lnr = logf(r); z_r = expf(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cosf(z_theta); z.imag = z_r * sinf(z_theta); return z; } #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return ::std::complex< double >(x, y); } #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return x + y*(__pyx_t_double_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { __pyx_t_double_complex z; z.real = x; z.imag = y; return z; } #endif #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex a, __pyx_t_double_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double denom = b.real * b.real + b.imag * b.imag; z.real = (a.real * b.real + a.imag * b.imag) / denom; z.imag = (a.imag * b.real - a.real * b.imag) / denom; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex a) { 
__pyx_t_double_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrt(z.real*z.real + z.imag*z.imag); #else return hypot(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { double denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(a, a); case 3: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(z, a); case 4: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } r = a.real; theta = 0; } else { r = __Pyx_c_abs(a); theta = atan2(a.imag, a.real); } lnr = log(r); z_r = exp(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cos(z_theta); z.imag = z_r * sin(z_theta); return z; } #endif #endif static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject* x) { const unsigned char neg_one = (unsigned char)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(unsigned char) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(unsigned char)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to unsigned char" : "value too large to convert to unsigned char"); } return (unsigned char)-1; } return (unsigned char)val; } return (unsigned char)__Pyx_PyInt_AsUnsignedLong(x); } static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject* x) { const unsigned short neg_one = (unsigned short)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(unsigned short) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(unsigned short)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to unsigned short" : "value too large to convert to unsigned short"); } return (unsigned short)-1; } return (unsigned short)val; } return (unsigned short)__Pyx_PyInt_AsUnsignedLong(x); } static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject* x) { const unsigned int neg_one = (unsigned int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(unsigned int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(unsigned int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? 
"can't convert negative value to unsigned int" : "value too large to convert to unsigned int"); } return (unsigned int)-1; } return (unsigned int)val; } return (unsigned int)__Pyx_PyInt_AsUnsignedLong(x); } static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject* x) { const char neg_one = (char)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(char) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(char)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to char" : "value too large to convert to char"); } return (char)-1; } return (char)val; } return (char)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject* x) { const short neg_one = (short)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(short) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(short)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to short" : "value too large to convert to short"); } return (short)-1; } return (short)val; } return (short)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject* x) { const int neg_one = (int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to int" : "value too large to convert to int"); } return (int)-1; } return (int)val; } return (int)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject* x) { const signed char neg_one = (signed char)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(signed char) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(signed char)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to signed char" : "value too large to convert to signed char"); } return (signed char)-1; } return (signed char)val; } return (signed char)__Pyx_PyInt_AsSignedLong(x); } static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject* x) { const signed short neg_one = (signed short)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(signed short) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(signed short)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to signed short" : "value too large to convert to signed short"); } return (signed short)-1; } return (signed short)val; } return (signed short)__Pyx_PyInt_AsSignedLong(x); } static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject* x) { const signed int neg_one = (signed int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(signed int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(signed int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? 
"can't convert negative value to signed int" : "value too large to convert to signed int"); } return (signed int)-1; } return (signed int)val; } return (signed int)__Pyx_PyInt_AsSignedLong(x); } static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject* x) { const int neg_one = (int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to int" : "value too large to convert to int"); } return (int)-1; } return (int)val; } return (int)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject* x) { const unsigned long neg_one = (unsigned long)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned long"); return (unsigned long)-1; } return (unsigned long)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned long"); return (unsigned long)-1; } return (unsigned long)PyLong_AsUnsignedLong(x); } else { return (unsigned long)PyLong_AsLong(x); } } else { unsigned long val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (unsigned long)-1; val = __Pyx_PyInt_AsUnsignedLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject* x) { const unsigned PY_LONG_LONG neg_one = (unsigned PY_LONG_LONG)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned PY_LONG_LONG"); return (unsigned PY_LONG_LONG)-1; } return (unsigned PY_LONG_LONG)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned PY_LONG_LONG"); return (unsigned PY_LONG_LONG)-1; } return (unsigned PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); } else { return (unsigned PY_LONG_LONG)PyLong_AsLongLong(x); } } else { unsigned PY_LONG_LONG val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (unsigned PY_LONG_LONG)-1; val = __Pyx_PyInt_AsUnsignedLongLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject* x) { const long neg_one = (long)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long)-1; } return (long)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long)-1; } return (long)PyLong_AsUnsignedLong(x); } else { return (long)PyLong_AsLong(x); } } else { long val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (long)-1; val = __Pyx_PyInt_AsLong(tmp); Py_DECREF(tmp); return val; } 
} static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject* x) { const PY_LONG_LONG neg_one = (PY_LONG_LONG)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to PY_LONG_LONG"); return (PY_LONG_LONG)-1; } return (PY_LONG_LONG)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to PY_LONG_LONG"); return (PY_LONG_LONG)-1; } return (PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); } else { return (PY_LONG_LONG)PyLong_AsLongLong(x); } } else { PY_LONG_LONG val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (PY_LONG_LONG)-1; val = __Pyx_PyInt_AsLongLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject* x) { const signed long neg_one = (signed long)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed long"); return (signed long)-1; } return (signed long)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed long"); return (signed long)-1; } return (signed long)PyLong_AsUnsignedLong(x); } else { return (signed long)PyLong_AsLong(x); } } else { signed long val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (signed long)-1; val = __Pyx_PyInt_AsSignedLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject* x) { const signed PY_LONG_LONG neg_one = (signed PY_LONG_LONG)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed PY_LONG_LONG"); return (signed PY_LONG_LONG)-1; } return (signed PY_LONG_LONG)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed PY_LONG_LONG"); return (signed PY_LONG_LONG)-1; } return (signed PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); } else { return (signed PY_LONG_LONG)PyLong_AsLongLong(x); } } else { signed PY_LONG_LONG val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (signed PY_LONG_LONG)-1; val = __Pyx_PyInt_AsSignedLongLong(tmp); Py_DECREF(tmp); return val; } } static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); #if PY_VERSION_HEX < 0x02050000 return PyErr_Warn(NULL, message); #else return PyErr_WarnEx(NULL, message, 1); #endif } return 0; } #ifndef __PYX_HAVE_RT_ImportModule #define __PYX_HAVE_RT_ImportModule static PyObject 
*__Pyx_ImportModule(const char *name) { PyObject *py_name = 0; PyObject *py_module = 0; py_name = __Pyx_PyIdentifier_FromString(name); if (!py_name) goto bad; py_module = PyImport_Import(py_name); Py_DECREF(py_name); return py_module; bad: Py_XDECREF(py_name); return 0; } #endif #ifndef __PYX_HAVE_RT_ImportType #define __PYX_HAVE_RT_ImportType static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict) { PyObject *py_module = 0; PyObject *result = 0; PyObject *py_name = 0; char warning[200]; py_module = __Pyx_ImportModule(module_name); if (!py_module) goto bad; py_name = __Pyx_PyIdentifier_FromString(class_name); if (!py_name) goto bad; result = PyObject_GetAttr(py_module, py_name); Py_DECREF(py_name); py_name = 0; Py_DECREF(py_module); py_module = 0; if (!result) goto bad; if (!PyType_Check(result)) { PyErr_Format(PyExc_TypeError, "%s.%s is not a type object", module_name, class_name); goto bad; } if (!strict && (size_t)((PyTypeObject *)result)->tp_basicsize > size) { PyOS_snprintf(warning, sizeof(warning), "%s.%s size changed, may indicate binary incompatibility", module_name, class_name); #if PY_VERSION_HEX < 0x02050000 if (PyErr_Warn(NULL, warning) < 0) goto bad; #else if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; #endif } else if ((size_t)((PyTypeObject *)result)->tp_basicsize != size) { PyErr_Format(PyExc_ValueError, "%s.%s has the wrong size, try recompiling", module_name, class_name); goto bad; } return (PyTypeObject *)result; bad: Py_XDECREF(py_module); Py_XDECREF(result); return NULL; } #endif static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = (start + end) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = 
(__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, new_max*sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, /*int argcount,*/ 0, /*int kwonlyargcount,*/ 0, /*int nlocals,*/ 0, /*int stacksize,*/ 0, /*int flags,*/ __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, /*int firstlineno,*/ __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_globals = 0; PyFrameObject *py_frame = 0; py_code = __pyx_find_code_object(c_line ? c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? 
c_line : py_line, py_code); } py_globals = PyModule_GetDict(__pyx_m); if (!py_globals) goto bad; py_frame = PyFrame_New( PyThreadState_GET(), /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ py_globals, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; py_frame->f_lineno = py_line; PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else /* Python 3+ has unicode identifiers */ if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; ++t; } return 0; } /* Type Conversion Functions */ static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x) { PyNumberMethods *m; const char *name = NULL; PyObject *res = NULL; #if PY_VERSION_HEX < 0x03000000 if (PyInt_Check(x) || PyLong_Check(x)) #else if (PyLong_Check(x)) #endif return Py_INCREF(x), x; m = Py_TYPE(x)->tp_as_number; #if PY_VERSION_HEX < 0x03000000 if (m && m->nb_int) { name = "int"; res = PyNumber_Int(x); } else if (m && m->nb_long) { name = "long"; res = PyNumber_Long(x); } #else if (m && m->nb_int) { name = "int"; res = PyNumber_Long(x); } #endif if (res) { #if PY_VERSION_HEX < 0x03000000 if (!PyInt_Check(res) && !PyLong_Check(res)) { #else if (!PyLong_Check(res)) { #endif PyErr_Format(PyExc_TypeError, "__%s__ returned non-%s (type %.200s)", name, name, Py_TYPE(res)->tp_name); Py_DECREF(res); return NULL; } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject* x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { #if PY_VERSION_HEX < 0x02050000 if (ival <= LONG_MAX) return PyInt_FromLong((long)ival); else { unsigned char *bytes = (unsigned char *) &ival; int one = 1; int little = (int)*(unsigned char*)&one; return _PyLong_FromByteArray(bytes, sizeof(size_t), little, 0); } #else return PyInt_FromSize_t(ival); #endif } static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject* x) { unsigned PY_LONG_LONG val = __Pyx_PyInt_AsUnsignedLongLong(x); if (unlikely(val == (unsigned PY_LONG_LONG)-1 && PyErr_Occurred())) { return (size_t)-1; } else if (unlikely(val != (unsigned PY_LONG_LONG)(size_t)val)) { PyErr_SetString(PyExc_OverflowError, "value too large to convert to size_t"); return (size_t)-1; } return (size_t)val; } #endif /* Py_PYTHON_H */ nipy-0.3.0/nipy/algorithms/graph/_graph.pyx000066400000000000000000000015131210344137400207230ustar00rootroot00000000000000import numpy as np cimport numpy as np cimport cython ctypedef np.float64_t DOUBLE ctypedef np.int_t INT @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) def 
dilation(np.ndarray[DOUBLE, ndim=2] field,\ np.ndarray[INT, ndim=1] idx,\ np.ndarray[INT, ndim=1] neighb): cdef int size_max = field.shape[0] cdef int dim = field.shape[1] cdef int i, j, d cdef DOUBLE fmax cdef np.ndarray[DOUBLE, ndim=1] res = 0 * field[:, 0] for d in range(dim): for i in range(size_max): fmax = field[i, d] for j in range(idx[i], idx[i + 1]): if field[neighb[j], d] > fmax: fmax = field[neighb[j], d] res[i] = fmax for i in range(size_max): field[i, d] = res[i] return res nipy-0.3.0/nipy/algorithms/graph/bipartite_graph.py000066400000000000000000000222101210344137400224340ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This module implements the BipartiteGraph class, used to represent weighted bipartite graph: it contains two types of vertices, say 'left' and 'right'; then edges can only exist between 'left' and 'right' vertices. For simplicity the vertices of either side are labeled [1..V] and [1..W] respectively. Author: Bertrand Thirion, 2006--2011 """ import numpy as np def check_feature_matrices(X, Y): """ checks wether the dismension of X and Y are consistent Parameters ---------- X, Y arrays of shape (n1, p) and (n2, p) where p = common dimension of the features """ if np.size(X) == X.shape[0]: X = np.reshape(X, (np.size(X), 1)) if np.size(Y) == Y.shape[0]: Y = np.reshape(Y, (np.size(Y), 1)) if X.shape[1] != Y.shape[1]: raise ValueError('X.shape[1] should = Y.shape[1]') def bipartite_graph_from_coo_matrix(x): """ Instantiates a weighted graph from a (sparse) coo_matrix Parameters ---------- x: scipy.sparse.coo_matrix instance, the input matrix Returns ------- bg: BipartiteGraph instance """ i, j = x.nonzero() edges = np.vstack((i, j)).T weights = x.data wg = BipartiteGraph(x.shape[0], x.shape[1], edges, weights) return wg def bipartite_graph_from_adjacency(x): """Instantiates a weighted graph from a square 2D array Parameters ---------- x: 2D array instance, the input array Returns ------- wg: BipartiteGraph instance """ from scipy.sparse import coo_matrix return bipartite_graph_from_coo_matrix(coo_matrix(x)) def cross_eps(X, Y, eps=1.): """Return the eps-neighbours graph of from X to Y Parameters ---------- X, Y arrays of shape (n1, p) and (n2, p) where p = common dimension of the features eps=1, float: the neighbourhood size considered Returns ------- the resulting bipartite graph instance Notes ----- for the sake of speed it is advisable to give PCA-preprocessed matrices X and Y. """ from scipy.sparse import coo_matrix check_feature_matrices(X, Y) try: eps = float(eps) except: "eps cannot be cast to a float" if np.isnan(eps): raise ValueError('eps is nan') if np.isinf(eps): raise ValueError('eps is inf') ij = np.zeros((0, 2)) data = np.zeros(0) for i, x in enumerate(X): dist = np.sum((Y - x) ** 2, 1) idx = np.asanyarray(np.where(dist < eps)) data = np.hstack((data, dist[idx.ravel()])) ij = np.vstack((ij, np.hstack(( i * np.ones((idx.size, 1)), idx.T)))).astype(np.int) data = np.maximum(data, 1.e-15) adj = coo_matrix((data, ij.T), shape=(X.shape[0], Y.shape[0])) return bipartite_graph_from_coo_matrix(adj) def cross_knn(X, Y, k=1): """return the k-nearest-neighbours graph of from X to Y Parameters ---------- X, Y arrays of shape (n1, p) and (n2, p) where p = common dimension of the features eps=1, float: the neighbourhood size considered Returns ------- BipartiteGraph instance Notes ----- For the sake of speed it is advised to give PCA-transformed matrices X and Y. 
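Examples
--------
A minimal usage sketch (illustrative only: the points below are random, so
only the edge count is predictable). Each of the 5 rows of `X` is linked to
its 2 nearest rows of `Y`, giving a bipartite graph with 5 * 2 = 10 edges.

>>> import numpy as np
>>> X = np.random.randn(5, 3)
>>> Y = np.random.randn(7, 3)
>>> bg = cross_knn(X, Y, 2)
>>> bg.E
10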
""" from scipy.sparse import coo_matrix check_feature_matrices(X, Y) try: k = int(k) except: "k cannot be cast to an int" if np.isnan(k): raise ValueError('k is nan') if np.isinf(k): raise ValueError('k is inf') k = min(k, Y.shape[0] -1) ij = np.zeros((0, 2)) data = np.zeros(0) for i, x in enumerate(X): dist = np.sum((Y - x) ** 2, 1) idx = np.argsort(dist)[:k] data = np.hstack((data, dist[idx])) ij = np.vstack((ij, np.hstack(( i * np.ones((k, 1)), np.reshape(idx, (k, 1)))))) data = np.maximum(data, 1.e-15) adj = coo_matrix((data, ij.T), shape=(X.shape[0], Y.shape[0])) return bipartite_graph_from_coo_matrix(adj) class BipartiteGraph(object): """ Bipartite graph class A graph for which there are two types of nodes, such that edges can exist only between nodes of type 1 and type 2 (not within) fields of this class: V (int, > 0) the number of type 1 vertices W (int, > 0) the number of type 2 vertices E: (int) the number of edges edges: array of shape (self.E, 2) reprensenting pairwise neighbors weights, array of shape (self.E), +1/-1 for scending/descending links """ def __init__(self, V, W, edges=None, weights=None): """ Constructor Parameters ---------- V (int), the number of vertices of subset 1 W (int), the number of vertices of subset 2 edges=None: array of shape (self.E, 2) the edge array of the graph weights=None: array of shape (self.E) the asociated weights array """ V = int(V) W = int(W) if (V < 1) or (W < 1): raise ValueError('cannot create graph with no vertex') self.V = V self.W = W self.E = 0 if (edges == None) & (weights == None): self.edges = np.array([], np.int) self.weights = np.array([]) else: if edges.shape[0] == np.size(weights): E = edges.shape[0] self.E = E self.edges = - np.ones((E, 2), np.int) self.set_edges(edges) self.set_weights(weights) else: raise ValueError('Incompatible size of the edges and \ weights matrices') def set_weights(self, weights): """ Set weights `weights` to edges Parameters ---------- weights, array of shape(self.V): edges weights """ if np.size(weights) != self.E: raise ValueError('The weight size is not the edges size') else: self.weights = np.reshape(weights, (self.E)) def set_edges(self, edges): """ Set edges to graph sets self.edges=edges if 1. edges has a correct size 2. edges take values in [0..V-1]*[0..W-1] Parameters ---------- edges: array of shape(self.E, 2): set of candidate edges """ if np.shape(edges) != np.shape(self.edges): raise ValueError('Incompatible size of the edge matrix') if np.size(edges) > 0: if edges.max(0)[0] + 1 > self.V: raise ValueError('Incorrect edge specification') if edges.max(0)[1] + 1 > self.W: raise ValueError('Incorrect edge specification') self.edges = edges def copy(self): """ returns a copy of self """ G = BipartiteGraph(self.V, self.W, self.edges.copy(), self.weights.copy()) return G def subgraph_left(self, valid, renumb=True): """Extraction of a subgraph Parameters ---------- valid, boolean array of shape self.V renumb, boolean: renumbering of the (left) edges Returns ------- G : None or ``BipartiteGraph`` instance A new BipartiteGraph instance with only the left vertices that are True. 
If sum(valid)==0, None is returned """ if np.size(valid) != self.V: raise ValueError('valid does not have the correct size') if np.sum(valid > 0) == 0: return None if self.E > 0: win_edges = valid[self.edges[:, 0]] edges = self.edges[win_edges] weights = self.weights[win_edges] if renumb: rindex = np.hstack((0, np.cumsum(valid > 0))) edges[:, 0] = rindex[edges[:, 0]] G = BipartiteGraph(np.sum(valid), self.W, edges, weights) else: G = BipartiteGraph(self.V, self.W, edges, weights) else: G = self.copy() return G def subgraph_right(self, valid, renumb=True): """ Extraction of a subgraph Parameters ---------- valid : bool array of shape self.V renumb : bool, optional renumbering of the (right) edges Returns ------- G : None or ``BipartiteGraph`` instance. A new BipartiteGraph instance with only the right vertices that are True. If sum(valid)==0, None is returned """ if np.size(valid) != self.V: raise ValueError('valid does not have the correct size') if np.sum(valid > 0) == 0: return None if self.E > 0: win_edges = valid[self.edges[:, 1]] edges = self.edges[win_edges] weights = self.weights[win_edges] if renumb: rindex = np.hstack((0, np.cumsum(valid > 0))) edges[:, 1] = rindex[edges[:, 1]] G = BipartiteGraph(self.V, np.sum(valid), edges, weights) else: G = BipartiteGraph(self.V, self.W, edges, weights) else: G = self.copy() return G nipy-0.3.0/nipy/algorithms/graph/field.py000066400000000000000000000457111210344137400203660ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This module implements the Field class, which is simply a WeightedGraph (see the graph.py module) plus an array that yields (possibly multi-dimensional) features associated with graph vertices. This allows some kinds of computations (all those relating to mathematical morphology, diffusion, etc.). Certain functions are provided to instantiate Fields easily, given a WeightedGraph and feature data. 
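For instance, a minimal sketch (the three-vertex chain below is purely illustrative, not taken from the library's own tests)::

    import numpy as np
    from nipy.algorithms.graph.graph import WeightedGraph
    from nipy.algorithms.graph.field import field_from_graph_and_data

    # a chain 0 - 1 - 2, with both directions of each edge listed
    edges = np.array([[0, 1], [1, 0], [1, 2], [2, 1]])
    g = WeightedGraph(3, edges, np.ones(4))
    # one scalar feature per vertex
    f = field_from_graph_and_data(g, np.array([[1.], [3.], [2.]]))
    f.dilation(1)  # each vertex takes the max over itself and its neighbours
    # f.field should now be [[3.], [3.], [3.]]
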
Author:Bertrand Thirion, 2006--2011 """ import numpy as np from .graph import WeightedGraph NEGINF = -np.inf def field_from_coo_matrix_and_data(x, data): """ Instantiates a weighted graph from a (sparse) coo_matrix Parameters ---------- x: (V, V) scipy.sparse.coo_matrix instance, the input matrix data: array of shape (V, dim), the field data Returns ------- ifield: resulting Field instance """ if x.shape[0] != x.shape[1]: raise ValueError("the input coo_matrix is not square") if data.shape[0] != x.shape[0]: raise ValueError("data and x do not have consistent shapes") i, j = x.nonzero() edges = np.vstack((i, j)).T weights = x.data ifield = Field(x.shape[0], edges, weights, data) return ifield def field_from_graph_and_data(g, data): """ Instantiate a Fieldfrom a WeightedGraph plus some feature data Parameters ---------- x: (V, V) scipy.sparse.coo_matrix instance, the input matrix data: array of shape (V, dim), the field data Returns ------- ifield: resulting field instance """ if data.shape[0] != g.V: raise ValueError("data and g do not have consistent shapes") ifield = Field(g.V, g.edges, g.weights, data) return ifield class Field(WeightedGraph): """ This is the basic field structure, which contains the weighted graph structure plus an array of data (the 'field') field is an array of size(n, p) where n is the number of vertices of the graph and p is the field dimension """ def __init__(self, V, edges=None, weights=None, field=None): """ Parameters ---------- V (int > 0) the number of vertices of the graph edges=None: the edge array of the graph weights=None: the asociated weights array field=None: the field data itself """ V = int(V) if V < 1: raise ValueError('cannot create graph with no vertex') self.V = int(V) self.E = 0 self.edges = [] self.weights = [] if (edges is not None) or (weights is not None): if len(edges) == 0: E = 0 elif edges.shape[0] == np.size(weights): E = edges.shape[0] else: raise ValueError('Incompatible size of the edges \ and weights matrices') self.V = V self.E = E self.edges = edges self.weights = weights self.field = [] if field == None: pass else: if np.size(field) == self.V: field = np.reshape(field, (self.V, 1)) if field.shape[0] != self.V: raise ValueError('field does not have a correct size') else: self.field = field def get_field(self): return self.field def set_field(self, field): if np.size(field) == self.V: field = np.reshape(field, (self.V, 1)) if field.shape[0] != self.V: raise ValueError('field does not have a correct size') else: self.field = field def closing(self, nbiter=1): """Morphological closing of the field data. self.field is changed inplace Parameters ---------- nbiter=1 : the number of iterations required """ nbiter = int(nbiter) self.dilation(nbiter) self.erosion(nbiter) def opening(self, nbiter=1): """Morphological opening of the field data. self.field is changed inplace Parameters ---------- nbiter: int, optional, the number of iterations required """ nbiter = int(nbiter) self.erosion(nbiter) self.dilation(nbiter) def dilation(self, nbiter=1, fast=True): """ Morphological dimlation of the field data. 
self.field is changed Parameters ---------- nbiter: int, optional, the number of iterations required fixme ----- cython """ nbiter = int(nbiter) if fast: from ._graph import dilation if self.E > 0: if (self.field.size == self.V): self.field = self.field.reshape((self.V, 1)) idx, neighb, _ = self.compact_neighb() for i in range(nbiter): dilation(self.field, idx, neighb) else: from scipy.sparse import dia_matrix adj = self.to_coo_matrix() + dia_matrix( (np.ones(self.V), 0), (self.V, self.V)) rows = adj.tolil().rows for i in range(nbiter): self.field = np.array([self.field[row].max(0) for row in rows]) def highest_neighbor(self, refdim=0): """ Computes the neighbor with highest field value along refdim Parameters ---------- refdim: int optiontal, the dimension to consider Returns ------- hneighb: array of shape(self.V), index of the neighbor with highest value """ from scipy.sparse import dia_matrix refdim = int(refdim) # add self-edges to avoid singularities, when taking the maximum adj = self.to_coo_matrix() + dia_matrix( (np.ones(self.V), 0), (self.V, self.V)) rows = adj.tolil().rows hneighb = np.array([row[self.field[row].argmax()] for row in rows]) return hneighb def erosion(self, nbiter=1): """Morphological openeing of the field Parameters ---------- nbiter: int, optional, the number of iterations required """ nbiter = int(nbiter) lil = self.to_coo_matrix().tolil().rows.tolist() for i in range(nbiter): nf = np.zeros_like(self.field) for k, neighbors in enumerate(lil): nf[k] = self.field[neighbors].min(0) self.field = nf def get_local_maxima(self, refdim=0, th=NEGINF): """ Look for the local maxima of one dimension (refdim) of self.field Parameters ---------- refdim (int) the field dimension over which the maxima are looked after th = float, optional threshold so that only values above th are considered Returns ------- idx: array of shape (nmax) indices of the vertices that are local maxima depth: array of shape (nmax) topological depth of the local maxima : depth[idx[i]] = q means that idx[i] is a q-order maximum """ depth_all = self.local_maxima(refdim, th) idx = np.ravel(np.where(depth_all)) depth = depth_all[idx] return idx, depth def local_maxima(self, refdim=0, th=NEGINF): """Returns all the local maxima of a field Parameters ---------- refdim (int) field dimension over which the maxima are looked after th: float, optional threshold so that only values above th are considered Returns ------- depth: array of shape (nmax) a labelling of the vertices such that depth[v] = 0 if v is not a local maximum depth[v] = 1 if v is a first order maximum ... 
depth[v] = q if v is a q-order maximum """ refdim = int(refdim) if np.size(self.field) == 0: raise ValueError('No field has been defined so far') if self.field.shape[1] - 1 < refdim: raise ValueError(refdim > self.shape[1]) depth = np.zeros(self.V, np.int) # create a subfield(thresholding) sf = self.subfield(self.field.T[refdim] >= th) initial_field = sf.field.T[refdim] sf.field = initial_field.copy() # compute the depth in the subgraph ldepth = sf.V * np.ones(sf.V, np.int) for k in range(sf.V): dilated_field_old = sf.field.ravel().copy() sf.dilation(1) non_max = sf.field.ravel() > dilated_field_old ldepth[non_max] = np.minimum(k, ldepth[non_max]) if (non_max == False).all(): ldepth[sf.field.ravel() == initial_field] = np.maximum(k, 1) break # write all the depth values depth[self.field[:, refdim] >= th] = ldepth return depth def diffusion(self, nbiter=1): """diffusion of the field data in the weighted graph structure self.field is changed inplace Parameters ---------- nbiter: int, optional the number of iterations required Notes ----- The process is run for all the dimensions of the field """ nbiter = int(nbiter) adj = self.to_coo_matrix() for i in range(nbiter): self.field = adj * self.field def custom_watershed(self, refdim=0, th=NEGINF): """ customized watershed analysis of the field. Note that bassins are found around each maximum (and not minimum as conventionally) Parameters ---------- refdim: int, optional th: float optional, threshold of the field Returns ------- idx: array of shape (nbassins) indices of the vertices that are local maxima label : array of shape (self.V) labelling of the vertices according to their bassin """ import numpy.ma as ma from graph import Graph if (np.size(self.field) == 0): raise ValueError('No field has been defined so far') if self.field.shape[1] - 1 < refdim: raise ValueError('refdim>field.shape[1]') label = - np.ones(self.V, np.int) # create a subfield(thresholding) sf = self.subfield(self.field[:, refdim] >= th) # compute the basins hneighb = sf.highest_neighbor() edges = np.vstack((hneighb, np.arange(sf.V))).T edges = np.vstack((edges, np.vstack((np.arange(sf.V), hneighb)).T)) aux = Graph(sf.V, edges.shape[0], edges) llabel = aux.cc() n_bassins = len(np.unique(llabel)) # write all the depth values label[self.field[:, refdim] >= th] = llabel idx = np.array([ma.array( self.field[:, refdim], mask=(label != c)).argmax() for c in range(n_bassins)]) return idx, label def threshold_bifurcations(self, refdim=0, th=NEGINF): """Analysis of the level sets of the field: Bifurcations are defined as changes in the topology in the level sets when the level (threshold) is varied This can been thought of as a kind of Morse analysis Parameters ---------- th: float, optional, threshold so that only values above th are considered Returns ------- idx: array of shape (nlsets) indices of the vertices that are local maxima height: array of shape (nlsets) the depth of the local maxima depth[idx[i]] = q means that idx[i] is a q-order maximum Note that this is also the diameter of the basins associated with local maxima parents: array of shape (nlsets) the label of the maximum which dominates each local maximum i.e. 
it describes the hierarchy of the local maxima label: array of shape (self.V) a labelling of thevertices according to their bassin """ import numpy.ma as ma if (np.size(self.field) == 0): raise ValueError('No field has been defined so far') if self.field.shape[1] - 1 < refdim: raise ValueError('refdim>field.shape[1]') label = - np.ones(self.V, np.int) # create a subfield(thresholding) sf = self.subfield(self.field[:, refdim] >= th) initial_field = sf.field[:, refdim].copy() sf.field = initial_field.copy() # explore the subfield order = np.argsort(- initial_field) rows = sf.to_coo_matrix().tolil().rows llabel = - np.ones(sf.V, np.int) parent, root = np.arange(2 * self.V), np.arange(2 * self.V) # q will denote the region index q = 0 for i in order: if (llabel[rows[i]] > - 1).any(): nlabel = np.unique(llabel[rows[i]]) if nlabel[0] == -1: nlabel = nlabel[1:] nlabel = np.unique(root[nlabel]) if len(nlabel) == 1: # we are at a regular point llabel[i] = nlabel[0] else: # we are at a saddle point llabel[i] = q parent[nlabel] = q root[nlabel] = q for j in nlabel: root[root == j] = q q += 1 else: # this is a new component llabel[i] = q q += 1 parent = parent[:q] # write all the depth values label[self.field[:, refdim] >= th] = llabel idx = np.array([ma.array( self.field[:, refdim], mask=(label != c)).argmax() for c in range(q)]) return idx, parent, label def constrained_voronoi(self, seed): """Voronoi parcellation of the field starting from the input seed Parameters ---------- seed: int array of shape(p), the input seeds Returns ------- label: The resulting labelling of the data Fixme ----- deal with graphs with several ccs """ if np.size(self.field) == 0: raise ValueError('No field has been defined so far') seed = seed.astype(np.int) weights = np.sqrt(np.sum((self.field[self.edges.T[0]] - self.field[self.edges.T[1]]) ** 2, 1)) g = WeightedGraph(self.V, self.edges, weights) label = g.voronoi_labelling(seed) return label def geodesic_kmeans(self, seeds=None, label=None, maxiter=100, eps=1.e-4, verbose=0): """ Geodesic k-means algorithm i.e. 
obtention of clusters that are topologically connected and minimally variable concerning the information of self.field Parameters ---------- seeds: array of shape(p), optional, initial indices of the seeds within the field if seeds==None the labels are used as initialization labels: array of shape(self.V) initial labels, optional, it is expected that labels take their values in a certain range (0..lmax) if Labels==None, this is not used if seeds==None and labels==None, an ewxception is raised maxiter: int, optional, maximal number of iterations eps: float, optional, increase of inertia at which convergence is declared Returns ------- seeds: array of shape (p), the final seeds label : array of shape (self.V), the resulting field label J: float, inertia value """ if np.size(self.field) == 0: raise ValueError('No field has been defined so far') if (seeds == None) and (label == None): raise ValueError('No initialization has been provided') k = np.size(seeds) inertia_old = NEGINF if seeds == None: k = label.max() + 1 if np.size(np.unique(label)) != k: raise ValueError('missing values, cannot proceed') seeds = np.zeros(k).astype(np.int) for j in range(k): lj = np.nonzero(label == j)[0] cent = np.mean(self.field[lj], 0) tj = np.argmin(np.sum((cent - self.field[lj]) ** 2, 1)) seeds[j] = lj[tj] else: k = np.size(seeds) for i in range(maxiter): # voronoi labelling label = self.constrained_voronoi(seeds) # update the seeds inertia = 0 pinteria = 0 for j in range(k): lj = np.nonzero(label == j)[0] pinteria += np.sum( (self.field[seeds[j]] - self.field[lj]) ** 2) cent = np.mean(self.field[lj], 0) tj = np.argmin(np.sum((cent - self.field[lj]) ** 2, 1)) seeds[j] = lj[tj] inertia += np.sum((cent - self.field[lj]) ** 2) if verbose: print i, inertia if np.absolute(inertia_old - inertia) < eps: break inertia_old = inertia return seeds, label, inertia def ward(self, nbcluster): """Ward's clustering of self Parameters ---------- nbcluster: int, the number of desired clusters Returns ------- label: array of shape (self.V) the resulting field label J (float): the resulting inertia """ from nipy.algorithms.clustering.hierarchical_clustering\ import ward_segment label, J = ward_segment(self, self.field, qmax=nbcluster) # compute the resulting inertia inertia = 0 for j in range(nbcluster): lj = np.nonzero(label == j)[0] cent = np.mean(self.field[lj], 0) inertia += np.sum((cent - self.field[lj]) ** 2) return label, inertia def copy(self): """ copy function """ return Field(self.V, self.edges.copy(), self.weights.copy(), self.field.copy()) def subfield(self, valid): """Returns a subfield of self, with only vertices such that valid > 0 Parameters ---------- valid: array of shape (self.V), nonzero for vertices to be retained Returns ------- F: Field instance, the desired subfield of self Notes ----- The vertices are renumbered as [1..p] where p = sum(valid>0) when sum(valid) == 0 then None is returned """ G = self.subgraph(valid) if G == None: return None field = self.field[valid] if len(G.edges) == 0: edges = np.array([[], []]).T else: edges = G.edges return Field(G.V, edges, G.weights, field) nipy-0.3.0/nipy/algorithms/graph/forest.py000066400000000000000000000351131210344137400206000ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Module implements the Forest class A Forest is a graph with a hierarchical structure. Each connected component of a forest is a tree. 
The main characteristic is that each node has a single parent, so that a Forest is fully characterized by a "parent" array, that defines the unique parent of each node. The directed relationships are encoded by the weight sign. Note that some methods of WeightedGraph class (e.g. dijkstra's algorithm) require positive weights, so that they cannot work on forests in the current implementation. Specific methods (e.g. all_sidtance()) have been set instead. Main author: Bertrand thirion, 2007-2011 """ import numpy as np from .graph import WeightedGraph class Forest(WeightedGraph): """ Forest structure, i.e. a set of trees The nodes can be segmented into trees. Within each tree a node has one parent and children that describe the associated hierarchical structure. Some of the nodes can be viewed as leaves, other as roots The edges within a tree are associated with a weight: * +1 from child to parent * -1 from parent to child Attributes ---------- V : int int > 0, the number of vertices E : int the number of edges parents : (self.V,) array the parent array edges : (self.E, 2) array representing pairwise neighbors weights : (self.E,) array +1/-1 for ascending/descending links children: list list of arrays that represents the children any node """ def __init__(self, V, parents=None): """Constructor Parameters ---------- V : int the number of edges of the graph parents : None or (V,) array the parents of zach vertex. If `parents`==None , the parents are set to range(V), i.e. each node is its own parent, and each node is a tree """ V = int(V) if V < 1: raise ValueError('cannot create graphs with no vertex') self.V = int(V) # define the parents if parents == None: self.parents = np.arange(self.V).astype(np.int) else: if np.size(parents) != V: raise ValueError('Incorrect size for parents') if parents.max() > self.V: raise ValueError('Incorrect value for parents') self.parents = np.reshape(parents, self.V).astype(np.int) self.define_graph_attributes() if self.check() == 0: raise ValueError('The proposed structure is not a forest') self.children = [] def define_graph_attributes(self): """define the edge and weights array """ self.edges = np.array([]).astype(np.int) self.weights = np.array([]) i = np.nonzero(self.parents != np.arange(self.V))[0] if np.size(i) > 0: E1 = np.hstack((i, self.parents[i])) E2 = np.hstack((self.parents[i], i)) self.edges = (np.vstack((E1, E2))).astype(np.int).T self.weights = np.hstack((np.ones(np.size(i)), - np.ones(np.size(i)))) self.E = np.size(self.weights) self.edges = self.edges def compute_children(self): """Define the children of each node (stored in self.children) """ self.children = [np.array([]) for v in range(self.V)] if self.E > 0: K = self.copy() K.remove_edges(K.weights < 0) self.children = K.to_coo_matrix().tolil().rows.tolist() def get_children(self, v=-1): """ Get the children of a node/each node Parameters ---------- v: int, optional a node index Returns ------- children: list of int the list of children of node v (if v is provided) a list of lists of int, the children of all nodes otherwise """ v = int(v) if v > -1: if v > self.V - 1: raise ValueError('the given node index is too high') if self.children == []: self.compute_children() if v == -1: return self.children else: return self.children[v] def get_descendants(self, v, exclude_self=False): """returns the nodes that are children of v as a list Parameters ---------- v: int, a node index Returns ------- desc: list of int, the list of all descendant of the input node """ v = int(v) if v < 0: raise 
ValueError('the given node index is too low') if v > self.V - 1: raise ValueError('the given node index is too high') if self.children == []: self.compute_children() if len(self.children[v]) == 0: return [v] else: desc = [v] for w in self.children[v]: temp = self.get_descendants(w) for q in temp: desc.append(q) desc.sort() if exclude_self and v in desc: desc = [i for i in desc if i != v] return desc def check(self): """Check that self is indeed a forest, i.e. contains no loop Returns ------- a boolean b=0 iff there are loops, 1 otherwise Notes ----- Slow implementation, might be rewritten in C or cython """ b = 1 if self.V == 1: return b for v in range(self.V): w = v q = 0 while(self.parents[w] != w): w = self.parents[w] if w == v: b = 0 break q += 1 if q > self.V: b = 0 break if b == 0: break return b def isleaf(self): """ Identification of the leaves of the forest Returns ------- leaves: bool array of shape(self.V), indicator of the forest's leaves """ leaves = np.ones(self.V).astype('bool') if self.E > 0: leaves[self.edges[self.weights > 0, 1]] = 0 return leaves def isroot(self): """ Returns an indicator of nodes being roots Returns ------- roots, array of shape(self.V, bool), indicator of the forest's roots """ roots = np.array(self.parents == np.arange(self.V)) return roots def subforest(self, valid): """ Creates a subforest with the vertices for which valid > 0 Parameters ---------- valid: array of shape (self.V): idicator of the selected nodes Returns ------- subforest: a new forest instance, with a reduced set of nodes Notes ----- The children of deleted vertices become their own parent """ if np.size(valid) != self.V: raise ValueError("incompatible size for self anf valid") parents = self.parents.copy() j = np.nonzero(valid[self.parents] == 0)[0] parents[j] = j parents = parents[valid.astype(bool)] renumb = np.hstack((0, np.cumsum(valid))) parents = renumb[parents] F = Forest(np.sum(valid), parents) return F def merge_simple_branches(self): """ Return a subforest, where chained branches are collapsed Returns ------- sf, Forest instance, same as self, without any chain """ valid = np.ones(self.V).astype('bool') children = self.get_children() for k in range(self.V): if np.size(children[k]) == 1: valid[k] = 0 return self.subforest(valid) def all_distances(self, seed=None): """returns all the distances of the graph as a tree Parameters ---------- seed=None array of shape(nbseed) with valuesin [0..self.V-1] set of vertices from which tehe distances are computed Returns ------- dg: array of shape(nseed, self.V), the resulting distances Notes ----- By convention infinite distances are given the distance np.inf """ if (hasattr(seed, '__iter__') == False) & (seed is not None): seed = [seed] if self.E > 0: w = self.weights.copy() self.weights = np.absolute(self.weights) dg = self.floyd(seed) dg[dg == (np.sum(self.weights) + 1)] = np.inf self.weights = w return dg else: return np.inf * np.ones((self.V, self.V)) def depth_from_leaves(self): """compute an index for each node: 0 for the leaves, 1 for their parents etc. and maximal for the roots. 
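(For the parent array ``[2, 2, 4, 4, 4]`` used in this package's tests, the resulting depths are ``[0, 0, 1, 0, 2]``.)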
Returns ------- depth: array of shape (self.V): the depth values of the vertices """ depth = self.isleaf().astype(np.int)-1 for j in range(self.V): dc = depth.copy() for i in range(self.V): if self.parents[i] != i: depth[self.parents[i]] = np.maximum(depth[i] + 1,\ depth[self.parents[i]]) if dc.max() == depth.max(): break return depth def reorder_from_leaves_to_roots(self): """reorder the tree so that the leaves come first then their parents and so on, and the roots are last. Returns ------- order: array of shape(self.V) the order of the old vertices in the reordered graph """ depth = self.depth_from_leaves() order = np.argsort(depth) iorder = np.arange(self.V) for i in range(self.V): iorder[order[i]] = i parents = iorder[self.parents[order]] self.parents = parents self.define_graph_attributes() return order def leaves_of_a_subtree(self, ids, custom=False): """tests whether the given nodes are the leaves of a certain subtree Parameters ---------- ids: array of shape (n) that takes values in [0..self.V-1] custom == False, boolean if custom==true the behavior of the function is more specific - the different connected components are considered as being in a same greater tree - when a node has more than two subbranches, any subset of these children is considered as a subtree """ leaves = self.isleaf().astype('bool') for i in ids: if leaves[i] == 0: raise ValueError("some of the ids are not leaves") #1. find the highest node that is a common ancestor to all leaves # if there is none, common ancestor is -1 com_ancestor = ids[0] for i in ids: ca = i dca = self.get_descendants(ca) while com_ancestor not in dca: ca = self.parents[ca] dca = self.get_descendants(ca) if (ca == self.parents[ca]) & (com_ancestor not in dca): ca = -1 break com_ancestor = ca #2. check whether all the children of this ancestor are within ids if com_ancestor > -1: st = self.get_descendants(com_ancestor) valid = [i in ids for i in st if leaves[i]] bresult = (np.sum(valid) == np.size(valid)) if custom == False: return bresult # now, custom =True # check that subtrees of ancestor are consistently labelled kids = self.get_children(com_ancestor) if np.size(kids) > 2: bresult = True for v in kids: st = np.array(self.get_descendants(v)) st = st[leaves[st]] if np.size(st) > 1: valid = [i in ids for i in st] bresult *= ((np.sum(valid) == np.size(valid)) + np.sum(valid == 0)) return bresult # now, common ancestor is -1 if custom == False: st = np.squeeze(np.nonzero(leaves)) valid = [i in ids for i in st] bresult = (np.sum(valid) == np.size(valid)) else: cc = self.cc() bresult = True for i in ids: st = np.squeeze(np.nonzero((cc == cc[i]) * leaves)) if np.size(st) > 1: valid = [i in ids for i in st] bresult *= (np.sum(valid) == np.size(valid)) else: bresult *= (st in ids) return bresult def tree_depth(self): """ Returns the number of hierarchical levels in the tree """ depth = self.depth_from_leaves() return depth.max() + 1 def propagate_upward_and(self, prop): """propagates from leaves to roots some binary property of the nodes so that prop[parents] = logical_and(prop[children]) Parameters ---------- prop, array of shape(self.V), the input property Returns ------- prop, array of shape(self.V), the output property field """ prop = np.asanyarray(prop).copy() if np.size(prop) != self.V: raise ValueError("incoherent size for prop") prop[self.isleaf() == False] = True for j in range(self.tree_depth()): for i in range(self.V): if prop[i] == False: prop[self.parents[i]] = False return prop def propagate_upward(self, label): """ Propagation 
of a certain labelling from leves to roots Assuming that label is a certain positive integer field this propagates these labels to the parents whenever the children nodes have coherent properties otherwise the parent value is unchanged Parameters ---------- label: array of shape(self.V) Returns ------- label: array of shape(self.V) """ label = np.asanyarray(label).copy() if np.size(label) != self.V: raise ValueError("incoherent size for label") ch = self.get_children() depth = self.depth_from_leaves() for j in range(1, depth.max() + 1): for i in range(self.V): if depth[i] == j: if np.size(np.unique(label[ch[i]])) == 1: label[i] = np.unique(label[ch[i]]) return label nipy-0.3.0/nipy/algorithms/graph/graph.py000066400000000000000000001144361210344137400204050ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This module implements two graph classes: Graph: basic topological graph, i.e. vertices and edges. This kind of object only has topological properties WeightedGraph (Graph): also has a value associated with edges, called weights, that are used in some computational procedures (e.g. path length computation). Importantly these objects are equivalent to square sparse matrices, which is used to perform certain computations. This module also provides several functions to instantiate WeightedGraphs from data: - k nearest neighbours (where samples are rows of a 2D-array) - epsilon-neighbors (where sample rows of a 2D-array) - representation of the neighbors on a 3d grid (6-, 18- and 26-neighbors) - Minimum Spanning Tree (where samples are rows of a 2D-array) Author: Bertrand Thirion, 2006--2011 """ import numpy as np from scipy.sparse import coo_matrix class Graph(object): """ Basic topological (non-weighted) directed Graph class Member variables: * V (int > 0): the number of vertices * E (int >= 0): the number of edges Properties: * vertices (list, type=int, shape=(V,)) vertices id * edges (list, type=int, shape=(E,2)): edges as vertices id tuples """ ### Constructor def __init__(self, V, E=0, edges=None): """ Constructor Parameters ---------- V : int the number of vertices E : int, optional the number of edges edges : None or shape (E, 2) array, optional edges of graph """ # deal with vertices self.__set_V(V) self.vertices = np.arange(self.V) # deal with edges if not isinstance(edges, None.__class__): self.__set_E(np.shape(edges)[0]) self.set_edges(edges) else: self.__set_E(E) self.set_edges(np.zeros((self.E, 2), dtype=int)) ### Accessors def get_vertices(self): """ To get the graph's vertices (as id) """ return self.vertices def get_edges(self): """To get the graph's edges """ try: temp = self.edges except: temp = [] return temp def get_V(self): """To get the number of vertices in the graph """ return self.V def get_E(self): """To get the number of edges in the graph """ return self.E ### Mutators def __set_V(self, V): """ Sets the graph's number of vertices. This methods is defined as private since we don't want the number of vertices to be modified outside the graph object methods. """ self.V = int(V) if self.V < 1: raise ValueError('Empty graphs cannot be created') def __set_E(self, E): """Sets the graph's number of edges. This methods is defined as private since we don't want the number of edges to be modified outside the graph object methods. 
""" self.E = int(E) if self.E < 0: self.E = 0 def set_edges(self, edges): """Sets the graph's edges Preconditions ------------- - edges has a correct size - edges take values in [1..V] """ if (not isinstance(edges, None.__class__) and (edges.size != 0)): if ((np.shape(edges)[0] != self.E) or (np.shape(edges)[1] != 2)): raise ValueError('Incompatible size of the edge matrix') if edges.max() + 1 > self.V: raise ValueError('Incorrect edge specification') self.edges = edges else: self.edges = [] ### Methods def adjacency(self): """returns the adjacency matrix of the graph as a sparse coo matrix Returns ------- adj: scipy.sparse matrix instance, that encodes the adjacency matrix of self """ if self.E > 0: i = self.edges[:, 0] j = self.edges[:, 1] adj = coo_matrix((np.ones(self.E), (i, j)), shape=(self.V, self.V)) else: adj = coo_matrix((self.V, self.V)) return adj def cc(self): """Compte the different connected components of the graph. Returns ------- label: array of shape(self.V), labelling of the vertices """ try: from scipy.sparse import cs_graph_components _, label = cs_graph_components(self.adjacency()) except: pass lil = self.to_coo_matrix().tolil().rows.tolist() label = lil_cc(lil) return label def degrees(self): """Returns the degree of the graph vertices. Returns ------- rdegree: (array, type=int, shape=(self.V,)), the right degrees ldegree: (array, type=int, shape=(self.V,)), the left degrees """ A = self.adjacency() A.data = np.ones(A.nnz) right = np.array(A.sum(1)).ravel() left = np.array(A.sum(0)).ravel() return right, left def main_cc(self): """Returns the indexes of the vertices within the main cc Returns ------- idx: array of shape (sizeof main cc) """ if self.E > 0: cc = self.cc() pop = np.array([np.sum(cc == k) for k in np.unique(cc)]) idx = np.nonzero(cc == pop.argmax())[0] else: idx = 0 return idx def to_coo_matrix(self): """ Return adjacency matrix as coo sparse Returns ------- sp: scipy.sparse matrix instance, that encodes the adjacency matrix of self """ if self.E > 0: i, j = self.edges.T sm = coo_matrix((np.ones(self.E), (i, j)), shape=(self.V, self.V)) else: sm = coo_matrix((self.V, self.V)) return sm def show(self, ax=None): """Shows the graph as a planar one. 
Parameters ---------- ax, axis handle Returns ------- ax, axis handle """ import matplotlib.pylab as plt if ax == None: plt.figure() ax = plt.subplot(1, 1, 1) t = (2 * np.pi * np.arange(self.V)) / self.V plt.plot(np.cos(t), np.sin(t), '.') planar_edges = np.ravel((self.edges * 2 * np.pi) / self.V) ax.plot(np.cos(planar_edges), np.sin(planar_edges), 'k') ax.axis('off') return ax ##################################################################### # WeightedGraph ##################################################################### def wgraph_from_coo_matrix(x): """ Instantiates a weighted graph from a (sparse) coo_matrix Parameters ---------- x: scipy.sparse.coo_matrix instance, the input matrix Returns ------- wg: WeightedGraph instance """ if x.shape[0] != x.shape[1]: raise ValueError("the input coo_matrix is not square") i, j = x.nonzero() edges = np.vstack((i, j)).T weights = x.data wg = WeightedGraph(x.shape[0], edges, weights) return wg def wgraph_from_adjacency(x): """Instantiates a weighted graph from a square 2D array Parameters ---------- x: 2D array instance, the input array Returns ------- wg: WeightedGraph instance """ a = coo_matrix(x) return wgraph_from_coo_matrix(a) def complete_graph(n): """ returns a complete graph with n vertices """ return wgraph_from_adjacency(np.ones((n, n))) def mst(X): """ Returns the WeightedGraph that is the minimum Spanning Tree of X Parameters ---------- X: data array, of shape(n_samples, n_features) Returns ------- the corresponding WeightedGraph instance """ n = X.shape[0] label = np.arange(n).astype(np.int) edges = np.zeros((0, 2)).astype(np.int) # upper bound on maxdist**2 maxdist = 4 * np.sum((X - X[0]) ** 2, 1).max() nbcc = n while nbcc > 1: mindist = maxdist * np.ones(nbcc) link = - np.ones((nbcc, 2)).astype(np.int) # find nearest neighbors for n1 in range(n): j = label[n1] newdist = np.sum((X[n1] - X) ** 2, 1) newdist[label == j] = maxdist n2 = np.argmin(newdist) if newdist[n2] < mindist[j]: mindist[j] = newdist[n2] link[j] = np.array([n1, n2]) # merge nearest neighbors nnbcc = nbcc idx = np.arange(nbcc) for i in range(nnbcc): k, j = label[link[i]] while k > idx[k]: k = idx[k] while j > idx[j]: j = idx[j] if k != j: edges = np.vstack((edges, link[i], np.array([link[i, 1], link[i, 0]]))) idx[max(j, k)] = min(j, k) nbcc -= 1 # relabel the graph label = WeightedGraph(n, edges, np.ones(edges.shape[0])).cc() nbcc = label.max() + 1 d = np.sqrt(np.sum((X[edges[:, 0]] - X[edges[:, 1]]) ** 2, 1)) return WeightedGraph(n, edges, d) def knn(X, k=1): """returns the k-nearest-neighbours graph of the data Parameters ---------- X, array of shape (n_samples, n_features): the input data k, int, optional: is the number of neighbours considered Returns ------- the corresponding WeightedGraph instance Notes ----- The knn system is symmeterized: if (ab) is one of the edges then (ba) is also included """ from ..utils.fast_distance import euclidean_distance if np.size(X) == X.shape[0]: X = np.reshape(X, (np.size(X), 1)) try: k = int(k) except: "k cannot be cast to an int" if np.isnan(k): raise ValueError('k is nan') if np.isinf(k): raise ValueError('k is inf') k = min(k, X.shape[0] - 1) # create the distance matrix dist = euclidean_distance(X) sorted_dist = dist.copy() sorted_dist.sort(0) # neighbour system bool_knn = dist < sorted_dist[k + 1] bool_knn += bool_knn.T bool_knn -= np.diag(np.diag(bool_knn)) dist *= (bool_knn > 0) return wgraph_from_adjacency(dist) def eps_nn(X, eps=1.): """Returns the eps-nearest-neighbours graph of the data Parameters ---------- 
X, array of shape (n_samples, n_features), input data eps, float, optional: the neighborhood width Returns ------- the resulting graph instance """ from ..utils.fast_distance import euclidean_distance if np.size(X) == X.shape[0]: X = np.reshape(X, (np.size(X), 1)) try: eps = float(eps) except: "eps cannot be cast to a float" if np.isnan(eps): raise ValueError('eps is nan') if np.isinf(eps): raise ValueError('eps is inf') dist = euclidean_distance(X) dist = np.maximum(dist, 1.e-16) dist[dist >= eps] = 0 # this would is just for numerical reasons dist -= np.diag(np.diag(dist)) return wgraph_from_adjacency(dist) def lil_cc(lil): """ Returns the connected comonents of a graph represented as a list of lists Parameters ---------- lil: a list of list representing the graph neighbors Returns ------- label a vector of shape len(lil): connected components labelling Notes ----- Dramatically slow for non-sparse graphs """ n = len(lil) visited = np.zeros(n).astype(np.int) label = - np.ones(n).astype(np.int) k = 0 while (visited == 0).any(): front = [np.argmin(visited)] while len(front) > 0: pivot = front.pop(0) if visited[pivot] == 0: visited[pivot] = 1 label[pivot] = k front += lil[pivot] k += 1 return label def graph_3d_grid(xyz, k=18): """ Utility that computes the six neighbors on a 3d grid Parameters ---------- xyz: array of shape (n_samples, 3); grid coordinates of the points k: neighboring system, equal to 6, 18, or 26 Returns ------- i, j, d 3 arrays of shape (E), where E is the number of edges in the resulting graph (i, j) represent the edges, d their weights """ if np.size(xyz) == 0: return None lxyz = xyz - xyz.min(0) m = 3 * lxyz.max(0).sum() + 2 # six neighbours n6 = [np.array([1, m, m ** 2]), np.array([m ** 2, 1, m]), np.array([m, m ** 2, 1])] # eighteen neighbours n18 = [np.array([1 + m, 1 - m, m ** 2]), np.array([1 + m, m - 1, m ** 2]), np.array([m ** 2, 1 + m, 1 - m]), np.array([m ** 2, 1 + m, m - 1]), np.array([1 - m, m ** 2, 1 + m]), np.array([m - 1, m ** 2, 1 + m])] # twenty-six neighbours n26 = [np.array([1 + m + m ** 2, 1 - m, 1 - m ** 2]), np.array([1 + m + m ** 2, m - 1, 1 - m ** 2]), np.array([1 + m + m ** 2, 1 - m, m ** 2 - 1]), np.array([1 + m + m ** 2, m - 1, m ** 2 - 1])] # compute the edges in each possible direction def create_edges(lxyz, nn, l1dist=1, left=np.array([]), right=np.array([]), weights=np.array([])): q = 0 for nn_row in nn: v1 = np.dot(lxyz, nn_row) o1 = np.argsort(v1) sv1 = v1[o1] nz = np.squeeze(np.nonzero(sv1[: - 1] - sv1[1:] == - l1dist)) o1z, o1z1 = o1[nz], o1[nz + 1] left = np.hstack((left, o1z, o1z1)) right = np.hstack((right, o1z1, o1z)) q += 2 * np.size(nz) weights = np.hstack((weights, np.sqrt(l1dist) * np.ones(q))) return left, right, weights i, j, d = create_edges(lxyz, n6, 1.) if k >= 18: i, j, d = create_edges(lxyz, n18, 2, i, j, d) if k == 26: i, j, d = create_edges(lxyz, n26, 3, i, j, d) i, j = i.astype(np.int), j.astype(np.int) # reorder the edges to have a more standard order order = np.argsort(i + j * (len(i) + 1)) i, j, d = i[order], j[order], d[order] return i, j, d def wgraph_from_3d_grid(xyz, k=18): """Create graph as the set of topological neighbours of the three-dimensional coordinates set xyz, in the k-connectivity scheme Parameters ---------- xyz: array of shape (nsamples, 3) and type np.int, k = 18: the number of neighbours considered. 
(6, 18 or 26) Returns ------- the WeightedGraph instance """ if xyz.shape[1] != 3: raise ValueError('xyz should have shape n * 3') if k not in [6, 18, 26]: raise ValueError('k should be equal to 6, 18 or 26') i, j, d = graph_3d_grid(xyz, k) edges = np.vstack((i, j)).T return WeightedGraph(xyz.shape[0], edges, d) def concatenate_graphs(G1, G2): """Returns the concatenation of the graphs G1 and G2 It is thus assumed that the vertices of G1 and G2 represent disjoint sets Parameters ---------- G1, G2: the two WeightedGraph instances to be concatenated Returns ------- G, WeightedGraph, the concatenated graph Notes ----- This implies that the vertices of G corresponding to G2 are labeled [G1.V .. G1.V+G2.V] """ V = G1.V + G2.V edges = np.vstack((G1.edges, G1.V + G2.edges)) weights = np.hstack((G1.weights, G2.weights)) G = WeightedGraph(V, edges, weights) return G class WeightedGraph(Graph): """Basic weighted, directed graph class Member variables: * V (int): the number of vertices * E (int): the number of edges Methods * vertices (list, type=int, shape=(V,)): vertices id * edges (list, type=int, shape=(E,2)): edges as vertices id tuples * weights (list, type=int, shape=(E,)): weights / lengths of the graph's edges """ ### Constructor def __init__(self, V, edges=None, weights=None): """ Constructor Parameters ---------- V : int (int > 0) the number of vertices edges : (E, 2) array, type int edges of the graph weights : (E, 2) array, type=int weights/lenghts of the edges """ Graph.__init__(self, V, edges=edges) if isinstance(weights, None.__class__): new_weights = [] else: new_weights = weights self.set_weights(new_weights) def set_weights(self, weights): """ Set edge weights Parameters ---------- weights: array array shape(self.V): edges weights """ if np.size(weights) != self.E: raise ValueError('The weight size is not the edges size') else: self.weights = np.reshape(weights, (self.E)) def get_weights(self): return self.weights def from_3d_grid(self, xyz, k=18): """Sets the graph to be the topological neighbours graph of the three-dimensional coordinates set xyz, in the k-connectivity scheme Parameters ---------- xyz: array of shape (self.V, 3) and type np.int, k = 18: the number of neighbours considered. (6, 18 or 26) Returns ------- E(int): the number of edges of self """ if xyz.shape[0] != self.V: raise ValueError('xyz should have shape n * 3, with n = self.V') if xyz.shape[1] != 3: raise ValueError('xyz should have shape n * 3') graph = graph_3d_grid(xyz, k) if graph is not None: i, j, d = graph else: raise TypeError('Creating graph from grid failed. '\ 'Maybe the grid is too big') self.E = np.size(i) self.edges = np.zeros((self.E, 2), np.int) self.edges[:, 0] = i self.edges[:, 1] = j self.weights = np.array(d) return self.E def cut_redundancies(self): """ Returns a graph with redundant edges removed: ecah edge (ab) is present ony once in the edge matrix: the correspondng weights are added. Returns ------- the resulting WeightedGraph """ A = self.to_coo_matrix().tocsr().tocoo() return wgraph_from_coo_matrix(A) def dijkstra(self, seed=0): """ Returns all the [graph] geodesic distances starting from seed x Parameters ---------- seed (int, >-1, for each vertex a, sum{edge[e, 0]=a} D[e]=1 c == 1 => for each vertex b, sum{edge[e, 1]=b} D[e]=1 c == 2 => symmetric ('l2') normalization Notes ----- Note that when sum_{edge[e, .] 
== a } D[e] = 0, nothing is performed """ from scipy.sparse import dia_matrix c = int(c) if not c in [0, 1, 2]: raise ValueError('c must be equal to 0, 1 or 2') if self.E == 0: if c < 2: return np.zeros(self.V) else: return np.zeros(self.V), np.zeros(self.V) adj = self.to_coo_matrix().tocsr() s1 = adj.sum(0) s2 = adj.sum(1) if c == 1: s = dia_matrix((1. / s1, 0), shape=(self.V, self.V)) adj = adj * s self.weights = wgraph_from_adjacency(adj).get_weights() return np.asarray(s1) if c == 0: s = dia_matrix((1. / s2.T, 0), shape=(self.V, self.V)) adj = s * adj self.weights = wgraph_from_adjacency(adj).get_weights() return np.asarray(s2) if c == 2: s1 = dia_matrix((1. / np.sqrt(s1), 0), shape=(self.V, self.V)) s2 = dia_matrix((1. / np.sqrt(adj.sum(1)), 0), shape=(self.V, self.V)) adj = (s1 * adj) * s2 self.weights = wgraph_from_adjacency(adj).get_weights() return np.asarray(s1), np.asarray(s2) def set_euclidian(self, X): """ Compute the weights of the graph as the distances between the corresponding rows of X, which represents an embdedding of self Parameters ---------- X array of shape (self.V, edim), the coordinate matrix of the embedding """ if np.size(X) == X.shape[0]: X = np.reshape(X, (np.size(X), 1)) if X.shape[0] != self.V: raise ValueError('X.shape[0] != self.V') if self.E > 0: d = np.sum((X[self.edges[:, 0]] - X[self.edges[:, 1]]) ** 2, 1) self.weights = np.sqrt(d) def set_gaussian(self, X, sigma=0): """ Compute the weights of the graph as a gaussian function of the distance between the corresponding rows of X, which represents an embdedding of self Parameters ---------- X array of shape (self.V, dim) the coordinate matrix of the embedding sigma=0, float: the parameter of the gaussian function Notes ----- When sigma == 0, the following value is used: ``sigma = sqrt(mean(||X[self.edges[:, 0], :]-X[self.edges[:, 1], :]||^2))`` """ sigma = float(sigma) if sigma < 0: raise ValueError('sigma should be positive') self.set_euclidian(X) d = self.weights if sigma == 0: sigma = (d ** 2).mean() w = np.exp(- (d ** 2) / (2 * sigma)) self.weights = w def symmeterize(self): """Symmeterize self, modify edges and weights so that self.adjacency becomes the symmetric part of the current self.adjacency. """ A = self.to_coo_matrix() symg = wgraph_from_adjacency((A + A.T) / 2) self.E = symg.E self.edges = symg.edges self.weights = symg.weights return self def anti_symmeterize(self): """anti-symmeterize self, i.e. 
produces the graph whose adjacency matrix would be the antisymmetric part of its current adjacency matrix """ A = self.to_coo_matrix() symg = wgraph_from_adjacency((A - A.T) / 2) self.E = symg.E self.edges = symg.edges self.weights = symg.weights return self.E def voronoi_labelling(self, seed): """ Performs a voronoi labelling of the graph Parameters ---------- seed: array of shape (nseeds), type (np.int), vertices from which the cells are built Returns ------- labels: array of shape (self.V) the labelling of the vertices """ import heapq if hasattr(seed, '__iter__') == False: seed = [seed] try: if (self.weights < 0).any(): raise ValueError('some weights are non-positive') except: raise ValueError('undefined weights') dist, active = np.inf * np.ones(self.V), np.ones(self.V) label = - np.ones(self.V, np.int) idx, neighb, weight = self.compact_neighb() dist[seed] = 0 label[seed] = np.arange(len(seed)) dg = zip(np.zeros_like(seed), seed) heapq.heapify(dg) for j in range(self.V): end = False while True: if len(dg) == 0: end = True break node = heapq.heappop(dg) if active[node[1]]: break if end: break dwin, win = node active[win] = False # the folllowing loop might be vectorized for i in range(idx[win], idx[win + 1]): l, newdist = neighb[i], dwin + weight[i] if newdist < dist[l]: heapq.heappush(dg, (newdist, l)) dist[l] = newdist label[l] = label[win] return label def cliques(self): """ Extraction of the graphe cliques these are defined using replicator dynamics equations Returns ------- cliques: array of shape (self.V), type (np.int) labelling of the vertices according to the clique they belong to """ if (self.weights < 0).any(): raise ValueError('cliques definition require a positive graph') cliques, size = - np.ones(self.V), np.zeros(self.V) adj = self.to_coo_matrix() for k in range(self.V): u = cliques < 0 w = np.zeros_like(u) # replicator dynamics iterations for q in range(self.V): w = u.copy() u = (adj * u) * w if u.sum() == 0: break u /= u.sum() if ((w - u) ** 2).sum() < 1.e-12: break # threshold the result threshold = 1. / max(2., 1. * np.sum(cliques == - 1)) cliques[u > threshold] = k if np.sum(u > threshold) == 0: break size[k] = np.sum(u > threshold) if cliques.min() > - 1: break # sort the labels size = size[size > 0] order = np.argsort(- size) label = cliques.copy() for k, vv in enumerate(order): cliques[label == vv] = k return cliques def remove_trivial_edges(self): """ Removes trivial edges, i.e. 
edges that are (vv)-like self.weights and self.E are corrected accordingly Returns ------- self.E (int): The number of edges """ if self.E > 0: valid = self.edges[:, 0] != self.edges[:, 1] self.edges = self.edges[valid] self.weights = self.weights[valid] self.E = np.sum(valid) return self.E def subgraph(self, valid): """ Creates a subgraph with the vertices for which valid>0 and with the correponding set of edges Parameters ---------- valid, array of shape (self.V): nonzero for vertices to be retained Returns ------- G, WeightedGraph instance, the desired subgraph of self Notes ----- The vertices are renumbered as [1..p] where p = sum(valid>0) when sum(valid==0) then None is returned """ if np.size(valid) != self.V: raise ValueError("incompatible size for self anf valid") if np.sum(valid > 0) == 0: return None if self.E > 0: win_edges = (valid[self.edges]).min(1) > 0 edges = self.edges[win_edges] weights = self.weights[win_edges] renumb = np.hstack((0, np.cumsum(valid > 0))) edges = renumb[edges] G = WeightedGraph(np.sum(valid > 0), edges, weights) else: G = WeightedGraph(np.sum(valid > 0)) return G def kruskal(self): """ Creates the Minimum Spanning Tree of self using Kruskal's algo. efficient is self is sparse Returns ------- K, WeightedGraph instance: the resulting MST Notes ----- If self contains several connected components, will have the same number k of connected components """ k = self.cc().max() + 1 E = 2 * self.V - 2 V = self.V Kedges = np.zeros((E, 2)).astype(np.int) Kweights = np.zeros(E) iw = np.argsort(self.weights) label = np.arange(V) j = 0 for i in range(V - k): a, b = self.edges[iw[j]] d = self.weights[iw[j]] while label[a] == label[b]: j = j + 1 a, b = self.edges[iw[j]] d = self.weights[iw[j]] if label[a] != label[b]: lb = label[b] label[label == lb] = label[a] Kedges[2 * i] = np.array([a, b]) Kedges[2 * i + 1] = np.array([b, a]) Kweights[2 * i: 2 * i + 2] = d K = WeightedGraph(V, Kedges, Kweights) return K def voronoi_diagram(self, seeds, samples): """ Defines the graph as the Voronoi diagram (VD) that links the seeds. The VD is defined using the sample points. Parameters ---------- seeds: array of shape (self.V, dim) samples: array of shape (nsamples, dim) Notes ----- By default, the weights are a Gaussian function of the distance The implementation is not optimal """ from bipartite_graph import cross_knn # checks if seeds.shape[0] != self.V: raise ValueError("The numberof seeds is not as expected") if np.size(seeds) == self.V: seeds = np.reshape(seeds, (np.size(seeds), 1)) if np.size(samples) == samples.shape[0]: samples = np.reshape(samples, (np.size(samples), 1)) if seeds.shape[1] != samples.shape[1]: raise ValueError("The seeds and samples do not belong \ to the same space") #1. define the graph knn(samples, seeds, 2) j = cross_knn(samples, seeds, 2).edges[:, 1] #2. put all the pairs i the target graph Ns = np.shape(samples)[0] self.E = Ns self.edges = np.array( [j[2 * np.arange(Ns)], j[2 * np.arange(Ns) + 1]]).T self.weights = np.ones(self.E) #3. eliminate the redundancies and set the weights self.cut_redundancies() self.symmeterize() self.set_gaussian(seeds) def show(self, X=None, ax=None): """ Plots the current graph in 2D Parameters ---------- X : None or array of shape (self.V, 2) a set of coordinates that can be used to embed the vertices in 2D. If X.shape[1]>2, a svd reduces X for display. By default, the graph is presented on a circle ax: None or int, optional ax handle Returns ------- ax: axis handle Notes ----- This should be used only for small graphs. 
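Examples
--------
A hypothetical call, assuming a coordinate array ``x`` of shape (V, 2) is available for the vertices::

    g = knn(x, 5)
    g.show(x)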
""" if np.size(self.weights) == 0: return Graph.show() wm = self.weights.max() import matplotlib.pylab as mp if ax == None: mp.figure() ax = mp.subplot(1, 1, 1) ml = 5. if (X == None): for e in range(self.E): A = (self.edges[e, 0] * 2 * np.pi) / self.V B = (self.edges[e, 1] * 2 * np.pi) / self.V C = max(1, int(self.weights[e] * ml / wm)) mp.plot([np.cos(A), np.cos(B)], [np.sin(A), np.sin(B)], 'k', linewidth=C) t = (2 * np.pi * np.arange(self.V)) / self.V mp.plot(np.cos(t), np.sin(t), 'o', linewidth=ml) mp.axis([-1.1, 1.1, -1.1, 1.1]) return ax if (X.shape[0] != self.V): raise ValueError('X.shape(0)!=self.V') if np.size(X) == self.V: X = np.reshape(X, (self.V, 1)) if X.shape[1] == 1: # plot the graph on a circle x = np.pi * (X - X.min()) / (X.max() - X.min()) for e in range(self.E): A = x[self.edges[e, 0]] B = x[self.edges[e, 1]] C = max(1, int(self.weights[e] * ml / wm)) mp.plot([np.cos(A), np.cos(B)], [np.sin(A), np.sin(B)], 'k', linewidth=C) mp.plot(np.cos(x), np.sin(x), 'o', linewidth=ml) mp.axis([-1.1, 1.1, -0.1, 1.1]) if X.shape[1] > 2: Y = X.copy() from numpy.linalg import svd M1, M2, M3 = svd(Y, 0) Y = np.dot(M1, np.diag(M2)) Y = Y[:, :1] if X.shape[1] < 3: Y = X if Y.shape[1] == 2: for e in range(self.E): A = self.edges[e, 0] B = self.edges[e, 1] C = max(1, int(self.weights[e] * ml / wm)) mp.plot([Y[A, 0], Y[B, 0]], [Y[A, 1], Y[B, 1]], 'k', linewidth=C) mp.plot(Y[:, 0], Y[:, 1], 'o', linewidth=ml) xmin, xmax = Y[:, 0].min(), Y[:, 0].max() ymin, ymax = Y[:, 1].min(), Y[:, 1].max() xmin = 1.1 * xmin - 0.1 * xmax xmax = 1.1 * xmax - 0.1 * xmin ymin = 1.1 * ymin - 0.1 * ymax ymax = 1.1 * ymax - 0.1 * ymin mp.axis([xmin, xmax, ymin, ymax]) return ax def remove_edges(self, valid): """ Removes all the edges for which valid==0 Parameters ---------- valid : (self.E,) array """ if np.size(valid) != self.E: raise ValueError("the input vector does not have the correct size") valid = np.reshape(valid, np.size(valid)) self.E = int(valid.sum()) self.edges = self.edges[valid != 0] self.weights = self.weights[valid != 0] def list_of_neighbors(self): """ returns the set of neighbors of self as a list of arrays """ return self.to_coo_matrix().tolil().rows.tolist() def copy(self): """ returns a copy of self """ G = WeightedGraph(self.V, self.edges.copy(), self.weights.copy()) return G def left_incidence(self): """ Return left incidence matrix Returns ------- left_incid: list the left incidence matrix of self as a list of lists: i.e. the list[[e.0.0, .., e.0.i(0)], .., [e.V.0, E.V.i(V)]] where e.i.j is the set of edge indexes so that e.i.j[0] = i """ linc = [] for i in range(self.V): linc.append([]) for e in range(self.E): i = self.edges[e, 0] a = linc[i] a.append(e) return linc def right_incidence(self): """ Return right incidence matrix Returns ------- right_incid: list the right incidence matrix of self as a list of lists: i.e. 
the list[[e.0.0, .., e.0.i(0)], .., [e.V.0, E.V.i(V)]] where e.i.j is the set of edge indexes so that e.i.j[1] = i """ rinc = [] for i in range(self.V): rinc.append([]) for e in range(self.E): i = self.edges[e, 1] a = rinc[i] a.append(e) return rinc def is_connected(self): """ States whether self is connected or not """ if self.V < 1: raise ValueError("empty graph") if self.V < 2: return True if self.E == 0: return False cc = self.cc() return int(cc.max() == 0) def to_coo_matrix(self): """ Return adjacency matrix as coo sparse Returns ------- sp: scipy.sparse matrix instance that encodes the adjacency matrix of self """ if self.E > 0: i, j = self.edges.T sm = coo_matrix((self.weights, (i, j)), shape=(self.V, self.V)) else: sm = coo_matrix((self.V, self.V)) return sm nipy-0.3.0/nipy/algorithms/graph/setup.py000066400000000000000000000007451210344137400204410ustar00rootroot00000000000000#!/usr/bin/env python import numpy def configuration(parent_package='', top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('graph', parent_package, top_path) config.add_subpackage('tests') config.add_extension('_graph', sources=['_graph.c'], include_dirs=[numpy.get_include()]) return config if __name__ == '__main__': print('This is the wrong setup.py file to run') nipy-0.3.0/nipy/algorithms/graph/tests/000077500000000000000000000000001210344137400200635ustar00rootroot00000000000000nipy-0.3.0/nipy/algorithms/graph/tests/__init__.py000066400000000000000000000000501210344137400221670ustar00rootroot00000000000000# Init to make test directory a package nipy-0.3.0/nipy/algorithms/graph/tests/test_bipartite_graph.py000066400000000000000000000052541210344137400246460ustar00rootroot00000000000000#!/usr/bin/env python import numpy as np import numpy.random as nr from unittest import TestCase from ..bipartite_graph import (BipartiteGraph, cross_knn, cross_eps, check_feature_matrices) def basicdata(): x = np.array( [[-1.998,-2.024], [-0.117,-1.010], [1.099,-0.057], [ 1.729,-0.252], [1.003,-0.021], [1.703,-0.739], [-0.557,1.382],[-1.200,-0.446],[-0.331,-0.256], [-0.800,-1.584]]) return x def test_feature_matrices(): """ test that feature matrices are correctly checked """ x, y = nr.rand(10, 1), nr.rand(12) check_feature_matrices(x, y) check_feature_matrices(y, x) check_feature_matrices(x, x) check_feature_matrices(y, y) def test_cross_knn_1(): """ test the construction of k-nn bipartite graph """ x = basicdata() G = cross_knn(x, x, 2) assert (G.E == 20) def test_cross_knn_2(): """ test the construction of k-nn bipartite graph """ x = basicdata() G = cross_knn(x, x, 1) assert (G.E == 10) def test_cross_eps_1(): """ test the construction of eps-nn bipartite graph """ x = basicdata() y = x + 0.1 * nr.randn(x.shape[0], x.shape[1]) G = cross_eps(x, y, 1.) 
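# cross_eps links each row of x to the rows of y lying within eps (here 1.) in
# Euclidean distance, so every edge weight of G is expected to be below 1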
D = G.weights assert((D < 1).all()) def test_copy(): """ test that the weighted graph copy is OK """ x = basicdata() G = cross_knn(x, x, 2) K = G.copy() assert K.edges.shape == (20, 2) def test_subraph_left(): """ Extraction of the 'left subgraph' """ x = basicdata() g = cross_knn(x, x, 2) valid = np.arange(10) < 7 sl = g.subgraph_left(valid) assert sl.V == 7 assert sl.W == 10 assert sl.edges[:, 0].max() == 6 def test_subraph_left2(): """ Extraction of the 'left subgraph', without renumb=False """ x = basicdata() g = cross_knn(x, x, 2) valid = np.arange(10) < 7 sl = g.subgraph_left(valid, renumb=False) assert sl.V == 10 assert sl.W == 10 assert sl.edges[:, 0].max() == 6 def test_subraph_right(): """ Extraction of the 'right subgraph' """ x = basicdata() g = cross_knn(x, x, 2) valid = np.arange(10) < 7 sr = g.subgraph_right(valid) assert sr.W == 7 assert sr.V == 10 assert sr.edges[:, 1].max() == 6 def test_subraph_right2(): """ Extraction of the 'right subgraph', with renumb = False """ x = basicdata() g = cross_knn(x, x, 2) valid = np.arange(10) < 7 sr = g.subgraph_right(valid, renumb = False) assert sr.W == 10 assert sr.V == 10 assert sr.edges[:, 1].max() == 6 if __name__ == '__main__': import nose nose.run(argv=['', __file__]) nipy-0.3.0/nipy/algorithms/graph/tests/test_field.py000066400000000000000000000210611210344137400225570ustar00rootroot00000000000000#!/usr/bin/env python import numpy as np from ..field import (field_from_coo_matrix_and_data, field_from_graph_and_data) from ..graph import wgraph_from_3d_grid from nose.tools import assert_true, assert_equal from numpy.testing import TestCase, assert_array_equal def basic_field(nx=10, ny=10, nz=10): xyz = np.reshape(np.indices((nx, ny, nz)), (3, nx * ny * nz)).T data = np.sum(xyz, 1).astype('d') F = field_from_graph_and_data(wgraph_from_3d_grid(xyz, 26), data) return F def basic_field_random(nx=10, ny=10, nz=1): import numpy.random as nr xyz = np.reshape(np.indices((nx, ny, nz)), (3, nx * ny * nz)).T data = 0.5 * nr.randn(nx * ny * nz, 1) + np.sum(xyz, 1).astype('d') F = field_from_graph_and_data(wgraph_from_3d_grid(xyz, 26), data) return F def basic_field_2(nx=10, ny=10, nz=10): xyz = np.reshape(np.indices((nx, ny, nz)), (3, nx * ny * nz)).T toto = xyz - np.array([5, 5, 5]) data = np.sum(toto ** 2, 1) F = field_from_graph_and_data(wgraph_from_3d_grid(xyz, 26), data) return F def basic_field_3(nx=10, ny=10, nz=10): xyz = np.reshape(np.indices((nx, ny, nz)), (3, nx * ny * nz)).T toto = xyz - np.array([5, 5, 5]) data = np.abs(np.sum(toto ** 2, 1) - 11 ) F = field_from_graph_and_data(wgraph_from_3d_grid(xyz, 26), data) return F def basic_graph(nx=10, ny=10, nz=10): xyz = np.reshape(np.indices((nx, ny, nz)), (3, nx * ny * nz)).T data = np.zeros(xyz.shape[0]) F = field_from_graph_and_data(wgraph_from_3d_grid(xyz, 26), data) return F class test_Field(TestCase): def test_max_1(self): F = basic_field() F.field[555] = 30 depth = F.local_maxima() dep = np.zeros(1000, np.int) dep[555] = 5 dep[999] = 3 assert_true(sum(np.absolute(dep-depth))<1.e-7) def test_max_2(self): F = basic_field() F.field[555] = 28 idx,depth = F.get_local_maxima() self.assert_(len(idx) == 2) self.assert_(np.alltrue( idx == (555, 999))) self.assert_(np.alltrue( depth == (5, 3))) def test_max_3(self): F = basic_field() F.field[555] = 27 idx, depth = F.get_local_maxima() assert_true(np.size(idx) == 2) assert_true(idx[0] == 555) assert_true(idx[1] == 999) assert_true(depth[0] == 5) assert_true(depth[1] == 5) def test_max_4(self): F = basic_field() F.field[555] = 28 idx, depth 
= F.get_local_maxima(0, 27.5) assert_true(np.size(idx) == 1) assert_true(idx[0] == 555) assert_true(depth[0] == 1) def test_smooth_1(self): G = basic_graph() field = np.zeros((1000,1)) field[555,0] = 1 G.set_field(field) G.diffusion() sfield = G.get_field() OK1 = (sfield[555]==0) OK2 = (sfield[554]==1) OK3 = (np.absolute(sfield[566]-np.sqrt(2))<1.e-7) OK4 = (np.absolute(sfield[446]-np.sqrt(3))<1.e-7) OK = OK1 & OK2 & OK3 & OK4 self.assert_(OK) def test_smooth_2(self): G = basic_graph() field = np.zeros((1000,1)) field[555,0] = 1 G.set_field(field) G.diffusion(1) sfield = G.get_field() OK1 = (sfield[555]==0) OK2 = (sfield[554]==1) OK3 = (np.absolute(sfield[566]-np.sqrt(2))<1.e-7) OK4 = (np.absolute(sfield[446]-np.sqrt(3))<1.e-7) OK = OK1 & OK2 & OK3 & OK4 self.assert_(OK) def test_dilation(self): F = basic_field() F.field[555] = 30 F.field[664] = 0 F.dilation(2) assert_true(F.field[737] == 30) assert_true(F.field[0] == 6) assert_true(F.field[999] == 27) assert_true(F.field[664] == 30) def test_dilation2(self): # test equality of cython and python versions F = basic_field() F.field[555] = 30 F.field[664] = 0 h = F.copy() h.dilation(2) g = F.copy() g.dilation(2, False) assert_array_equal(h.field, g.field) def test_erosion(self): F = basic_field() F.field[555] = 30 F.field[664] = 0 F.erosion(2) field = F.get_field() assert_true(field[737] == 11) assert_true(field[0] == 0) assert_true(field[999] == 21) assert_true(field[664] == 0) def test_opening(self): F = basic_field() F.field[555] = 30 F.field[664] = 0 F.opening(2) field = F.get_field() assert_true(field[737] == 17) assert_true(field[0] == 0) assert_true(field[999] == 21) assert_true(field[555] == 16) def test_closing(self): F = basic_field() F.field[555] = 30 F.field[664] = 0 F.closing(2) field = F.get_field() assert_true(field[737] == 17) assert_true(field[0] == 6) assert_true(field[999] == 27) assert_true(field[555] == 30) def test_watershed_1(self): F = basic_field() F.field[555] = 28 F.field[664] = 0 idx, label = F.custom_watershed() assert_equal(np.size(idx), 2) assert_equal(tuple(idx), (555, 999)) assert_equal((label[776], label[666], label[123]), (1, 0, 0)) def test_watershed_4(self): F = basic_field_3() idx, label = F.custom_watershed() assert_true(np.size(idx) == 9) assert_true(np.unique( [label[555], label[0], label[9], label[90], label[99], label[900], label[909], label[990], label[999]]) .size == 9) def test_watershed_2(self): F = basic_field_2() F.field[555] = 10 F.field[664] = 0 idx, label = F.custom_watershed() assert_true(np.size(idx)==9) def test_watershed_3(self): F = basic_field_2() F.field[555] = 10 F.field[664] = 0 idx, label = F.custom_watershed(0,11) assert_true(np.size(idx)==8) def test_bifurcations_1(self): F = basic_field() idx, parent,label = F.threshold_bifurcations() assert_true(idx == 999) assert_true(parent == 0) def test_bifurcations_2(self): F = basic_field_2() idx, parent, label = F.threshold_bifurcations() assert_true(np.size(idx) == 15) def test_geodesic_kmeans(self, nbseeds=3): # Test the geodisc k-means algorithm import numpy.random as nr F = basic_field_random(5, 5, 1) seeds = np.argsort(nr.rand(F.V))[:nbseeds] seeds, label, inertia = F.geodesic_kmeans(seeds) for i in range(nbseeds): assert_true(label[seeds[i]] == i) print np.unique(label), np.arange(nbseeds) assert_true(np.array([i in np.unique(label) for i in np.arange(nbseeds)]).all()) def test_constrained_voronoi(self, nbseeds=3): # Test the geodisc k-means algorithm import numpy.random as nr F = basic_field_random() seeds = 
np.argsort(nr.rand(F.V))[:nbseeds] label = F.constrained_voronoi(seeds) for i in range(nbseeds): assert_true(label[seeds[i]] == i) assert_true(np.array([i in np.unique(label) for i in np.arange(nbseeds)]).all()) def test_constrained_voronoi_2(self, nbseeds=3): # Test the geodisc k-means algorithm xyz, x = np.zeros((30, 3)), np.arange(30) xyz[:, 0] = x y = np.array((x // 10), np.float) F = field_from_graph_and_data(wgraph_from_3d_grid(xyz, 6), y) seeds = np.array([1, 18, 25]) label = F.constrained_voronoi(seeds) assert_array_equal(label, x // 10) def test_subfield(self): import numpy.random as nr F = basic_field_random() valid = nr.rand(F.V)>0.1 sf = F.subfield(valid) self.assert_(sf.V==np.sum(valid)) def test_subfield2(self): F = basic_field_random() valid = np.zeros(F.V) sf = F.subfield(valid) self.assert_(sf==None) def test_ward1(self): F = basic_field_random() Lab, J = F.ward(10) self.assert_(Lab.max()==9) def test_ward2(self): F = basic_field_random() Lab, J1 = F.ward(5) Lab, J2 = F.ward(10) self.assert_(J1>J2) def test_field_from_coo_matrix(self): import scipy.sparse as sps V = 10 a = np.random.rand(V, V)>.9 fi = field_from_coo_matrix_and_data(sps.coo_matrix(a), a) print fi.E , a.sum() self.assert_(fi.E==a.sum()) if __name__ == '__main__': import nose nose.run(argv=['', __file__]) nipy-0.3.0/nipy/algorithms/graph/tests/test_forest.py000066400000000000000000000066531210344137400230100ustar00rootroot00000000000000import numpy as np from ..forest import Forest def simple_forest(): """ generate a simple forest """ parents = np.array([2, 2, 4, 4, 4]) F = Forest(5, parents) return F def test_forest(): """ test creation of forest object """ F = simple_forest() assert F.E == 8 assert F.cc().max() == 0 def test_forest_trivial(): """ test creation of forest object """ F = Forest(5) assert F.E == 0 assert (F.cc() == np.arange(5)).all() def test_children(): """ test that we obtain children """ sf = simple_forest() ch = sf.get_children() assert len(ch) == 5 assert ch[0] == [] assert ch[1] == [] assert ch[2] == [0, 1] assert ch[3] == [] assert ch[4] == [2, 3] def test_descendants(): """ test the get_descendants() method """ sf = simple_forest() assert sf.get_descendants(0) == [0] assert sf.get_descendants(1) == [1] assert sf.get_descendants(2) == [0, 1, 2] assert sf.get_descendants(4) == [0, 1, 2, 3, 4] def test_root(): """ test the isroot() method """ root = simple_forest().isroot() assert root[4] == True assert root.sum() == 1 def test_merge_simple_branches(): """ test the merge_simple_branches() method """ f = Forest(5, np.array([2, 2, 4, 4, 4])).merge_simple_branches() assert f.V == 5 f = Forest(5, np.array([1, 2, 4, 4, 4])).merge_simple_branches() assert f.V == 3 def test_all_distances(): """ test the all_distances() methods """ f = simple_forest() dg = f.all_distances() print dg assert dg[0, 3] == 3. assert dg.max() == 3. assert dg.min() == 0. assert dg.shape == (5, 5) dg = f.all_distances(1) assert dg[3] == 3. 
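# Illustrative sketch of the same fixture (not an executed test): with
# simple_forest(), parents = [2, 2, 4, 4, 4], so vertex 4 is the only root,
# vertices 0, 1 and 3 are the leaves, and
#
#     f = simple_forest()
#     f.depth_from_leaves()   # -> array([0, 0, 1, 0, 2])
#     f.tree_depth()          # -> 3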
def test_depth(): """ test the depth_from_leaves() methods """ f = simple_forest() depth = f.depth_from_leaves() assert depth[0] == 0 assert depth[1] == 0 assert depth[3] == 0 assert depth[2] == 1 assert depth[4] == 2 def test_reorder(): """ test the reorder_from_leaves_to_roots() method """ f = simple_forest() order = f.reorder_from_leaves_to_roots() assert (f.depth_from_leaves() == np.array([0, 0, 0, 1, 2])).all() assert (order == np.array([0, 1, 3, 2, 4])).all() def test_leaves(): """ test the leaves_of_a_subtree() method """ f = simple_forest() assert f.leaves_of_a_subtree([0, 1]) == True assert f.leaves_of_a_subtree([0, 3]) == False assert f.leaves_of_a_subtree([1, 3]) == False assert f.leaves_of_a_subtree([0, 1, 3]) == True assert f.leaves_of_a_subtree([1]) == True def test_depth(): """ Test the tree_depth() method """ f = simple_forest() assert f.tree_depth() == 3 def test_upward_and(): """ test the propagate_upward_and() method """ f = simple_forest() assert(f.propagate_upward_and([0, 1, 0, 1, 0]) == [0, 1, 0, 1, 0]).all() assert(f.propagate_upward_and([0, 1, 1, 1, 0]) == [0, 1, 0, 1, 0]).all() assert(f.propagate_upward_and([0, 1, 1, 1, 1]) == [0, 1, 0, 1, 0]).all() assert(f.propagate_upward_and([1, 1, 0, 1, 0]) == [1, 1, 1, 1, 1]).all() def test_upward(): """ test the propagate_upward() method """ f = simple_forest() assert(f.propagate_upward([0, 0, 1, 3, 1]) == [0, 0, 0, 3, 1]).all() assert(f.propagate_upward([0, 0, 5, 0, 2]) == [0, 0, 0, 0, 0]).all() if __name__ == "__main__": import nose nose.run(argv=['', __file__]) nipy-0.3.0/nipy/algorithms/graph/tests/test_graph.py000066400000000000000000000355231210344137400226050ustar00rootroot00000000000000#!/usr/bin/env python import numpy as np import numpy.random as nr from unittest import TestCase from ..graph import (WeightedGraph, complete_graph, mst, knn, eps_nn, wgraph_from_adjacency, wgraph_from_coo_matrix, concatenate_graphs, wgraph_from_3d_grid) def basicdata(): x = np.array( [[-1.998,-2.024], [-0.117,-1.010], [1.099,-0.057], [ 1.729,-0.252], [1.003,-0.021], [1.703,-0.739], [-0.557,1.382],[-1.200,-0.446],[-0.331,-0.256], [-0.800,-1.584]]) return x def basic_graph(): l = np.linspace(0, 2 * np.pi, 20, endpoint=False) x = np.column_stack((np.cos(l), np.sin(l))) G = knn(x, 2) return G def basic_graph_2(): l = np.linspace(0, 2 * np.pi, 20, endpoint=False) x = np.column_stack((np.cos(l), np.sin(l))) G = knn(x, 2) return G, x class test_Graph(TestCase): def test_complete(self): v = 10 G = complete_graph(v) a = G.get_edges()[:, 0] b = G.get_edges()[:, 1] inds = np.indices((v, v)).reshape( (2, v * v) ) self.assert_( ( inds == (a, b) ).all() ) def test_knn_1(self): x = basicdata() G = knn(x, 1) A = G.get_edges()[:,0] OK = (np.shape(A)[0] == (14)) self.assert_(OK) def test_set_euclidian(self): G,x = basic_graph_2() d = G.weights G.set_euclidian(x / 10) D = G.weights OK = np.allclose(D, d / 10, 1e-7) self.assert_(OK) def test_set_gaussian(self): G,x = basic_graph_2() d = G.weights G.set_gaussian(x, 1.0) D = G.weights OK = np.allclose(D, np.exp(- d * d / 2), 1e-7) self.assert_(OK) def test_set_gaussian_2(self): G,x = basic_graph_2() d = G.weights G.set_gaussian(x) D = G.weights sigma = sum(d * d) / len(d) OK = np.allclose(D, np.exp(-d * d / (2 * sigma)), 1e-7) self.assert_(OK) def test_eps_1(self): x = basicdata() G = eps_nn(x, 1.) 
D = G.weights OK = (np.size(D) == 16) self.assert_(OK) OK = (D < 1).all() self.assert_(OK) def test_mst_1(self): x = basicdata() G = mst(x) D = G.weights OK = (np.size(D) == 18) self.assert_(OK) def test_3d_grid(self): """test the 6nn graph """ x0 = np.array([0, 0, 0]) x1 = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1], [-1, 0, 0], [0, -1, 0], [0, 0, -1]]) x2 = np.array([[1, 1, 0], [0, 1, 1], [1, 0, 1], [1, -1, 0], [0, 1, -1], [1, 0, -1], [-1, 1, 0], [0, -1, 1], [-1, 0, 1], [-1, -1, 0], [-1, 0, -1], [0, -1, -1]]) x3 = np.array([[1, 1, 1], [1, 1, -1], [1, -1, 1], [1, -1, -1], [-1, 1, 1], [-1, 1, -1], [-1, -1, 1], [-1, -1, -1]]) for x in x1: xyz = np.vstack((x0, x)) assert wgraph_from_3d_grid(xyz, 6).E == 2 assert wgraph_from_3d_grid(xyz, 18).E == 2 assert wgraph_from_3d_grid(xyz, 26).E == 2 for x in x2: xyz = np.vstack((x0, x)) assert wgraph_from_3d_grid(xyz, 6).E == 0 assert wgraph_from_3d_grid(xyz, 18).E == 2 assert wgraph_from_3d_grid(xyz, 26).E == 2 for x in x3: xyz = np.vstack((x0, x)) assert wgraph_from_3d_grid(xyz, 6).E == 0 assert wgraph_from_3d_grid(xyz, 18).E == 0 assert wgraph_from_3d_grid(xyz, 26).E == 2 def test_grid_3d_1(self): """ Test the 6 nn graphs on 3d grid """ nx, ny, nz = 9, 6, 1 xyz = np.mgrid[0:nx, 0:ny, 0:nz] xyz = np.reshape(xyz, (3, nx * ny * nz)).T G = wgraph_from_3d_grid(xyz, 6) self.assert_(G.E == 186) def test_grid_3d_2(self): """ Test the 18-nn graph on a 3d grid """ nx, ny, nz = 9, 6, 1 xyz = np.mgrid[0:nx, 0:ny, 0:nz] xyz = np.reshape(xyz,(3, nx * ny * nz)).T G = wgraph_from_3d_grid(xyz, 18) self.assert_(G.E == 346) def test_grid_3d_3(self): """ Test the 26-nn graph on a 3d grid """ nx, ny, nz = 9, 6, 1 xyz = np.mgrid[0:nx, 0:ny, 0:nz] xyz = np.reshape(xyz,(3, nx * ny * nz)).T G = wgraph_from_3d_grid(xyz, 26) self.assert_(G.E == 346) def test_grid_3d_4(self): nx, ny, nz = 10, 10, 10 xyz = np.reshape(np.indices((nx, ny, nz)), (3, nx * ny * nz)).T G = wgraph_from_3d_grid(xyz, 26) D = G.weights # 6 * 9 * 10 * 10 self.assert_(sum(D == 1)==5400 ) # 26 * 8 ** 3 + 6 * 8 ** 2 * 17 + 12 * 8 * 11 + 8 * 7 self.assert_(np.size(D) == 20952 ) # 18 * 8 ** 3 + 6 * 8 ** 2 * 13 + 12 * 8 * 9 + 8 * 6 self.assert_(sum(D < 1.5) == 15120) def test_grid_3d_5(self): nx, ny, nz = 5, 5, 5 xyz = np.reshape(np.indices((nx, ny, nz)), (3, nx * ny * nz)).T G = wgraph_from_3d_grid(xyz, 26) D = G.weights.copy() G.set_euclidian(xyz) assert (np.allclose(G.weights, D, 1.e-7)) def test_grid_3d_6(self): nx, ny, nz = 5, 5, 5 xyz = np.reshape(np.indices((nx, ny, nz)), (3, nx * ny * nz)).T adj = wgraph_from_3d_grid(xyz, 26).to_coo_matrix().tolil() assert len(adj.rows[63]) == 26 for i in [62, 64, 58, 68, 38, 88, 57, 67, 37, 87, 59, 69, 39, 89, 33, 83, 43, 93, 32, 82, 42, 92, 34, 84, 44, 94]: assert i in adj.rows[63] def test_grid_3d_7(self): """ Check that the grid graph is symmetric """ xyz = np.array(np.where(np.random.rand(5, 5, 5) > 0.5)).T adj = wgraph_from_3d_grid(xyz, 6).to_coo_matrix() assert (adj - adj.T).nnz == 0 adj = wgraph_from_3d_grid(xyz, 18).to_coo_matrix() assert (adj - adj.T).nnz == 0 adj = wgraph_from_3d_grid(xyz, 26).to_coo_matrix() assert (adj - adj.T).nnz == 0 def test_cut_redundancies(self): G = basic_graph() e = G.E edges = G.get_edges() weights = G.weights G.E = 2 * G.E G.edges = np.concatenate((edges, edges)) G.weights = np.concatenate((weights, weights)) K = G.cut_redundancies() OK = (K.E == e) self.assert_(OK) def test_degrees(self): G = basic_graph() (r,l) = G.degrees() self.assert_(( r == 2 ).all()) self.assert_(( l == 2 ).all()) def test_normalize(self): G = basic_graph() 
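# after G.normalize() each row of the adjacency matrix should sum to one,
# which is what the assertions below verify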
G.normalize() M = G.to_coo_matrix() sM = np.array(M.sum(1)).ravel() test = np.absolute(sM - 1) < 1.e-7 OK = np.size(np.nonzero(test) == 0) self.assert_(OK) def test_normalize_2(self): G = basic_graph() G.normalize(0) M = G.to_coo_matrix() sM = np.array(M.sum(1)).ravel() test = np.absolute(sM - 1) < 1.e-7 OK = np.size(np.nonzero(test)==0) self.assert_(OK) def test_normalize_3(self): G = basic_graph() G.normalize(1) M = G.to_coo_matrix() sM = np.array(M.sum(0)).ravel() test = np.absolute(sM - 1) < 1.e-7 OK = np.size(np.nonzero(test)==0) self.assert_(OK) def test_adjacency(self): G = basic_graph() M = G.to_coo_matrix() self.assert_(( M.diagonal() == 0 ).all()) A = M.toarray() self.assert_(( np.diag(A, 1) != 0 ).all()) self.assert_(( np.diag(A, -1) != 0 ).all()) def test_cc(self): G = basic_graph() l = G.cc() L = np.array(l==0) OK = L.all() self.assert_(OK) def test_isconnected(self): G = basic_graph() self.assert_(G.is_connected()) def test_main_cc(self): x = basicdata() G = knn(x, 1) l = G.cc() l = G.main_cc() assert np.size(l)==6 def test_dijkstra(self): """ Test dijkstra's algorithm """ G = basic_graph() l = G.dijkstra(0) assert (np.absolute(l[10] - 20 * np.sin(np.pi / 20)) < 1.e-7) def test_dijkstra_multiseed(self): """ Test dijkstra's algorithm, multi_seed version """ G = basic_graph() l = G.dijkstra([0, 1]) assert (np.absolute(l[10] - 18 * np.sin(np.pi / 20)) < 1.e-7) def test_dijkstra2(self): """ Test dijkstra's algorithm, API detail """ G = basic_graph() l = G.dijkstra() assert (np.absolute(l[10] - 20 * np.sin(np.pi / 20)) < 1.e-7) def test_compact_representation(self): """ Test that the compact representation of the graph is indeed correct """ G = basic_graph() idx, ne, we = G.compact_neighb() assert len(idx) == 21 assert idx[0] == 0 assert idx[20] == G.E assert len(ne) == G.E assert len(we) == G.E def test_floyd_1(self): """ Test Floyd's algo without seed """ G = basic_graph() l = G.floyd() for i in range(10): plop = np.absolute(np.diag(l, i) - 2 * i * np.sin(2 * np.pi / 40)) assert(plop.max()<1.e-4) def test_floyd_2(self): """ Test Floyd's algo, with seed """ G = basic_graph() seeds = np.array([0,10]) l = G.floyd(seeds) for i in range(10): plop = np.absolute(l[0,i]-2*i*np.sin(2*np.pi/40)) assert (plop.max()<1.e-4) plop = np.absolute(l[0,19-i]-2*(i+1)*np.sin(2*np.pi/40)) assert (plop.max()<1.e-4) for i in range(10): plop = np.absolute(l[1,i]-2*(10-i)*np.sin(2*np.pi/40)) assert (plop.max()<1.e-4) plop = np.absolute(l[1,19-i]-2*(9-i)*np.sin(2*np.pi/40)) assert (plop.max()<1.e-4) def test_symmeterize(self): a = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6]) b = np.array([1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 0, 0, 1]) edges = np.vstack((a, b)).T d = np.ones(14) G = WeightedGraph(7, edges, d) G.symmeterize() d = G.weights ok = (d == 0.5) self.assert_(ok.all()) def test_voronoi(self): """ test voronoi labelling with 2 seeds """ a = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6]) b = np.array([1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 0, 0, 1]) d = np.array([1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2]); edges = np.transpose(np.vstack((a, b))) G = WeightedGraph(7, edges,d) G.symmeterize() seed = np.array([0, 6]) label = G.voronoi_labelling(seed) assert(label[1] == 0) def test_voronoi2(self): """ test voronoi labelling with one seed """ a = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6]) b = np.array([1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 0, 0, 1]) d = np.array([1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2]); edges = np.vstack((a, b)).T G = WeightedGraph(7, edges,d) G.symmeterize() seed = 
np.array([0]) label = G.voronoi_labelling(seed) assert(label[4] == 0) def test_voronoi3(self): """ test voronoi labelling with non-connected components """ a = np.array([0, 1, 2, 5, 6]) b = np.array([1, 2, 3, 6, 0]) d = np.array([1, 1, 1, 1, 1]); edges = np.vstack((a, b)).T G = WeightedGraph(7, edges,d) G.symmeterize() seed = np.array([0]) label = G.voronoi_labelling(seed) assert(label[4] == - 1) def test_concatenate1(self,n=10,verbose=0): x1 = nr.randn(n,2) x2 = nr.randn(n,2) G1 = knn(x1, 5) G2 = knn(x2, 5) G = concatenate_graphs(G1, G2) if verbose: G.plot(np.hstack((x1, x2))) self.assert_(G.cc().max()>0) def test_concatenate2(self,n=10,verbose=0): G1 = complete_graph(n) G2 = complete_graph(n) G = concatenate_graphs(G1, G2) self.assert_(G.cc().max() == 1) def test_anti_symmeterize(self,verbose=0): n = 10 eps = 1.e-7 M = (nr.rand(n, n) > 0.7).astype(np.float) C = M - M.T G = wgraph_from_adjacency(M) G.anti_symmeterize() A = G.to_coo_matrix() self.assert_(np.sum(C - A) ** 2 < eps) def test_subgraph_1(self,n=10,verbose=0): x = nr.randn(n, 2) G = WeightedGraph(x.shape[0]) valid = np.zeros(n) g = G.subgraph(valid) self.assert_(g is None) def test_subgraph_2(self,n=10,verbose=0): x = nr.randn(n,2) G = knn(x, 5) valid = np.zeros(n) valid[:n/2] = 1 g = G.subgraph(valid) self.assert_(g.edges.max() < n / 2) def test_graph_create_from_array(self): """ Test the creation of a graph from a sparse coo_matrix """ a = np.random.randn(5, 5) wg = wgraph_from_adjacency(a) b = wg.to_coo_matrix() self.assert_((a == b.todense()).all()) def test_graph_create_from_coo_matrix(self): """ Test the creation of a graph from a sparse coo_matrix """ import scipy.sparse as spp a = (np.random.randn(5, 5) > .8).astype(np.float) s = spp.coo_matrix(a) wg = wgraph_from_coo_matrix(s) b = wg.to_coo_matrix() self.assert_((b.todense() == a).all()) def test_to_coo_matrix(self): """ Test the generation of a sparse matrix as output """ a = (np.random.randn(5, 5)>.8).astype(np.float) wg = wgraph_from_adjacency(a) b = wg.to_coo_matrix().todense() self.assert_((a==b).all()) def test_list_neighbours(self): """ test the generation of neighbours list """ bg = basic_graph() nl = bg.list_of_neighbors() assert(len(nl) == bg.V) for ni in nl: assert len(ni)== 2 def test_kruskal(self): """ test Kruskal's algor to thin the graph """ x = basicdata() dmax = np.sqrt((x ** 2).sum()) m = mst(x) g = eps_nn(x, dmax) k = g.kruskal() assert np.abs(k.weights.sum() - m.weights.sum() < 1.e-7) def test_concatenate3(self): """ test the graph concatenation utlitity """ bg = basic_graph() cg = concatenate_graphs(bg, bg) valid = np.zeros(cg.V) valid[:bg.V] = 1 sg = cg.subgraph(valid) assert (sg.edges == bg.edges).all() assert (sg.weights == bg.weights).all() def test_cliques(self): """ test the computation of cliques """ x = np.random.rand(20, 2) x[15:] += 2. g = knn(x, 5) g.set_gaussian(x, 1.) cliques = g.cliques() assert len(np.unique(cliques)) > 1 if __name__ == '__main__': import nose nose.run(argv=['', __file__]) nipy-0.3.0/nipy/algorithms/interpolation.py000066400000000000000000000053071210344137400210660ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Image interpolators using ndimage. """ import os import tempfile import numpy as np from scipy import ndimage class ImageInterpolator(object): """ Interpolate Image instance at arbitrary points in world space The resampling is done with scipy.ndimage. 
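Points passed to ``evaluate`` are world coordinates; they are mapped to
    voxel coordinates through the inverse of ``image.coordmap`` and then
    interpolated with ``scipy.ndimage.map_coordinates``, using spline
    coefficients that are precomputed once (for ``order > 1``) and
    memory-mapped from a temporary file.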
""" def __init__(self, image, order=3): """ Parameters ---------- image : Image Image to be interpolated order : int, optional order of spline interpolation as used in scipy.ndimage. Default is 3. """ self.image = image self.order = order self._datafile = None self._buildknots() def _buildknots(self): if self.order > 1: data = ndimage.spline_filter( np.nan_to_num(self.image.get_data()), self.order) else: data = np.nan_to_num(self.image.get_data()) if self._datafile is None: _, fname = tempfile.mkstemp() self._datafile = open(fname, mode='wb') else: self._datafile = open(self._datafile.name, 'wb') data = np.nan_to_num(data.astype(np.float64)) data.tofile(self._datafile) datashape = data.shape dtype = data.dtype del(data) self._datafile.close() self._datafile = open(self._datafile.name) self.data = np.memmap(self._datafile.name, dtype=dtype, mode='r+', shape=datashape) def __del__(self): if self._datafile: self._datafile.close() try: os.remove(self._datafile.name) except: pass def evaluate(self, points): """ Resample image at points in world space Parameters ---------- points : array values in self.image.coordmap.output_coords. Each row is a point. Returns ------- V : ndarray interpolator of self.image evaluated at points """ points = np.array(points, np.float64) output_shape = points.shape[1:] points.shape = (points.shape[0], np.product(output_shape)) cmapi = self.image.coordmap.inverse() voxels = cmapi(points.T).T V = ndimage.map_coordinates(self.data, voxels, order=self.order, prefilter=False) # ndimage.map_coordinates returns a flat array, # it needs to be reshaped to the original shape V.shape = output_shape return V nipy-0.3.0/nipy/algorithms/kernel_smooth.py000066400000000000000000000203221210344137400210420ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Linear filter(s). For the moment, only a Gaussian smoothing filter """ import gc import numpy as np import numpy.fft as fft import numpy.linalg as npl from nipy.core.api import Image, AffineTransform from nipy.core.reference.coordinate_map import product class LinearFilter(object): ''' A class to implement some FFT smoothers for Image objects. By default, this does a Gaussian kernel smooth. More choices would be better! ''' normalization = 'l1sum' def __init__(self, coordmap, shape, fwhm=6.0, scale=1.0, location=0.0, cov=None): """ Parameters ---------- coordmap : ``CoordinateMap`` shape : sequence fwhm : float, optional fwhm for Gaussian kernel, default is 6.0 scale : float, optional scaling to apply to data after smooth, default 1.0 location : float offset to apply to data after smooth and scaling, default 0 cov : None or array, optional Covariance matrix """ self.coordmap = coordmap self.bshape = shape self.fwhm = fwhm self.scale = scale self.location = location self.cov = cov self._setup_kernel() def _setup_kernel(self): if not isinstance(self.coordmap, AffineTransform): raise ValueError('for FFT smoothing, we need a ' 'regular (affine) coordmap') # voxel indices of array implied by shape voxels = np.indices(self.bshape).astype(np.float64) # coordinates of physical center. XXX - why the 'floor' here? vox_center = np.floor((np.array(self.bshape) - 1) / 2.0) phys_center = self.coordmap(vox_center) # reshape to (N coordinates, -1). 
We appear to need to assign # to shape instead of doing a reshape, in order to avoid memory # copies voxels.shape = (voxels.shape[0], np.product(voxels.shape[1:])) # physical coordinates relative to center X = (self.coordmap(voxels.T) - phys_center).T X.shape = (self.coordmap.ndims[1],) + tuple(self.bshape) # compute kernel from these positions kernel = self(X, axis=0) kernel = _crop(kernel) self.norms = {'l2':np.sqrt((kernel**2).sum()), 'l1':np.fabs(kernel).sum(), 'l1sum':kernel.sum()} self._kernel = kernel self.shape = (np.ceil((np.asarray(self.bshape) + np.asarray(kernel.shape))/2)*2+2) self.fkernel = np.zeros(self.shape) slices = [slice(0, kernel.shape[i]) for i in range(len(kernel.shape))] self.fkernel[slices] = kernel self.fkernel = fft.rfftn(self.fkernel) return kernel def _normsq(self, X, axis=-1): """ Compute the (periodic, i.e. on a torus) squared distance needed for FFT smoothing. Assumes coordinate system is linear. Parameters ---------- X : array array of points axis : int, optional axis containing coordinates. Default -1 """ # copy X _X = np.array(X) # roll coordinate axis to front _X = np.rollaxis(_X, axis) # convert coordinates to FWHM units if self.fwhm is not 1.0: f = fwhm2sigma(self.fwhm) if f.shape == (): f = np.ones(len(self.bshape)) * f for i in range(len(self.bshape)): _X[i] /= f[i] # whiten? if self.cov != None: _chol = npl.cholesky(self.cov) _X = np.dot(npl.inv(_chol), _X) # compute squared distance D2 = np.sum(_X**2, axis=0) return D2 def __call__(self, X, axis=-1): ''' Compute kernel from points Parameters ---------- X : array array of points axis : int, optional axis containing coordinates. Default -1 ''' _normsq = self._normsq(X, axis) / 2. t = np.less_equal(_normsq, 15) return np.exp(-np.minimum(_normsq, 15)) * t def smooth(self, inimage, clean=False, is_fft=False): """ Apply smoothing to `inimage` Parameters ---------- inimage : ``Image`` The image to be smoothed. Should be 3D. clean : bool, optional Should we call ``nan_to_num`` on the data before smoothing? is_fft : bool, optional Has the data already been fft'd? Returns ------- s_image : `Image` New image, with smoothing applied """ if inimage.ndim == 4: # we need to generalize which axis to iterate over. By # default it should probably be the last. raise NotImplementedError('Smoothing volumes in a 4D series ' 'is broken, pending a rethink') _out = np.zeros(inimage.shape) # iterate over the first (0) axis - this is confusing - see # above nslice = inimage.shape[0] elif inimage.ndim == 3: nslice = 1 else: raise NotImplementedError('expecting either 3 or 4-d image') in_data = inimage.get_data() for _slice in range(nslice): if in_data.ndim == 4: data = in_data[_slice] elif in_data.ndim == 3: data = in_data[:] if clean: data = np.nan_to_num(data) if not is_fft: data = self._presmooth(data) data *= self.fkernel data = fft.irfftn(data) / self.norms[self.normalization] gc.collect() _dslice = [slice(0, self.bshape[i], 1) for i in range(3)] if self.scale != 1: data = self.scale * data[_dslice] if self.location != 0.0: data += self.location gc.collect() # Write out data if in_data.ndim == 4: _out[_slice] = data else: _out = data _slice += 1 gc.collect() _out = _out[[slice(self._kernel.shape[i]/2, self.bshape[i] + self._kernel.shape[i]/2) for i in range(len(self.bshape))]] if inimage.ndim == 3: return Image(_out, coordmap=self.coordmap) else: # This does not work as written. 
See above concat_affine = AffineTransform.identity('concat') return Image(_out, coordmap=product(self.coordmap, concat_affine)) def _presmooth(self, indata): slices = [slice(0, self.bshape[i], 1) for i in range(len(self.shape))] _buffer = np.zeros(self.shape) _buffer[slices] = indata return fft.rfftn(_buffer) def fwhm2sigma(fwhm): """ Convert a FWHM value to sigma in a Gaussian kernel. Parameters ---------- fwhm : array-like FWHM value or values Returns ------- sigma : array or float sigma values corresponding to `fwhm` values Examples -------- >>> sigma = fwhm2sigma(6) >>> sigmae = fwhm2sigma([6, 7, 8]) >>> sigma == sigmae[0] True """ fwhm = np.asarray(fwhm) return fwhm / np.sqrt(8 * np.log(2)) def sigma2fwhm(sigma): """ Convert a sigma in a Gaussian kernel to a FWHM value Parameters ---------- sigma : array-like sigma value or values Returns ------- fwhm : array or float fwhm values corresponding to `sigma` values Examples -------- >>> fwhm = sigma2fwhm(3) >>> fwhms = sigma2fwhm([3, 4, 5]) >>> fwhm == fwhms[0] True """ sigma = np.asarray(sigma) return sigma * np.sqrt(8 * np.log(2)) def _crop(X, tol=1.0e-10): """ Find a bounding box for support of fabs(X) > tol and returned crop region. """ aX = np.fabs(X) n = len(X.shape) I = np.indices(X.shape)[:, np.greater(aX, tol)] if I.shape[1] > 0: m = [I[i].min() for i in range(n)] M = [I[i].max() for i in range(n)] slices = [slice(m[i], M[i]+1, 1) for i in range(n)] return X[slices] else: return np.zeros((1,)*n) nipy-0.3.0/nipy/algorithms/optimize.py000066400000000000000000000052711210344137400200370ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: # add-ons to scipy.optimize import numpy as np from scipy.optimize import brent, approx_fprime def _linesearch_brent(func, p, xi, tol=1e-3): """Line-search algorithm using Brent's method. Find the minimium of the function ``func(x0+ alpha*direc)``. """ def myfunc(alpha): return func(p + alpha * xi) alpha_min, fret, iter, num = brent(myfunc, full_output=1, tol=tol) xi = alpha_min*xi return np.squeeze(fret), p+xi def _wrap(function, args): ncalls = [0] def wrapper(x): ncalls[0] += 1 return function(x, *args) return ncalls, wrapper def fmin_steepest(f, x0, fprime=None, xtol=1e-4, ftol=1e-4, maxiter=None, callback=None, disp=True): """ Minimize a function using a steepest gradient descent algorithm. This complements the collection of minimization routines provided in scipy.optimize. Steepest gradient iterations are cheaper than in the conjugate gradient or Newton methods, hence convergence may sometimes turn out faster algthough more iterations are typically needed. 
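At each iteration the gradient is normalized to unit length and a Brent
    line search is performed along that direction; the loop stops when the
    relative decrease of f becomes small, i.e. when
    2*(f_old - f_new) <= ftol*(|f_old| + |f_new|), or when `maxiter` is
    reached.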
Parameters ---------- f : callable Function to be minimized x0 : array Starting point fprime : callable Function that computes the gradient of f xtol : float Relative tolerance on step sizes in line searches ftol : float Relative tolerance on function variations maxiter : int Maximum number of iterations callback : callable Optional function called after each iteration is complete disp : bool Print convergence message if True Returns ------- x : array Gradient descent fix point, local minimizer of f """ x = np.asarray(x0).flatten() fval = np.squeeze(f(x)) it = 0 if maxiter == None: maxiter = x.size*1000 if fprime == None: grad_calls, myfprime = _wrap(approx_fprime, (f, step)) else: grad_calls, myfprime = _wrap(fprime, args) while it < maxiter: it = it + 1 x0 = x fval0 = fval if disp: print('Computing gradient...') direc = myfprime(x) direc = direc / np.sqrt(np.sum(direc**2)) if disp: print('Performing line search...') fval, x = _linesearch_brent(f, x, direc, tol=xtol) if not callback == None: callback(x) if (2.0*(fval0-fval) <= ftol*(abs(fval0)+abs(fval))+1e-20): break if disp: print('Number of iterations: %d' % it) print('Minimum criterion value: %f' % fval) return x nipy-0.3.0/nipy/algorithms/registration/000077500000000000000000000000001210344137400203325ustar00rootroot00000000000000nipy-0.3.0/nipy/algorithms/registration/NOTES_ELF000066400000000000000000000021201210344137400216260ustar00rootroot00000000000000 Notes neurospin/registration registration/ __init__.py registration.py iconic_registration (intensity based, joint histogram) renamed joint registration takes from and two images and compute joint histogram groupwise_registration.py (motion correction in fmri) register a set of images sum of square differences not using joint histogram affine.py (discribes a general 3d affine transformation and its parametrization) class affine params=s(-1)xv12 s: pre_cond radius for the preconditioner is in translation coordinates check for rigidity class grid_transform.py (discrete displacements of the from grid) cubic_spline.c (same results as ndimage) wichmann_prng.c (only for the random interplation) iconic.c to be renamed to histogram.c interpolating the histogram avoids the problem of casting the intensity in C assumes the joint histogram is a signed short array (16bit) clamp Make independent tests with checks starting from different registrations. Sensible default for the focus function What should we do when outside the fov? nipy-0.3.0/nipy/algorithms/registration/TODO.txt000066400000000000000000000014321210344137400216400ustar00rootroot00000000000000* 'permuted' svd in affine.py * rename rotation, scaling, shearing appropriately * spline transform object * log-euclidean transform object ??? * Levenberg-Marquardt * Affine transform creation -------------------------------------------- Transform objects Transform | --> Affine | --> Rigid, Similarity, ... | --> GridTransform | --> SplineTransform | --> PolyAffine | --> PolyRigid, PolySimilarity, ... ChainTransform Any registration method should take a generic transform argument having an `apply` method and a `param` attribute or property. Internally, it may create a ChainTransform object to represent voxel-to-voxel transforms or other kinds of compositions. The transform supplied by the user should be optimizable (have a `param` attribute). 
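As a rough sketch of that contract (illustrative only -- the class name
MyShiftTransform and its translation parameterization are hypothetical and
not part of nipy), a minimal optimizable transform could look like:

    import numpy as np

    class MyShiftTransform(object):
        # Hypothetical example of the generic transform interface sketched
        # above: an `apply` method mapping an (N, 3) array of points, plus
        # a flat `param` vector that an optimizer can read and write.

        def __init__(self, shift=(0., 0., 0.)):
            self._shift = np.asarray(shift, dtype=float)

        def apply(self, xyz):
            # Apply the transform to an (N, 3) coordinate array; here a
            # pure translation, the simplest possible parameterization.
            return np.asarray(xyz, dtype=float) + self._shift

        @property
        def param(self):
            # Flat parameter vector exposed to the optimizer.
            return self._shift.copy()

        @param.setter
        def param(self, p):
            self._shift = np.asarray(p, dtype=float)

A registration routine written against this contract only calls
t.apply(points) and gets/sets t.param, so an Affine, a ChainTransform, or a
spline-based transform can be substituted without changing the optimization
loop.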
nipy-0.3.0/nipy/algorithms/registration/__init__.py000066400000000000000000000017231210344137400224460ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from .resample import resample from .histogram_registration import (HistogramRegistration, clamp, ideal_spacing, interp_methods) from .affine import (threshold, rotation_mat2vec, rotation_vec2mat, to_matrix44, preconditioner, inverse_affine, subgrid_affine, Affine, Affine2D, Rigid, Rigid2D, Similarity, Similarity2D, affine_transforms) from .groupwise_registration import (interp_slice_order, scanner_coords, make_grid, Image4d, Realign4dAlgorithm, resample4d, adjust_subsampling, single_run_realign4d, realign4d, Realign4d, FmriRealign4d) from nipy.testing import Tester test = Tester().test bench = Tester().bench nipy-0.3.0/nipy/algorithms/registration/_registration.c000066400000000000000000013416221210344137400233600ustar00rootroot00000000000000/* Generated by Cython 0.17.4 on Sat Jan 12 17:27:27 2013 */ #define PY_SSIZE_T_CLEAN #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02040000 #error Cython requires Python 2.4+. #else #include /* For offsetof */ #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #endif #if PY_VERSION_HEX < 0x02050000 typedef int Py_ssize_t; #define PY_SSIZE_T_MAX INT_MAX #define PY_SSIZE_T_MIN INT_MIN #define PY_FORMAT_SIZE_T "" #define CYTHON_FORMAT_SSIZE_T "" #define PyInt_FromSsize_t(z) PyInt_FromLong(z) #define PyInt_AsSsize_t(o) __Pyx_PyInt_AsInt(o) #define PyNumber_Index(o) ((PyNumber_Check(o) && !PyFloat_Check(o)) ? 
PyNumber_Int(o) : \ (PyErr_Format(PyExc_TypeError, \ "expected index value, got %.200s", Py_TYPE(o)->tp_name), \ (PyObject*)0)) #define __Pyx_PyIndex_Check(o) (PyNumber_Check(o) && !PyFloat_Check(o) && \ !PyComplex_Check(o)) #define PyIndex_Check __Pyx_PyIndex_Check #define PyErr_WarnEx(category, message, stacklevel) PyErr_Warn(category, message) #define __PYX_BUILD_PY_SSIZE_T "i" #else #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #define __Pyx_PyIndex_Check PyIndex_Check #endif #if PY_VERSION_HEX < 0x02060000 #define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt) #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type) #define Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size) #define PyVarObject_HEAD_INIT(type, size) \ PyObject_HEAD_INIT(type) size, #define PyType_Modified(t) typedef struct { void *buf; PyObject *obj; Py_ssize_t len; Py_ssize_t itemsize; int readonly; int ndim; char *format; Py_ssize_t *shape; Py_ssize_t *strides; Py_ssize_t *suboffsets; void *internal; } Py_buffer; #define PyBUF_SIMPLE 0 #define PyBUF_WRITABLE 0x0001 #define PyBUF_FORMAT 0x0004 #define PyBUF_ND 0x0008 #define PyBUF_STRIDES (0x0010 | PyBUF_ND) #define PyBUF_C_CONTIGUOUS (0x0020 | PyBUF_STRIDES) #define PyBUF_F_CONTIGUOUS (0x0040 | PyBUF_STRIDES) #define PyBUF_ANY_CONTIGUOUS (0x0080 | PyBUF_STRIDES) #define PyBUF_INDIRECT (0x0100 | PyBUF_STRIDES) #define PyBUF_RECORDS (PyBUF_STRIDES | PyBUF_FORMAT | PyBUF_WRITABLE) #define PyBUF_FULL (PyBUF_INDIRECT | PyBUF_FORMAT | PyBUF_WRITABLE) typedef int (*getbufferproc)(PyObject *, Py_buffer *, int); typedef void (*releasebufferproc)(PyObject *, Py_buffer *); #endif #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \ PyCode_New(a, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #endif #if PY_MAJOR_VERSION < 3 && PY_MINOR_VERSION < 6 #define PyUnicode_FromString(s) PyUnicode_Decode(s, strlen(s), "UTF-8", "strict") #endif #if PY_MAJOR_VERSION >= 3 #define Py_TPFLAGS_CHECKTYPES 0 #define Py_TPFLAGS_HAVE_INDEX 0 #endif #if (PY_VERSION_HEX < 0x02060000) || (PY_MAJOR_VERSION >= 3) #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ? 
\ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #else #define CYTHON_PEP393_ENABLED 0 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_READ(k, d, i) ((k=k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #endif #if PY_VERSION_HEX < 0x02060000 #define PyBytesObject PyStringObject #define PyBytes_Type PyString_Type #define PyBytes_Check PyString_Check #define PyBytes_CheckExact PyString_CheckExact #define PyBytes_FromString PyString_FromString #define PyBytes_FromStringAndSize PyString_FromStringAndSize #define PyBytes_FromFormat PyString_FromFormat #define PyBytes_DecodeEscape PyString_DecodeEscape #define PyBytes_AsString PyString_AsString #define PyBytes_AsStringAndSize PyString_AsStringAndSize #define PyBytes_Size PyString_Size #define PyBytes_AS_STRING PyString_AS_STRING #define PyBytes_GET_SIZE PyString_GET_SIZE #define PyBytes_Repr PyString_Repr #define PyBytes_Concat PyString_Concat #define PyBytes_ConcatAndDel PyString_ConcatAndDel #endif #if PY_VERSION_HEX < 0x02060000 #define PySet_Check(obj) PyObject_TypeCheck(obj, &PySet_Type) #define PyFrozenSet_Check(obj) PyObject_TypeCheck(obj, &PyFrozenSet_Type) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_VERSION_HEX < 0x03020000 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if (PY_MAJOR_VERSION < 3) || (PY_VERSION_HEX >= 0x03010300) #define __Pyx_PySequence_GetSlice(obj, a, b) PySequence_GetSlice(obj, a, b) #define __Pyx_PySequence_SetSlice(obj, a, b, value) PySequence_SetSlice(obj, a, b, value) #define __Pyx_PySequence_DelSlice(obj, a, b) PySequence_DelSlice(obj, a, b) #else #define __Pyx_PySequence_GetSlice(obj, a, b) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), (PyObject*)0) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_GetSlice(obj, a, b)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object is unsliceable", (obj)->ob_type->tp_name), (PyObject*)0))) #define __Pyx_PySequence_SetSlice(obj, a, b, value) (unlikely(!(obj)) ? 
\ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_SetSlice(obj, a, b, value)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice assignment", (obj)->ob_type->tp_name), -1))) #define __Pyx_PySequence_DelSlice(obj, a, b) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_DelSlice(obj, a, b)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice deletion", (obj)->ob_type->tp_name), -1))) #endif #if PY_MAJOR_VERSION >= 3 #define PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func)) #endif #if PY_VERSION_HEX < 0x02050000 #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),((char *)(n))) #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),((char *)(n)),(a)) #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),((char *)(n))) #else #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),(n)) #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),(n),(a)) #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),(n)) #endif #if PY_VERSION_HEX < 0x02050000 #define __Pyx_NAMESTR(n) ((char *)(n)) #define __Pyx_DOCSTR(n) ((char *)(n)) #else #define __Pyx_NAMESTR(n) (n) #define __Pyx_DOCSTR(n) (n) #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include #define __PYX_HAVE__nipy__algorithms__registration___registration #define __PYX_HAVE_API__nipy__algorithms__registration___registration #include "stdio.h" #include "stdlib.h" #include "numpy/arrayobject.h" #include "numpy/ufuncobject.h" #include "joint_histogram.h" #include "cubic_spline.h" #include "polyaffine.h" #ifdef _OPENMP #include #endif /* _OPENMP */ #ifdef PYREX_WITHOUT_ASSERTIONS #define CYTHON_WITHOUT_ASSERTIONS #endif /* inline attribute */ #ifndef CYTHON_INLINE #if defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif /* unused attribute */ #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif typedef struct {PyObject **p; char *s; const long n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; /*proto*/ /* Type Conversion Predeclarations */ #define __Pyx_PyBytes_FromUString(s) PyBytes_FromString((char*)s) #define __Pyx_PyBytes_AsUString(s) ((unsigned char*) PyBytes_AsString(s)) #define __Pyx_Owned_Py_None(b) (Py_INCREF(Py_None), Py_None) #define __Pyx_PyBool_FromLong(b) ((b) ? 
(Py_INCREF(Py_True), Py_True) : (Py_INCREF(Py_False), Py_False)) static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x); static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject*); #if CYTHON_COMPILING_IN_CPYTHON #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #ifdef __GNUC__ /* Test for GCC > 2.95 */ #if __GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* __GNUC__ > 2 ... */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ > 2 ... */ #else /* __GNUC__ */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static PyObject *__pyx_m; static PyObject *__pyx_b; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= __FILE__; static const char *__pyx_filename; #if !defined(CYTHON_CCOMPLEX) #if defined(__cplusplus) #define CYTHON_CCOMPLEX 1 #elif defined(_Complex_I) #define CYTHON_CCOMPLEX 1 #else #define CYTHON_CCOMPLEX 0 #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus #include #else #include #endif #endif #if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__) #undef _Complex_I #define _Complex_I 1.0fj #endif static const char *__pyx_f[] = { "_registration.pyx", "numpy.pxd", "type.pxd", }; /* "numpy.pxd":723 * # in Cython to enable them only on the right systems. 
* * ctypedef npy_int8 int8_t # <<<<<<<<<<<<<< * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t */ typedef npy_int8 __pyx_t_5numpy_int8_t; /* "numpy.pxd":724 * * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t # <<<<<<<<<<<<<< * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t */ typedef npy_int16 __pyx_t_5numpy_int16_t; /* "numpy.pxd":725 * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t # <<<<<<<<<<<<<< * ctypedef npy_int64 int64_t * #ctypedef npy_int96 int96_t */ typedef npy_int32 __pyx_t_5numpy_int32_t; /* "numpy.pxd":726 * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t # <<<<<<<<<<<<<< * #ctypedef npy_int96 int96_t * #ctypedef npy_int128 int128_t */ typedef npy_int64 __pyx_t_5numpy_int64_t; /* "numpy.pxd":730 * #ctypedef npy_int128 int128_t * * ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<< * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t */ typedef npy_uint8 __pyx_t_5numpy_uint8_t; /* "numpy.pxd":731 * * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<< * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t */ typedef npy_uint16 __pyx_t_5numpy_uint16_t; /* "numpy.pxd":732 * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<< * ctypedef npy_uint64 uint64_t * #ctypedef npy_uint96 uint96_t */ typedef npy_uint32 __pyx_t_5numpy_uint32_t; /* "numpy.pxd":733 * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<< * #ctypedef npy_uint96 uint96_t * #ctypedef npy_uint128 uint128_t */ typedef npy_uint64 __pyx_t_5numpy_uint64_t; /* "numpy.pxd":737 * #ctypedef npy_uint128 uint128_t * * ctypedef npy_float32 float32_t # <<<<<<<<<<<<<< * ctypedef npy_float64 float64_t * #ctypedef npy_float80 float80_t */ typedef npy_float32 __pyx_t_5numpy_float32_t; /* "numpy.pxd":738 * * ctypedef npy_float32 float32_t * ctypedef npy_float64 float64_t # <<<<<<<<<<<<<< * #ctypedef npy_float80 float80_t * #ctypedef npy_float128 float128_t */ typedef npy_float64 __pyx_t_5numpy_float64_t; /* "numpy.pxd":747 * # The int types are mapped a bit surprising -- * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t # <<<<<<<<<<<<<< * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t */ typedef npy_long __pyx_t_5numpy_int_t; /* "numpy.pxd":748 * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t * ctypedef npy_longlong long_t # <<<<<<<<<<<<<< * ctypedef npy_longlong longlong_t * */ typedef npy_longlong __pyx_t_5numpy_long_t; /* "numpy.pxd":749 * ctypedef npy_long int_t * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<< * * ctypedef npy_ulong uint_t */ typedef npy_longlong __pyx_t_5numpy_longlong_t; /* "numpy.pxd":751 * ctypedef npy_longlong longlong_t * * ctypedef npy_ulong uint_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t */ typedef npy_ulong __pyx_t_5numpy_uint_t; /* "numpy.pxd":752 * * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulonglong_t * */ typedef npy_ulonglong __pyx_t_5numpy_ulong_t; /* "numpy.pxd":753 * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<< * * ctypedef npy_intp intp_t */ typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t; /* "numpy.pxd":755 * ctypedef npy_ulonglong ulonglong_t * * ctypedef npy_intp intp_t # 
<<<<<<<<<<<<<< * ctypedef npy_uintp uintp_t * */ typedef npy_intp __pyx_t_5numpy_intp_t; /* "numpy.pxd":756 * * ctypedef npy_intp intp_t * ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<< * * ctypedef npy_double float_t */ typedef npy_uintp __pyx_t_5numpy_uintp_t; /* "numpy.pxd":758 * ctypedef npy_uintp uintp_t * * ctypedef npy_double float_t # <<<<<<<<<<<<<< * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t */ typedef npy_double __pyx_t_5numpy_float_t; /* "numpy.pxd":759 * * ctypedef npy_double float_t * ctypedef npy_double double_t # <<<<<<<<<<<<<< * ctypedef npy_longdouble longdouble_t * */ typedef npy_double __pyx_t_5numpy_double_t; /* "numpy.pxd":760 * ctypedef npy_double float_t * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cfloat cfloat_t */ typedef npy_longdouble __pyx_t_5numpy_longdouble_t; #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< float > __pyx_t_float_complex; #else typedef float _Complex __pyx_t_float_complex; #endif #else typedef struct { float real, imag; } __pyx_t_float_complex; #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< double > __pyx_t_double_complex; #else typedef double _Complex __pyx_t_double_complex; #endif #else typedef struct { double real, imag; } __pyx_t_double_complex; #endif /*--- Type declarations ---*/ /* "numpy.pxd":762 * ctypedef npy_longdouble longdouble_t * * ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<< * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t */ typedef npy_cfloat __pyx_t_5numpy_cfloat_t; /* "numpy.pxd":763 * * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<< * ctypedef npy_clongdouble clongdouble_t * */ typedef npy_cdouble __pyx_t_5numpy_cdouble_t; /* "numpy.pxd":764 * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cdouble complex_t */ typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t; /* "numpy.pxd":766 * ctypedef npy_clongdouble clongdouble_t * * ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew1(a): */ typedef npy_cdouble __pyx_t_5numpy_complex_t; #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); /*proto*/ #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define __Pyx_RefNannySetupContext(name, acquire_gil) \ if (acquire_gil) { \ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \ PyGILState_Release(__pyx_gilstate_save); \ } else { \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil) \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext() \ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) 
__Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif /* CYTHON_REFNANNY */ #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name); /*proto*/ static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /*proto*/ static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /*proto*/ static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[], \ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, \ const char* function_name); /*proto*/ static int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, const char *name, int exact); /*proto*/ static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb); /*proto*/ static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb); /*proto*/ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /*proto*/ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); if (likely(L->allocated > len)) { Py_INCREF(x); PyList_SET_ITEM(list, len, x); Py_SIZE(list) = len+1; return 0; } return PyList_Append(list, x); } #else #define __Pyx_PyList_Append(L,x) PyList_Append(L,x) #endif static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /*proto*/ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); static CYTHON_INLINE int __Pyx_IterFinish(void); /*proto*/ static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected); /*proto*/ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level); /*proto*/ static CYTHON_INLINE PyObject *__Pyx_PyInt_to_py_Py_intptr_t(Py_intptr_t); #if CYTHON_CCOMPLEX #ifdef __cplusplus #define __Pyx_CREAL(z) ((z).real()) #define __Pyx_CIMAG(z) ((z).imag()) #else #define __Pyx_CREAL(z) (__real__(z)) #define __Pyx_CIMAG(z) (__imag__(z)) #endif #else #define __Pyx_CREAL(z) ((z).real) #define __Pyx_CIMAG(z) ((z).imag) #endif #if defined(_WIN32) && defined(__cplusplus) && CYTHON_CCOMPLEX #define __Pyx_SET_CREAL(z,x) ((z).real(x)) #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) #else 
#define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) #endif static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float); #if CYTHON_CCOMPLEX #define __Pyx_c_eqf(a, b) ((a)==(b)) #define __Pyx_c_sumf(a, b) ((a)+(b)) #define __Pyx_c_difff(a, b) ((a)-(b)) #define __Pyx_c_prodf(a, b) ((a)*(b)) #define __Pyx_c_quotf(a, b) ((a)/(b)) #define __Pyx_c_negf(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zerof(z) ((z)==(float)0) #define __Pyx_c_conjf(z) (::std::conj(z)) #if 1 #define __Pyx_c_absf(z) (::std::abs(z)) #define __Pyx_c_powf(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zerof(z) ((z)==0) #define __Pyx_c_conjf(z) (conjf(z)) #if 1 #define __Pyx_c_absf(z) (cabsf(z)) #define __Pyx_c_powf(a, b) (cpowf(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex); static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex); #if 1 static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex, __pyx_t_float_complex); #endif #endif static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double); #if CYTHON_CCOMPLEX #define __Pyx_c_eq(a, b) ((a)==(b)) #define __Pyx_c_sum(a, b) ((a)+(b)) #define __Pyx_c_diff(a, b) ((a)-(b)) #define __Pyx_c_prod(a, b) ((a)*(b)) #define __Pyx_c_quot(a, b) ((a)/(b)) #define __Pyx_c_neg(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zero(z) ((z)==(double)0) #define __Pyx_c_conj(z) (::std::conj(z)) #if 1 #define __Pyx_c_abs(z) (::std::abs(z)) #define __Pyx_c_pow(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zero(z) ((z)==0) #define __Pyx_c_conj(z) (conj(z)) #if 1 #define __Pyx_c_abs(z) (cabs(z)) #define __Pyx_c_pow(a, b) (cpow(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex); static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex); #if 1 static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex, __pyx_t_double_complex); #endif #endif static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject *); static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject *); static CYTHON_INLINE unsigned int 
__Pyx_PyInt_AsUnsignedInt(PyObject *); static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject *); static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject *); static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject *); static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject *); static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject *); static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject *); static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject *); static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject *); static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject *); static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject *); static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject *); static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject *); static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject *); static int __Pyx_check_binary_version(void); #if !defined(__Pyx_PyIdentifier_FromString) #if PY_MAJOR_VERSION < 3 #define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s) #else #define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s) #endif #endif static PyObject *__Pyx_ImportModule(const char *name); /*proto*/ static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict); /*proto*/ typedef struct { int code_line; PyCodeObject* code_object; } __Pyx_CodeObjectCacheEntry; struct __Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); /*proto*/ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/ /* Module declarations from 'cpython.buffer' */ /* Module declarations from 'cpython.ref' */ /* Module declarations from 'libc.stdio' */ /* Module declarations from 'cpython.object' */ /* Module declarations from '__builtin__' */ /* Module declarations from 'cpython.type' */ static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0; /* Module declarations from 'libc.stdlib' */ /* Module declarations from 'numpy' */ /* Module declarations from 'numpy' */ static PyTypeObject *__pyx_ptype_5numpy_dtype = 0; static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0; static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0; static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0; static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0; static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/ /* Module declarations from 'nipy.algorithms.registration._registration' */ static PyArrayObject *__pyx_f_4nipy_10algorithms_12registration_13_registration__reshaped_double(PyObject *, PyArrayObject *); /*proto*/ #define __Pyx_MODULE_NAME "nipy.algorithms.registration._registration" int __pyx_module_is_main_nipy__algorithms__registration___registration = 0; /* Implementation of 'nipy.algorithms.registration._registration' */ static PyObject *__pyx_builtin_RuntimeError; static PyObject *__pyx_builtin_range; static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_pf_4nipy_10algorithms_12registration_13_registration__joint_histogram(CYTHON_UNUSED 
PyObject *__pyx_self, PyArrayObject *__pyx_v_H, PyArrayIterObject *__pyx_v_iterI, PyArrayObject *__pyx_v_imJ, PyArrayObject *__pyx_v_Tvox, long __pyx_v_interp); /* proto */ static PyObject *__pyx_pf_4nipy_10algorithms_12registration_13_registration_2_L1_moments(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_H); /* proto */ static PyObject *__pyx_pf_4nipy_10algorithms_12registration_13_registration_4_cspline_transform(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_x); /* proto */ static PyObject *__pyx_pf_4nipy_10algorithms_12registration_13_registration_6_cspline_sample1d(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_R, PyArrayObject *__pyx_v_C, PyObject *__pyx_v_X, PyObject *__pyx_v_mode); /* proto */ static PyObject *__pyx_pf_4nipy_10algorithms_12registration_13_registration_8_cspline_sample2d(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_R, PyArrayObject *__pyx_v_C, PyObject *__pyx_v_X, PyObject *__pyx_v_Y, PyObject *__pyx_v_mx, PyObject *__pyx_v_my); /* proto */ static PyObject *__pyx_pf_4nipy_10algorithms_12registration_13_registration_10_cspline_sample3d(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_R, PyArrayObject *__pyx_v_C, PyObject *__pyx_v_X, PyObject *__pyx_v_Y, PyObject *__pyx_v_Z, PyObject *__pyx_v_mx, PyObject *__pyx_v_my, PyObject *__pyx_v_mz); /* proto */ static PyObject *__pyx_pf_4nipy_10algorithms_12registration_13_registration_12_cspline_sample4d(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_R, PyArrayObject *__pyx_v_C, PyObject *__pyx_v_X, PyObject *__pyx_v_Y, PyObject *__pyx_v_Z, PyObject *__pyx_v_T, PyObject *__pyx_v_mx, PyObject *__pyx_v_my, PyObject *__pyx_v_mz, PyObject *__pyx_v_mt); /* proto */ static PyObject *__pyx_pf_4nipy_10algorithms_12registration_13_registration_14_cspline_resample3d(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_im, PyObject *__pyx_v_dims, PyArrayObject *__pyx_v_Tvox, PyObject *__pyx_v_dtype, PyObject *__pyx_v_mx, PyObject *__pyx_v_my, PyObject *__pyx_v_mz); /* proto */ static PyObject *__pyx_pf_4nipy_10algorithms_12registration_13_registration_16check_array(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_x, int __pyx_v_dim, int __pyx_v_exp_dim, PyObject *__pyx_v_xname); /* proto */ static PyObject *__pyx_pf_4nipy_10algorithms_12registration_13_registration_18_apply_polyaffine(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xyz, PyArrayObject *__pyx_v_centers, PyArrayObject *__pyx_v_affines, PyArrayObject *__pyx_v_sigma); /* proto */ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */ static char __pyx_k_1[] = "Joint histogram failed because of incorrect input arrays."; static char __pyx_k_3[] = "L1_moments failed because input array is not double."; static char __pyx_k_5[] = "%s array should be double C-contiguous"; static char __pyx_k_6[] = "%s has size %d in last dimension, %d expected"; static char __pyx_k_7[] = "centers and affines arrays should have same shape[0]"; static char __pyx_k_9[] = "ndarray is not C contiguous"; static char __pyx_k_11[] = "ndarray is not Fortran contiguous"; static char __pyx_k_13[] = "Non-native byte order not supported"; static char __pyx_k_15[] = "unknown dtype code in numpy.pxd (%d)"; static char __pyx_k_16[] = "Format string allocated too short, see comment in numpy.pxd"; static char 
__pyx_k_19[] = "Format string allocated too short."; static char __pyx_k_21[] = "\nBindings for various image registration routines written in C: joint\nhistogram computation, cubic spline interpolation, non-rigid\ntransformations. \n"; static char __pyx_k_22[] = "0.3"; static char __pyx_k_25[] = "/Users/mb312/dev_trees/nipy/nipy/algorithms/registration/_registration.pyx"; static char __pyx_k_26[] = "nipy.algorithms.registration._registration"; static char __pyx_k__B[] = "B"; static char __pyx_k__C[] = "C"; static char __pyx_k__H[] = "H"; static char __pyx_k__I[] = "I"; static char __pyx_k__L[] = "L"; static char __pyx_k__O[] = "O"; static char __pyx_k__Q[] = "Q"; static char __pyx_k__R[] = "R"; static char __pyx_k__T[] = "T"; static char __pyx_k__X[] = "X"; static char __pyx_k__Y[] = "Y"; static char __pyx_k__Z[] = "Z"; static char __pyx_k__b[] = "b"; static char __pyx_k__c[] = "c"; static char __pyx_k__d[] = "d"; static char __pyx_k__f[] = "f"; static char __pyx_k__g[] = "g"; static char __pyx_k__h[] = "h"; static char __pyx_k__i[] = "i"; static char __pyx_k__l[] = "l"; static char __pyx_k__n[] = "n"; static char __pyx_k__q[] = "q"; static char __pyx_k__r[] = "r"; static char __pyx_k__t[] = "t"; static char __pyx_k__u[] = "u"; static char __pyx_k__x[] = "x"; static char __pyx_k__y[] = "y"; static char __pyx_k__z[] = "z"; static char __pyx_k__Ta[] = "Ta"; static char __pyx_k__Xa[] = "Xa"; static char __pyx_k__Ya[] = "Ya"; static char __pyx_k__Za[] = "Za"; static char __pyx_k__Zd[] = "Zd"; static char __pyx_k__Zf[] = "Zf"; static char __pyx_k__Zg[] = "Zg"; static char __pyx_k__im[] = "im"; static char __pyx_k__mt[] = "mt"; static char __pyx_k__mx[] = "mx"; static char __pyx_k__my[] = "my"; static char __pyx_k__mz[] = "mz"; static char __pyx_k__np[] = "np"; static char __pyx_k__dev[] = "dev"; static char __pyx_k__dim[] = "dim"; static char __pyx_k__imJ[] = "imJ"; static char __pyx_k__ret[] = "ret"; static char __pyx_k__xyz[] = "xyz"; static char __pyx_k__Tvox[] = "Tvox"; static char __pyx_k__dims[] = "dims"; static char __pyx_k__kind[] = "kind"; static char __pyx_k__mode[] = "mode"; static char __pyx_k__size[] = "size"; static char __pyx_k__tvox[] = "tvox"; static char __pyx_k__zero[] = "zero"; static char __pyx_k__dtype[] = "dtype"; static char __pyx_k__flags[] = "flags"; static char __pyx_k__index[] = "index"; static char __pyx_k__iterI[] = "iterI"; static char __pyx_k__modes[] = "modes"; static char __pyx_k__multi[] = "multi"; static char __pyx_k__numpy[] = "numpy"; static char __pyx_k__order[] = "order"; static char __pyx_k__range[] = "range"; static char __pyx_k__sigma[] = "sigma"; static char __pyx_k__xname[] = "xname"; static char __pyx_k__zeros[] = "zeros"; static char __pyx_k__astype[] = "astype"; static char __pyx_k__clampI[] = "clampI"; static char __pyx_k__clampJ[] = "clampJ"; static char __pyx_k__double[] = "double"; static char __pyx_k__interp[] = "interp"; static char __pyx_k__median[] = "median"; static char __pyx_k__affines[] = "affines"; static char __pyx_k__asarray[] = "asarray"; static char __pyx_k__centers[] = "centers"; static char __pyx_k__exp_dim[] = "exp_dim"; static char __pyx_k__nearest[] = "nearest"; static char __pyx_k__reflect[] = "reflect"; static char __pyx_k__reshape[] = "reshape"; static char __pyx_k____main__[] = "__main__"; static char __pyx_k____test__[] = "__test__"; static char __pyx_k__ValueError[] = "ValueError"; static char __pyx_k___L1_moments[] = "_L1_moments"; static char __pyx_k____version__[] = "__version__"; static char __pyx_k__check_array[] 
= "check_array"; static char __pyx_k__C_CONTIGUOUS[] = "C_CONTIGUOUS"; static char __pyx_k__RuntimeError[] = "RuntimeError"; static char __pyx_k__cast_integer[] = "cast_integer"; static char __pyx_k__im_resampled[] = "im_resampled"; static char __pyx_k___joint_histogram[] = "_joint_histogram"; static char __pyx_k___apply_polyaffine[] = "_apply_polyaffine"; static char __pyx_k___cspline_sample1d[] = "_cspline_sample1d"; static char __pyx_k___cspline_sample2d[] = "_cspline_sample2d"; static char __pyx_k___cspline_sample3d[] = "_cspline_sample3d"; static char __pyx_k___cspline_sample4d[] = "_cspline_sample4d"; static char __pyx_k___cspline_transform[] = "_cspline_transform"; static char __pyx_k___cspline_resample3d[] = "_cspline_resample3d"; static PyObject *__pyx_kp_s_1; static PyObject *__pyx_kp_u_11; static PyObject *__pyx_kp_u_13; static PyObject *__pyx_kp_u_15; static PyObject *__pyx_kp_u_16; static PyObject *__pyx_kp_u_19; static PyObject *__pyx_kp_s_22; static PyObject *__pyx_kp_s_25; static PyObject *__pyx_n_s_26; static PyObject *__pyx_kp_s_3; static PyObject *__pyx_kp_s_5; static PyObject *__pyx_kp_s_6; static PyObject *__pyx_kp_s_7; static PyObject *__pyx_kp_u_9; static PyObject *__pyx_n_s__C; static PyObject *__pyx_n_s__C_CONTIGUOUS; static PyObject *__pyx_n_s__H; static PyObject *__pyx_n_s__R; static PyObject *__pyx_n_s__RuntimeError; static PyObject *__pyx_n_s__T; static PyObject *__pyx_n_s__Ta; static PyObject *__pyx_n_s__Tvox; static PyObject *__pyx_n_s__ValueError; static PyObject *__pyx_n_s__X; static PyObject *__pyx_n_s__Xa; static PyObject *__pyx_n_s__Y; static PyObject *__pyx_n_s__Ya; static PyObject *__pyx_n_s__Z; static PyObject *__pyx_n_s__Za; static PyObject *__pyx_n_s___L1_moments; static PyObject *__pyx_n_s____main__; static PyObject *__pyx_n_s____test__; static PyObject *__pyx_n_s____version__; static PyObject *__pyx_n_s___apply_polyaffine; static PyObject *__pyx_n_s___cspline_resample3d; static PyObject *__pyx_n_s___cspline_sample1d; static PyObject *__pyx_n_s___cspline_sample2d; static PyObject *__pyx_n_s___cspline_sample3d; static PyObject *__pyx_n_s___cspline_sample4d; static PyObject *__pyx_n_s___cspline_transform; static PyObject *__pyx_n_s___joint_histogram; static PyObject *__pyx_n_s__affines; static PyObject *__pyx_n_s__asarray; static PyObject *__pyx_n_s__astype; static PyObject *__pyx_n_s__c; static PyObject *__pyx_n_s__cast_integer; static PyObject *__pyx_n_s__centers; static PyObject *__pyx_n_s__check_array; static PyObject *__pyx_n_s__clampI; static PyObject *__pyx_n_s__clampJ; static PyObject *__pyx_n_s__dev; static PyObject *__pyx_n_s__dim; static PyObject *__pyx_n_s__dims; static PyObject *__pyx_n_s__double; static PyObject *__pyx_n_s__dtype; static PyObject *__pyx_n_s__exp_dim; static PyObject *__pyx_n_s__flags; static PyObject *__pyx_n_s__h; static PyObject *__pyx_n_s__i; static PyObject *__pyx_n_s__im; static PyObject *__pyx_n_s__imJ; static PyObject *__pyx_n_s__im_resampled; static PyObject *__pyx_n_s__index; static PyObject *__pyx_n_s__interp; static PyObject *__pyx_n_s__iterI; static PyObject *__pyx_n_s__kind; static PyObject *__pyx_n_s__median; static PyObject *__pyx_n_s__mode; static PyObject *__pyx_n_s__modes; static PyObject *__pyx_n_s__mt; static PyObject *__pyx_n_s__multi; static PyObject *__pyx_n_s__mx; static PyObject *__pyx_n_s__my; static PyObject *__pyx_n_s__mz; static PyObject *__pyx_n_s__n; static PyObject *__pyx_n_s__nearest; static PyObject *__pyx_n_s__np; static PyObject *__pyx_n_s__numpy; static PyObject *__pyx_n_s__order; 
static PyObject *__pyx_n_s__r; static PyObject *__pyx_n_s__range; static PyObject *__pyx_n_s__reflect; static PyObject *__pyx_n_s__reshape; static PyObject *__pyx_n_s__ret; static PyObject *__pyx_n_s__sigma; static PyObject *__pyx_n_s__size; static PyObject *__pyx_n_s__t; static PyObject *__pyx_n_s__tvox; static PyObject *__pyx_n_s__u; static PyObject *__pyx_n_s__x; static PyObject *__pyx_n_s__xname; static PyObject *__pyx_n_s__xyz; static PyObject *__pyx_n_s__y; static PyObject *__pyx_n_s__z; static PyObject *__pyx_n_s__zero; static PyObject *__pyx_n_s__zeros; static PyObject *__pyx_int_0; static PyObject *__pyx_int_1; static PyObject *__pyx_int_2; static PyObject *__pyx_int_3; static PyObject *__pyx_int_12; static PyObject *__pyx_int_15; static PyObject *__pyx_k_tuple_2; static PyObject *__pyx_k_tuple_4; static PyObject *__pyx_k_tuple_8; static PyObject *__pyx_k_tuple_10; static PyObject *__pyx_k_tuple_12; static PyObject *__pyx_k_tuple_14; static PyObject *__pyx_k_tuple_17; static PyObject *__pyx_k_tuple_18; static PyObject *__pyx_k_tuple_20; static PyObject *__pyx_k_tuple_23; static PyObject *__pyx_k_tuple_27; static PyObject *__pyx_k_tuple_29; static PyObject *__pyx_k_tuple_31; static PyObject *__pyx_k_tuple_33; static PyObject *__pyx_k_tuple_35; static PyObject *__pyx_k_tuple_37; static PyObject *__pyx_k_tuple_39; static PyObject *__pyx_k_tuple_41; static PyObject *__pyx_k_tuple_43; static PyObject *__pyx_k_codeobj_24; static PyObject *__pyx_k_codeobj_28; static PyObject *__pyx_k_codeobj_30; static PyObject *__pyx_k_codeobj_32; static PyObject *__pyx_k_codeobj_34; static PyObject *__pyx_k_codeobj_36; static PyObject *__pyx_k_codeobj_38; static PyObject *__pyx_k_codeobj_40; static PyObject *__pyx_k_codeobj_42; static PyObject *__pyx_k_codeobj_44; /* Python wrapper */ static PyObject *__pyx_pw_4nipy_10algorithms_12registration_13_registration_1_joint_histogram(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_10algorithms_12registration_13_registration__joint_histogram[] = "\n Compute the joint histogram given a transformation trial. 
\n "; static PyMethodDef __pyx_mdef_4nipy_10algorithms_12registration_13_registration_1_joint_histogram = {__Pyx_NAMESTR("_joint_histogram"), (PyCFunction)__pyx_pw_4nipy_10algorithms_12registration_13_registration_1_joint_histogram, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4nipy_10algorithms_12registration_13_registration__joint_histogram)}; static PyObject *__pyx_pw_4nipy_10algorithms_12registration_13_registration_1_joint_histogram(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_H = 0; PyArrayIterObject *__pyx_v_iterI = 0; PyArrayObject *__pyx_v_imJ = 0; PyArrayObject *__pyx_v_Tvox = 0; long __pyx_v_interp; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("_joint_histogram (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__H,&__pyx_n_s__iterI,&__pyx_n_s__imJ,&__pyx_n_s__Tvox,&__pyx_n_s__interp,0}; PyObject* values[5] = {0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__H)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__iterI)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_joint_histogram", 1, 5, 5, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__imJ)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_joint_histogram", 1, 5, 5, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__Tvox)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_joint_histogram", 1, 5, 5, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__interp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_joint_histogram", 1, 5, 5, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "_joint_histogram") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 5) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); } __pyx_v_H = ((PyArrayObject *)values[0]); __pyx_v_iterI = ((PyArrayIterObject *)values[1]); __pyx_v_imJ = ((PyArrayObject *)values[2]); __pyx_v_Tvox = ((PyArrayObject *)values[3]); __pyx_v_interp = __Pyx_PyInt_AsLong(values[4]); if (unlikely((__pyx_v_interp == (long)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; __pyx_clineno = __LINE__; 
goto __pyx_L3_error;} } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("_joint_histogram", 1, 5, 5, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.algorithms.registration._registration._joint_histogram", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_H), __pyx_ptype_5numpy_ndarray, 1, "H", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_iterI), __pyx_ptype_5numpy_flatiter, 1, "iterI", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_imJ), __pyx_ptype_5numpy_ndarray, 1, "imJ", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_Tvox), __pyx_ptype_5numpy_ndarray, 1, "Tvox", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_4nipy_10algorithms_12registration_13_registration__joint_histogram(__pyx_self, __pyx_v_H, __pyx_v_iterI, __pyx_v_imJ, __pyx_v_Tvox, __pyx_v_interp); goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/algorithms/registration/_registration.pyx":57 * * * def _joint_histogram(ndarray H, flatiter iterI, ndarray imJ, ndarray Tvox, long interp): # <<<<<<<<<<<<<< * """ * Compute the joint histogram given a transformation trial. 
*/ static PyObject *__pyx_pf_4nipy_10algorithms_12registration_13_registration__joint_histogram(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_H, PyArrayIterObject *__pyx_v_iterI, PyArrayObject *__pyx_v_imJ, PyArrayObject *__pyx_v_Tvox, long __pyx_v_interp) { unsigned int __pyx_v_clampI; unsigned int __pyx_v_clampJ; int __pyx_v_ret; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_joint_histogram", 0); /* "nipy/algorithms/registration/_registration.pyx":67 * * # Views * clampI = H.shape[0] # <<<<<<<<<<<<<< * clampJ = H.shape[1] * */ __pyx_v_clampI = ((unsigned int)(__pyx_v_H->dimensions[0])); /* "nipy/algorithms/registration/_registration.pyx":68 * # Views * clampI = H.shape[0] * clampJ = H.shape[1] # <<<<<<<<<<<<<< * * # Compute joint histogram */ __pyx_v_clampJ = ((unsigned int)(__pyx_v_H->dimensions[1])); /* "nipy/algorithms/registration/_registration.pyx":71 * * # Compute joint histogram * ret = joint_histogram(H, clampI, clampJ, iterI, imJ, Tvox, interp) # <<<<<<<<<<<<<< * if not ret == 0: * raise RuntimeError('Joint histogram failed because of incorrect input arrays.') */ __pyx_v_ret = joint_histogram(__pyx_v_H, __pyx_v_clampI, __pyx_v_clampJ, __pyx_v_iterI, __pyx_v_imJ, __pyx_v_Tvox, __pyx_v_interp); /* "nipy/algorithms/registration/_registration.pyx":72 * # Compute joint histogram * ret = joint_histogram(H, clampI, clampJ, iterI, imJ, Tvox, interp) * if not ret == 0: # <<<<<<<<<<<<<< * raise RuntimeError('Joint histogram failed because of incorrect input arrays.') * */ __pyx_t_1 = (!(__pyx_v_ret == 0)); if (__pyx_t_1) { /* "nipy/algorithms/registration/_registration.pyx":73 * ret = joint_histogram(H, clampI, clampJ, iterI, imJ, Tvox, interp) * if not ret == 0: * raise RuntimeError('Joint histogram failed because of incorrect input arrays.') # <<<<<<<<<<<<<< * * return */ __pyx_t_2 = PyObject_Call(__pyx_builtin_RuntimeError, ((PyObject *)__pyx_k_tuple_2), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L3; } __pyx_L3:; /* "nipy/algorithms/registration/_registration.pyx":75 * raise RuntimeError('Joint histogram failed because of incorrect input arrays.') * * return # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("nipy.algorithms.registration._registration._joint_histogram", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_10algorithms_12registration_13_registration_3_L1_moments(PyObject *__pyx_self, PyObject *__pyx_v_H); /*proto*/ static char __pyx_doc_4nipy_10algorithms_12registration_13_registration_2_L1_moments[] = "\n Compute L1 moments of order 0, 1 and 2 of a one-dimensional\n histogram.\n "; static PyMethodDef __pyx_mdef_4nipy_10algorithms_12registration_13_registration_3_L1_moments = {__Pyx_NAMESTR("_L1_moments"), (PyCFunction)__pyx_pw_4nipy_10algorithms_12registration_13_registration_3_L1_moments, METH_O, 
__Pyx_DOCSTR(__pyx_doc_4nipy_10algorithms_12registration_13_registration_2_L1_moments)}; static PyObject *__pyx_pw_4nipy_10algorithms_12registration_13_registration_3_L1_moments(PyObject *__pyx_self, PyObject *__pyx_v_H) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("_L1_moments (wrapper)", 0); if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_H), __pyx_ptype_5numpy_ndarray, 1, "H", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_4nipy_10algorithms_12registration_13_registration_2_L1_moments(__pyx_self, ((PyArrayObject *)__pyx_v_H)); goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/algorithms/registration/_registration.pyx":78 * * * def _L1_moments(ndarray H): # <<<<<<<<<<<<<< * """ * Compute L1 moments of order 0, 1 and 2 of a one-dimensional */ static PyObject *__pyx_pf_4nipy_10algorithms_12registration_13_registration_2_L1_moments(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_H) { double __pyx_v_n[1]; double __pyx_v_median[1]; double __pyx_v_dev[1]; int __pyx_v_ret; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_L1_moments", 0); /* "nipy/algorithms/registration/_registration.pyx":87 * int ret * * ret = L1_moments(n, median, dev, H) # <<<<<<<<<<<<<< * if not ret == 0: * raise RuntimeError('L1_moments failed because input array is not double.') */ __pyx_v_ret = L1_moments(__pyx_v_n, __pyx_v_median, __pyx_v_dev, __pyx_v_H); /* "nipy/algorithms/registration/_registration.pyx":88 * * ret = L1_moments(n, median, dev, H) * if not ret == 0: # <<<<<<<<<<<<<< * raise RuntimeError('L1_moments failed because input array is not double.') * */ __pyx_t_1 = (!(__pyx_v_ret == 0)); if (__pyx_t_1) { /* "nipy/algorithms/registration/_registration.pyx":89 * ret = L1_moments(n, median, dev, H) * if not ret == 0: * raise RuntimeError('L1_moments failed because input array is not double.') # <<<<<<<<<<<<<< * * return n[0], median[0], dev[0] */ __pyx_t_2 = PyObject_Call(__pyx_builtin_RuntimeError, ((PyObject *)__pyx_k_tuple_4), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 89; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; {__pyx_filename = __pyx_f[0]; __pyx_lineno = 89; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L3; } __pyx_L3:; /* "nipy/algorithms/registration/_registration.pyx":91 * raise RuntimeError('L1_moments failed because input array is not double.') * * return n[0], median[0], dev[0] # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = PyFloat_FromDouble((__pyx_v_n[0])); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyFloat_FromDouble((__pyx_v_median[0])); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyFloat_FromDouble((__pyx_v_dev[0])); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = 
PyTuple_New(3); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_2 = 0; __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_r = ((PyObject *)__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("nipy.algorithms.registration._registration._L1_moments", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_10algorithms_12registration_13_registration_5_cspline_transform(PyObject *__pyx_self, PyObject *__pyx_v_x); /*proto*/ static PyMethodDef __pyx_mdef_4nipy_10algorithms_12registration_13_registration_5_cspline_transform = {__Pyx_NAMESTR("_cspline_transform"), (PyCFunction)__pyx_pw_4nipy_10algorithms_12registration_13_registration_5_cspline_transform, METH_O, __Pyx_DOCSTR(0)}; static PyObject *__pyx_pw_4nipy_10algorithms_12registration_13_registration_5_cspline_transform(PyObject *__pyx_self, PyObject *__pyx_v_x) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("_cspline_transform (wrapper)", 0); if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x), __pyx_ptype_5numpy_ndarray, 1, "x", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 94; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_4nipy_10algorithms_12registration_13_registration_4_cspline_transform(__pyx_self, ((PyArrayObject *)__pyx_v_x)); goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/algorithms/registration/_registration.pyx":94 * * * def _cspline_transform(ndarray x): # <<<<<<<<<<<<<< * c = np.zeros([x.shape[i] for i in range(x.ndim)], dtype=np.double) * cubic_spline_transform(c, x) */ static PyObject *__pyx_pf_4nipy_10algorithms_12registration_13_registration_4_cspline_transform(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_x) { PyObject *__pyx_v_c = NULL; int __pyx_v_i; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_t_3; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_cspline_transform", 0); /* "nipy/algorithms/registration/_registration.pyx":95 * * def _cspline_transform(ndarray x): * c = np.zeros([x.shape[i] for i in range(x.ndim)], dtype=np.double) # <<<<<<<<<<<<<< * cubic_spline_transform(c, x) * return c */ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 95; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__zeros); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 95; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 95; 
__pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __pyx_v_x->nd; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; __pyx_t_5 = __Pyx_PyInt_to_py_Py_intptr_t((__pyx_v_x->dimensions[__pyx_v_i])); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 95; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); if (unlikely(__Pyx_PyList_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 95; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 95; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(((PyObject *)__pyx_t_1)); PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_t_1)); __Pyx_GIVEREF(((PyObject *)__pyx_t_1)); __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 95; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_1)); __pyx_t_6 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 95; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = PyObject_GetAttr(__pyx_t_6, __pyx_n_s__double); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 95; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__dtype), __pyx_t_7) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 95; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_5), ((PyObject *)__pyx_t_1)); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 95; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __pyx_v_c = __pyx_t_7; __pyx_t_7 = 0; /* "nipy/algorithms/registration/_registration.pyx":96 * def _cspline_transform(ndarray x): * c = np.zeros([x.shape[i] for i in range(x.ndim)], dtype=np.double) * cubic_spline_transform(c, x) # <<<<<<<<<<<<<< * return c * */ if (!(likely(((__pyx_v_c) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_c, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 96; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_7 = __pyx_v_c; __Pyx_INCREF(__pyx_t_7); cubic_spline_transform(((PyArrayObject *)__pyx_t_7), __pyx_v_x); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "nipy/algorithms/registration/_registration.pyx":97 * c = np.zeros([x.shape[i] for i in range(x.ndim)], dtype=np.double) * cubic_spline_transform(c, x) * return c # <<<<<<<<<<<<<< * * cdef ndarray _reshaped_double(object in_arr, ndarray sh_arr): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_c); __pyx_r = __pyx_v_c; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_AddTraceback("nipy.algorithms.registration._registration._cspline_transform", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; 
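/* Common exit for _cspline_transform: the coefficient array c (same shape as x,
   dtype double, filled in place by cubic_spline_transform) is the normal return
   value; on the error path above, temporaries are released, a traceback entry is
   added and NULL is returned. */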
__Pyx_XDECREF(__pyx_v_c); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/algorithms/registration/_registration.pyx":99 * return c * * cdef ndarray _reshaped_double(object in_arr, ndarray sh_arr): # <<<<<<<<<<<<<< * shape = [sh_arr.shape[i] for i in range(sh_arr.ndim)] * return np.reshape(in_arr, shape).astype(np.double) */ static PyArrayObject *__pyx_f_4nipy_10algorithms_12registration_13_registration__reshaped_double(PyObject *__pyx_v_in_arr, PyArrayObject *__pyx_v_sh_arr) { PyObject *__pyx_v_shape = NULL; int __pyx_v_i; PyArrayObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_reshaped_double", 0); /* "nipy/algorithms/registration/_registration.pyx":100 * * cdef ndarray _reshaped_double(object in_arr, ndarray sh_arr): * shape = [sh_arr.shape[i] for i in range(sh_arr.ndim)] # <<<<<<<<<<<<<< * return np.reshape(in_arr, shape).astype(np.double) * */ __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 100; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __pyx_v_sh_arr->nd; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; __pyx_t_4 = __Pyx_PyInt_to_py_Py_intptr_t((__pyx_v_sh_arr->dimensions[__pyx_v_i])); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 100; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); if (unlikely(__Pyx_PyList_Append(__pyx_t_1, (PyObject*)__pyx_t_4))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 100; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } __pyx_t_4 = ((PyObject *)__pyx_t_1); __Pyx_INCREF(__pyx_t_4); __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __pyx_v_shape = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; /* "nipy/algorithms/registration/_registration.pyx":101 * cdef ndarray _reshaped_double(object in_arr, ndarray sh_arr): * shape = [sh_arr.shape[i] for i in range(sh_arr.ndim)] * return np.reshape(in_arr, shape).astype(np.double) # <<<<<<<<<<<<<< * * def _cspline_sample1d(ndarray R, ndarray C, X=0, mode='zero'): */ __Pyx_XDECREF(((PyObject *)__pyx_r)); __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_1 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__reshape); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_INCREF(__pyx_v_in_arr); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_v_in_arr); __Pyx_GIVEREF(__pyx_v_in_arr); __Pyx_INCREF(((PyObject *)__pyx_v_shape)); PyTuple_SET_ITEM(__pyx_t_4, 1, ((PyObject *)__pyx_v_shape)); __Pyx_GIVEREF(((PyObject *)__pyx_v_shape)); __pyx_t_5 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(((PyObject 
*)__pyx_t_4)); __pyx_t_4 = 0; __pyx_t_4 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__astype); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_1 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__double); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("nipy.algorithms.registration._registration._reshaped_double", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_shape); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_10algorithms_12registration_13_registration_7_cspline_sample1d(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_4nipy_10algorithms_12registration_13_registration_7_cspline_sample1d = {__Pyx_NAMESTR("_cspline_sample1d"), (PyCFunction)__pyx_pw_4nipy_10algorithms_12registration_13_registration_7_cspline_sample1d, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(0)}; static PyObject *__pyx_pw_4nipy_10algorithms_12registration_13_registration_7_cspline_sample1d(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_R = 0; PyArrayObject *__pyx_v_C = 0; PyObject *__pyx_v_X = 0; PyObject *__pyx_v_mode = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("_cspline_sample1d (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__R,&__pyx_n_s__C,&__pyx_n_s__X,&__pyx_n_s__mode,0}; PyObject* values[4] = {0,0,0,0}; values[2] = ((PyObject *)__pyx_int_0); values[3] = ((PyObject *)__pyx_n_s__zero); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if 
(likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__R)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__C)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_cspline_sample1d", 0, 2, 4, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 103; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__X); if (value) { values[2] = value; kw_args--; } } case 3: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__mode); if (value) { values[3] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "_cspline_sample1d") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 103; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_R = ((PyArrayObject *)values[0]); __pyx_v_C = ((PyArrayObject *)values[1]); __pyx_v_X = values[2]; __pyx_v_mode = values[3]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("_cspline_sample1d", 0, 2, 4, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 103; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.algorithms.registration._registration._cspline_sample1d", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_R), __pyx_ptype_5numpy_ndarray, 1, "R", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 103; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_C), __pyx_ptype_5numpy_ndarray, 1, "C", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 103; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_4nipy_10algorithms_12registration_13_registration_6_cspline_sample1d(__pyx_self, __pyx_v_R, __pyx_v_C, __pyx_v_X, __pyx_v_mode); goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/algorithms/registration/_registration.pyx":103 * return np.reshape(in_arr, shape).astype(np.double) * * def _cspline_sample1d(ndarray R, ndarray C, X=0, mode='zero'): # <<<<<<<<<<<<<< * cdef double *r, *x * cdef broadcast multi */ static PyObject *__pyx_pf_4nipy_10algorithms_12registration_13_registration_6_cspline_sample1d(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_R, PyArrayObject *__pyx_v_C, PyObject *__pyx_v_X, PyObject *__pyx_v_mode) { double *__pyx_v_r; double *__pyx_v_x; PyArrayMultiIterObject *__pyx_v_multi = 0; PyArrayObject *__pyx_v_Xa = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; int __pyx_t_5; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_cspline_sample1d", 0); /* "nipy/algorithms/registration/_registration.pyx":106 * cdef double *r, *x * cdef broadcast multi * Xa = _reshaped_double(X, R) # <<<<<<<<<<<<<< * multi = PyArray_MultiIterNew(2, R, Xa) * while(multi.index < 
multi.size): */ __pyx_t_1 = ((PyObject *)__pyx_f_4nipy_10algorithms_12registration_13_registration__reshaped_double(__pyx_v_X, __pyx_v_R)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 106; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_v_Xa = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/registration/_registration.pyx":107 * cdef broadcast multi * Xa = _reshaped_double(X, R) * multi = PyArray_MultiIterNew(2, R, Xa) # <<<<<<<<<<<<<< * while(multi.index < multi.size): * r = PyArray_MultiIter_DATA(multi, 0) */ __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_R), ((void *)__pyx_v_Xa)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 107; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_broadcast))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 107; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_multi = ((PyArrayMultiIterObject *)__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/registration/_registration.pyx":108 * Xa = _reshaped_double(X, R) * multi = PyArray_MultiIterNew(2, R, Xa) * while(multi.index < multi.size): # <<<<<<<<<<<<<< * r = PyArray_MultiIter_DATA(multi, 0) * x = PyArray_MultiIter_DATA(multi, 1) */ while (1) { __pyx_t_1 = PyObject_GetAttr(((PyObject *)__pyx_v_multi), __pyx_n_s__index); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 108; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_GetAttr(((PyObject *)__pyx_v_multi), __pyx_n_s__size); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 108; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyObject_RichCompare(__pyx_t_1, __pyx_t_2, Py_LT); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 108; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 108; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (!__pyx_t_4) break; /* "nipy/algorithms/registration/_registration.pyx":109 * multi = PyArray_MultiIterNew(2, R, Xa) * while(multi.index < multi.size): * r = PyArray_MultiIter_DATA(multi, 0) # <<<<<<<<<<<<<< * x = PyArray_MultiIter_DATA(multi, 1) * r[0] = cubic_spline_sample1d(x[0], C, modes[mode]) */ __pyx_v_r = ((double *)PyArray_MultiIter_DATA(__pyx_v_multi, 0)); /* "nipy/algorithms/registration/_registration.pyx":110 * while(multi.index < multi.size): * r = PyArray_MultiIter_DATA(multi, 0) * x = PyArray_MultiIter_DATA(multi, 1) # <<<<<<<<<<<<<< * r[0] = cubic_spline_sample1d(x[0], C, modes[mode]) * PyArray_MultiIter_NEXT(multi) */ __pyx_v_x = ((double *)PyArray_MultiIter_DATA(__pyx_v_multi, 1)); /* "nipy/algorithms/registration/_registration.pyx":111 * r = PyArray_MultiIter_DATA(multi, 0) * x = PyArray_MultiIter_DATA(multi, 1) * r[0] = cubic_spline_sample1d(x[0], C, modes[mode]) # <<<<<<<<<<<<<< * PyArray_MultiIter_NEXT(multi) * return R */ __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__modes); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 111; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyObject_GetItem(__pyx_t_3, __pyx_v_mode); if 
(!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 111; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_5 = __Pyx_PyInt_AsInt(__pyx_t_2); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 111; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; (__pyx_v_r[0]) = cubic_spline_sample1d((__pyx_v_x[0]), __pyx_v_C, __pyx_t_5); /* "nipy/algorithms/registration/_registration.pyx":112 * x = PyArray_MultiIter_DATA(multi, 1) * r[0] = cubic_spline_sample1d(x[0], C, modes[mode]) * PyArray_MultiIter_NEXT(multi) # <<<<<<<<<<<<<< * return R * */ PyArray_MultiIter_NEXT(__pyx_v_multi); } /* "nipy/algorithms/registration/_registration.pyx":113 * r[0] = cubic_spline_sample1d(x[0], C, modes[mode]) * PyArray_MultiIter_NEXT(multi) * return R # <<<<<<<<<<<<<< * * def _cspline_sample2d(ndarray R, ndarray C, X=0, Y=0, */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_R)); __pyx_r = ((PyObject *)__pyx_v_R); goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("nipy.algorithms.registration._registration._cspline_sample1d", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_multi); __Pyx_XDECREF((PyObject *)__pyx_v_Xa); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_10algorithms_12registration_13_registration_9_cspline_sample2d(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_4nipy_10algorithms_12registration_13_registration_9_cspline_sample2d = {__Pyx_NAMESTR("_cspline_sample2d"), (PyCFunction)__pyx_pw_4nipy_10algorithms_12registration_13_registration_9_cspline_sample2d, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(0)}; static PyObject *__pyx_pw_4nipy_10algorithms_12registration_13_registration_9_cspline_sample2d(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_R = 0; PyArrayObject *__pyx_v_C = 0; PyObject *__pyx_v_X = 0; PyObject *__pyx_v_Y = 0; PyObject *__pyx_v_mx = 0; PyObject *__pyx_v_my = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("_cspline_sample2d (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__R,&__pyx_n_s__C,&__pyx_n_s__X,&__pyx_n_s__Y,&__pyx_n_s__mx,&__pyx_n_s__my,0}; PyObject* values[6] = {0,0,0,0,0,0}; values[2] = ((PyObject *)__pyx_int_0); values[3] = ((PyObject *)__pyx_int_0); values[4] = ((PyObject *)__pyx_n_s__zero); values[5] = ((PyObject *)__pyx_n_s__zero); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__R)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__C)) != 0)) 
kw_args--; else { __Pyx_RaiseArgtupleInvalid("_cspline_sample2d", 0, 2, 6, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 115; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__X); if (value) { values[2] = value; kw_args--; } } case 3: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__Y); if (value) { values[3] = value; kw_args--; } } case 4: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__mx); if (value) { values[4] = value; kw_args--; } } case 5: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__my); if (value) { values[5] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "_cspline_sample2d") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 115; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_R = ((PyArrayObject *)values[0]); __pyx_v_C = ((PyArrayObject *)values[1]); __pyx_v_X = values[2]; __pyx_v_Y = values[3]; __pyx_v_mx = values[4]; __pyx_v_my = values[5]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("_cspline_sample2d", 0, 2, 6, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 115; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.algorithms.registration._registration._cspline_sample2d", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_R), __pyx_ptype_5numpy_ndarray, 1, "R", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 115; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_C), __pyx_ptype_5numpy_ndarray, 1, "C", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 115; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_4nipy_10algorithms_12registration_13_registration_8_cspline_sample2d(__pyx_self, __pyx_v_R, __pyx_v_C, __pyx_v_X, __pyx_v_Y, __pyx_v_mx, __pyx_v_my); goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/algorithms/registration/_registration.pyx":115 * return R * * def _cspline_sample2d(ndarray R, ndarray C, X=0, Y=0, # <<<<<<<<<<<<<< * mx='zero', my='zero'): * cdef double *r, *x, *y */ static PyObject *__pyx_pf_4nipy_10algorithms_12registration_13_registration_8_cspline_sample2d(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_R, PyArrayObject *__pyx_v_C, PyObject *__pyx_v_X, PyObject *__pyx_v_Y, PyObject *__pyx_v_mx, PyObject *__pyx_v_my) { double *__pyx_v_r; double *__pyx_v_x; double *__pyx_v_y; PyArrayMultiIterObject *__pyx_v_multi = 0; PyArrayObject *__pyx_v_Xa = NULL; PyArrayObject *__pyx_v_Ya = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; int __pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char 
*__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_cspline_sample2d", 0); /* "nipy/algorithms/registration/_registration.pyx":119 * cdef double *r, *x, *y * cdef broadcast multi * Xa = _reshaped_double(X, R) # <<<<<<<<<<<<<< * Ya = _reshaped_double(Y, R) * multi = PyArray_MultiIterNew(3, R, Xa, Ya) */ __pyx_t_1 = ((PyObject *)__pyx_f_4nipy_10algorithms_12registration_13_registration__reshaped_double(__pyx_v_X, __pyx_v_R)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 119; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_v_Xa = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/registration/_registration.pyx":120 * cdef broadcast multi * Xa = _reshaped_double(X, R) * Ya = _reshaped_double(Y, R) # <<<<<<<<<<<<<< * multi = PyArray_MultiIterNew(3, R, Xa, Ya) * while(multi.index < multi.size): */ __pyx_t_1 = ((PyObject *)__pyx_f_4nipy_10algorithms_12registration_13_registration__reshaped_double(__pyx_v_Y, __pyx_v_R)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 120; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_v_Ya = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/registration/_registration.pyx":121 * Xa = _reshaped_double(X, R) * Ya = _reshaped_double(Y, R) * multi = PyArray_MultiIterNew(3, R, Xa, Ya) # <<<<<<<<<<<<<< * while(multi.index < multi.size): * r = PyArray_MultiIter_DATA(multi, 0) */ __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_R), ((void *)__pyx_v_Xa), ((void *)__pyx_v_Ya)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 121; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_broadcast))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 121; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_multi = ((PyArrayMultiIterObject *)__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/registration/_registration.pyx":122 * Ya = _reshaped_double(Y, R) * multi = PyArray_MultiIterNew(3, R, Xa, Ya) * while(multi.index < multi.size): # <<<<<<<<<<<<<< * r = PyArray_MultiIter_DATA(multi, 0) * x = PyArray_MultiIter_DATA(multi, 1) */ while (1) { __pyx_t_1 = PyObject_GetAttr(((PyObject *)__pyx_v_multi), __pyx_n_s__index); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 122; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_GetAttr(((PyObject *)__pyx_v_multi), __pyx_n_s__size); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 122; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyObject_RichCompare(__pyx_t_1, __pyx_t_2, Py_LT); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 122; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 122; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (!__pyx_t_4) break; /* "nipy/algorithms/registration/_registration.pyx":123 * multi = PyArray_MultiIterNew(3, R, Xa, Ya) * while(multi.index < multi.size): * r = PyArray_MultiIter_DATA(multi, 0) # <<<<<<<<<<<<<< * x = PyArray_MultiIter_DATA(multi, 1) * y = PyArray_MultiIter_DATA(multi, 2) */ __pyx_v_r = 
((double *)PyArray_MultiIter_DATA(__pyx_v_multi, 0)); /* "nipy/algorithms/registration/_registration.pyx":124 * while(multi.index < multi.size): * r = PyArray_MultiIter_DATA(multi, 0) * x = PyArray_MultiIter_DATA(multi, 1) # <<<<<<<<<<<<<< * y = PyArray_MultiIter_DATA(multi, 2) * r[0] = cubic_spline_sample2d(x[0], y[0], C, modes[mx], modes[my]) */ __pyx_v_x = ((double *)PyArray_MultiIter_DATA(__pyx_v_multi, 1)); /* "nipy/algorithms/registration/_registration.pyx":125 * r = PyArray_MultiIter_DATA(multi, 0) * x = PyArray_MultiIter_DATA(multi, 1) * y = PyArray_MultiIter_DATA(multi, 2) # <<<<<<<<<<<<<< * r[0] = cubic_spline_sample2d(x[0], y[0], C, modes[mx], modes[my]) * PyArray_MultiIter_NEXT(multi) */ __pyx_v_y = ((double *)PyArray_MultiIter_DATA(__pyx_v_multi, 2)); /* "nipy/algorithms/registration/_registration.pyx":126 * x = PyArray_MultiIter_DATA(multi, 1) * y = PyArray_MultiIter_DATA(multi, 2) * r[0] = cubic_spline_sample2d(x[0], y[0], C, modes[mx], modes[my]) # <<<<<<<<<<<<<< * PyArray_MultiIter_NEXT(multi) * return R */ __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__modes); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 126; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyObject_GetItem(__pyx_t_3, __pyx_v_mx); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 126; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_5 = __Pyx_PyInt_AsInt(__pyx_t_2); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 126; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__modes); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 126; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyObject_GetItem(__pyx_t_2, __pyx_v_my); if (!__pyx_t_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 126; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_6 = __Pyx_PyInt_AsInt(__pyx_t_3); if (unlikely((__pyx_t_6 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 126; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; (__pyx_v_r[0]) = cubic_spline_sample2d((__pyx_v_x[0]), (__pyx_v_y[0]), __pyx_v_C, __pyx_t_5, __pyx_t_6); /* "nipy/algorithms/registration/_registration.pyx":127 * y = PyArray_MultiIter_DATA(multi, 2) * r[0] = cubic_spline_sample2d(x[0], y[0], C, modes[mx], modes[my]) * PyArray_MultiIter_NEXT(multi) # <<<<<<<<<<<<<< * return R * */ PyArray_MultiIter_NEXT(__pyx_v_multi); } /* "nipy/algorithms/registration/_registration.pyx":128 * r[0] = cubic_spline_sample2d(x[0], y[0], C, modes[mx], modes[my]) * PyArray_MultiIter_NEXT(multi) * return R # <<<<<<<<<<<<<< * * def _cspline_sample3d(ndarray R, ndarray C, X=0, Y=0, Z=0, */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_R)); __pyx_r = ((PyObject *)__pyx_v_R); goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("nipy.algorithms.registration._registration._cspline_sample2d", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_multi); __Pyx_XDECREF((PyObject *)__pyx_v_Xa); __Pyx_XDECREF((PyObject *)__pyx_v_Ya); 
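/* End of _cspline_sample2d: the broadcast iterator and the temporary double
   copies of X and Y have been released; the result array R, filled in place,
   is handed back to the caller (NULL on error).
   Illustrative call from Python -- hypothetical names, not taken from the
   .pyx source:
       R = np.zeros(X.shape)
       _cspline_sample2d(R, C, X, Y, mx='nearest', my='reflect')
   where C holds cubic-spline coefficients (e.g. from _cspline_transform) and
   X, Y give the sample coordinates broadcast against R. */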
__Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_10algorithms_12registration_13_registration_11_cspline_sample3d(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_4nipy_10algorithms_12registration_13_registration_11_cspline_sample3d = {__Pyx_NAMESTR("_cspline_sample3d"), (PyCFunction)__pyx_pw_4nipy_10algorithms_12registration_13_registration_11_cspline_sample3d, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(0)}; static PyObject *__pyx_pw_4nipy_10algorithms_12registration_13_registration_11_cspline_sample3d(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_R = 0; PyArrayObject *__pyx_v_C = 0; PyObject *__pyx_v_X = 0; PyObject *__pyx_v_Y = 0; PyObject *__pyx_v_Z = 0; PyObject *__pyx_v_mx = 0; PyObject *__pyx_v_my = 0; PyObject *__pyx_v_mz = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("_cspline_sample3d (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__R,&__pyx_n_s__C,&__pyx_n_s__X,&__pyx_n_s__Y,&__pyx_n_s__Z,&__pyx_n_s__mx,&__pyx_n_s__my,&__pyx_n_s__mz,0}; PyObject* values[8] = {0,0,0,0,0,0,0,0}; values[2] = ((PyObject *)__pyx_int_0); values[3] = ((PyObject *)__pyx_int_0); values[4] = ((PyObject *)__pyx_int_0); values[5] = ((PyObject *)__pyx_n_s__zero); values[6] = ((PyObject *)__pyx_n_s__zero); values[7] = ((PyObject *)__pyx_n_s__zero); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__R)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__C)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_cspline_sample3d", 0, 2, 8, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 130; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__X); if (value) { values[2] = value; kw_args--; } } case 3: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__Y); if (value) { values[3] = value; kw_args--; } } case 4: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__Z); if (value) { values[4] = value; kw_args--; } } case 5: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__mx); if (value) { values[5] = value; kw_args--; } } case 6: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__my); if (value) { values[6] = value; kw_args--; } } case 7: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__mz); if (value) { values[7] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "_cspline_sample3d") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 130; __pyx_clineno = 
__LINE__; goto __pyx_L3_error;} } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_R = ((PyArrayObject *)values[0]); __pyx_v_C = ((PyArrayObject *)values[1]); __pyx_v_X = values[2]; __pyx_v_Y = values[3]; __pyx_v_Z = values[4]; __pyx_v_mx = values[5]; __pyx_v_my = values[6]; __pyx_v_mz = values[7]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("_cspline_sample3d", 0, 2, 8, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 130; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.algorithms.registration._registration._cspline_sample3d", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_R), __pyx_ptype_5numpy_ndarray, 1, "R", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 130; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_C), __pyx_ptype_5numpy_ndarray, 1, "C", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 130; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_4nipy_10algorithms_12registration_13_registration_10_cspline_sample3d(__pyx_self, __pyx_v_R, __pyx_v_C, __pyx_v_X, __pyx_v_Y, __pyx_v_Z, __pyx_v_mx, __pyx_v_my, __pyx_v_mz); goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/algorithms/registration/_registration.pyx":130 * return R * * def _cspline_sample3d(ndarray R, ndarray C, X=0, Y=0, Z=0, # <<<<<<<<<<<<<< * mx='zero', my='zero', mz='zero'): * cdef double *r, *x, *y, *z */ static PyObject *__pyx_pf_4nipy_10algorithms_12registration_13_registration_10_cspline_sample3d(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_R, PyArrayObject *__pyx_v_C, PyObject *__pyx_v_X, PyObject *__pyx_v_Y, PyObject *__pyx_v_Z, PyObject *__pyx_v_mx, PyObject *__pyx_v_my, PyObject *__pyx_v_mz) { double *__pyx_v_r; double *__pyx_v_x; double *__pyx_v_y; double *__pyx_v_z; PyArrayMultiIterObject *__pyx_v_multi = 0; PyArrayObject *__pyx_v_Xa = NULL; PyArrayObject *__pyx_v_Ya = NULL; PyArrayObject *__pyx_v_Za = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; int __pyx_t_5; int __pyx_t_6; int __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_cspline_sample3d", 0); /* "nipy/algorithms/registration/_registration.pyx":134 * cdef double *r, *x, *y, *z * cdef broadcast multi * Xa = _reshaped_double(X, R) # <<<<<<<<<<<<<< * Ya = _reshaped_double(Y, R) * Za = _reshaped_double(Z, R) */ __pyx_t_1 = ((PyObject *)__pyx_f_4nipy_10algorithms_12registration_13_registration__reshaped_double(__pyx_v_X, __pyx_v_R)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 134; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_v_Xa = ((PyArrayObject 
*)__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/registration/_registration.pyx":135 * cdef broadcast multi * Xa = _reshaped_double(X, R) * Ya = _reshaped_double(Y, R) # <<<<<<<<<<<<<< * Za = _reshaped_double(Z, R) * multi = PyArray_MultiIterNew(4, R, Xa, Ya, Za) */ __pyx_t_1 = ((PyObject *)__pyx_f_4nipy_10algorithms_12registration_13_registration__reshaped_double(__pyx_v_Y, __pyx_v_R)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_v_Ya = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/registration/_registration.pyx":136 * Xa = _reshaped_double(X, R) * Ya = _reshaped_double(Y, R) * Za = _reshaped_double(Z, R) # <<<<<<<<<<<<<< * multi = PyArray_MultiIterNew(4, R, Xa, Ya, Za) * while(multi.index < multi.size): */ __pyx_t_1 = ((PyObject *)__pyx_f_4nipy_10algorithms_12registration_13_registration__reshaped_double(__pyx_v_Z, __pyx_v_R)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 136; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_v_Za = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/registration/_registration.pyx":137 * Ya = _reshaped_double(Y, R) * Za = _reshaped_double(Z, R) * multi = PyArray_MultiIterNew(4, R, Xa, Ya, Za) # <<<<<<<<<<<<<< * while(multi.index < multi.size): * r = PyArray_MultiIter_DATA(multi, 0) */ __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_R), ((void *)__pyx_v_Xa), ((void *)__pyx_v_Ya), ((void *)__pyx_v_Za)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 137; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_broadcast))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 137; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_multi = ((PyArrayMultiIterObject *)__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/registration/_registration.pyx":138 * Za = _reshaped_double(Z, R) * multi = PyArray_MultiIterNew(4, R, Xa, Ya, Za) * while(multi.index < multi.size): # <<<<<<<<<<<<<< * r = PyArray_MultiIter_DATA(multi, 0) * x = PyArray_MultiIter_DATA(multi, 1) */ while (1) { __pyx_t_1 = PyObject_GetAttr(((PyObject *)__pyx_v_multi), __pyx_n_s__index); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 138; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_GetAttr(((PyObject *)__pyx_v_multi), __pyx_n_s__size); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 138; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyObject_RichCompare(__pyx_t_1, __pyx_t_2, Py_LT); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 138; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 138; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (!__pyx_t_4) break; /* "nipy/algorithms/registration/_registration.pyx":139 * multi = PyArray_MultiIterNew(4, R, Xa, Ya, Za) * while(multi.index < multi.size): * r = PyArray_MultiIter_DATA(multi, 0) # <<<<<<<<<<<<<< * x = PyArray_MultiIter_DATA(multi, 1) * y = PyArray_MultiIter_DATA(multi, 2) */ __pyx_v_r = ((double 
*)PyArray_MultiIter_DATA(__pyx_v_multi, 0)); /* "nipy/algorithms/registration/_registration.pyx":140 * while(multi.index < multi.size): * r = PyArray_MultiIter_DATA(multi, 0) * x = PyArray_MultiIter_DATA(multi, 1) # <<<<<<<<<<<<<< * y = PyArray_MultiIter_DATA(multi, 2) * z = PyArray_MultiIter_DATA(multi, 3) */ __pyx_v_x = ((double *)PyArray_MultiIter_DATA(__pyx_v_multi, 1)); /* "nipy/algorithms/registration/_registration.pyx":141 * r = PyArray_MultiIter_DATA(multi, 0) * x = PyArray_MultiIter_DATA(multi, 1) * y = PyArray_MultiIter_DATA(multi, 2) # <<<<<<<<<<<<<< * z = PyArray_MultiIter_DATA(multi, 3) * r[0] = cubic_spline_sample3d(x[0], y[0], z[0], C, modes[mx], modes[my], modes[mz]) */ __pyx_v_y = ((double *)PyArray_MultiIter_DATA(__pyx_v_multi, 2)); /* "nipy/algorithms/registration/_registration.pyx":142 * x = PyArray_MultiIter_DATA(multi, 1) * y = PyArray_MultiIter_DATA(multi, 2) * z = PyArray_MultiIter_DATA(multi, 3) # <<<<<<<<<<<<<< * r[0] = cubic_spline_sample3d(x[0], y[0], z[0], C, modes[mx], modes[my], modes[mz]) * PyArray_MultiIter_NEXT(multi) */ __pyx_v_z = ((double *)PyArray_MultiIter_DATA(__pyx_v_multi, 3)); /* "nipy/algorithms/registration/_registration.pyx":143 * y = PyArray_MultiIter_DATA(multi, 2) * z = PyArray_MultiIter_DATA(multi, 3) * r[0] = cubic_spline_sample3d(x[0], y[0], z[0], C, modes[mx], modes[my], modes[mz]) # <<<<<<<<<<<<<< * PyArray_MultiIter_NEXT(multi) * return R */ __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__modes); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 143; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyObject_GetItem(__pyx_t_3, __pyx_v_mx); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 143; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_5 = __Pyx_PyInt_AsInt(__pyx_t_2); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 143; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__modes); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 143; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyObject_GetItem(__pyx_t_2, __pyx_v_my); if (!__pyx_t_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 143; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_6 = __Pyx_PyInt_AsInt(__pyx_t_3); if (unlikely((__pyx_t_6 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 143; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__modes); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 143; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyObject_GetItem(__pyx_t_3, __pyx_v_mz); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 143; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyInt_AsInt(__pyx_t_2); if (unlikely((__pyx_t_7 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 143; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; (__pyx_v_r[0]) = cubic_spline_sample3d((__pyx_v_x[0]), (__pyx_v_y[0]), (__pyx_v_z[0]), __pyx_v_C, __pyx_t_5, 
__pyx_t_6, __pyx_t_7); /* "nipy/algorithms/registration/_registration.pyx":144 * z = PyArray_MultiIter_DATA(multi, 3) * r[0] = cubic_spline_sample3d(x[0], y[0], z[0], C, modes[mx], modes[my], modes[mz]) * PyArray_MultiIter_NEXT(multi) # <<<<<<<<<<<<<< * return R * */ PyArray_MultiIter_NEXT(__pyx_v_multi); } /* "nipy/algorithms/registration/_registration.pyx":145 * r[0] = cubic_spline_sample3d(x[0], y[0], z[0], C, modes[mx], modes[my], modes[mz]) * PyArray_MultiIter_NEXT(multi) * return R # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_R)); __pyx_r = ((PyObject *)__pyx_v_R); goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("nipy.algorithms.registration._registration._cspline_sample3d", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_multi); __Pyx_XDECREF((PyObject *)__pyx_v_Xa); __Pyx_XDECREF((PyObject *)__pyx_v_Ya); __Pyx_XDECREF((PyObject *)__pyx_v_Za); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_10algorithms_12registration_13_registration_13_cspline_sample4d(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_10algorithms_12registration_13_registration_12_cspline_sample4d[] = "\n In-place cubic spline sampling. R.dtype must be 'double'. \n "; static PyMethodDef __pyx_mdef_4nipy_10algorithms_12registration_13_registration_13_cspline_sample4d = {__Pyx_NAMESTR("_cspline_sample4d"), (PyCFunction)__pyx_pw_4nipy_10algorithms_12registration_13_registration_13_cspline_sample4d, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4nipy_10algorithms_12registration_13_registration_12_cspline_sample4d)}; static PyObject *__pyx_pw_4nipy_10algorithms_12registration_13_registration_13_cspline_sample4d(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_R = 0; PyArrayObject *__pyx_v_C = 0; PyObject *__pyx_v_X = 0; PyObject *__pyx_v_Y = 0; PyObject *__pyx_v_Z = 0; PyObject *__pyx_v_T = 0; PyObject *__pyx_v_mx = 0; PyObject *__pyx_v_my = 0; PyObject *__pyx_v_mz = 0; PyObject *__pyx_v_mt = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("_cspline_sample4d (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__R,&__pyx_n_s__C,&__pyx_n_s__X,&__pyx_n_s__Y,&__pyx_n_s__Z,&__pyx_n_s__T,&__pyx_n_s__mx,&__pyx_n_s__my,&__pyx_n_s__mz,&__pyx_n_s__mt,0}; PyObject* values[10] = {0,0,0,0,0,0,0,0,0,0}; values[2] = ((PyObject *)__pyx_int_0); values[3] = ((PyObject *)__pyx_int_0); values[4] = ((PyObject *)__pyx_int_0); values[5] = ((PyObject *)__pyx_int_0); values[6] = ((PyObject *)__pyx_n_s__zero); values[7] = ((PyObject *)__pyx_n_s__zero); values[8] = ((PyObject *)__pyx_n_s__zero); values[9] = ((PyObject *)__pyx_n_s__zero); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9); case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: 
values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__R)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__C)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_cspline_sample4d", 0, 2, 10, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 148; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__X); if (value) { values[2] = value; kw_args--; } } case 3: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__Y); if (value) { values[3] = value; kw_args--; } } case 4: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__Z); if (value) { values[4] = value; kw_args--; } } case 5: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__T); if (value) { values[5] = value; kw_args--; } } case 6: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__mx); if (value) { values[6] = value; kw_args--; } } case 7: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__my); if (value) { values[7] = value; kw_args--; } } case 8: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__mz); if (value) { values[8] = value; kw_args--; } } case 9: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__mt); if (value) { values[9] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "_cspline_sample4d") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 148; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9); case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_R = ((PyArrayObject *)values[0]); __pyx_v_C = ((PyArrayObject *)values[1]); __pyx_v_X = values[2]; __pyx_v_Y = values[3]; __pyx_v_Z = values[4]; __pyx_v_T = values[5]; __pyx_v_mx = values[6]; __pyx_v_my = values[7]; __pyx_v_mz = values[8]; __pyx_v_mt = values[9]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("_cspline_sample4d", 0, 2, 10, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 148; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.algorithms.registration._registration._cspline_sample4d", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_R), __pyx_ptype_5numpy_ndarray, 1, "R", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 148; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_C), 
__pyx_ptype_5numpy_ndarray, 1, "C", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 148; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_4nipy_10algorithms_12registration_13_registration_12_cspline_sample4d(__pyx_self, __pyx_v_R, __pyx_v_C, __pyx_v_X, __pyx_v_Y, __pyx_v_Z, __pyx_v_T, __pyx_v_mx, __pyx_v_my, __pyx_v_mz, __pyx_v_mt); goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/algorithms/registration/_registration.pyx":148 * * * def _cspline_sample4d(ndarray R, ndarray C, X=0, Y=0, Z=0, T=0, # <<<<<<<<<<<<<< * mx='zero', my='zero', mz='zero', mt='zero'): * """ */ static PyObject *__pyx_pf_4nipy_10algorithms_12registration_13_registration_12_cspline_sample4d(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_R, PyArrayObject *__pyx_v_C, PyObject *__pyx_v_X, PyObject *__pyx_v_Y, PyObject *__pyx_v_Z, PyObject *__pyx_v_T, PyObject *__pyx_v_mx, PyObject *__pyx_v_my, PyObject *__pyx_v_mz, PyObject *__pyx_v_mt) { double *__pyx_v_r; double *__pyx_v_x; double *__pyx_v_y; double *__pyx_v_z; double *__pyx_v_t; PyArrayMultiIterObject *__pyx_v_multi = 0; PyArrayObject *__pyx_v_Xa = NULL; PyArrayObject *__pyx_v_Ya = NULL; PyArrayObject *__pyx_v_Za = NULL; PyArrayObject *__pyx_v_Ta = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; int __pyx_t_5; int __pyx_t_6; int __pyx_t_7; int __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_cspline_sample4d", 0); /* "nipy/algorithms/registration/_registration.pyx":155 * cdef double *r, *x, *y, *z, *t * cdef broadcast multi * Xa = _reshaped_double(X, R) # <<<<<<<<<<<<<< * Ya = _reshaped_double(Y, R) * Za = _reshaped_double(Z, R) */ __pyx_t_1 = ((PyObject *)__pyx_f_4nipy_10algorithms_12registration_13_registration__reshaped_double(__pyx_v_X, __pyx_v_R)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 155; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_v_Xa = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/registration/_registration.pyx":156 * cdef broadcast multi * Xa = _reshaped_double(X, R) * Ya = _reshaped_double(Y, R) # <<<<<<<<<<<<<< * Za = _reshaped_double(Z, R) * Ta = _reshaped_double(T, R) */ __pyx_t_1 = ((PyObject *)__pyx_f_4nipy_10algorithms_12registration_13_registration__reshaped_double(__pyx_v_Y, __pyx_v_R)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 156; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_v_Ya = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/registration/_registration.pyx":157 * Xa = _reshaped_double(X, R) * Ya = _reshaped_double(Y, R) * Za = _reshaped_double(Z, R) # <<<<<<<<<<<<<< * Ta = _reshaped_double(T, R) * multi = PyArray_MultiIterNew(5, R, Xa, Ya, Za, Ta) */ __pyx_t_1 = ((PyObject *)__pyx_f_4nipy_10algorithms_12registration_13_registration__reshaped_double(__pyx_v_Z, __pyx_v_R)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 157; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_v_Za = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/registration/_registration.pyx":158 * Ya = _reshaped_double(Y, R) * Za = _reshaped_double(Z, R) * Ta = _reshaped_double(T, R) # <<<<<<<<<<<<<< * multi = PyArray_MultiIterNew(5, R, Xa, Ya, Za, Ta) * 
while(multi.index < multi.size): */ __pyx_t_1 = ((PyObject *)__pyx_f_4nipy_10algorithms_12registration_13_registration__reshaped_double(__pyx_v_T, __pyx_v_R)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 158; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_v_Ta = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/registration/_registration.pyx":159 * Za = _reshaped_double(Z, R) * Ta = _reshaped_double(T, R) * multi = PyArray_MultiIterNew(5, R, Xa, Ya, Za, Ta) # <<<<<<<<<<<<<< * while(multi.index < multi.size): * r = PyArray_MultiIter_DATA(multi, 0) */ __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_R), ((void *)__pyx_v_Xa), ((void *)__pyx_v_Ya), ((void *)__pyx_v_Za), ((void *)__pyx_v_Ta)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 159; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_broadcast))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 159; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_multi = ((PyArrayMultiIterObject *)__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/registration/_registration.pyx":160 * Ta = _reshaped_double(T, R) * multi = PyArray_MultiIterNew(5, R, Xa, Ya, Za, Ta) * while(multi.index < multi.size): # <<<<<<<<<<<<<< * r = PyArray_MultiIter_DATA(multi, 0) * x = PyArray_MultiIter_DATA(multi, 1) */ while (1) { __pyx_t_1 = PyObject_GetAttr(((PyObject *)__pyx_v_multi), __pyx_n_s__index); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 160; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_GetAttr(((PyObject *)__pyx_v_multi), __pyx_n_s__size); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 160; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyObject_RichCompare(__pyx_t_1, __pyx_t_2, Py_LT); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 160; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 160; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (!__pyx_t_4) break; /* "nipy/algorithms/registration/_registration.pyx":161 * multi = PyArray_MultiIterNew(5, R, Xa, Ya, Za, Ta) * while(multi.index < multi.size): * r = PyArray_MultiIter_DATA(multi, 0) # <<<<<<<<<<<<<< * x = PyArray_MultiIter_DATA(multi, 1) * y = PyArray_MultiIter_DATA(multi, 2) */ __pyx_v_r = ((double *)PyArray_MultiIter_DATA(__pyx_v_multi, 0)); /* "nipy/algorithms/registration/_registration.pyx":162 * while(multi.index < multi.size): * r = PyArray_MultiIter_DATA(multi, 0) * x = PyArray_MultiIter_DATA(multi, 1) # <<<<<<<<<<<<<< * y = PyArray_MultiIter_DATA(multi, 2) * z = PyArray_MultiIter_DATA(multi, 3) */ __pyx_v_x = ((double *)PyArray_MultiIter_DATA(__pyx_v_multi, 1)); /* "nipy/algorithms/registration/_registration.pyx":163 * r = PyArray_MultiIter_DATA(multi, 0) * x = PyArray_MultiIter_DATA(multi, 1) * y = PyArray_MultiIter_DATA(multi, 2) # <<<<<<<<<<<<<< * z = PyArray_MultiIter_DATA(multi, 3) * t = PyArray_MultiIter_DATA(multi, 4) */ __pyx_v_y = ((double *)PyArray_MultiIter_DATA(__pyx_v_multi, 2)); /* "nipy/algorithms/registration/_registration.pyx":164 * x = 
PyArray_MultiIter_DATA(multi, 1) * y = PyArray_MultiIter_DATA(multi, 2) * z = PyArray_MultiIter_DATA(multi, 3) # <<<<<<<<<<<<<< * t = PyArray_MultiIter_DATA(multi, 4) * r[0] = cubic_spline_sample4d(x[0], y[0], z[0], t[0], C, modes[mx], modes[my], modes[mz], modes[mt]) */ __pyx_v_z = ((double *)PyArray_MultiIter_DATA(__pyx_v_multi, 3)); /* "nipy/algorithms/registration/_registration.pyx":165 * y = PyArray_MultiIter_DATA(multi, 2) * z = PyArray_MultiIter_DATA(multi, 3) * t = PyArray_MultiIter_DATA(multi, 4) # <<<<<<<<<<<<<< * r[0] = cubic_spline_sample4d(x[0], y[0], z[0], t[0], C, modes[mx], modes[my], modes[mz], modes[mt]) * PyArray_MultiIter_NEXT(multi) */ __pyx_v_t = ((double *)PyArray_MultiIter_DATA(__pyx_v_multi, 4)); /* "nipy/algorithms/registration/_registration.pyx":166 * z = PyArray_MultiIter_DATA(multi, 3) * t = PyArray_MultiIter_DATA(multi, 4) * r[0] = cubic_spline_sample4d(x[0], y[0], z[0], t[0], C, modes[mx], modes[my], modes[mz], modes[mt]) # <<<<<<<<<<<<<< * PyArray_MultiIter_NEXT(multi) * return R */ __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__modes); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 166; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyObject_GetItem(__pyx_t_3, __pyx_v_mx); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 166; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_5 = __Pyx_PyInt_AsInt(__pyx_t_2); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 166; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__modes); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 166; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyObject_GetItem(__pyx_t_2, __pyx_v_my); if (!__pyx_t_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 166; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_6 = __Pyx_PyInt_AsInt(__pyx_t_3); if (unlikely((__pyx_t_6 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 166; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__modes); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 166; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyObject_GetItem(__pyx_t_3, __pyx_v_mz); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 166; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyInt_AsInt(__pyx_t_2); if (unlikely((__pyx_t_7 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 166; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__modes); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 166; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyObject_GetItem(__pyx_t_2, __pyx_v_mt); if (!__pyx_t_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 166; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_8 = __Pyx_PyInt_AsInt(__pyx_t_3); if (unlikely((__pyx_t_8 == 
(int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 166; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; (__pyx_v_r[0]) = cubic_spline_sample4d((__pyx_v_x[0]), (__pyx_v_y[0]), (__pyx_v_z[0]), (__pyx_v_t[0]), __pyx_v_C, __pyx_t_5, __pyx_t_6, __pyx_t_7, __pyx_t_8); /* "nipy/algorithms/registration/_registration.pyx":167 * t = PyArray_MultiIter_DATA(multi, 4) * r[0] = cubic_spline_sample4d(x[0], y[0], z[0], t[0], C, modes[mx], modes[my], modes[mz], modes[mt]) * PyArray_MultiIter_NEXT(multi) # <<<<<<<<<<<<<< * return R * */ PyArray_MultiIter_NEXT(__pyx_v_multi); } /* "nipy/algorithms/registration/_registration.pyx":168 * r[0] = cubic_spline_sample4d(x[0], y[0], z[0], t[0], C, modes[mx], modes[my], modes[mz], modes[mt]) * PyArray_MultiIter_NEXT(multi) * return R # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_R)); __pyx_r = ((PyObject *)__pyx_v_R); goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("nipy.algorithms.registration._registration._cspline_sample4d", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_multi); __Pyx_XDECREF((PyObject *)__pyx_v_Xa); __Pyx_XDECREF((PyObject *)__pyx_v_Ya); __Pyx_XDECREF((PyObject *)__pyx_v_Za); __Pyx_XDECREF((PyObject *)__pyx_v_Ta); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_10algorithms_12registration_13_registration_15_cspline_resample3d(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_10algorithms_12registration_13_registration_14_cspline_resample3d[] = "\n Perform cubic spline resampling of a 3d input image `im` into a\n grid with shape `dims` according to an affine transform\n represented by a 4x4 matrix `Tvox` that assumes voxel\n coordinates. Boundary conditions on each axis are determined by\n the keyword arguments `mx`, `my` and `mz`, respectively. 
Possible\n choices are:\n\n 'zero': assume zero intensity outside the target grid\n 'nearest': extrapolate intensity by the closest grid point along the axis\n 'reflect': extrapolate intensity by mirroring the input image along the axis\n\n Note that `Tvox` will be re-ordered in C convention if needed.\n "; static PyMethodDef __pyx_mdef_4nipy_10algorithms_12registration_13_registration_15_cspline_resample3d = {__Pyx_NAMESTR("_cspline_resample3d"), (PyCFunction)__pyx_pw_4nipy_10algorithms_12registration_13_registration_15_cspline_resample3d, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4nipy_10algorithms_12registration_13_registration_14_cspline_resample3d)}; static PyObject *__pyx_pw_4nipy_10algorithms_12registration_13_registration_15_cspline_resample3d(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_im = 0; PyObject *__pyx_v_dims = 0; PyArrayObject *__pyx_v_Tvox = 0; PyObject *__pyx_v_dtype = 0; PyObject *__pyx_v_mx = 0; PyObject *__pyx_v_my = 0; PyObject *__pyx_v_mz = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("_cspline_resample3d (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__im,&__pyx_n_s__dims,&__pyx_n_s__Tvox,&__pyx_n_s__dtype,&__pyx_n_s__mx,&__pyx_n_s__my,&__pyx_n_s__mz,0}; PyObject* values[7] = {0,0,0,0,0,0,0}; /* "nipy/algorithms/registration/_registration.pyx":171 * * * def _cspline_resample3d(ndarray im, dims, ndarray Tvox, dtype=None, # <<<<<<<<<<<<<< * mx='zero', my='zero', mz='zero'): * """ */ values[3] = ((PyObject *)Py_None); values[4] = ((PyObject *)__pyx_n_s__zero); values[5] = ((PyObject *)__pyx_n_s__zero); values[6] = ((PyObject *)__pyx_n_s__zero); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__im)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__dims)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_cspline_resample3d", 0, 3, 7, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 171; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__Tvox)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_cspline_resample3d", 0, 3, 7, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 171; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__dtype); if (value) { values[3] = value; kw_args--; } } case 4: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__mx); if (value) { values[4] = value; kw_args--; } } case 5: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__my); if (value) { values[5] = value; kw_args--; } } case 6: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__mz); if (value) { values[6] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if 
(unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "_cspline_resample3d") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 171; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_im = ((PyArrayObject *)values[0]); __pyx_v_dims = values[1]; __pyx_v_Tvox = ((PyArrayObject *)values[2]); __pyx_v_dtype = values[3]; __pyx_v_mx = values[4]; __pyx_v_my = values[5]; __pyx_v_mz = values[6]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("_cspline_resample3d", 0, 3, 7, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 171; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.algorithms.registration._registration._cspline_resample3d", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_im), __pyx_ptype_5numpy_ndarray, 1, "im", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 171; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_Tvox), __pyx_ptype_5numpy_ndarray, 1, "Tvox", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 171; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_4nipy_10algorithms_12registration_13_registration_14_cspline_resample3d(__pyx_self, __pyx_v_im, __pyx_v_dims, __pyx_v_Tvox, __pyx_v_dtype, __pyx_v_mx, __pyx_v_my, __pyx_v_mz); goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4nipy_10algorithms_12registration_13_registration_14_cspline_resample3d(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_im, PyObject *__pyx_v_dims, PyArrayObject *__pyx_v_Tvox, PyObject *__pyx_v_dtype, PyObject *__pyx_v_mx, PyObject *__pyx_v_my, PyObject *__pyx_v_mz) { double *__pyx_v_tvox; int __pyx_v_cast_integer; PyObject *__pyx_v_im_resampled = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_t_6; int __pyx_t_7; int __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_cspline_resample3d", 0); __Pyx_INCREF((PyObject *)__pyx_v_Tvox); __Pyx_INCREF(__pyx_v_dtype); /* "nipy/algorithms/registration/_registration.pyx":191 * * # Create output array * if dtype == None: # <<<<<<<<<<<<<< * dtype = im.dtype * im_resampled = np.zeros(tuple(dims), dtype=dtype) */ __pyx_t_1 = PyObject_RichCompare(__pyx_v_dtype, Py_None, Py_EQ); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 191; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 191; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (__pyx_t_2) { /* 
"nipy/algorithms/registration/_registration.pyx":192 * # Create output array * if dtype == None: * dtype = im.dtype # <<<<<<<<<<<<<< * im_resampled = np.zeros(tuple(dims), dtype=dtype) * */ __pyx_t_1 = PyObject_GetAttr(((PyObject *)__pyx_v_im), __pyx_n_s__dtype); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 192; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_v_dtype); __pyx_v_dtype = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L3; } __pyx_L3:; /* "nipy/algorithms/registration/_registration.pyx":193 * if dtype == None: * dtype = im.dtype * im_resampled = np.zeros(tuple(dims), dtype=dtype) # <<<<<<<<<<<<<< * * # Ensure that the Tvox array is C-contiguous (required by the */ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 193; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__zeros); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 193; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 193; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_dims); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_dims); __Pyx_GIVEREF(__pyx_v_dims); __pyx_t_4 = PyObject_Call(((PyObject *)((PyObject*)(&PyTuple_Type))), ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 193; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 193; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyDict_New(); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 193; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_4)); if (PyDict_SetItem(__pyx_t_4, ((PyObject *)__pyx_n_s__dtype), __pyx_v_dtype) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 193; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_5 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_1), ((PyObject *)__pyx_t_4)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 193; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; __pyx_v_im_resampled = __pyx_t_5; __pyx_t_5 = 0; /* "nipy/algorithms/registration/_registration.pyx":197 * # Ensure that the Tvox array is C-contiguous (required by the * # underlying C routine) * Tvox = np.asarray(Tvox, dtype='double', order='C') # <<<<<<<<<<<<<< * tvox = Tvox.data * */ __pyx_t_5 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 197; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__asarray); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 197; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = 
PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 197; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(((PyObject *)__pyx_v_Tvox)); PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_v_Tvox)); __Pyx_GIVEREF(((PyObject *)__pyx_v_Tvox)); __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 197; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_1)); if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__dtype), ((PyObject *)__pyx_n_s__double)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 197; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__order), ((PyObject *)__pyx_n_s__C)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 197; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_3 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_5), ((PyObject *)__pyx_t_1)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 197; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 197; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(((PyObject *)__pyx_v_Tvox)); __pyx_v_Tvox = ((PyArrayObject *)__pyx_t_3); __pyx_t_3 = 0; /* "nipy/algorithms/registration/_registration.pyx":198 * # underlying C routine) * Tvox = np.asarray(Tvox, dtype='double', order='C') * tvox = Tvox.data # <<<<<<<<<<<<<< * * # Actual resampling */ __pyx_v_tvox = ((double *)__pyx_v_Tvox->data); /* "nipy/algorithms/registration/_registration.pyx":201 * * # Actual resampling * if dtype.kind == 'i': # <<<<<<<<<<<<<< * cast_integer = 1 * elif dtype.kind == 'u': */ __pyx_t_3 = PyObject_GetAttr(__pyx_v_dtype, __pyx_n_s__kind); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 201; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_1 = PyObject_RichCompare(__pyx_t_3, ((PyObject *)__pyx_n_s__i), Py_EQ); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 201; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 201; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (__pyx_t_2) { /* "nipy/algorithms/registration/_registration.pyx":202 * # Actual resampling * if dtype.kind == 'i': * cast_integer = 1 # <<<<<<<<<<<<<< * elif dtype.kind == 'u': * cast_integer = 2 */ __pyx_v_cast_integer = 1; goto __pyx_L4; } /* "nipy/algorithms/registration/_registration.pyx":203 * if dtype.kind == 'i': * cast_integer = 1 * elif dtype.kind == 'u': # <<<<<<<<<<<<<< * cast_integer = 2 * else: */ __pyx_t_1 = PyObject_GetAttr(__pyx_v_dtype, __pyx_n_s__kind); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 203; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = PyObject_RichCompare(__pyx_t_1, ((PyObject *)__pyx_n_s__u), Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 203; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 203; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_2) { /* "nipy/algorithms/registration/_registration.pyx":204 * cast_integer = 1 * elif dtype.kind == 'u': * cast_integer = 2 # <<<<<<<<<<<<<< * else: * cast_integer = 0 */ __pyx_v_cast_integer = 2; goto __pyx_L4; } /*else*/ { /* "nipy/algorithms/registration/_registration.pyx":206 * cast_integer = 2 * else: * cast_integer = 0 # <<<<<<<<<<<<<< * cubic_spline_resample3d(im_resampled, im, tvox, cast_integer, * modes[mx], modes[my], modes[mz]) */ __pyx_v_cast_integer = 0; } __pyx_L4:; /* "nipy/algorithms/registration/_registration.pyx":207 * else: * cast_integer = 0 * cubic_spline_resample3d(im_resampled, im, tvox, cast_integer, # <<<<<<<<<<<<<< * modes[mx], modes[my], modes[mz]) * */ if (!(likely(((__pyx_v_im_resampled) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_im_resampled, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_3 = __pyx_v_im_resampled; __Pyx_INCREF(__pyx_t_3); /* "nipy/algorithms/registration/_registration.pyx":208 * cast_integer = 0 * cubic_spline_resample3d(im_resampled, im, tvox, cast_integer, * modes[mx], modes[my], modes[mz]) # <<<<<<<<<<<<<< * * return im_resampled */ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__modes); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 208; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = PyObject_GetItem(__pyx_t_1, __pyx_v_mx); if (!__pyx_t_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 208; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_6 = __Pyx_PyInt_AsInt(__pyx_t_5); if (unlikely((__pyx_t_6 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 208; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_GetName(__pyx_m, __pyx_n_s__modes); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 208; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_1 = PyObject_GetItem(__pyx_t_5, __pyx_v_my); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 208; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyInt_AsInt(__pyx_t_1); if (unlikely((__pyx_t_7 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 208; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__modes); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 208; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = PyObject_GetItem(__pyx_t_1, __pyx_v_mz); if (!__pyx_t_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 208; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_8 = __Pyx_PyInt_AsInt(__pyx_t_5); if (unlikely((__pyx_t_8 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 208; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; cubic_spline_resample3d(((PyArrayObject *)__pyx_t_3), __pyx_v_im, 
__pyx_v_tvox, __pyx_v_cast_integer, __pyx_t_6, __pyx_t_7, __pyx_t_8); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "nipy/algorithms/registration/_registration.pyx":210 * modes[mx], modes[my], modes[mz]) * * return im_resampled # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_im_resampled); __pyx_r = __pyx_v_im_resampled; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("nipy.algorithms.registration._registration._cspline_resample3d", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_im_resampled); __Pyx_XDECREF((PyObject *)__pyx_v_Tvox); __Pyx_XDECREF(__pyx_v_dtype); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_10algorithms_12registration_13_registration_17check_array(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_4nipy_10algorithms_12registration_13_registration_17check_array = {__Pyx_NAMESTR("check_array"), (PyCFunction)__pyx_pw_4nipy_10algorithms_12registration_13_registration_17check_array, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(0)}; static PyObject *__pyx_pw_4nipy_10algorithms_12registration_13_registration_17check_array(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_x = 0; int __pyx_v_dim; int __pyx_v_exp_dim; PyObject *__pyx_v_xname = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("check_array (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__x,&__pyx_n_s__dim,&__pyx_n_s__exp_dim,&__pyx_n_s__xname,0}; PyObject* values[4] = {0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__x)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__dim)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("check_array", 1, 4, 4, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 213; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__exp_dim)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("check_array", 1, 4, 4, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 213; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__xname)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("check_array", 1, 4, 4, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 213; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "check_array") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 213; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 4) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 
0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); } __pyx_v_x = ((PyArrayObject *)values[0]); __pyx_v_dim = __Pyx_PyInt_AsInt(values[1]); if (unlikely((__pyx_v_dim == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 213; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_exp_dim = __Pyx_PyInt_AsInt(values[2]); if (unlikely((__pyx_v_exp_dim == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 213; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_xname = values[3]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("check_array", 1, 4, 4, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 213; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.algorithms.registration._registration.check_array", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x), __pyx_ptype_5numpy_ndarray, 1, "x", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 213; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_4nipy_10algorithms_12registration_13_registration_16check_array(__pyx_self, __pyx_v_x, __pyx_v_dim, __pyx_v_exp_dim, __pyx_v_xname); goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/algorithms/registration/_registration.pyx":213 * * * def check_array(ndarray x, int dim, int exp_dim, xname): # <<<<<<<<<<<<<< * if not x.flags['C_CONTIGUOUS'] or not x.dtype=='double': * raise ValueError('%s array should be double C-contiguous' % xname) */ static PyObject *__pyx_pf_4nipy_10algorithms_12registration_13_registration_16check_array(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_x, int __pyx_v_dim, int __pyx_v_exp_dim, PyObject *__pyx_v_xname) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_t_3; int __pyx_t_4; int __pyx_t_5; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("check_array", 0); /* "nipy/algorithms/registration/_registration.pyx":214 * * def check_array(ndarray x, int dim, int exp_dim, xname): * if not x.flags['C_CONTIGUOUS'] or not x.dtype=='double': # <<<<<<<<<<<<<< * raise ValueError('%s array should be double C-contiguous' % xname) * if not dim == exp_dim: */ __pyx_t_1 = PyObject_GetAttr(((PyObject *)__pyx_v_x), __pyx_n_s__flags); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 214; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_GetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__C_CONTIGUOUS)); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 214; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_3 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 214; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_4 = (!__pyx_t_3); if (!__pyx_t_4) { __pyx_t_2 = PyObject_GetAttr(((PyObject *)__pyx_v_x), __pyx_n_s__dtype); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 214; __pyx_clineno = __LINE__; goto 
__pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = PyObject_RichCompare(__pyx_t_2, ((PyObject *)__pyx_n_s__double), Py_EQ); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 214; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_3 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 214; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_5 = (!__pyx_t_3); __pyx_t_3 = __pyx_t_5; } else { __pyx_t_3 = __pyx_t_4; } if (__pyx_t_3) { /* "nipy/algorithms/registration/_registration.pyx":215 * def check_array(ndarray x, int dim, int exp_dim, xname): * if not x.flags['C_CONTIGUOUS'] or not x.dtype=='double': * raise ValueError('%s array should be double C-contiguous' % xname) # <<<<<<<<<<<<<< * if not dim == exp_dim: * raise ValueError('%s has size %d in last dimension, %d expected' % (xname, dim, exp_dim)) */ __pyx_t_1 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_5), __pyx_v_xname); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_1)); __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_t_1)); __Pyx_GIVEREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __pyx_t_1 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; {__pyx_filename = __pyx_f[0]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L3; } __pyx_L3:; /* "nipy/algorithms/registration/_registration.pyx":216 * if not x.flags['C_CONTIGUOUS'] or not x.dtype=='double': * raise ValueError('%s array should be double C-contiguous' % xname) * if not dim == exp_dim: # <<<<<<<<<<<<<< * raise ValueError('%s has size %d in last dimension, %d expected' % (xname, dim, exp_dim)) * */ __pyx_t_3 = (!(__pyx_v_dim == __pyx_v_exp_dim)); if (__pyx_t_3) { /* "nipy/algorithms/registration/_registration.pyx":217 * raise ValueError('%s array should be double C-contiguous' % xname) * if not dim == exp_dim: * raise ValueError('%s has size %d in last dimension, %d expected' % (xname, dim, exp_dim)) # <<<<<<<<<<<<<< * * def _apply_polyaffine(ndarray xyz, ndarray centers, ndarray affines, ndarray sigma): */ __pyx_t_1 = PyInt_FromLong(__pyx_v_dim); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 217; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyInt_FromLong(__pyx_v_exp_dim); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 217; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_6 = PyTuple_New(3); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 217; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __Pyx_INCREF(__pyx_v_xname); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_v_xname); __Pyx_GIVEREF(__pyx_v_xname); PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_6, 2, __pyx_t_2); 
__Pyx_GIVEREF(__pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_6), ((PyObject *)__pyx_t_6)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 217; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_2)); __Pyx_DECREF(((PyObject *)__pyx_t_6)); __pyx_t_6 = 0; __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 217; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_6, 0, ((PyObject *)__pyx_t_2)); __Pyx_GIVEREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_6), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 217; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(((PyObject *)__pyx_t_6)); __pyx_t_6 = 0; __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; {__pyx_filename = __pyx_f[0]; __pyx_lineno = 217; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L4; } __pyx_L4:; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("nipy.algorithms.registration._registration.check_array", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_10algorithms_12registration_13_registration_19_apply_polyaffine(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_4nipy_10algorithms_12registration_13_registration_19_apply_polyaffine = {__Pyx_NAMESTR("_apply_polyaffine"), (PyCFunction)__pyx_pw_4nipy_10algorithms_12registration_13_registration_19_apply_polyaffine, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(0)}; static PyObject *__pyx_pw_4nipy_10algorithms_12registration_13_registration_19_apply_polyaffine(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_xyz = 0; PyArrayObject *__pyx_v_centers = 0; PyArrayObject *__pyx_v_affines = 0; PyArrayObject *__pyx_v_sigma = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("_apply_polyaffine (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__xyz,&__pyx_n_s__centers,&__pyx_n_s__affines,&__pyx_n_s__sigma,0}; PyObject* values[4] = {0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__xyz)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__centers)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_apply_polyaffine", 1, 4, 4, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__affines)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_apply_polyaffine", 1, 4, 4, 
2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__sigma)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_apply_polyaffine", 1, 4, 4, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "_apply_polyaffine") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 4) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); } __pyx_v_xyz = ((PyArrayObject *)values[0]); __pyx_v_centers = ((PyArrayObject *)values[1]); __pyx_v_affines = ((PyArrayObject *)values[2]); __pyx_v_sigma = ((PyArrayObject *)values[3]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("_apply_polyaffine", 1, 4, 4, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.algorithms.registration._registration._apply_polyaffine", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xyz), __pyx_ptype_5numpy_ndarray, 1, "xyz", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_centers), __pyx_ptype_5numpy_ndarray, 1, "centers", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_affines), __pyx_ptype_5numpy_ndarray, 1, "affines", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_sigma), __pyx_ptype_5numpy_ndarray, 1, "sigma", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_4nipy_10algorithms_12registration_13_registration_18_apply_polyaffine(__pyx_self, __pyx_v_xyz, __pyx_v_centers, __pyx_v_affines, __pyx_v_sigma); goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/algorithms/registration/_registration.pyx":219 * raise ValueError('%s has size %d in last dimension, %d expected' % (xname, dim, exp_dim)) * * def _apply_polyaffine(ndarray xyz, ndarray centers, ndarray affines, ndarray sigma): # <<<<<<<<<<<<<< * * check_array(xyz, xyz.shape[1], 3, 'xyz') */ static PyObject *__pyx_pf_4nipy_10algorithms_12registration_13_registration_18_apply_polyaffine(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xyz, PyArrayObject *__pyx_v_centers, PyArrayObject *__pyx_v_affines, PyArrayObject *__pyx_v_sigma) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_apply_polyaffine", 0); /* "nipy/algorithms/registration/_registration.pyx":221 * def 
_apply_polyaffine(ndarray xyz, ndarray centers, ndarray affines, ndarray sigma): * * check_array(xyz, xyz.shape[1], 3, 'xyz') # <<<<<<<<<<<<<< * check_array(centers, centers.shape[1], 3, 'centers') * check_array(affines, affines.shape[1], 12, 'affines') */ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__check_array); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 221; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyInt_to_py_Py_intptr_t((__pyx_v_xyz->dimensions[1])); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 221; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(4); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 221; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(((PyObject *)__pyx_v_xyz)); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_xyz)); __Pyx_GIVEREF(((PyObject *)__pyx_v_xyz)); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __Pyx_INCREF(__pyx_int_3); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_int_3); __Pyx_GIVEREF(__pyx_int_3); __Pyx_INCREF(((PyObject *)__pyx_n_s__xyz)); PyTuple_SET_ITEM(__pyx_t_3, 3, ((PyObject *)__pyx_n_s__xyz)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__xyz)); __pyx_t_2 = 0; __pyx_t_2 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 221; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "nipy/algorithms/registration/_registration.pyx":222 * * check_array(xyz, xyz.shape[1], 3, 'xyz') * check_array(centers, centers.shape[1], 3, 'centers') # <<<<<<<<<<<<<< * check_array(affines, affines.shape[1], 12, 'affines') * check_array(sigma, sigma.size, 3, 'sigma') */ __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__check_array); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 222; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyInt_to_py_Py_intptr_t((__pyx_v_centers->dimensions[1])); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 222; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_1 = PyTuple_New(4); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 222; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)__pyx_v_centers)); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_v_centers)); __Pyx_GIVEREF(((PyObject *)__pyx_v_centers)); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __Pyx_INCREF(__pyx_int_3); PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_int_3); __Pyx_GIVEREF(__pyx_int_3); __Pyx_INCREF(((PyObject *)__pyx_n_s__centers)); PyTuple_SET_ITEM(__pyx_t_1, 3, ((PyObject *)__pyx_n_s__centers)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__centers)); __pyx_t_3 = 0; __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 222; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "nipy/algorithms/registration/_registration.pyx":223 * check_array(xyz, xyz.shape[1], 3, 'xyz') * 
check_array(centers, centers.shape[1], 3, 'centers') * check_array(affines, affines.shape[1], 12, 'affines') # <<<<<<<<<<<<<< * check_array(sigma, sigma.size, 3, 'sigma') * if not centers.shape[0] == affines.shape[0]: */ __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__check_array); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 223; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_1 = __Pyx_PyInt_to_py_Py_intptr_t((__pyx_v_affines->dimensions[1])); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 223; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyTuple_New(4); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 223; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(((PyObject *)__pyx_v_affines)); PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_v_affines)); __Pyx_GIVEREF(((PyObject *)__pyx_v_affines)); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __Pyx_INCREF(__pyx_int_12); PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_int_12); __Pyx_GIVEREF(__pyx_int_12); __Pyx_INCREF(((PyObject *)__pyx_n_s__affines)); PyTuple_SET_ITEM(__pyx_t_2, 3, ((PyObject *)__pyx_n_s__affines)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__affines)); __pyx_t_1 = 0; __pyx_t_1 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 223; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/registration/_registration.pyx":224 * check_array(centers, centers.shape[1], 3, 'centers') * check_array(affines, affines.shape[1], 12, 'affines') * check_array(sigma, sigma.size, 3, 'sigma') # <<<<<<<<<<<<<< * if not centers.shape[0] == affines.shape[0]: * raise ValueError('centers and affines arrays should have same shape[0]') */ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__check_array); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 224; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_GetAttr(((PyObject *)__pyx_v_sigma), __pyx_n_s__size); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 224; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(4); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 224; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(((PyObject *)__pyx_v_sigma)); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_sigma)); __Pyx_GIVEREF(((PyObject *)__pyx_v_sigma)); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __Pyx_INCREF(__pyx_int_3); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_int_3); __Pyx_GIVEREF(__pyx_int_3); __Pyx_INCREF(((PyObject *)__pyx_n_s__sigma)); PyTuple_SET_ITEM(__pyx_t_3, 3, ((PyObject *)__pyx_n_s__sigma)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__sigma)); __pyx_t_2 = 0; __pyx_t_2 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 224; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "nipy/algorithms/registration/_registration.pyx":225 * 
check_array(affines, affines.shape[1], 12, 'affines') * check_array(sigma, sigma.size, 3, 'sigma') * if not centers.shape[0] == affines.shape[0]: # <<<<<<<<<<<<<< * raise ValueError('centers and affines arrays should have same shape[0]') * */ __pyx_t_4 = (!((__pyx_v_centers->dimensions[0]) == (__pyx_v_affines->dimensions[0]))); if (__pyx_t_4) { /* "nipy/algorithms/registration/_registration.pyx":226 * check_array(sigma, sigma.size, 3, 'sigma') * if not centers.shape[0] == affines.shape[0]: * raise ValueError('centers and affines arrays should have same shape[0]') # <<<<<<<<<<<<<< * * apply_polyaffine(xyz, centers, affines, sigma) */ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_8), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 226; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; {__pyx_filename = __pyx_f[0]; __pyx_lineno = 226; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L3; } __pyx_L3:; /* "nipy/algorithms/registration/_registration.pyx":228 * raise ValueError('centers and affines arrays should have same shape[0]') * * apply_polyaffine(xyz, centers, affines, sigma) # <<<<<<<<<<<<<< */ apply_polyaffine(__pyx_v_xyz, __pyx_v_centers, __pyx_v_affines, __pyx_v_sigma); __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("nipy.algorithms.registration._registration._apply_polyaffine", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_pf_5numpy_7ndarray___getbuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":194 * # experimental exception made for __getbuffer__ and __releasebuffer__ * # -- the details of this may change. * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< * # This implementation of getbuffer is geared towards Cython * # requirements, and does not yet fullfill the PEP. 
*/ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_v_copy_shape; int __pyx_v_i; int __pyx_v_ndim; int __pyx_v_endian_detector; int __pyx_v_little_endian; int __pyx_v_t; char *__pyx_v_f; PyArray_Descr *__pyx_v_descr = 0; int __pyx_v_offset; int __pyx_v_hasfields; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; char *__pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getbuffer__", 0); if (__pyx_v_info != NULL) { __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); } /* "numpy.pxd":200 * # of flags * * if info == NULL: return # <<<<<<<<<<<<<< * * cdef int copy_shape, i, ndim */ __pyx_t_1 = (__pyx_v_info == NULL); if (__pyx_t_1) { __pyx_r = 0; goto __pyx_L0; goto __pyx_L3; } __pyx_L3:; /* "numpy.pxd":203 * * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((&endian_detector)[0] != 0) * */ __pyx_v_endian_detector = 1; /* "numpy.pxd":204 * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * * ndim = PyArray_NDIM(self) */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "numpy.pxd":206 * cdef bint little_endian = ((&endian_detector)[0] != 0) * * ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<< * * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_v_ndim = PyArray_NDIM(__pyx_v_self); /* "numpy.pxd":208 * ndim = PyArray_NDIM(self) * * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * copy_shape = 1 * else: */ __pyx_t_1 = ((sizeof(npy_intp)) != (sizeof(Py_ssize_t))); if (__pyx_t_1) { /* "numpy.pxd":209 * * if sizeof(npy_intp) != sizeof(Py_ssize_t): * copy_shape = 1 # <<<<<<<<<<<<<< * else: * copy_shape = 0 */ __pyx_v_copy_shape = 1; goto __pyx_L4; } /*else*/ { /* "numpy.pxd":211 * copy_shape = 1 * else: * copy_shape = 0 # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) */ __pyx_v_copy_shape = 0; } __pyx_L4:; /* "numpy.pxd":213 * copy_shape = 0 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS); if (__pyx_t_1) { /* "numpy.pxd":214 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not C contiguous") * */ __pyx_t_2 = (!PyArray_CHKFLAGS(__pyx_v_self, NPY_C_CONTIGUOUS)); __pyx_t_3 = __pyx_t_2; } else { __pyx_t_3 = __pyx_t_1; } if (__pyx_t_3) { /* "numpy.pxd":215 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_10), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; 
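/* Descriptive note on the surrounding code: this is the buffer-protocol
 * implementation emitted from numpy.pxd. When a consumer requests a contiguous
 * buffer (flags containing PyBUF_C_CONTIGUOUS or PyBUF_F_CONTIGUOUS) from an
 * ndarray whose memory layout does not satisfy the request, the corresponding
 * ValueError is raised before the remaining Py_buffer fields are populated.
 */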
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L5; } __pyx_L5:; /* "numpy.pxd":217 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ __pyx_t_3 = ((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS); if (__pyx_t_3) { /* "numpy.pxd":218 * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not Fortran contiguous") * */ __pyx_t_1 = (!PyArray_CHKFLAGS(__pyx_v_self, NPY_F_CONTIGUOUS)); __pyx_t_2 = __pyx_t_1; } else { __pyx_t_2 = __pyx_t_3; } if (__pyx_t_2) { /* "numpy.pxd":219 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_12), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L6; } __pyx_L6:; /* "numpy.pxd":221 * raise ValueError(u"ndarray is not Fortran contiguous") * * info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<< * info.ndim = ndim * if copy_shape: */ __pyx_v_info->buf = PyArray_DATA(__pyx_v_self); /* "numpy.pxd":222 * * info.buf = PyArray_DATA(self) * info.ndim = ndim # <<<<<<<<<<<<<< * if copy_shape: * # Allocate new buffer for strides and shape info. */ __pyx_v_info->ndim = __pyx_v_ndim; /* "numpy.pxd":223 * info.buf = PyArray_DATA(self) * info.ndim = ndim * if copy_shape: # <<<<<<<<<<<<<< * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. */ if (__pyx_v_copy_shape) { /* "numpy.pxd":226 * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. * info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) # <<<<<<<<<<<<<< * info.shape = info.strides + ndim * for i in range(ndim): */ __pyx_v_info->strides = ((Py_ssize_t *)malloc((((sizeof(Py_ssize_t)) * ((size_t)__pyx_v_ndim)) * 2))); /* "numpy.pxd":227 * # This is allocated as one block, strides first. 
* info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) * info.shape = info.strides + ndim # <<<<<<<<<<<<<< * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] */ __pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim); /* "numpy.pxd":228 * info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) * info.shape = info.strides + ndim * for i in range(ndim): # <<<<<<<<<<<<<< * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] */ __pyx_t_5 = __pyx_v_ndim; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "numpy.pxd":229 * info.shape = info.strides + ndim * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<< * info.shape[i] = PyArray_DIMS(self)[i] * else: */ (__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]); /* "numpy.pxd":230 * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<< * else: * info.strides = PyArray_STRIDES(self) */ (__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]); } goto __pyx_L7; } /*else*/ { /* "numpy.pxd":232 * info.shape[i] = PyArray_DIMS(self)[i] * else: * info.strides = PyArray_STRIDES(self) # <<<<<<<<<<<<<< * info.shape = PyArray_DIMS(self) * info.suboffsets = NULL */ __pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self)); /* "numpy.pxd":233 * else: * info.strides = PyArray_STRIDES(self) * info.shape = PyArray_DIMS(self) # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) */ __pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(__pyx_v_self)); } __pyx_L7:; /* "numpy.pxd":234 * info.strides = PyArray_STRIDES(self) * info.shape = PyArray_DIMS(self) * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) */ __pyx_v_info->suboffsets = NULL; /* "numpy.pxd":235 * info.shape = PyArray_DIMS(self) * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<< * info.readonly = not PyArray_ISWRITEABLE(self) * */ __pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self); /* "numpy.pxd":236 * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<< * * cdef int t */ __pyx_v_info->readonly = (!PyArray_ISWRITEABLE(__pyx_v_self)); /* "numpy.pxd":239 * * cdef int t * cdef char* f = NULL # <<<<<<<<<<<<<< * cdef dtype descr = self.descr * cdef list stack */ __pyx_v_f = NULL; /* "numpy.pxd":240 * cdef int t * cdef char* f = NULL * cdef dtype descr = self.descr # <<<<<<<<<<<<<< * cdef list stack * cdef int offset */ __pyx_t_4 = ((PyObject *)__pyx_v_self->descr); __Pyx_INCREF(__pyx_t_4); __pyx_v_descr = ((PyArray_Descr *)__pyx_t_4); __pyx_t_4 = 0; /* "numpy.pxd":244 * cdef int offset * * cdef bint hasfields = PyDataType_HASFIELDS(descr) # <<<<<<<<<<<<<< * * if not hasfields and not copy_shape: */ __pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr); /* "numpy.pxd":246 * cdef bint hasfields = PyDataType_HASFIELDS(descr) * * if not hasfields and not copy_shape: # <<<<<<<<<<<<<< * # do not call releasebuffer * info.obj = None */ __pyx_t_2 = (!__pyx_v_hasfields); if (__pyx_t_2) { __pyx_t_3 = (!__pyx_v_copy_shape); __pyx_t_1 = __pyx_t_3; } else { __pyx_t_1 = __pyx_t_2; } if (__pyx_t_1) { /* "numpy.pxd":248 * if not hasfields and not copy_shape: * # do not call releasebuffer * info.obj = None # <<<<<<<<<<<<<< * else: * # need to call 
releasebuffer */ __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = Py_None; goto __pyx_L10; } /*else*/ { /* "numpy.pxd":251 * else: * # need to call releasebuffer * info.obj = self # <<<<<<<<<<<<<< * * if not hasfields: */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); } __pyx_L10:; /* "numpy.pxd":253 * info.obj = self * * if not hasfields: # <<<<<<<<<<<<<< * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or */ __pyx_t_1 = (!__pyx_v_hasfields); if (__pyx_t_1) { /* "numpy.pxd":254 * * if not hasfields: * t = descr.type_num # <<<<<<<<<<<<<< * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): */ __pyx_t_5 = __pyx_v_descr->type_num; __pyx_v_t = __pyx_t_5; /* "numpy.pxd":255 * if not hasfields: * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_1 = (__pyx_v_descr->byteorder == '>'); if (__pyx_t_1) { __pyx_t_2 = __pyx_v_little_endian; } else { __pyx_t_2 = __pyx_t_1; } if (!__pyx_t_2) { /* "numpy.pxd":256 * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" */ __pyx_t_1 = (__pyx_v_descr->byteorder == '<'); if (__pyx_t_1) { __pyx_t_3 = (!__pyx_v_little_endian); __pyx_t_7 = __pyx_t_3; } else { __pyx_t_7 = __pyx_t_1; } __pyx_t_1 = __pyx_t_7; } else { __pyx_t_1 = __pyx_t_2; } if (__pyx_t_1) { /* "numpy.pxd":257 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_14), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L12; } __pyx_L12:; /* "numpy.pxd":258 * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" */ __pyx_t_1 = (__pyx_v_t == NPY_BYTE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__b; goto __pyx_L13; } /* "numpy.pxd":259 * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" */ __pyx_t_1 = (__pyx_v_t == NPY_UBYTE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__B; goto __pyx_L13; } /* "numpy.pxd":260 * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" */ __pyx_t_1 = (__pyx_v_t == NPY_SHORT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__h; goto __pyx_L13; } /* "numpy.pxd":261 * elif t == NPY_UBYTE: f = "B" * elif t == 
NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" */ __pyx_t_1 = (__pyx_v_t == NPY_USHORT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__H; goto __pyx_L13; } /* "numpy.pxd":262 * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" */ __pyx_t_1 = (__pyx_v_t == NPY_INT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__i; goto __pyx_L13; } /* "numpy.pxd":263 * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" */ __pyx_t_1 = (__pyx_v_t == NPY_UINT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__I; goto __pyx_L13; } /* "numpy.pxd":264 * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" */ __pyx_t_1 = (__pyx_v_t == NPY_LONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__l; goto __pyx_L13; } /* "numpy.pxd":265 * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" */ __pyx_t_1 = (__pyx_v_t == NPY_ULONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__L; goto __pyx_L13; } /* "numpy.pxd":266 * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" */ __pyx_t_1 = (__pyx_v_t == NPY_LONGLONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__q; goto __pyx_L13; } /* "numpy.pxd":267 * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" */ __pyx_t_1 = (__pyx_v_t == NPY_ULONGLONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Q; goto __pyx_L13; } /* "numpy.pxd":268 * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" */ __pyx_t_1 = (__pyx_v_t == NPY_FLOAT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__f; goto __pyx_L13; } /* "numpy.pxd":269 * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" */ __pyx_t_1 = (__pyx_v_t == NPY_DOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__d; goto __pyx_L13; } /* "numpy.pxd":270 * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" */ __pyx_t_1 = (__pyx_v_t == NPY_LONGDOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__g; goto __pyx_L13; } /* "numpy.pxd":271 * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" */ __pyx_t_1 = (__pyx_v_t == NPY_CFLOAT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Zf; goto __pyx_L13; } /* "numpy.pxd":272 * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" */ __pyx_t_1 = (__pyx_v_t == NPY_CDOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Zd; goto __pyx_L13; } /* "numpy.pxd":273 * elif t 
== NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f = "O" * else: */ __pyx_t_1 = (__pyx_v_t == NPY_CLONGDOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Zg; goto __pyx_L13; } /* "numpy.pxd":274 * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ __pyx_t_1 = (__pyx_v_t == NPY_OBJECT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__O; goto __pyx_L13; } /*else*/ { /* "numpy.pxd":276 * elif t == NPY_OBJECT: f = "O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * info.format = f * return */ __pyx_t_4 = PyInt_FromLong(__pyx_v_t); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_8 = PyNumber_Remainder(((PyObject *)__pyx_kp_u_15), __pyx_t_4); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_8)); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_t_8)); __Pyx_GIVEREF(((PyObject *)__pyx_t_8)); __pyx_t_8 = 0; __pyx_t_8 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_8, 0, 0, 0); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L13:; /* "numpy.pxd":277 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f # <<<<<<<<<<<<<< * return * else: */ __pyx_v_info->format = __pyx_v_f; /* "numpy.pxd":278 * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f * return # <<<<<<<<<<<<<< * else: * info.format = stdlib.malloc(_buffer_format_string_len) */ __pyx_r = 0; goto __pyx_L0; goto __pyx_L11; } /*else*/ { /* "numpy.pxd":280 * return * else: * info.format = stdlib.malloc(_buffer_format_string_len) # <<<<<<<<<<<<<< * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 */ __pyx_v_info->format = ((char *)malloc(255)); /* "numpy.pxd":281 * else: * info.format = stdlib.malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment # <<<<<<<<<<<<<< * offset = 0 * f = _util_dtypestring(descr, info.format + 1, */ (__pyx_v_info->format[0]) = '^'; /* "numpy.pxd":282 * info.format = stdlib.malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 # <<<<<<<<<<<<<< * f = _util_dtypestring(descr, info.format + 1, * info.format + _buffer_format_string_len, */ __pyx_v_offset = 0; /* "numpy.pxd":285 * f = _util_dtypestring(descr, info.format + 1, * info.format + _buffer_format_string_len, * &offset) # <<<<<<<<<<<<<< * f[0] = c'\0' # Terminate format string * */ __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 255), (&__pyx_v_offset)); if (unlikely(__pyx_t_9 == NULL)) 
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 283; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_f = __pyx_t_9; /* "numpy.pxd":286 * info.format + _buffer_format_string_len, * &offset) * f[0] = c'\0' # Terminate format string # <<<<<<<<<<<<<< * * def __releasebuffer__(ndarray self, Py_buffer* info): */ (__pyx_v_f[0]) = '\x00'; } __pyx_L11:; __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { __Pyx_GOTREF(Py_None); __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; } __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_descr); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__releasebuffer__ (wrapper)", 0); __pyx_pf_5numpy_7ndarray_2__releasebuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info)); __Pyx_RefNannyFinishContext(); } /* "numpy.pxd":288 * f[0] = c'\0' # Terminate format string * * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< * if PyArray_HASFIELDS(self): * stdlib.free(info.format) */ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__releasebuffer__", 0); /* "numpy.pxd":289 * * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_t_1 = PyArray_HASFIELDS(__pyx_v_self); if (__pyx_t_1) { /* "numpy.pxd":290 * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): * stdlib.free(info.format) # <<<<<<<<<<<<<< * if sizeof(npy_intp) != sizeof(Py_ssize_t): * stdlib.free(info.strides) */ free(__pyx_v_info->format); goto __pyx_L3; } __pyx_L3:; /* "numpy.pxd":291 * if PyArray_HASFIELDS(self): * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * stdlib.free(info.strides) * # info.shape was stored after info.strides in the same block */ __pyx_t_1 = ((sizeof(npy_intp)) != (sizeof(Py_ssize_t))); if (__pyx_t_1) { /* "numpy.pxd":292 * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): * stdlib.free(info.strides) # <<<<<<<<<<<<<< * # info.shape was stored after info.strides in the same block * */ free(__pyx_v_info->strides); goto __pyx_L4; } __pyx_L4:; __Pyx_RefNannyFinishContext(); } /* "numpy.pxd":768 * ctypedef npy_cdouble complex_t * * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(1, a) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0); /* "numpy.pxd":769 * * cdef inline object PyArray_MultiIterNew1(a): * 
return PyArray_MultiIterNew(1, a) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew2(a, b): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 769; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":771 * return PyArray_MultiIterNew(1, a) * * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(2, a, b) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0); /* "numpy.pxd":772 * * cdef inline object PyArray_MultiIterNew2(a, b): * return PyArray_MultiIterNew(2, a, b) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew3(a, b, c): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 772; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":774 * return PyArray_MultiIterNew(2, a, b) * * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(3, a, b, c) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0); /* "numpy.pxd":775 * * cdef inline object PyArray_MultiIterNew3(a, b, c): * return PyArray_MultiIterNew(3, a, b, c) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 775; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":777 * return PyArray_MultiIterNew(3, a, b, c) * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(4, a, b, c, d) * */ static CYTHON_INLINE PyObject 
*__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0); /* "numpy.pxd":778 * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): * return PyArray_MultiIterNew(4, a, b, c, d) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 778; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":780 * return PyArray_MultiIterNew(4, a, b, c, d) * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(5, a, b, c, d, e) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0); /* "numpy.pxd":781 * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): * return PyArray_MultiIterNew(5, a, b, c, d, e) # <<<<<<<<<<<<<< * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 781; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":783 * return PyArray_MultiIterNew(5, a, b, c, d, e) * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< * # Recursive utility function used in __getbuffer__ to get format * # string. The new location in the format string is returned. 
*/ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) { PyArray_Descr *__pyx_v_child = 0; int __pyx_v_endian_detector; int __pyx_v_little_endian; PyObject *__pyx_v_fields = 0; PyObject *__pyx_v_childname = NULL; PyObject *__pyx_v_new_offset = NULL; PyObject *__pyx_v_t = NULL; char *__pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *(*__pyx_t_6)(PyObject *); int __pyx_t_7; int __pyx_t_8; int __pyx_t_9; int __pyx_t_10; long __pyx_t_11; char *__pyx_t_12; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_util_dtypestring", 0); /* "numpy.pxd":790 * cdef int delta_offset * cdef tuple i * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((&endian_detector)[0] != 0) * cdef tuple fields */ __pyx_v_endian_detector = 1; /* "numpy.pxd":791 * cdef tuple i * cdef int endian_detector = 1 * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * cdef tuple fields * */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "numpy.pxd":794 * cdef tuple fields * * for childname in descr.names: # <<<<<<<<<<<<<< * fields = descr.fields[childname] * child, new_offset = fields */ if (unlikely(((PyObject *)__pyx_v_descr->names) == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_1 = ((PyObject *)__pyx_v_descr->names); __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; for (;;) { if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif __Pyx_XDECREF(__pyx_v_childname); __pyx_v_childname = __pyx_t_3; __pyx_t_3 = 0; /* "numpy.pxd":795 * * for childname in descr.names: * fields = descr.fields[childname] # <<<<<<<<<<<<<< * child, new_offset = fields * */ __pyx_t_3 = PyObject_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (!__pyx_t_3) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected tuple, got %.200s", Py_TYPE(__pyx_t_3)->tp_name), 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_XDECREF(((PyObject *)__pyx_v_fields)); __pyx_v_fields = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "numpy.pxd":796 * for childname in descr.names: * fields = descr.fields[childname] * child, new_offset = fields # <<<<<<<<<<<<<< * * if (end - f) - (new_offset - offset[0]) < 15: */ if (likely(PyTuple_CheckExact(((PyObject *)__pyx_v_fields)))) { PyObject* sequence = ((PyObject *)__pyx_v_fields); #if CYTHON_COMPILING_IN_CPYTHON Py_ssize_t size = Py_SIZE(sequence); #else Py_ssize_t size = PySequence_Size(sequence); #endif if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else 
if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); #else __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else if (1) { __Pyx_RaiseNoneNotIterableError(); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } else { Py_ssize_t index = -1; __pyx_t_5 = PyObject_GetIter(((PyObject *)__pyx_v_fields)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = Py_TYPE(__pyx_t_5)->tp_iternext; index = 0; __pyx_t_3 = __pyx_t_6(__pyx_t_5); if (unlikely(!__pyx_t_3)) goto __pyx_L5_unpacking_failed; __Pyx_GOTREF(__pyx_t_3); index = 1; __pyx_t_4 = __pyx_t_6(__pyx_t_5); if (unlikely(!__pyx_t_4)) goto __pyx_L5_unpacking_failed; __Pyx_GOTREF(__pyx_t_4); if (__Pyx_IternextUnpackEndCheck(__pyx_t_6(__pyx_t_5), 2) < 0) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_6 = NULL; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L6_unpacking_done; __pyx_L5_unpacking_failed:; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_6 = NULL; if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_L6_unpacking_done:; } if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_XDECREF(((PyObject *)__pyx_v_child)); __pyx_v_child = ((PyArray_Descr *)__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_v_new_offset); __pyx_v_new_offset = __pyx_t_4; __pyx_t_4 = 0; /* "numpy.pxd":798 * child, new_offset = fields * * if (end - f) - (new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * */ __pyx_t_4 = PyInt_FromLong((__pyx_v_end - __pyx_v_f)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyInt_FromLong((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_3); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyNumber_Subtract(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyObject_RichCompare(__pyx_t_3, __pyx_int_15, Py_LT); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = 
__pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { /* "numpy.pxd":799 * * if (end - f) - (new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_t_5 = PyObject_Call(__pyx_builtin_RuntimeError, ((PyObject *)__pyx_k_tuple_17), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L7; } __pyx_L7:; /* "numpy.pxd":801 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_7 = (__pyx_v_child->byteorder == '>'); if (__pyx_t_7) { __pyx_t_8 = __pyx_v_little_endian; } else { __pyx_t_8 = __pyx_t_7; } if (!__pyx_t_8) { /* "numpy.pxd":802 * * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * # One could encode it in the format string and have Cython */ __pyx_t_7 = (__pyx_v_child->byteorder == '<'); if (__pyx_t_7) { __pyx_t_9 = (!__pyx_v_little_endian); __pyx_t_10 = __pyx_t_9; } else { __pyx_t_10 = __pyx_t_7; } __pyx_t_7 = __pyx_t_10; } else { __pyx_t_7 = __pyx_t_8; } if (__pyx_t_7) { /* "numpy.pxd":803 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_18), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L8; } __pyx_L8:; /* "numpy.pxd":813 * * # Output padding bytes * while offset[0] < new_offset: # <<<<<<<<<<<<<< * f[0] = 120 # "x"; pad byte * f += 1 */ while (1) { __pyx_t_5 = PyInt_FromLong((__pyx_v_offset[0])); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_t_5, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if 
(!__pyx_t_7) break; /* "numpy.pxd":814 * # Output padding bytes * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<< * f += 1 * offset[0] += 1 */ (__pyx_v_f[0]) = 120; /* "numpy.pxd":815 * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte * f += 1 # <<<<<<<<<<<<<< * offset[0] += 1 * */ __pyx_v_f = (__pyx_v_f + 1); /* "numpy.pxd":816 * f[0] = 120 # "x"; pad byte * f += 1 * offset[0] += 1 # <<<<<<<<<<<<<< * * offset[0] += child.itemsize */ __pyx_t_11 = 0; (__pyx_v_offset[__pyx_t_11]) = ((__pyx_v_offset[__pyx_t_11]) + 1); } /* "numpy.pxd":818 * offset[0] += 1 * * offset[0] += child.itemsize # <<<<<<<<<<<<<< * * if not PyDataType_HASFIELDS(child): */ __pyx_t_11 = 0; (__pyx_v_offset[__pyx_t_11]) = ((__pyx_v_offset[__pyx_t_11]) + __pyx_v_child->elsize); /* "numpy.pxd":820 * offset[0] += child.itemsize * * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< * t = child.type_num * if end - f < 5: */ __pyx_t_7 = (!PyDataType_HASFIELDS(__pyx_v_child)); if (__pyx_t_7) { /* "numpy.pxd":821 * * if not PyDataType_HASFIELDS(child): * t = child.type_num # <<<<<<<<<<<<<< * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") */ __pyx_t_3 = PyInt_FromLong(__pyx_v_child->type_num); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 821; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_XDECREF(__pyx_v_t); __pyx_v_t = __pyx_t_3; __pyx_t_3 = 0; /* "numpy.pxd":822 * if not PyDataType_HASFIELDS(child): * t = child.type_num * if end - f < 5: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short.") * */ __pyx_t_7 = ((__pyx_v_end - __pyx_v_f) < 5); if (__pyx_t_7) { /* "numpy.pxd":823 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_t_3 = PyObject_Call(__pyx_builtin_RuntimeError, ((PyObject *)__pyx_k_tuple_20), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L12; } __pyx_L12:; /* "numpy.pxd":826 * * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" */ __pyx_t_3 = PyInt_FromLong(NPY_BYTE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 98; goto __pyx_L13; } /* "numpy.pxd":827 * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" */ __pyx_t_5 = PyInt_FromLong(NPY_UBYTE); 
if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 66; goto __pyx_L13; } /* "numpy.pxd":828 * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" */ __pyx_t_3 = PyInt_FromLong(NPY_SHORT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 104; goto __pyx_L13; } /* "numpy.pxd":829 * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" */ __pyx_t_5 = PyInt_FromLong(NPY_USHORT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 72; goto __pyx_L13; } /* "numpy.pxd":830 * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" */ __pyx_t_3 = PyInt_FromLong(NPY_INT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 105; goto __pyx_L13; } /* "numpy.pxd":831 * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t 
== NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" */ __pyx_t_5 = PyInt_FromLong(NPY_UINT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 73; goto __pyx_L13; } /* "numpy.pxd":832 * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" */ __pyx_t_3 = PyInt_FromLong(NPY_LONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 108; goto __pyx_L13; } /* "numpy.pxd":833 * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" */ __pyx_t_5 = PyInt_FromLong(NPY_ULONG); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 76; goto __pyx_L13; } /* "numpy.pxd":834 * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" */ __pyx_t_3 = PyInt_FromLong(NPY_LONGLONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; 
__pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 113; goto __pyx_L13; } /* "numpy.pxd":835 * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" */ __pyx_t_5 = PyInt_FromLong(NPY_ULONGLONG); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 81; goto __pyx_L13; } /* "numpy.pxd":836 * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" */ __pyx_t_3 = PyInt_FromLong(NPY_FLOAT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 102; goto __pyx_L13; } /* "numpy.pxd":837 * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf */ __pyx_t_5 = PyInt_FromLong(NPY_DOUBLE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 100; goto __pyx_L13; } /* "numpy.pxd":838 * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd */ __pyx_t_3 = PyInt_FromLong(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = 
PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 103; goto __pyx_L13; } /* "numpy.pxd":839 * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg */ __pyx_t_5 = PyInt_FromLong(NPY_CFLOAT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 102; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L13; } /* "numpy.pxd":840 * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" */ __pyx_t_3 = PyInt_FromLong(NPY_CDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 100; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L13; } /* "numpy.pxd":841 * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: */ __pyx_t_5 = PyInt_FromLong(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 103; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L13; } /* "numpy.pxd":842 * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ __pyx_t_3 = PyInt_FromLong(NPY_OBJECT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 79; goto __pyx_L13; } /*else*/ { /* "numpy.pxd":844 * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * f += 1 * else: */ __pyx_t_5 = PyNumber_Remainder(((PyObject *)__pyx_kp_u_15), __pyx_v_t); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_5)); __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_t_5)); __Pyx_GIVEREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L13:; /* "numpy.pxd":845 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * f += 1 # <<<<<<<<<<<<<< * else: * # Cython ignores struct boundary information ("T{...}"), */ __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L11; } /*else*/ { /* "numpy.pxd":849 * # Cython ignores struct boundary information ("T{...}"), * # so don't output it * f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<< * return f * */ __pyx_t_12 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_12 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 849; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_f = __pyx_t_12; } __pyx_L11:; } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "numpy.pxd":850 * # so don't output it * f = _util_dtypestring(child, f, end, offset) * return f # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_f; goto __pyx_L0; __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_child); __Pyx_XDECREF(__pyx_v_fields); 
__Pyx_XDECREF(__pyx_v_childname); __Pyx_XDECREF(__pyx_v_new_offset); __Pyx_XDECREF(__pyx_v_t); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":965 * * * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< * cdef PyObject* baseptr * if base is None: */ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) { PyObject *__pyx_v_baseptr; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("set_array_base", 0); /* "numpy.pxd":967 * cdef inline void set_array_base(ndarray arr, object base): * cdef PyObject* baseptr * if base is None: # <<<<<<<<<<<<<< * baseptr = NULL * else: */ __pyx_t_1 = (__pyx_v_base == Py_None); if (__pyx_t_1) { /* "numpy.pxd":968 * cdef PyObject* baseptr * if base is None: * baseptr = NULL # <<<<<<<<<<<<<< * else: * Py_INCREF(base) # important to do this before decref below! */ __pyx_v_baseptr = NULL; goto __pyx_L3; } /*else*/ { /* "numpy.pxd":970 * baseptr = NULL * else: * Py_INCREF(base) # important to do this before decref below! # <<<<<<<<<<<<<< * baseptr = base * Py_XDECREF(arr.base) */ Py_INCREF(__pyx_v_base); /* "numpy.pxd":971 * else: * Py_INCREF(base) # important to do this before decref below! * baseptr = base # <<<<<<<<<<<<<< * Py_XDECREF(arr.base) * arr.base = baseptr */ __pyx_v_baseptr = ((PyObject *)__pyx_v_base); } __pyx_L3:; /* "numpy.pxd":972 * Py_INCREF(base) # important to do this before decref below! * baseptr = base * Py_XDECREF(arr.base) # <<<<<<<<<<<<<< * arr.base = baseptr * */ Py_XDECREF(__pyx_v_arr->base); /* "numpy.pxd":973 * baseptr = base * Py_XDECREF(arr.base) * arr.base = baseptr # <<<<<<<<<<<<<< * * cdef inline object get_array_base(ndarray arr): */ __pyx_v_arr->base = __pyx_v_baseptr; __Pyx_RefNannyFinishContext(); } /* "numpy.pxd":975 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("get_array_base", 0); /* "numpy.pxd":976 * * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: # <<<<<<<<<<<<<< * return None * else: */ __pyx_t_1 = (__pyx_v_arr->base == NULL); if (__pyx_t_1) { /* "numpy.pxd":977 * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: * return None # <<<<<<<<<<<<<< * else: * return arr.base */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_None); __pyx_r = Py_None; goto __pyx_L0; goto __pyx_L3; } /*else*/ { /* "numpy.pxd":979 * return None * else: * return arr.base # <<<<<<<<<<<<<< */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_arr->base)); __pyx_r = ((PyObject *)__pyx_v_arr->base); goto __pyx_L0; } __pyx_L3:; __pyx_r = Py_None; __Pyx_INCREF(Py_None); __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyMethodDef __pyx_methods[] = { {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 static struct PyModuleDef __pyx_moduledef = { PyModuleDef_HEAD_INIT, __Pyx_NAMESTR("_registration"), __Pyx_DOCSTR(__pyx_k_21), /* m_doc */ -1, /* m_size */ __pyx_methods /* m_methods */, NULL, /* m_reload */ NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_kp_s_1, __pyx_k_1, sizeof(__pyx_k_1), 0, 0, 1, 0}, {&__pyx_kp_u_11, __pyx_k_11, sizeof(__pyx_k_11), 0, 1, 0, 0}, {&__pyx_kp_u_13, __pyx_k_13, sizeof(__pyx_k_13), 0, 1, 0, 
0}, {&__pyx_kp_u_15, __pyx_k_15, sizeof(__pyx_k_15), 0, 1, 0, 0}, {&__pyx_kp_u_16, __pyx_k_16, sizeof(__pyx_k_16), 0, 1, 0, 0}, {&__pyx_kp_u_19, __pyx_k_19, sizeof(__pyx_k_19), 0, 1, 0, 0}, {&__pyx_kp_s_22, __pyx_k_22, sizeof(__pyx_k_22), 0, 0, 1, 0}, {&__pyx_kp_s_25, __pyx_k_25, sizeof(__pyx_k_25), 0, 0, 1, 0}, {&__pyx_n_s_26, __pyx_k_26, sizeof(__pyx_k_26), 0, 0, 1, 1}, {&__pyx_kp_s_3, __pyx_k_3, sizeof(__pyx_k_3), 0, 0, 1, 0}, {&__pyx_kp_s_5, __pyx_k_5, sizeof(__pyx_k_5), 0, 0, 1, 0}, {&__pyx_kp_s_6, __pyx_k_6, sizeof(__pyx_k_6), 0, 0, 1, 0}, {&__pyx_kp_s_7, __pyx_k_7, sizeof(__pyx_k_7), 0, 0, 1, 0}, {&__pyx_kp_u_9, __pyx_k_9, sizeof(__pyx_k_9), 0, 1, 0, 0}, {&__pyx_n_s__C, __pyx_k__C, sizeof(__pyx_k__C), 0, 0, 1, 1}, {&__pyx_n_s__C_CONTIGUOUS, __pyx_k__C_CONTIGUOUS, sizeof(__pyx_k__C_CONTIGUOUS), 0, 0, 1, 1}, {&__pyx_n_s__H, __pyx_k__H, sizeof(__pyx_k__H), 0, 0, 1, 1}, {&__pyx_n_s__R, __pyx_k__R, sizeof(__pyx_k__R), 0, 0, 1, 1}, {&__pyx_n_s__RuntimeError, __pyx_k__RuntimeError, sizeof(__pyx_k__RuntimeError), 0, 0, 1, 1}, {&__pyx_n_s__T, __pyx_k__T, sizeof(__pyx_k__T), 0, 0, 1, 1}, {&__pyx_n_s__Ta, __pyx_k__Ta, sizeof(__pyx_k__Ta), 0, 0, 1, 1}, {&__pyx_n_s__Tvox, __pyx_k__Tvox, sizeof(__pyx_k__Tvox), 0, 0, 1, 1}, {&__pyx_n_s__ValueError, __pyx_k__ValueError, sizeof(__pyx_k__ValueError), 0, 0, 1, 1}, {&__pyx_n_s__X, __pyx_k__X, sizeof(__pyx_k__X), 0, 0, 1, 1}, {&__pyx_n_s__Xa, __pyx_k__Xa, sizeof(__pyx_k__Xa), 0, 0, 1, 1}, {&__pyx_n_s__Y, __pyx_k__Y, sizeof(__pyx_k__Y), 0, 0, 1, 1}, {&__pyx_n_s__Ya, __pyx_k__Ya, sizeof(__pyx_k__Ya), 0, 0, 1, 1}, {&__pyx_n_s__Z, __pyx_k__Z, sizeof(__pyx_k__Z), 0, 0, 1, 1}, {&__pyx_n_s__Za, __pyx_k__Za, sizeof(__pyx_k__Za), 0, 0, 1, 1}, {&__pyx_n_s___L1_moments, __pyx_k___L1_moments, sizeof(__pyx_k___L1_moments), 0, 0, 1, 1}, {&__pyx_n_s____main__, __pyx_k____main__, sizeof(__pyx_k____main__), 0, 0, 1, 1}, {&__pyx_n_s____test__, __pyx_k____test__, sizeof(__pyx_k____test__), 0, 0, 1, 1}, {&__pyx_n_s____version__, __pyx_k____version__, sizeof(__pyx_k____version__), 0, 0, 1, 1}, {&__pyx_n_s___apply_polyaffine, __pyx_k___apply_polyaffine, sizeof(__pyx_k___apply_polyaffine), 0, 0, 1, 1}, {&__pyx_n_s___cspline_resample3d, __pyx_k___cspline_resample3d, sizeof(__pyx_k___cspline_resample3d), 0, 0, 1, 1}, {&__pyx_n_s___cspline_sample1d, __pyx_k___cspline_sample1d, sizeof(__pyx_k___cspline_sample1d), 0, 0, 1, 1}, {&__pyx_n_s___cspline_sample2d, __pyx_k___cspline_sample2d, sizeof(__pyx_k___cspline_sample2d), 0, 0, 1, 1}, {&__pyx_n_s___cspline_sample3d, __pyx_k___cspline_sample3d, sizeof(__pyx_k___cspline_sample3d), 0, 0, 1, 1}, {&__pyx_n_s___cspline_sample4d, __pyx_k___cspline_sample4d, sizeof(__pyx_k___cspline_sample4d), 0, 0, 1, 1}, {&__pyx_n_s___cspline_transform, __pyx_k___cspline_transform, sizeof(__pyx_k___cspline_transform), 0, 0, 1, 1}, {&__pyx_n_s___joint_histogram, __pyx_k___joint_histogram, sizeof(__pyx_k___joint_histogram), 0, 0, 1, 1}, {&__pyx_n_s__affines, __pyx_k__affines, sizeof(__pyx_k__affines), 0, 0, 1, 1}, {&__pyx_n_s__asarray, __pyx_k__asarray, sizeof(__pyx_k__asarray), 0, 0, 1, 1}, {&__pyx_n_s__astype, __pyx_k__astype, sizeof(__pyx_k__astype), 0, 0, 1, 1}, {&__pyx_n_s__c, __pyx_k__c, sizeof(__pyx_k__c), 0, 0, 1, 1}, {&__pyx_n_s__cast_integer, __pyx_k__cast_integer, sizeof(__pyx_k__cast_integer), 0, 0, 1, 1}, {&__pyx_n_s__centers, __pyx_k__centers, sizeof(__pyx_k__centers), 0, 0, 1, 1}, {&__pyx_n_s__check_array, __pyx_k__check_array, sizeof(__pyx_k__check_array), 0, 0, 1, 1}, {&__pyx_n_s__clampI, __pyx_k__clampI, sizeof(__pyx_k__clampI), 0, 0, 1, 
1}, {&__pyx_n_s__clampJ, __pyx_k__clampJ, sizeof(__pyx_k__clampJ), 0, 0, 1, 1}, {&__pyx_n_s__dev, __pyx_k__dev, sizeof(__pyx_k__dev), 0, 0, 1, 1}, {&__pyx_n_s__dim, __pyx_k__dim, sizeof(__pyx_k__dim), 0, 0, 1, 1}, {&__pyx_n_s__dims, __pyx_k__dims, sizeof(__pyx_k__dims), 0, 0, 1, 1}, {&__pyx_n_s__double, __pyx_k__double, sizeof(__pyx_k__double), 0, 0, 1, 1}, {&__pyx_n_s__dtype, __pyx_k__dtype, sizeof(__pyx_k__dtype), 0, 0, 1, 1}, {&__pyx_n_s__exp_dim, __pyx_k__exp_dim, sizeof(__pyx_k__exp_dim), 0, 0, 1, 1}, {&__pyx_n_s__flags, __pyx_k__flags, sizeof(__pyx_k__flags), 0, 0, 1, 1}, {&__pyx_n_s__h, __pyx_k__h, sizeof(__pyx_k__h), 0, 0, 1, 1}, {&__pyx_n_s__i, __pyx_k__i, sizeof(__pyx_k__i), 0, 0, 1, 1}, {&__pyx_n_s__im, __pyx_k__im, sizeof(__pyx_k__im), 0, 0, 1, 1}, {&__pyx_n_s__imJ, __pyx_k__imJ, sizeof(__pyx_k__imJ), 0, 0, 1, 1}, {&__pyx_n_s__im_resampled, __pyx_k__im_resampled, sizeof(__pyx_k__im_resampled), 0, 0, 1, 1}, {&__pyx_n_s__index, __pyx_k__index, sizeof(__pyx_k__index), 0, 0, 1, 1}, {&__pyx_n_s__interp, __pyx_k__interp, sizeof(__pyx_k__interp), 0, 0, 1, 1}, {&__pyx_n_s__iterI, __pyx_k__iterI, sizeof(__pyx_k__iterI), 0, 0, 1, 1}, {&__pyx_n_s__kind, __pyx_k__kind, sizeof(__pyx_k__kind), 0, 0, 1, 1}, {&__pyx_n_s__median, __pyx_k__median, sizeof(__pyx_k__median), 0, 0, 1, 1}, {&__pyx_n_s__mode, __pyx_k__mode, sizeof(__pyx_k__mode), 0, 0, 1, 1}, {&__pyx_n_s__modes, __pyx_k__modes, sizeof(__pyx_k__modes), 0, 0, 1, 1}, {&__pyx_n_s__mt, __pyx_k__mt, sizeof(__pyx_k__mt), 0, 0, 1, 1}, {&__pyx_n_s__multi, __pyx_k__multi, sizeof(__pyx_k__multi), 0, 0, 1, 1}, {&__pyx_n_s__mx, __pyx_k__mx, sizeof(__pyx_k__mx), 0, 0, 1, 1}, {&__pyx_n_s__my, __pyx_k__my, sizeof(__pyx_k__my), 0, 0, 1, 1}, {&__pyx_n_s__mz, __pyx_k__mz, sizeof(__pyx_k__mz), 0, 0, 1, 1}, {&__pyx_n_s__n, __pyx_k__n, sizeof(__pyx_k__n), 0, 0, 1, 1}, {&__pyx_n_s__nearest, __pyx_k__nearest, sizeof(__pyx_k__nearest), 0, 0, 1, 1}, {&__pyx_n_s__np, __pyx_k__np, sizeof(__pyx_k__np), 0, 0, 1, 1}, {&__pyx_n_s__numpy, __pyx_k__numpy, sizeof(__pyx_k__numpy), 0, 0, 1, 1}, {&__pyx_n_s__order, __pyx_k__order, sizeof(__pyx_k__order), 0, 0, 1, 1}, {&__pyx_n_s__r, __pyx_k__r, sizeof(__pyx_k__r), 0, 0, 1, 1}, {&__pyx_n_s__range, __pyx_k__range, sizeof(__pyx_k__range), 0, 0, 1, 1}, {&__pyx_n_s__reflect, __pyx_k__reflect, sizeof(__pyx_k__reflect), 0, 0, 1, 1}, {&__pyx_n_s__reshape, __pyx_k__reshape, sizeof(__pyx_k__reshape), 0, 0, 1, 1}, {&__pyx_n_s__ret, __pyx_k__ret, sizeof(__pyx_k__ret), 0, 0, 1, 1}, {&__pyx_n_s__sigma, __pyx_k__sigma, sizeof(__pyx_k__sigma), 0, 0, 1, 1}, {&__pyx_n_s__size, __pyx_k__size, sizeof(__pyx_k__size), 0, 0, 1, 1}, {&__pyx_n_s__t, __pyx_k__t, sizeof(__pyx_k__t), 0, 0, 1, 1}, {&__pyx_n_s__tvox, __pyx_k__tvox, sizeof(__pyx_k__tvox), 0, 0, 1, 1}, {&__pyx_n_s__u, __pyx_k__u, sizeof(__pyx_k__u), 0, 0, 1, 1}, {&__pyx_n_s__x, __pyx_k__x, sizeof(__pyx_k__x), 0, 0, 1, 1}, {&__pyx_n_s__xname, __pyx_k__xname, sizeof(__pyx_k__xname), 0, 0, 1, 1}, {&__pyx_n_s__xyz, __pyx_k__xyz, sizeof(__pyx_k__xyz), 0, 0, 1, 1}, {&__pyx_n_s__y, __pyx_k__y, sizeof(__pyx_k__y), 0, 0, 1, 1}, {&__pyx_n_s__z, __pyx_k__z, sizeof(__pyx_k__z), 0, 0, 1, 1}, {&__pyx_n_s__zero, __pyx_k__zero, sizeof(__pyx_k__zero), 0, 0, 1, 1}, {&__pyx_n_s__zeros, __pyx_k__zeros, sizeof(__pyx_k__zeros), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_RuntimeError = __Pyx_GetName(__pyx_b, __pyx_n_s__RuntimeError); if (!__pyx_builtin_RuntimeError) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto 
__pyx_L1_error;} __pyx_builtin_range = __Pyx_GetName(__pyx_b, __pyx_n_s__range); if (!__pyx_builtin_range) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 95; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_ValueError = __Pyx_GetName(__pyx_b, __pyx_n_s__ValueError); if (!__pyx_builtin_ValueError) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} return 0; __pyx_L1_error:; return -1; } static int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "nipy/algorithms/registration/_registration.pyx":73 * ret = joint_histogram(H, clampI, clampJ, iterI, imJ, Tvox, interp) * if not ret == 0: * raise RuntimeError('Joint histogram failed because of incorrect input arrays.') # <<<<<<<<<<<<<< * * return */ __pyx_k_tuple_2 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_2); __Pyx_INCREF(((PyObject *)__pyx_kp_s_1)); PyTuple_SET_ITEM(__pyx_k_tuple_2, 0, ((PyObject *)__pyx_kp_s_1)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_1)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_2)); /* "nipy/algorithms/registration/_registration.pyx":89 * ret = L1_moments(n, median, dev, H) * if not ret == 0: * raise RuntimeError('L1_moments failed because input array is not double.') # <<<<<<<<<<<<<< * * return n[0], median[0], dev[0] */ __pyx_k_tuple_4 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 89; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_4); __Pyx_INCREF(((PyObject *)__pyx_kp_s_3)); PyTuple_SET_ITEM(__pyx_k_tuple_4, 0, ((PyObject *)__pyx_kp_s_3)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_3)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_4)); /* "nipy/algorithms/registration/_registration.pyx":226 * check_array(sigma, sigma.size, 3, 'sigma') * if not centers.shape[0] == affines.shape[0]: * raise ValueError('centers and affines arrays should have same shape[0]') # <<<<<<<<<<<<<< * * apply_polyaffine(xyz, centers, affines, sigma) */ __pyx_k_tuple_8 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 226; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_8); __Pyx_INCREF(((PyObject *)__pyx_kp_s_7)); PyTuple_SET_ITEM(__pyx_k_tuple_8, 0, ((PyObject *)__pyx_kp_s_7)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_7)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_8)); /* "numpy.pxd":215 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_k_tuple_10 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_10)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_10); __Pyx_INCREF(((PyObject *)__pyx_kp_u_9)); PyTuple_SET_ITEM(__pyx_k_tuple_10, 0, ((PyObject *)__pyx_kp_u_9)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_9)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_10)); /* "numpy.pxd":219 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_k_tuple_12 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_12)) 
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_12); __Pyx_INCREF(((PyObject *)__pyx_kp_u_11)); PyTuple_SET_ITEM(__pyx_k_tuple_12, 0, ((PyObject *)__pyx_kp_u_11)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_11)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_12)); /* "numpy.pxd":257 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_k_tuple_14 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_14)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_14); __Pyx_INCREF(((PyObject *)__pyx_kp_u_13)); PyTuple_SET_ITEM(__pyx_k_tuple_14, 0, ((PyObject *)__pyx_kp_u_13)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_13)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_14)); /* "numpy.pxd":799 * * if (end - f) - (new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_k_tuple_17 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_17)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_17); __Pyx_INCREF(((PyObject *)__pyx_kp_u_16)); PyTuple_SET_ITEM(__pyx_k_tuple_17, 0, ((PyObject *)__pyx_kp_u_16)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_16)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_17)); /* "numpy.pxd":803 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_k_tuple_18 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_18)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_18); __Pyx_INCREF(((PyObject *)__pyx_kp_u_13)); PyTuple_SET_ITEM(__pyx_k_tuple_18, 0, ((PyObject *)__pyx_kp_u_13)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_13)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_18)); /* "numpy.pxd":823 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_k_tuple_20 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_20)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_20); __Pyx_INCREF(((PyObject *)__pyx_kp_u_19)); PyTuple_SET_ITEM(__pyx_k_tuple_20, 0, ((PyObject *)__pyx_kp_u_19)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_19)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_20)); /* "nipy/algorithms/registration/_registration.pyx":57 * * * def _joint_histogram(ndarray H, flatiter iterI, ndarray imJ, ndarray Tvox, long interp): # <<<<<<<<<<<<<< * """ * Compute the joint histogram given a transformation trial. 
*/ __pyx_k_tuple_23 = PyTuple_New(10); if (unlikely(!__pyx_k_tuple_23)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_23); __Pyx_INCREF(((PyObject *)__pyx_n_s__H)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 0, ((PyObject *)__pyx_n_s__H)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__H)); __Pyx_INCREF(((PyObject *)__pyx_n_s__iterI)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 1, ((PyObject *)__pyx_n_s__iterI)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__iterI)); __Pyx_INCREF(((PyObject *)__pyx_n_s__imJ)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 2, ((PyObject *)__pyx_n_s__imJ)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__imJ)); __Pyx_INCREF(((PyObject *)__pyx_n_s__Tvox)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 3, ((PyObject *)__pyx_n_s__Tvox)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Tvox)); __Pyx_INCREF(((PyObject *)__pyx_n_s__interp)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 4, ((PyObject *)__pyx_n_s__interp)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__interp)); __Pyx_INCREF(((PyObject *)__pyx_n_s__h)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 5, ((PyObject *)__pyx_n_s__h)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__h)); __Pyx_INCREF(((PyObject *)__pyx_n_s__tvox)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 6, ((PyObject *)__pyx_n_s__tvox)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__tvox)); __Pyx_INCREF(((PyObject *)__pyx_n_s__clampI)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 7, ((PyObject *)__pyx_n_s__clampI)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__clampI)); __Pyx_INCREF(((PyObject *)__pyx_n_s__clampJ)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 8, ((PyObject *)__pyx_n_s__clampJ)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__clampJ)); __Pyx_INCREF(((PyObject *)__pyx_n_s__ret)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 9, ((PyObject *)__pyx_n_s__ret)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__ret)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_23)); __pyx_k_codeobj_24 = (PyObject*)__Pyx_PyCode_New(5, 0, 10, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_23, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_25, __pyx_n_s___joint_histogram, 57, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_24)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/algorithms/registration/_registration.pyx":78 * * * def _L1_moments(ndarray H): # <<<<<<<<<<<<<< * """ * Compute L1 moments of order 0, 1 and 2 of a one-dimensional */ __pyx_k_tuple_27 = PyTuple_New(5); if (unlikely(!__pyx_k_tuple_27)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_27); __Pyx_INCREF(((PyObject *)__pyx_n_s__H)); PyTuple_SET_ITEM(__pyx_k_tuple_27, 0, ((PyObject *)__pyx_n_s__H)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__H)); __Pyx_INCREF(((PyObject *)__pyx_n_s__n)); PyTuple_SET_ITEM(__pyx_k_tuple_27, 1, ((PyObject *)__pyx_n_s__n)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__n)); __Pyx_INCREF(((PyObject *)__pyx_n_s__median)); PyTuple_SET_ITEM(__pyx_k_tuple_27, 2, ((PyObject *)__pyx_n_s__median)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__median)); __Pyx_INCREF(((PyObject *)__pyx_n_s__dev)); PyTuple_SET_ITEM(__pyx_k_tuple_27, 3, ((PyObject *)__pyx_n_s__dev)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__dev)); __Pyx_INCREF(((PyObject *)__pyx_n_s__ret)); PyTuple_SET_ITEM(__pyx_k_tuple_27, 4, ((PyObject *)__pyx_n_s__ret)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__ret)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_27)); __pyx_k_codeobj_28 = (PyObject*)__Pyx_PyCode_New(1, 0, 5, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, 
__pyx_empty_tuple, __pyx_k_tuple_27, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_25, __pyx_n_s___L1_moments, 78, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_28)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/algorithms/registration/_registration.pyx":94 * * * def _cspline_transform(ndarray x): # <<<<<<<<<<<<<< * c = np.zeros([x.shape[i] for i in range(x.ndim)], dtype=np.double) * cubic_spline_transform(c, x) */ __pyx_k_tuple_29 = PyTuple_New(3); if (unlikely(!__pyx_k_tuple_29)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 94; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_29); __Pyx_INCREF(((PyObject *)__pyx_n_s__x)); PyTuple_SET_ITEM(__pyx_k_tuple_29, 0, ((PyObject *)__pyx_n_s__x)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__x)); __Pyx_INCREF(((PyObject *)__pyx_n_s__c)); PyTuple_SET_ITEM(__pyx_k_tuple_29, 1, ((PyObject *)__pyx_n_s__c)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__c)); __Pyx_INCREF(((PyObject *)__pyx_n_s__i)); PyTuple_SET_ITEM(__pyx_k_tuple_29, 2, ((PyObject *)__pyx_n_s__i)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__i)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_29)); __pyx_k_codeobj_30 = (PyObject*)__Pyx_PyCode_New(1, 0, 3, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_29, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_25, __pyx_n_s___cspline_transform, 94, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_30)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 94; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/algorithms/registration/_registration.pyx":103 * return np.reshape(in_arr, shape).astype(np.double) * * def _cspline_sample1d(ndarray R, ndarray C, X=0, mode='zero'): # <<<<<<<<<<<<<< * cdef double *r, *x * cdef broadcast multi */ __pyx_k_tuple_31 = PyTuple_New(8); if (unlikely(!__pyx_k_tuple_31)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 103; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_31); __Pyx_INCREF(((PyObject *)__pyx_n_s__R)); PyTuple_SET_ITEM(__pyx_k_tuple_31, 0, ((PyObject *)__pyx_n_s__R)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__R)); __Pyx_INCREF(((PyObject *)__pyx_n_s__C)); PyTuple_SET_ITEM(__pyx_k_tuple_31, 1, ((PyObject *)__pyx_n_s__C)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__C)); __Pyx_INCREF(((PyObject *)__pyx_n_s__X)); PyTuple_SET_ITEM(__pyx_k_tuple_31, 2, ((PyObject *)__pyx_n_s__X)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__X)); __Pyx_INCREF(((PyObject *)__pyx_n_s__mode)); PyTuple_SET_ITEM(__pyx_k_tuple_31, 3, ((PyObject *)__pyx_n_s__mode)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__mode)); __Pyx_INCREF(((PyObject *)__pyx_n_s__r)); PyTuple_SET_ITEM(__pyx_k_tuple_31, 4, ((PyObject *)__pyx_n_s__r)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__r)); __Pyx_INCREF(((PyObject *)__pyx_n_s__x)); PyTuple_SET_ITEM(__pyx_k_tuple_31, 5, ((PyObject *)__pyx_n_s__x)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__x)); __Pyx_INCREF(((PyObject *)__pyx_n_s__multi)); PyTuple_SET_ITEM(__pyx_k_tuple_31, 6, ((PyObject *)__pyx_n_s__multi)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__multi)); __Pyx_INCREF(((PyObject *)__pyx_n_s__Xa)); PyTuple_SET_ITEM(__pyx_k_tuple_31, 7, ((PyObject *)__pyx_n_s__Xa)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Xa)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_31)); __pyx_k_codeobj_32 = (PyObject*)__Pyx_PyCode_New(4, 0, 8, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_31, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_25, __pyx_n_s___cspline_sample1d, 103, __pyx_empty_bytes); if 
(unlikely(!__pyx_k_codeobj_32)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 103; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/algorithms/registration/_registration.pyx":115 * return R * * def _cspline_sample2d(ndarray R, ndarray C, X=0, Y=0, # <<<<<<<<<<<<<< * mx='zero', my='zero'): * cdef double *r, *x, *y */ __pyx_k_tuple_33 = PyTuple_New(12); if (unlikely(!__pyx_k_tuple_33)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 115; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_33); __Pyx_INCREF(((PyObject *)__pyx_n_s__R)); PyTuple_SET_ITEM(__pyx_k_tuple_33, 0, ((PyObject *)__pyx_n_s__R)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__R)); __Pyx_INCREF(((PyObject *)__pyx_n_s__C)); PyTuple_SET_ITEM(__pyx_k_tuple_33, 1, ((PyObject *)__pyx_n_s__C)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__C)); __Pyx_INCREF(((PyObject *)__pyx_n_s__X)); PyTuple_SET_ITEM(__pyx_k_tuple_33, 2, ((PyObject *)__pyx_n_s__X)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__X)); __Pyx_INCREF(((PyObject *)__pyx_n_s__Y)); PyTuple_SET_ITEM(__pyx_k_tuple_33, 3, ((PyObject *)__pyx_n_s__Y)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Y)); __Pyx_INCREF(((PyObject *)__pyx_n_s__mx)); PyTuple_SET_ITEM(__pyx_k_tuple_33, 4, ((PyObject *)__pyx_n_s__mx)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__mx)); __Pyx_INCREF(((PyObject *)__pyx_n_s__my)); PyTuple_SET_ITEM(__pyx_k_tuple_33, 5, ((PyObject *)__pyx_n_s__my)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__my)); __Pyx_INCREF(((PyObject *)__pyx_n_s__r)); PyTuple_SET_ITEM(__pyx_k_tuple_33, 6, ((PyObject *)__pyx_n_s__r)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__r)); __Pyx_INCREF(((PyObject *)__pyx_n_s__x)); PyTuple_SET_ITEM(__pyx_k_tuple_33, 7, ((PyObject *)__pyx_n_s__x)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__x)); __Pyx_INCREF(((PyObject *)__pyx_n_s__y)); PyTuple_SET_ITEM(__pyx_k_tuple_33, 8, ((PyObject *)__pyx_n_s__y)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__y)); __Pyx_INCREF(((PyObject *)__pyx_n_s__multi)); PyTuple_SET_ITEM(__pyx_k_tuple_33, 9, ((PyObject *)__pyx_n_s__multi)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__multi)); __Pyx_INCREF(((PyObject *)__pyx_n_s__Xa)); PyTuple_SET_ITEM(__pyx_k_tuple_33, 10, ((PyObject *)__pyx_n_s__Xa)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Xa)); __Pyx_INCREF(((PyObject *)__pyx_n_s__Ya)); PyTuple_SET_ITEM(__pyx_k_tuple_33, 11, ((PyObject *)__pyx_n_s__Ya)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Ya)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_33)); __pyx_k_codeobj_34 = (PyObject*)__Pyx_PyCode_New(6, 0, 12, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_33, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_25, __pyx_n_s___cspline_sample2d, 115, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_34)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 115; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/algorithms/registration/_registration.pyx":130 * return R * * def _cspline_sample3d(ndarray R, ndarray C, X=0, Y=0, Z=0, # <<<<<<<<<<<<<< * mx='zero', my='zero', mz='zero'): * cdef double *r, *x, *y, *z */ __pyx_k_tuple_35 = PyTuple_New(16); if (unlikely(!__pyx_k_tuple_35)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 130; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_35); __Pyx_INCREF(((PyObject *)__pyx_n_s__R)); PyTuple_SET_ITEM(__pyx_k_tuple_35, 0, ((PyObject *)__pyx_n_s__R)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__R)); __Pyx_INCREF(((PyObject *)__pyx_n_s__C)); PyTuple_SET_ITEM(__pyx_k_tuple_35, 1, ((PyObject *)__pyx_n_s__C)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__C)); __Pyx_INCREF(((PyObject 
*)__pyx_n_s__X)); PyTuple_SET_ITEM(__pyx_k_tuple_35, 2, ((PyObject *)__pyx_n_s__X)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__X)); __Pyx_INCREF(((PyObject *)__pyx_n_s__Y)); PyTuple_SET_ITEM(__pyx_k_tuple_35, 3, ((PyObject *)__pyx_n_s__Y)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Y)); __Pyx_INCREF(((PyObject *)__pyx_n_s__Z)); PyTuple_SET_ITEM(__pyx_k_tuple_35, 4, ((PyObject *)__pyx_n_s__Z)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Z)); __Pyx_INCREF(((PyObject *)__pyx_n_s__mx)); PyTuple_SET_ITEM(__pyx_k_tuple_35, 5, ((PyObject *)__pyx_n_s__mx)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__mx)); __Pyx_INCREF(((PyObject *)__pyx_n_s__my)); PyTuple_SET_ITEM(__pyx_k_tuple_35, 6, ((PyObject *)__pyx_n_s__my)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__my)); __Pyx_INCREF(((PyObject *)__pyx_n_s__mz)); PyTuple_SET_ITEM(__pyx_k_tuple_35, 7, ((PyObject *)__pyx_n_s__mz)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__mz)); __Pyx_INCREF(((PyObject *)__pyx_n_s__r)); PyTuple_SET_ITEM(__pyx_k_tuple_35, 8, ((PyObject *)__pyx_n_s__r)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__r)); __Pyx_INCREF(((PyObject *)__pyx_n_s__x)); PyTuple_SET_ITEM(__pyx_k_tuple_35, 9, ((PyObject *)__pyx_n_s__x)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__x)); __Pyx_INCREF(((PyObject *)__pyx_n_s__y)); PyTuple_SET_ITEM(__pyx_k_tuple_35, 10, ((PyObject *)__pyx_n_s__y)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__y)); __Pyx_INCREF(((PyObject *)__pyx_n_s__z)); PyTuple_SET_ITEM(__pyx_k_tuple_35, 11, ((PyObject *)__pyx_n_s__z)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__z)); __Pyx_INCREF(((PyObject *)__pyx_n_s__multi)); PyTuple_SET_ITEM(__pyx_k_tuple_35, 12, ((PyObject *)__pyx_n_s__multi)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__multi)); __Pyx_INCREF(((PyObject *)__pyx_n_s__Xa)); PyTuple_SET_ITEM(__pyx_k_tuple_35, 13, ((PyObject *)__pyx_n_s__Xa)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Xa)); __Pyx_INCREF(((PyObject *)__pyx_n_s__Ya)); PyTuple_SET_ITEM(__pyx_k_tuple_35, 14, ((PyObject *)__pyx_n_s__Ya)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Ya)); __Pyx_INCREF(((PyObject *)__pyx_n_s__Za)); PyTuple_SET_ITEM(__pyx_k_tuple_35, 15, ((PyObject *)__pyx_n_s__Za)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Za)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_35)); __pyx_k_codeobj_36 = (PyObject*)__Pyx_PyCode_New(8, 0, 16, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_35, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_25, __pyx_n_s___cspline_sample3d, 130, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_36)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 130; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/algorithms/registration/_registration.pyx":148 * * * def _cspline_sample4d(ndarray R, ndarray C, X=0, Y=0, Z=0, T=0, # <<<<<<<<<<<<<< * mx='zero', my='zero', mz='zero', mt='zero'): * """ */ __pyx_k_tuple_37 = PyTuple_New(20); if (unlikely(!__pyx_k_tuple_37)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 148; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_37); __Pyx_INCREF(((PyObject *)__pyx_n_s__R)); PyTuple_SET_ITEM(__pyx_k_tuple_37, 0, ((PyObject *)__pyx_n_s__R)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__R)); __Pyx_INCREF(((PyObject *)__pyx_n_s__C)); PyTuple_SET_ITEM(__pyx_k_tuple_37, 1, ((PyObject *)__pyx_n_s__C)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__C)); __Pyx_INCREF(((PyObject *)__pyx_n_s__X)); PyTuple_SET_ITEM(__pyx_k_tuple_37, 2, ((PyObject *)__pyx_n_s__X)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__X)); __Pyx_INCREF(((PyObject *)__pyx_n_s__Y)); PyTuple_SET_ITEM(__pyx_k_tuple_37, 3, ((PyObject *)__pyx_n_s__Y)); 
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__Y)); __Pyx_INCREF(((PyObject *)__pyx_n_s__Z)); PyTuple_SET_ITEM(__pyx_k_tuple_37, 4, ((PyObject *)__pyx_n_s__Z)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Z)); __Pyx_INCREF(((PyObject *)__pyx_n_s__T)); PyTuple_SET_ITEM(__pyx_k_tuple_37, 5, ((PyObject *)__pyx_n_s__T)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__T)); __Pyx_INCREF(((PyObject *)__pyx_n_s__mx)); PyTuple_SET_ITEM(__pyx_k_tuple_37, 6, ((PyObject *)__pyx_n_s__mx)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__mx)); __Pyx_INCREF(((PyObject *)__pyx_n_s__my)); PyTuple_SET_ITEM(__pyx_k_tuple_37, 7, ((PyObject *)__pyx_n_s__my)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__my)); __Pyx_INCREF(((PyObject *)__pyx_n_s__mz)); PyTuple_SET_ITEM(__pyx_k_tuple_37, 8, ((PyObject *)__pyx_n_s__mz)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__mz)); __Pyx_INCREF(((PyObject *)__pyx_n_s__mt)); PyTuple_SET_ITEM(__pyx_k_tuple_37, 9, ((PyObject *)__pyx_n_s__mt)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__mt)); __Pyx_INCREF(((PyObject *)__pyx_n_s__r)); PyTuple_SET_ITEM(__pyx_k_tuple_37, 10, ((PyObject *)__pyx_n_s__r)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__r)); __Pyx_INCREF(((PyObject *)__pyx_n_s__x)); PyTuple_SET_ITEM(__pyx_k_tuple_37, 11, ((PyObject *)__pyx_n_s__x)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__x)); __Pyx_INCREF(((PyObject *)__pyx_n_s__y)); PyTuple_SET_ITEM(__pyx_k_tuple_37, 12, ((PyObject *)__pyx_n_s__y)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__y)); __Pyx_INCREF(((PyObject *)__pyx_n_s__z)); PyTuple_SET_ITEM(__pyx_k_tuple_37, 13, ((PyObject *)__pyx_n_s__z)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__z)); __Pyx_INCREF(((PyObject *)__pyx_n_s__t)); PyTuple_SET_ITEM(__pyx_k_tuple_37, 14, ((PyObject *)__pyx_n_s__t)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__t)); __Pyx_INCREF(((PyObject *)__pyx_n_s__multi)); PyTuple_SET_ITEM(__pyx_k_tuple_37, 15, ((PyObject *)__pyx_n_s__multi)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__multi)); __Pyx_INCREF(((PyObject *)__pyx_n_s__Xa)); PyTuple_SET_ITEM(__pyx_k_tuple_37, 16, ((PyObject *)__pyx_n_s__Xa)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Xa)); __Pyx_INCREF(((PyObject *)__pyx_n_s__Ya)); PyTuple_SET_ITEM(__pyx_k_tuple_37, 17, ((PyObject *)__pyx_n_s__Ya)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Ya)); __Pyx_INCREF(((PyObject *)__pyx_n_s__Za)); PyTuple_SET_ITEM(__pyx_k_tuple_37, 18, ((PyObject *)__pyx_n_s__Za)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Za)); __Pyx_INCREF(((PyObject *)__pyx_n_s__Ta)); PyTuple_SET_ITEM(__pyx_k_tuple_37, 19, ((PyObject *)__pyx_n_s__Ta)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Ta)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_37)); __pyx_k_codeobj_38 = (PyObject*)__Pyx_PyCode_New(10, 0, 20, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_37, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_25, __pyx_n_s___cspline_sample4d, 148, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_38)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 148; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/algorithms/registration/_registration.pyx":171 * * * def _cspline_resample3d(ndarray im, dims, ndarray Tvox, dtype=None, # <<<<<<<<<<<<<< * mx='zero', my='zero', mz='zero'): * """ */ __pyx_k_tuple_39 = PyTuple_New(10); if (unlikely(!__pyx_k_tuple_39)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 171; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_39); __Pyx_INCREF(((PyObject *)__pyx_n_s__im)); PyTuple_SET_ITEM(__pyx_k_tuple_39, 0, ((PyObject *)__pyx_n_s__im)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__im)); __Pyx_INCREF(((PyObject *)__pyx_n_s__dims)); 
PyTuple_SET_ITEM(__pyx_k_tuple_39, 1, ((PyObject *)__pyx_n_s__dims)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__dims)); __Pyx_INCREF(((PyObject *)__pyx_n_s__Tvox)); PyTuple_SET_ITEM(__pyx_k_tuple_39, 2, ((PyObject *)__pyx_n_s__Tvox)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Tvox)); __Pyx_INCREF(((PyObject *)__pyx_n_s__dtype)); PyTuple_SET_ITEM(__pyx_k_tuple_39, 3, ((PyObject *)__pyx_n_s__dtype)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__dtype)); __Pyx_INCREF(((PyObject *)__pyx_n_s__mx)); PyTuple_SET_ITEM(__pyx_k_tuple_39, 4, ((PyObject *)__pyx_n_s__mx)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__mx)); __Pyx_INCREF(((PyObject *)__pyx_n_s__my)); PyTuple_SET_ITEM(__pyx_k_tuple_39, 5, ((PyObject *)__pyx_n_s__my)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__my)); __Pyx_INCREF(((PyObject *)__pyx_n_s__mz)); PyTuple_SET_ITEM(__pyx_k_tuple_39, 6, ((PyObject *)__pyx_n_s__mz)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__mz)); __Pyx_INCREF(((PyObject *)__pyx_n_s__tvox)); PyTuple_SET_ITEM(__pyx_k_tuple_39, 7, ((PyObject *)__pyx_n_s__tvox)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__tvox)); __Pyx_INCREF(((PyObject *)__pyx_n_s__cast_integer)); PyTuple_SET_ITEM(__pyx_k_tuple_39, 8, ((PyObject *)__pyx_n_s__cast_integer)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__cast_integer)); __Pyx_INCREF(((PyObject *)__pyx_n_s__im_resampled)); PyTuple_SET_ITEM(__pyx_k_tuple_39, 9, ((PyObject *)__pyx_n_s__im_resampled)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__im_resampled)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_39)); __pyx_k_codeobj_40 = (PyObject*)__Pyx_PyCode_New(7, 0, 10, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_39, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_25, __pyx_n_s___cspline_resample3d, 171, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_40)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 171; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/algorithms/registration/_registration.pyx":213 * * * def check_array(ndarray x, int dim, int exp_dim, xname): # <<<<<<<<<<<<<< * if not x.flags['C_CONTIGUOUS'] or not x.dtype=='double': * raise ValueError('%s array should be double C-contiguous' % xname) */ __pyx_k_tuple_41 = PyTuple_New(4); if (unlikely(!__pyx_k_tuple_41)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 213; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_41); __Pyx_INCREF(((PyObject *)__pyx_n_s__x)); PyTuple_SET_ITEM(__pyx_k_tuple_41, 0, ((PyObject *)__pyx_n_s__x)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__x)); __Pyx_INCREF(((PyObject *)__pyx_n_s__dim)); PyTuple_SET_ITEM(__pyx_k_tuple_41, 1, ((PyObject *)__pyx_n_s__dim)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__dim)); __Pyx_INCREF(((PyObject *)__pyx_n_s__exp_dim)); PyTuple_SET_ITEM(__pyx_k_tuple_41, 2, ((PyObject *)__pyx_n_s__exp_dim)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__exp_dim)); __Pyx_INCREF(((PyObject *)__pyx_n_s__xname)); PyTuple_SET_ITEM(__pyx_k_tuple_41, 3, ((PyObject *)__pyx_n_s__xname)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__xname)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_41)); __pyx_k_codeobj_42 = (PyObject*)__Pyx_PyCode_New(4, 0, 4, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_41, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_25, __pyx_n_s__check_array, 213, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_42)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 213; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/algorithms/registration/_registration.pyx":219 * raise ValueError('%s has size %d in last dimension, %d expected' % (xname, dim, exp_dim)) * * def 
_apply_polyaffine(ndarray xyz, ndarray centers, ndarray affines, ndarray sigma): # <<<<<<<<<<<<<< * * check_array(xyz, xyz.shape[1], 3, 'xyz') */ __pyx_k_tuple_43 = PyTuple_New(4); if (unlikely(!__pyx_k_tuple_43)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_43); __Pyx_INCREF(((PyObject *)__pyx_n_s__xyz)); PyTuple_SET_ITEM(__pyx_k_tuple_43, 0, ((PyObject *)__pyx_n_s__xyz)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__xyz)); __Pyx_INCREF(((PyObject *)__pyx_n_s__centers)); PyTuple_SET_ITEM(__pyx_k_tuple_43, 1, ((PyObject *)__pyx_n_s__centers)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__centers)); __Pyx_INCREF(((PyObject *)__pyx_n_s__affines)); PyTuple_SET_ITEM(__pyx_k_tuple_43, 2, ((PyObject *)__pyx_n_s__affines)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__affines)); __Pyx_INCREF(((PyObject *)__pyx_n_s__sigma)); PyTuple_SET_ITEM(__pyx_k_tuple_43, 3, ((PyObject *)__pyx_n_s__sigma)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__sigma)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_43)); __pyx_k_codeobj_44 = (PyObject*)__Pyx_PyCode_New(4, 0, 4, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_43, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_25, __pyx_n_s___apply_polyaffine, 219, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_44)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_InitGlobals(void) { if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __pyx_int_2 = PyInt_FromLong(2); if (unlikely(!__pyx_int_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __pyx_int_3 = PyInt_FromLong(3); if (unlikely(!__pyx_int_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __pyx_int_12 = PyInt_FromLong(12); if (unlikely(!__pyx_int_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __pyx_int_15 = PyInt_FromLong(15); if (unlikely(!__pyx_int_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; return 0; __pyx_L1_error:; return -1; } #if PY_MAJOR_VERSION < 3 PyMODINIT_FUNC init_registration(void); /*proto*/ PyMODINIT_FUNC init_registration(void) #else PyMODINIT_FUNC PyInit__registration(void); /*proto*/ PyMODINIT_FUNC PyInit__registration(void) #endif { PyObject *__pyx_t_1 = NULL; __Pyx_RefNannyDeclarations #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit__registration(void)", 0); if ( __Pyx_check_binary_version() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #ifdef __Pyx_CyFunction_USED if (__Pyx_CyFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? */ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4(__Pyx_NAMESTR("_registration"), __pyx_methods, __Pyx_DOCSTR(__pyx_k_21), 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (!PyDict_GetItemString(modules, "nipy.algorithms.registration._registration")) { if (unlikely(PyDict_SetItemString(modules, "nipy.algorithms.registration._registration", __pyx_m) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } } #endif __pyx_b = PyImport_AddModule(__Pyx_NAMESTR(__Pyx_BUILTIN_MODULE_NAME)); if (unlikely(!__pyx_b)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #if CYTHON_COMPILING_IN_PYPY Py_INCREF(__pyx_b); #endif if (__Pyx_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; /*--- Initialize various global constants etc. 
---*/ if (unlikely(__Pyx_InitGlobals() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (__pyx_module_is_main_nipy__algorithms__registration___registration) { if (__Pyx_SetAttrString(__pyx_m, "__name__", __pyx_n_s____main__) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; } /*--- Builtin init code ---*/ if (unlikely(__Pyx_InitCachedBuiltins() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Constants init code ---*/ if (unlikely(__Pyx_InitCachedConstants() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Global init code ---*/ /*--- Variable export code ---*/ /*--- Function export code ---*/ /*--- Type init code ---*/ /*--- Type import code ---*/ __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "type", #if CYTHON_COMPILING_IN_PYPY sizeof(PyTypeObject), #else sizeof(PyHeapTypeObject), #endif 0); if (unlikely(!__pyx_ptype_7cpython_4type_type)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_5numpy_dtype)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 155; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 169; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 178; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 861; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Variable import code ---*/ /*--- Function import code ---*/ /*--- Execution code ---*/ /* "nipy/algorithms/registration/_registration.pyx":9 * """ * * __version__ = '0.3' # <<<<<<<<<<<<<< * * */ if (PyObject_SetAttr(__pyx_m, __pyx_n_s____version__, ((PyObject *)__pyx_kp_s_22)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/algorithms/registration/_registration.pyx":47 * * # Initialize numpy * joint_histogram_import_array() # <<<<<<<<<<<<<< * cubic_spline_import_array() * polyaffine_import_array() */ joint_histogram_import_array(); /* "nipy/algorithms/registration/_registration.pyx":48 * # Initialize numpy * joint_histogram_import_array() * cubic_spline_import_array() # <<<<<<<<<<<<<< * polyaffine_import_array() * import_array() */ cubic_spline_import_array(); /* "nipy/algorithms/registration/_registration.pyx":49 * joint_histogram_import_array() * cubic_spline_import_array() * polyaffine_import_array() # <<<<<<<<<<<<<< * import_array() * import numpy as np */ polyaffine_import_array(); /* "nipy/algorithms/registration/_registration.pyx":50 * cubic_spline_import_array() * 
polyaffine_import_array() * import_array() # <<<<<<<<<<<<<< * import numpy as np * */ import_array(); /* "nipy/algorithms/registration/_registration.pyx":51 * polyaffine_import_array() * import_array() * import numpy as np # <<<<<<<<<<<<<< * * # Globals */ __pyx_t_1 = __Pyx_Import(((PyObject *)__pyx_n_s__numpy), 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 51; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__np, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 51; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/registration/_registration.pyx":54 * * # Globals * modes = {'zero': 0, 'nearest': 1, 'reflect': 2} # <<<<<<<<<<<<<< * * */ __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 54; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_1)); if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__zero), __pyx_int_0) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 54; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__nearest), __pyx_int_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 54; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__reflect), __pyx_int_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 54; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (PyObject_SetAttr(__pyx_m, __pyx_n_s__modes, ((PyObject *)__pyx_t_1)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 54; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; /* "nipy/algorithms/registration/_registration.pyx":57 * * * def _joint_histogram(ndarray H, flatiter iterI, ndarray imJ, ndarray Tvox, long interp): # <<<<<<<<<<<<<< * """ * Compute the joint histogram given a transformation trial. 
*/ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_10algorithms_12registration_13_registration_1_joint_histogram, NULL, __pyx_n_s_26); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s___joint_histogram, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/registration/_registration.pyx":78 * * * def _L1_moments(ndarray H): # <<<<<<<<<<<<<< * """ * Compute L1 moments of order 0, 1 and 2 of a one-dimensional */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_10algorithms_12registration_13_registration_3_L1_moments, NULL, __pyx_n_s_26); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s___L1_moments, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/registration/_registration.pyx":94 * * * def _cspline_transform(ndarray x): # <<<<<<<<<<<<<< * c = np.zeros([x.shape[i] for i in range(x.ndim)], dtype=np.double) * cubic_spline_transform(c, x) */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_10algorithms_12registration_13_registration_5_cspline_transform, NULL, __pyx_n_s_26); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 94; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s___cspline_transform, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 94; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/registration/_registration.pyx":103 * return np.reshape(in_arr, shape).astype(np.double) * * def _cspline_sample1d(ndarray R, ndarray C, X=0, mode='zero'): # <<<<<<<<<<<<<< * cdef double *r, *x * cdef broadcast multi */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_10algorithms_12registration_13_registration_7_cspline_sample1d, NULL, __pyx_n_s_26); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 103; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s___cspline_sample1d, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 103; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/registration/_registration.pyx":115 * return R * * def _cspline_sample2d(ndarray R, ndarray C, X=0, Y=0, # <<<<<<<<<<<<<< * mx='zero', my='zero'): * cdef double *r, *x, *y */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_10algorithms_12registration_13_registration_9_cspline_sample2d, NULL, __pyx_n_s_26); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 115; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s___cspline_sample2d, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 115; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/registration/_registration.pyx":130 * return R * * def _cspline_sample3d(ndarray R, ndarray C, X=0, Y=0, Z=0, # <<<<<<<<<<<<<< * mx='zero', my='zero', mz='zero'): * cdef double *r, *x, *y, *z */ __pyx_t_1 = 
PyCFunction_NewEx(&__pyx_mdef_4nipy_10algorithms_12registration_13_registration_11_cspline_sample3d, NULL, __pyx_n_s_26); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 130; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s___cspline_sample3d, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 130; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/registration/_registration.pyx":148 * * * def _cspline_sample4d(ndarray R, ndarray C, X=0, Y=0, Z=0, T=0, # <<<<<<<<<<<<<< * mx='zero', my='zero', mz='zero', mt='zero'): * """ */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_10algorithms_12registration_13_registration_13_cspline_sample4d, NULL, __pyx_n_s_26); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 148; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s___cspline_sample4d, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 148; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/registration/_registration.pyx":171 * * * def _cspline_resample3d(ndarray im, dims, ndarray Tvox, dtype=None, # <<<<<<<<<<<<<< * mx='zero', my='zero', mz='zero'): * """ */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_10algorithms_12registration_13_registration_15_cspline_resample3d, NULL, __pyx_n_s_26); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 171; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s___cspline_resample3d, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 171; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/registration/_registration.pyx":213 * * * def check_array(ndarray x, int dim, int exp_dim, xname): # <<<<<<<<<<<<<< * if not x.flags['C_CONTIGUOUS'] or not x.dtype=='double': * raise ValueError('%s array should be double C-contiguous' % xname) */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_10algorithms_12registration_13_registration_17check_array, NULL, __pyx_n_s_26); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 213; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__check_array, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 213; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/registration/_registration.pyx":219 * raise ValueError('%s has size %d in last dimension, %d expected' % (xname, dim, exp_dim)) * * def _apply_polyaffine(ndarray xyz, ndarray centers, ndarray affines, ndarray sigma): # <<<<<<<<<<<<<< * * check_array(xyz, xyz.shape[1], 3, 'xyz') */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_10algorithms_12registration_13_registration_19_apply_polyaffine, NULL, __pyx_n_s_26); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s___apply_polyaffine, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/registration/_registration.pyx":1 * # -*- Mode: Python -*- # <<<<<<<<<<<<<< * * """ */ __pyx_t_1 = 
PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_1)); if (PyObject_SetAttr(__pyx_m, __pyx_n_s____test__, ((PyObject *)__pyx_t_1)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; /* "numpy.pxd":975 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); if (__pyx_m) { __Pyx_AddTraceback("init nipy.algorithms.registration._registration", __pyx_clineno, __pyx_lineno, __pyx_filename); Py_DECREF(__pyx_m); __pyx_m = 0; } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init nipy.algorithms.registration._registration"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if PY_MAJOR_VERSION < 3 return; #else return __pyx_m; #endif } /* Runtime support code */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule((char *)modname); if (!m) goto end; p = PyObject_GetAttrString(m, (char *)"RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* CYTHON_REFNANNY */ static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name) { PyObject *result; result = PyObject_GetAttr(dict, name); if (!result) { if (dict != __pyx_b) { PyErr_Clear(); result = PyObject_GetAttr(__pyx_b, name); } if (!result) { PyErr_SetObject(PyExc_NameError, name); } } return result; } static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%s() takes %s %" CYTHON_FORMAT_SSIZE_T "d positional argument%s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? 
"" : "s", num_found); } static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 
1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%s() got an unexpected keyword argument '%s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } static int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, const char *name, int exact) { if (!type) { PyErr_Format(PyExc_SystemError, "Missing type object"); return 0; } if (none_allowed && obj == Py_None) return 1; else if (exact) { if (Py_TYPE(obj) == type) return 1; } else { if (PyObject_TypeCheck(obj, type)) return 1; } PyErr_Format(PyExc_TypeError, "Argument '%s' has incorrect type (expected %s, got %s)", name, type->tp_name, Py_TYPE(obj)->tp_name); return 0; } static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb) { #if CYTHON_COMPILING_IN_CPYTHON PyObject *tmp_type, *tmp_value, *tmp_tb; PyThreadState *tstate = PyThreadState_GET(); tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_Restore(type, value, tb); #endif } static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb) { #if CYTHON_COMPILING_IN_CPYTHON PyThreadState *tstate = PyThreadState_GET(); *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(type, value, tb); #endif } #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } #if PY_VERSION_HEX < 0x02050000 if (PyClass_Check(type)) { #else if (PyType_Check(type)) { #endif #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; #if PY_VERSION_HEX < 0x02050000 if (PyInstance_Check(type)) { type = (PyObject*) ((PyInstanceObject*)type)->in_class; Py_INCREF(type); } else { type = 0; PyErr_SetString(PyExc_TypeError, "raise: exception must be an old-style class or instance"); goto raise_error; } #else type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } #endif } __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); 
Py_XDECREF(tb); return; } #else /* Python 3+ */ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyEval_CallObject(type, args); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } if (cause && cause != Py_None) { PyObject *fixed_cause; if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { PyThreadState *tstate = PyThreadState_GET(); PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } } bad: Py_XDECREF(owned_instance); return; } #endif static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { if (unlikely(!type)) { PyErr_Format(PyExc_SystemError, "Missing type object"); return 0; } if (likely(PyObject_TypeCheck(obj, type))) return 1; PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", Py_TYPE(obj)->tp_name, type->tp_name); return 0; } static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); } static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more than %" CYTHON_FORMAT_SSIZE_T "d value%s to unpack", index, (index == 1) ? 
"" : "s"); } static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); } static CYTHON_INLINE int __Pyx_IterFinish(void) { #if CYTHON_COMPILING_IN_CPYTHON PyThreadState *tstate = PyThreadState_GET(); PyObject* exc_type = tstate->curexc_type; if (unlikely(exc_type)) { if (likely(exc_type == PyExc_StopIteration) || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration)) { PyObject *exc_value, *exc_tb; exc_value = tstate->curexc_value; exc_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; Py_DECREF(exc_type); Py_XDECREF(exc_value); Py_XDECREF(exc_tb); return 0; } else { return -1; } } return 0; #else if (unlikely(PyErr_Occurred())) { if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) { PyErr_Clear(); return 0; } else { return -1; } } return 0; #endif } static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected) { if (unlikely(retval)) { Py_DECREF(retval); __Pyx_RaiseTooManyValuesError(expected); return -1; } else { return __Pyx_IterFinish(); } return 0; } static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level) { PyObject *py_import = 0; PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; py_import = __Pyx_GetAttrString(__pyx_b, "__import__"); if (!py_import) goto bad; if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; #if PY_VERSION_HEX >= 0x02050000 { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if (strchr(__Pyx_MODULE_NAME, '.')) { /* try package relative import first */ PyObject *py_level = PyInt_FromLong(1); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; /* try absolute import on failure */ } #endif if (!module) { PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); } } #else if (level>0) { PyErr_SetString(PyExc_RuntimeError, "Relative import is not supported for Python <=2.4."); goto bad; } module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, NULL); #endif bad: Py_XDECREF(empty_list); Py_XDECREF(py_import); Py_XDECREF(empty_dict); return module; } static CYTHON_INLINE PyObject *__Pyx_PyInt_to_py_Py_intptr_t(Py_intptr_t val) { const Py_intptr_t neg_one = (Py_intptr_t)-1, const_zero = (Py_intptr_t)0; const int is_unsigned = const_zero < neg_one; if ((sizeof(Py_intptr_t) == sizeof(char)) || (sizeof(Py_intptr_t) == sizeof(short))) { return PyInt_FromLong((long)val); } else if ((sizeof(Py_intptr_t) == sizeof(int)) || (sizeof(Py_intptr_t) == sizeof(long))) { if (is_unsigned) return PyLong_FromUnsignedLong((unsigned long)val); else return PyInt_FromLong((long)val); } else if (sizeof(Py_intptr_t) == sizeof(PY_LONG_LONG)) { if (is_unsigned) return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG)val); else return PyLong_FromLongLong((PY_LONG_LONG)val); } else { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; return 
_PyLong_FromByteArray(bytes, sizeof(Py_intptr_t), little, !is_unsigned); } } #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return ::std::complex< float >(x, y); } #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return x + y*(__pyx_t_float_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { __pyx_t_float_complex z; z.real = x; z.imag = y; return z; } #endif #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex a, __pyx_t_float_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float denom = b.real * b.real + b.imag * b.imag; z.real = (a.real * b.real + a.imag * b.imag) / denom; z.imag = (a.imag * b.real - a.real * b.imag) / denom; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrtf(z.real*z.real + z.imag*z.imag); #else return hypotf(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { float denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(a, a); case 3: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(z, a); case 4: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } r = a.real; theta = 0; } else { r = __Pyx_c_absf(a); theta = atan2f(a.imag, a.real); } lnr = logf(r); z_r = expf(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cosf(z_theta); z.imag = z_r * sinf(z_theta); return z; } #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return ::std::complex< double >(x, y); } #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return x + 
y*(__pyx_t_double_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { __pyx_t_double_complex z; z.real = x; z.imag = y; return z; } #endif #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex a, __pyx_t_double_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double denom = b.real * b.real + b.imag * b.imag; z.real = (a.real * b.real + a.imag * b.imag) / denom; z.imag = (a.imag * b.real - a.real * b.imag) / denom; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrt(z.real*z.real + z.imag*z.imag); #else return hypot(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { double denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(a, a); case 3: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(z, a); case 4: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } r = a.real; theta = 0; } else { r = __Pyx_c_abs(a); theta = atan2(a.imag, a.real); } lnr = log(r); z_r = exp(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cos(z_theta); z.imag = z_r * sin(z_theta); return z; } #endif #endif static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject* x) { const unsigned char neg_one = (unsigned char)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(unsigned char) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(unsigned char)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? 
"can't convert negative value to unsigned char" : "value too large to convert to unsigned char"); } return (unsigned char)-1; } return (unsigned char)val; } return (unsigned char)__Pyx_PyInt_AsUnsignedLong(x); } static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject* x) { const unsigned short neg_one = (unsigned short)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(unsigned short) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(unsigned short)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to unsigned short" : "value too large to convert to unsigned short"); } return (unsigned short)-1; } return (unsigned short)val; } return (unsigned short)__Pyx_PyInt_AsUnsignedLong(x); } static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject* x) { const unsigned int neg_one = (unsigned int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(unsigned int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(unsigned int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to unsigned int" : "value too large to convert to unsigned int"); } return (unsigned int)-1; } return (unsigned int)val; } return (unsigned int)__Pyx_PyInt_AsUnsignedLong(x); } static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject* x) { const char neg_one = (char)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(char) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(char)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to char" : "value too large to convert to char"); } return (char)-1; } return (char)val; } return (char)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject* x) { const short neg_one = (short)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(short) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(short)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to short" : "value too large to convert to short"); } return (short)-1; } return (short)val; } return (short)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject* x) { const int neg_one = (int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? 
"can't convert negative value to int" : "value too large to convert to int"); } return (int)-1; } return (int)val; } return (int)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject* x) { const signed char neg_one = (signed char)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(signed char) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(signed char)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to signed char" : "value too large to convert to signed char"); } return (signed char)-1; } return (signed char)val; } return (signed char)__Pyx_PyInt_AsSignedLong(x); } static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject* x) { const signed short neg_one = (signed short)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(signed short) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(signed short)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to signed short" : "value too large to convert to signed short"); } return (signed short)-1; } return (signed short)val; } return (signed short)__Pyx_PyInt_AsSignedLong(x); } static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject* x) { const signed int neg_one = (signed int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(signed int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(signed int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to signed int" : "value too large to convert to signed int"); } return (signed int)-1; } return (signed int)val; } return (signed int)__Pyx_PyInt_AsSignedLong(x); } static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject* x) { const int neg_one = (int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? 
"can't convert negative value to int" : "value too large to convert to int"); } return (int)-1; } return (int)val; } return (int)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject* x) { const unsigned long neg_one = (unsigned long)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned long"); return (unsigned long)-1; } return (unsigned long)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned long"); return (unsigned long)-1; } return (unsigned long)PyLong_AsUnsignedLong(x); } else { return (unsigned long)PyLong_AsLong(x); } } else { unsigned long val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (unsigned long)-1; val = __Pyx_PyInt_AsUnsignedLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject* x) { const unsigned PY_LONG_LONG neg_one = (unsigned PY_LONG_LONG)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned PY_LONG_LONG"); return (unsigned PY_LONG_LONG)-1; } return (unsigned PY_LONG_LONG)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned PY_LONG_LONG"); return (unsigned PY_LONG_LONG)-1; } return (unsigned PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); } else { return (unsigned PY_LONG_LONG)PyLong_AsLongLong(x); } } else { unsigned PY_LONG_LONG val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (unsigned PY_LONG_LONG)-1; val = __Pyx_PyInt_AsUnsignedLongLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject* x) { const long neg_one = (long)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long)-1; } return (long)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long)-1; } return (long)PyLong_AsUnsignedLong(x); } else { return (long)PyLong_AsLong(x); } } else { long val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (long)-1; val = __Pyx_PyInt_AsLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject* x) { const PY_LONG_LONG neg_one = (PY_LONG_LONG)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to PY_LONG_LONG"); return (PY_LONG_LONG)-1; } return (PY_LONG_LONG)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { 
PyErr_SetString(PyExc_OverflowError, "can't convert negative value to PY_LONG_LONG"); return (PY_LONG_LONG)-1; } return (PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); } else { return (PY_LONG_LONG)PyLong_AsLongLong(x); } } else { PY_LONG_LONG val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (PY_LONG_LONG)-1; val = __Pyx_PyInt_AsLongLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject* x) { const signed long neg_one = (signed long)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed long"); return (signed long)-1; } return (signed long)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed long"); return (signed long)-1; } return (signed long)PyLong_AsUnsignedLong(x); } else { return (signed long)PyLong_AsLong(x); } } else { signed long val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (signed long)-1; val = __Pyx_PyInt_AsSignedLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject* x) { const signed PY_LONG_LONG neg_one = (signed PY_LONG_LONG)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed PY_LONG_LONG"); return (signed PY_LONG_LONG)-1; } return (signed PY_LONG_LONG)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed PY_LONG_LONG"); return (signed PY_LONG_LONG)-1; } return (signed PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); } else { return (signed PY_LONG_LONG)PyLong_AsLongLong(x); } } else { signed PY_LONG_LONG val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (signed PY_LONG_LONG)-1; val = __Pyx_PyInt_AsSignedLongLong(tmp); Py_DECREF(tmp); return val; } } static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); #if PY_VERSION_HEX < 0x02050000 return PyErr_Warn(NULL, message); #else return PyErr_WarnEx(NULL, message, 1); #endif } return 0; } #ifndef __PYX_HAVE_RT_ImportModule #define __PYX_HAVE_RT_ImportModule static PyObject *__Pyx_ImportModule(const char *name) { PyObject *py_name = 0; PyObject *py_module = 0; py_name = __Pyx_PyIdentifier_FromString(name); if (!py_name) goto bad; py_module = PyImport_Import(py_name); Py_DECREF(py_name); return py_module; bad: Py_XDECREF(py_name); return 0; } #endif #ifndef __PYX_HAVE_RT_ImportType #define __PYX_HAVE_RT_ImportType static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict) { PyObject *py_module = 0; PyObject *result = 0; PyObject *py_name = 0; char warning[200]; 
py_module = __Pyx_ImportModule(module_name); if (!py_module) goto bad; py_name = __Pyx_PyIdentifier_FromString(class_name); if (!py_name) goto bad; result = PyObject_GetAttr(py_module, py_name); Py_DECREF(py_name); py_name = 0; Py_DECREF(py_module); py_module = 0; if (!result) goto bad; if (!PyType_Check(result)) { PyErr_Format(PyExc_TypeError, "%s.%s is not a type object", module_name, class_name); goto bad; } if (!strict && (size_t)((PyTypeObject *)result)->tp_basicsize > size) { PyOS_snprintf(warning, sizeof(warning), "%s.%s size changed, may indicate binary incompatibility", module_name, class_name); #if PY_VERSION_HEX < 0x02050000 if (PyErr_Warn(NULL, warning) < 0) goto bad; #else if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; #endif } else if ((size_t)((PyTypeObject *)result)->tp_basicsize != size) { PyErr_Format(PyExc_ValueError, "%s.%s has the wrong size, try recompiling", module_name, class_name); goto bad; } return (PyTypeObject *)result; bad: Py_XDECREF(py_module); Py_XDECREF(result); return NULL; } #endif static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = (start + end) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, new_max*sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( 
const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, /*int argcount,*/ 0, /*int kwonlyargcount,*/ 0, /*int nlocals,*/ 0, /*int stacksize,*/ 0, /*int flags,*/ __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, /*int firstlineno,*/ __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_globals = 0; PyFrameObject *py_frame = 0; py_code = __pyx_find_code_object(c_line ? c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? c_line : py_line, py_code); } py_globals = PyModule_GetDict(__pyx_m); if (!py_globals) goto bad; py_frame = PyFrame_New( PyThreadState_GET(), /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ py_globals, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; py_frame->f_lineno = py_line; PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else /* Python 3+ has unicode identifiers */ if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; ++t; } return 0; } /* Type Conversion Functions */ static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x) { PyNumberMethods *m; const char *name = NULL; PyObject *res = NULL; #if PY_VERSION_HEX < 0x03000000 if (PyInt_Check(x) || PyLong_Check(x)) #else if (PyLong_Check(x)) #endif return Py_INCREF(x), x; m = Py_TYPE(x)->tp_as_number; #if PY_VERSION_HEX < 0x03000000 if (m && m->nb_int) { name = "int"; res = PyNumber_Int(x); } else if (m && m->nb_long) { name = "long"; res = PyNumber_Long(x); } #else if (m && m->nb_int) { name = 
"int"; res = PyNumber_Long(x); } #endif if (res) { #if PY_VERSION_HEX < 0x03000000 if (!PyInt_Check(res) && !PyLong_Check(res)) { #else if (!PyLong_Check(res)) { #endif PyErr_Format(PyExc_TypeError, "__%s__ returned non-%s (type %.200s)", name, name, Py_TYPE(res)->tp_name); Py_DECREF(res); return NULL; } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject* x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { #if PY_VERSION_HEX < 0x02050000 if (ival <= LONG_MAX) return PyInt_FromLong((long)ival); else { unsigned char *bytes = (unsigned char *) &ival; int one = 1; int little = (int)*(unsigned char*)&one; return _PyLong_FromByteArray(bytes, sizeof(size_t), little, 0); } #else return PyInt_FromSize_t(ival); #endif } static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject* x) { unsigned PY_LONG_LONG val = __Pyx_PyInt_AsUnsignedLongLong(x); if (unlikely(val == (unsigned PY_LONG_LONG)-1 && PyErr_Occurred())) { return (size_t)-1; } else if (unlikely(val != (unsigned PY_LONG_LONG)(size_t)val)) { PyErr_SetString(PyExc_OverflowError, "value too large to convert to size_t"); return (size_t)-1; } return (size_t)val; } #endif /* Py_PYTHON_H */ nipy-0.3.0/nipy/algorithms/registration/_registration.pyx000066400000000000000000000201121210344137400237410ustar00rootroot00000000000000# -*- Mode: Python -*- """ Bindings for various image registration routines written in C: joint histogram computation, cubic spline interpolation, non-rigid transformations. """ __version__ = '0.3' # Includes from numpy cimport (import_array, ndarray, flatiter, broadcast, PyArray_MultiIterNew, PyArray_MultiIter_DATA, PyArray_MultiIter_NEXT) # Externals cdef extern from "joint_histogram.h": void joint_histogram_import_array() int joint_histogram(ndarray H, unsigned int clampI, unsigned int clampJ, flatiter iterI, ndarray imJ_padded, ndarray Tvox, int interp) int L1_moments(double* n, double* median, double* dev, ndarray H) cdef extern from "cubic_spline.h": void cubic_spline_import_array() void cubic_spline_transform(ndarray res, ndarray src) double cubic_spline_sample1d(double x, ndarray coef, int mode) double cubic_spline_sample2d(double x, double y, ndarray coef, int mode_x, int mode_y) double cubic_spline_sample3d(double x, double y, double z, ndarray coef, int mode_x, int mode_y, int mode_z) double cubic_spline_sample4d(double x, double y, double z, double t, ndarray coef, int mode_x, int mode_y, int mode_z, int mode_t) void cubic_spline_resample3d(ndarray im_resampled, ndarray im, double* Tvox, int cast_integer, int mode_x, int mode_y, int mode_z) cdef extern from "polyaffine.h": void polyaffine_import_array() void apply_polyaffine(ndarray XYZ, ndarray Centers, ndarray Affines, ndarray Sigma) # Initialize numpy joint_histogram_import_array() cubic_spline_import_array() polyaffine_import_array() import_array() import numpy as np # Globals modes = {'zero': 0, 'nearest': 1, 'reflect': 2} def _joint_histogram(ndarray H, flatiter iterI, ndarray imJ, ndarray Tvox, long interp): """ Compute the joint histogram given a transformation trial. 
""" cdef: double *h, *tvox unsigned int clampI, clampJ int ret # Views clampI = H.shape[0] clampJ = H.shape[1] # Compute joint histogram ret = joint_histogram(H, clampI, clampJ, iterI, imJ, Tvox, interp) if not ret == 0: raise RuntimeError('Joint histogram failed because of incorrect input arrays.') return def _L1_moments(ndarray H): """ Compute L1 moments of order 0, 1 and 2 of a one-dimensional histogram. """ cdef: double n[1], median[1], dev[1] int ret ret = L1_moments(n, median, dev, H) if not ret == 0: raise RuntimeError('L1_moments failed because input array is not double.') return n[0], median[0], dev[0] def _cspline_transform(ndarray x): c = np.zeros([x.shape[i] for i in range(x.ndim)], dtype=np.double) cubic_spline_transform(c, x) return c cdef ndarray _reshaped_double(object in_arr, ndarray sh_arr): shape = [sh_arr.shape[i] for i in range(sh_arr.ndim)] return np.reshape(in_arr, shape).astype(np.double) def _cspline_sample1d(ndarray R, ndarray C, X=0, mode='zero'): cdef double *r, *x cdef broadcast multi Xa = _reshaped_double(X, R) multi = PyArray_MultiIterNew(2, R, Xa) while(multi.index < multi.size): r = PyArray_MultiIter_DATA(multi, 0) x = PyArray_MultiIter_DATA(multi, 1) r[0] = cubic_spline_sample1d(x[0], C, modes[mode]) PyArray_MultiIter_NEXT(multi) return R def _cspline_sample2d(ndarray R, ndarray C, X=0, Y=0, mx='zero', my='zero'): cdef double *r, *x, *y cdef broadcast multi Xa = _reshaped_double(X, R) Ya = _reshaped_double(Y, R) multi = PyArray_MultiIterNew(3, R, Xa, Ya) while(multi.index < multi.size): r = PyArray_MultiIter_DATA(multi, 0) x = PyArray_MultiIter_DATA(multi, 1) y = PyArray_MultiIter_DATA(multi, 2) r[0] = cubic_spline_sample2d(x[0], y[0], C, modes[mx], modes[my]) PyArray_MultiIter_NEXT(multi) return R def _cspline_sample3d(ndarray R, ndarray C, X=0, Y=0, Z=0, mx='zero', my='zero', mz='zero'): cdef double *r, *x, *y, *z cdef broadcast multi Xa = _reshaped_double(X, R) Ya = _reshaped_double(Y, R) Za = _reshaped_double(Z, R) multi = PyArray_MultiIterNew(4, R, Xa, Ya, Za) while(multi.index < multi.size): r = PyArray_MultiIter_DATA(multi, 0) x = PyArray_MultiIter_DATA(multi, 1) y = PyArray_MultiIter_DATA(multi, 2) z = PyArray_MultiIter_DATA(multi, 3) r[0] = cubic_spline_sample3d(x[0], y[0], z[0], C, modes[mx], modes[my], modes[mz]) PyArray_MultiIter_NEXT(multi) return R def _cspline_sample4d(ndarray R, ndarray C, X=0, Y=0, Z=0, T=0, mx='zero', my='zero', mz='zero', mt='zero'): """ In-place cubic spline sampling. R.dtype must be 'double'. """ cdef double *r, *x, *y, *z, *t cdef broadcast multi Xa = _reshaped_double(X, R) Ya = _reshaped_double(Y, R) Za = _reshaped_double(Z, R) Ta = _reshaped_double(T, R) multi = PyArray_MultiIterNew(5, R, Xa, Ya, Za, Ta) while(multi.index < multi.size): r = PyArray_MultiIter_DATA(multi, 0) x = PyArray_MultiIter_DATA(multi, 1) y = PyArray_MultiIter_DATA(multi, 2) z = PyArray_MultiIter_DATA(multi, 3) t = PyArray_MultiIter_DATA(multi, 4) r[0] = cubic_spline_sample4d(x[0], y[0], z[0], t[0], C, modes[mx], modes[my], modes[mz], modes[mt]) PyArray_MultiIter_NEXT(multi) return R def _cspline_resample3d(ndarray im, dims, ndarray Tvox, dtype=None, mx='zero', my='zero', mz='zero'): """ Perform cubic spline resampling of a 3d input image `im` into a grid with shape `dims` according to an affine transform represented by a 4x4 matrix `Tvox` that assumes voxel coordinates. Boundary conditions on each axis are determined by the keyword arguments `mx`, `my` and `mz`, respectively. 
Possible choices are: 'zero': assume zero intensity outside the target grid 'nearest': extrapolate intensity by the closest grid point along the axis 'reflect': extrapolate intensity by mirroring the input image along the axis Note that `Tvox` will be re-ordered in C convention if needed. """ cdef double *tvox cdef int cast_integer # Create output array if dtype == None: dtype = im.dtype im_resampled = np.zeros(tuple(dims), dtype=dtype) # Ensure that the Tvox array is C-contiguous (required by the # underlying C routine) Tvox = np.asarray(Tvox, dtype='double', order='C') tvox = Tvox.data # Actual resampling if dtype.kind == 'i': cast_integer = 1 elif dtype.kind == 'u': cast_integer = 2 else: cast_integer = 0 cubic_spline_resample3d(im_resampled, im, tvox, cast_integer, modes[mx], modes[my], modes[mz]) return im_resampled def check_array(ndarray x, int dim, int exp_dim, xname): if not x.flags['C_CONTIGUOUS'] or not x.dtype=='double': raise ValueError('%s array should be double C-contiguous' % xname) if not dim == exp_dim: raise ValueError('%s has size %d in last dimension, %d expected' % (xname, dim, exp_dim)) def _apply_polyaffine(ndarray xyz, ndarray centers, ndarray affines, ndarray sigma): check_array(xyz, xyz.shape[1], 3, 'xyz') check_array(centers, centers.shape[1], 3, 'centers') check_array(affines, affines.shape[1], 12, 'affines') check_array(sigma, sigma.size, 3, 'sigma') if not centers.shape[0] == affines.shape[0]: raise ValueError('centers and affines arrays should have same shape[0]') apply_polyaffine(xyz, centers, affines, sigma) nipy-0.3.0/nipy/algorithms/registration/affine.py000066400000000000000000000326141210344137400221420ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import numpy as np import scipy.linalg as spl from nibabel.affines import apply_affine from ...externals.transforms3d.quaternions import mat2quat, quat2axangle from .transform import Transform # Globals RADIUS = 100 MAX_ANGLE = 1e10 * 2 * np.pi SMALL_ANGLE = 1e-30 MAX_DIST = 1e10 LOG_MAX_DIST = np.log(MAX_DIST) TINY = float(np.finfo(np.double).tiny) def threshold(x, th): return np.maximum(np.minimum(x, th), -th) def rotation_mat2vec(R): """ Rotation vector from rotation matrix `R` Parameters ---------- R : (3,3) array-like Rotation matrix Returns ------- vec : (3,) array Rotation vector, where norm of `vec` is the angle ``theta``, and the axis of rotation is given by ``vec / theta`` """ ax, angle = quat2axangle(mat2quat(R)) return ax * angle def rotation_vec2mat(r): """ R = rotation_vec2mat(r) The rotation matrix is given by the Rodrigues formula: R = Id + sin(theta)*Sn + (1-cos(theta))*Sn^2 with: 0 -nz ny Sn = nz 0 -nx -ny nx 0 where n = r / ||r|| In case the angle ||r|| is very small, the above formula may lead to numerical instabilities. We instead use a Taylor expansion around theta=0: R = I + sin(theta)/tetha Sr + (1-cos(theta))/teta2 Sr^2 leading to: R = I + (1-theta2/6)*Sr + (1/2-theta2/24)*Sr^2 To avoid numerical instabilities, an upper threshold is applied to the angle. It is chosen to be a multiple of 2*pi, hence the resulting rotation is then the identity matrix. This strategy warrants that the output matrix is a continuous function of the input vector. 
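    Examples
    --------
    A zero rotation vector maps to the identity matrix:

    >>> import numpy as np
    >>> np.allclose(rotation_vec2mat(np.zeros(3)), np.eye(3))
    True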
""" theta = np.sqrt(np.sum(r ** 2)) if theta > MAX_ANGLE: return np.eye(3) elif theta > SMALL_ANGLE: n = r / theta Sn = np.array([[0, -n[2], n[1]], [n[2], 0, -n[0]], [-n[1], n[0], 0]]) R = np.eye(3) + np.sin(theta) * Sn\ + (1 - np.cos(theta)) * np.dot(Sn, Sn) else: Sr = np.array([[0, -r[2], r[1]], [r[2], 0, -r[0]], [-r[1], r[0], 0]]) theta2 = theta * theta R = np.eye(3) + (1 - theta2 / 6.) * Sr\ + (.5 - theta2 / 24.) * np.dot(Sr, Sr) return R def to_matrix44(t, dtype=np.double): """ T = to_matrix44(t) t is a vector of affine transformation parameters with size at least 6. size < 6 ==> error size == 6 ==> t is interpreted as translation + rotation size == 7 ==> t is interpreted as translation + rotation + isotropic scaling 7 < size < 12 ==> error size >= 12 ==> t is interpreted as translation + rotation + scaling + pre-rotation """ size = t.size T = np.eye(4, dtype=dtype) R = rotation_vec2mat(t[3:6]) if size == 6: T[0:3, 0:3] = R elif size == 7: T[0:3, 0:3] = t[6] * R else: S = np.diag(np.exp(threshold(t[6:9], LOG_MAX_DIST))) Q = rotation_vec2mat(t[9:12]) # Beware: R*s*Q T[0:3, 0:3] = np.dot(R, np.dot(S, Q)) T[0:3, 3] = threshold(t[0:3], MAX_DIST) return T def preconditioner(radius): """ Computes a scaling vector pc such that, if p=(u,r,s,q) represents affine transformation parameters, where u is a translation, r and q are rotation vectors, and s is the vector of log-scales, then all components of (p/pc) are roughly comparable to the translation component. To that end, we use a `radius` parameter which represents the 'typical size' of the object being registered. This is used to reformat the parameter vector (translation+rotation+scaling+pre-rotation) so that each element roughly represents a variation in mm. """ rad = 1. / radius sca = 1. / radius return np.array([1, 1, 1, rad, rad, rad, sca, sca, sca, rad, rad, rad]) def inverse_affine(affine): return spl.inv(affine) def slices2aff(slices): """ Return affine from start, step of sequence `slices` of slice objects Parameters ---------- slices : sequence of slice objects Returns ------- aff : ndarray If ``N = len(slices)`` then affine is shape (N+1, N+1) with diagonal given by the ``step`` attribute of the slice objects (where None corresponds to 1), and the `:N` elements in the last column are given by the ``start`` attribute of the slice objects Examples -------- >>> slices2aff([slice(None), slice(None)]) array([[ 1., 0., 0.], [ 0., 1., 0.], [ 0., 0., 1.]]) >>> slices2aff([slice(2, 3, 4), slice(3, 4, 5), slice(4, 5, 6)]) array([[ 4., 0., 0., 2.], [ 0., 5., 0., 3.], [ 0., 0., 6., 4.], [ 0., 0., 0., 1.]]) """ starts = [s.start if not s.start is None else 0 for s in slices] steps = [s.step if not s.step is None else 1 for s in slices] aff = np.diag(steps + [1.]) aff[:-1, -1] = starts return aff def subgrid_affine(affine, slices): """ Return dot prodoct of `affine` and affine resulting from `slices` Parameters ---------- affine : array-like Affine to apply on right of affine resulting from `slices` slices : sequence of slice objects Slices generating (N+1, N+1) affine from ``slices2aff``, where ``N = len(slices)`` Returns ------- aff : ndarray result of ``np.dot(affine, slice_affine)`` where ``slice_affine`` is affine resulting from ``slices2aff(slices)``. 
Raises ------ ValueError : if the ``slice_affine`` contains non-integer values """ slices_aff = slices2aff(slices) if not np.all(slices_aff == np.round(slices_aff)): raise ValueError("Need integer slice start, step") return np.dot(affine, slices_aff) class Affine(Transform): param_inds = range(12) def __init__(self, array=None, radius=RADIUS): self._direct = True self._precond = preconditioner(radius) if array == None: self._vec12 = np.zeros(12) return array = np.array(array) if array.size == 12: self._vec12 = array.ravel().copy() elif array.shape == (4, 4): self.from_matrix44(array) else: raise ValueError('Invalid array') def copy(self): new = self.__class__() new._direct = self._direct new._precond[:] = self._precond[:] new._vec12 = self._vec12.copy() return new def from_matrix44(self, aff): """ Convert a 4x4 matrix describing an affine transform into a 12-sized vector of natural affine parameters: translation, rotation, log-scale, pre-rotation (to allow for shearing when combined with non-unitary scales). In case the transform has a negative determinant, set the `_direct` attribute to False. """ vec12 = np.zeros((12,)) vec12[0:3] = aff[:3, 3] # Use SVD to find orthogonal and diagonal matrices such that # aff[0:3,0:3] == R*S*Q R, s, Q = spl.svd(aff[0:3, 0:3]) if spl.det(R) < 0: R = -R Q = -Q r = rotation_mat2vec(R) if spl.det(Q) < 0: Q = -Q self._direct = False q = rotation_mat2vec(Q) vec12[3:6] = r vec12[6:9] = np.log(np.maximum(s, TINY)) vec12[9:12] = q self._vec12 = vec12 def apply(self, xyz): return apply_affine(self.as_affine(), xyz) def _get_param(self): param = self._vec12 / self._precond return param[self.param_inds] def _set_param(self, p): p = np.asarray(p) inds = self.param_inds self._vec12[inds] = p * self._precond[inds] def _get_translation(self): return self._vec12[0:3] def _set_translation(self, x): self._vec12[0:3] = x def _get_rotation(self): return self._vec12[3:6] def _set_rotation(self, x): self._vec12[3:6] = x def _get_scaling(self): return np.exp(self._vec12[6:9]) def _set_scaling(self, x): self._vec12[6:9] = np.log(x) def _get_pre_rotation(self): return self._vec12[9:12] def _set_pre_rotation(self, x): self._vec12[9:12] = x def _get_direct(self): return self._direct def _get_precond(self): return self._precond translation = property(_get_translation, _set_translation) rotation = property(_get_rotation, _set_rotation) scaling = property(_get_scaling, _set_scaling) pre_rotation = property(_get_pre_rotation, _set_pre_rotation) is_direct = property(_get_direct) precond = property(_get_precond) param = property(_get_param, _set_param) def as_affine(self, dtype='double'): T = to_matrix44(self._vec12, dtype=dtype) if not self._direct: T[:3, :3] *= -1 return T def compose(self, other): """ Compose this transform onto another Parameters ---------- other : Transform transform that we compose onto Returns ------- composed_transform : Transform a transform implementing the composition of self on `other` """ # If other is not an Affine, use either its left compose # method, if available, or the generic compose method if not hasattr(other, 'as_affine'): if hasattr(other, 'left_compose'): return other.left_compose(self) else: return Transform(self.apply).compose(other) # Affine case: choose more capable of input types as output # type other_aff = other.as_affine() self_inds = set(self.param_inds) other_inds = set(other.param_inds) if self_inds.issubset(other_inds): klass = other.__class__ elif other_inds.isssubset(self_inds): klass = self.__class__ else: # neither one contains 
capabilities of the other klass = Affine a = klass() a._precond[:] = self._precond[:] a.from_matrix44(np.dot(self.as_affine(), other_aff)) return a def __str__(self): string = 'translation : %s\n' % str(self.translation) string += 'rotation : %s\n' % str(self.rotation) string += 'scaling : %s\n' % str(self.scaling) string += 'pre-rotation: %s' % str(self.pre_rotation) return string def inv(self): """ Return the inverse affine transform. """ a = self.__class__() a._precond[:] = self._precond[:] a.from_matrix44(spl.inv(self.as_affine())) return a class Affine2D(Affine): param_inds = [0, 1, 5, 6, 7, 11] class Rigid(Affine): param_inds = range(6) def from_matrix44(self, aff): """ Convert a 4x4 matrix describing a rigid transform into a 12-sized vector of natural affine parameters: translation, rotation, log-scale, pre-rotation (to allow for pre-rotation when combined with non-unitary scales). In case the transform has a negative determinant, set the `_direct` attribute to False. """ vec12 = np.zeros((12,)) vec12[:3] = aff[:3, 3] R = aff[:3, :3] if spl.det(R) < 0: R = -R self._direct = False vec12[3:6] = rotation_mat2vec(R) vec12[6:9] = 0.0 self._vec12 = vec12 def __str__(self): string = 'translation : %s\n' % str(self.translation) string += 'rotation : %s\n' % str(self.rotation) return string class Rigid2D(Rigid): param_inds = [0, 1, 5] class Similarity(Affine): param_inds = range(7) def from_matrix44(self, aff): """ Convert a 4x4 matrix describing a similarity transform into a 12-sized vector of natural affine parameters: translation, rotation, log-scale, pre-rotation (to allow for pre-rotation when combined with non-unitary scales). In case the transform has a negative determinant, set the `_direct` attribute to False. """ vec12 = np.zeros((12,)) vec12[:3] = aff[:3, 3] ## A = s R ==> det A = (s)**3 ==> s = (det A)**(1/3) A = aff[:3, :3] detA = spl.det(A) s = np.maximum(np.abs(detA) ** (1 / 3.), TINY) if detA < 0: A = -A self._direct = False vec12[3:6] = rotation_mat2vec(A / s) vec12[6:9] = np.log(s) self._vec12 = vec12 def _set_param(self, p): p = np.asarray(p) self._vec12[range(9)] =\ (p[[0, 1, 2, 3, 4, 5, 6, 6, 6]] * self._precond[range(9)]) param = property(Affine._get_param, _set_param) def __str__(self): string = 'translation : %s\n' % str(self.translation) string += 'rotation : %s\n' % str(self.rotation) string += 'scaling : %s\n' % str(self.scaling[0]) return string class Similarity2D(Similarity): param_inds = [0, 1, 5, 6] def _set_param(self, p): p = np.asarray(p) self._vec12[[0, 1, 5, 6, 7, 8]] =\ (p[[0, 1, 2, 3, 3, 3]] * self._precond[[0, 1, 5, 6, 7, 8]]) param = property(Similarity._get_param, _set_param) affine_transforms = {'affine': Affine, 'affine2d': Affine2D, 'similarity': Similarity, 'similarity2d': Similarity2D, 'rigid': Rigid, 'rigid2d': Rigid2D} nipy-0.3.0/nipy/algorithms/registration/chain_transform.py000066400000000000000000000037051210344137400240660ustar00rootroot00000000000000""" Chain transforms """ from .affine import Affine class ChainTransform(object): def __init__(self, optimizable, pre=None, post=None): """ Create chain transform instance Parameters ---------- optimizable : array or Transform Transform that we are optimizing. If this is an array, then assume it's an affine matrix. pre : None or array or Transform, optional If not None, a transform that should be applied to points before applying the `optimizable` transform. If an array, then assume it's an affine matrix. 
post : None or Transform, optional If not None, a transform that should be applied to points after applying any `pre` transform, and then the `optimizable` transform. If an array, assume it's an affine matrix """ if not hasattr(optimizable, 'param'): raise ValueError('Input transform should be optimizable') if not hasattr(optimizable, 'apply'): optimizable = Affine(optimizable) if not hasattr(pre, 'apply'): pre = Affine(pre) if not hasattr(post, 'apply'): post = Affine(post) self.optimizable = optimizable self.pre = pre self.post = post def apply(self, pts): """ Apply full transformation to points `pts` If there are N points, then `pts` will be N by 3 Parameters ---------- pts : array-like array of points Returns ------- transformed_pts : array N by 3 array of transformed points """ composed = self.post.compose(self.optimizable.compose(self.pre)) return composed.apply(pts) def _set_param(self, param): self.optimizable.param = param def _get_param(self): return self.optimizable.param param = property(_get_param, _set_param, None, 'get/set param') nipy-0.3.0/nipy/algorithms/registration/cubic_spline.c000066400000000000000000000463631210344137400231510ustar00rootroot00000000000000#include "cubic_spline.h" #include #include #include /* Useful marcos */ #define ABS(a) ( (a) > 0.0 ? (a) : (-(a)) ) #define FLOOR(a)((a)>0.0 ? (int)(a):(((int)(a)-a)!= 0.0 ? (int)(a)-1 : (int)(a))) #define ROUND(a)(FLOOR(a+0.5)) #ifdef _MSC_VER #define inline __inline #endif /* Three different boundary conditions are implemented: mode == 0 : 'zero' mode == 1: 'nearest' mode == 2: 'reflect' Depending on the mode, the input coordinate x is mirrored so as to fall within the image bounds [0..ddim] and a weight w is computed. */ #define APPLY_BOUNDARY_CONDITIONS(mode, x, w, ddim) \ if (!_apply_boundary_conditions(mode, ddim, &x, &w)) \ return 0.0; #define COMPUTE_NEIGHBORS(x, ddim, nx, px) \ if (!_mirror_grid_neighbors(x, ddim, &nx, &px)) \ return 0.0; /* The following marco forces numpy to consider a PyArrayIterObject non-contiguous. Otherwise, coordinates won't be updated - don't know whether this is a bug or not. */ #define UPDATE_ITERATOR_COORDS(iter) \ iter->contiguous = 0; static void _cubic_spline_transform1d(double* res, double* src, unsigned int dim, unsigned int res_stride, unsigned int src_stride); static void _cubic_spline_transform(PyArrayObject* res, int axis, double* work); static inline void _copy_double_buffer(double* res, double* src, unsigned int dim, unsigned int src_stride); static inline int _mirrored_position(int x, unsigned int ddim); static inline int _apply_boundary_conditions(int mode, unsigned int ddim, double* x, double* w); static inline int _mirror_grid_neighbors(double x, unsigned int ddim, int* nx, int* px); static inline void _apply_affine_transform(double* Tx, double* Ty, double* Tz, const double* Tvox, size_t x, size_t y, size_t z); /* Numpy import */ void cubic_spline_import_array(void) { import_array(); return; } /* Returns the value of the cubic B-spline function at x */ double cubic_spline_basis (double x) { double y, absx, aux; absx = ABS(x); if (absx >= 2) return 0.0; if (absx < 1) { aux = absx*absx; y = 0.66666666666667 - aux + 0.5*absx*aux; } else { aux = 2 - absx; y = aux*aux*aux / 6.0; } return y; } /* Assumes that src and res are same size and both point to DOUBLE buffers. 
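   The coefficients are obtained with the usual causal/anti-causal recursive
   filtering scheme for cubic B-spline prefiltering (single pole
   z1 = -2 + sqrt(3)); the causal recursion is initialized with the
   mirror-symmetric boundary condition described below. res and src may be
   strided views, hence the explicit res_stride and src_stride arguments.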
*/ static void _cubic_spline_transform1d(double* res, double* src, unsigned int dim, unsigned int res_stride, unsigned int src_stride) { int k; double cp, cm, z1_k; double *buf_src, *buf_res; double z1 = -0.26794919243112; /* -2 + sqrt(3) */ double cz1 = 0.28867513459481; /* z1/(z1^2-1) */ /* Initial value for the causal recursion. We use a mirror symmetric boundary condition for the discrete signal, yielding: cp(0) = (1/2-z1^(2N-2)) \sum_{k=0}^{2N-3} s(k) z1^k s(k), where we set: s(N)=s(N-2), s(N+1)=s(N-3), ..., s(2N-3)=s(1). */ buf_src = src; cp = *buf_src; z1_k = 1; for (k=1; k=0; k--) { */ for (k=1; kao, axis); stride = PyArray_STRIDE((PyArrayObject*)iter->ao, axis)/sizeof(double); /* Apply the cubic spline transform along given axis */ while(iter->index < iter->size) { _copy_double_buffer(work, PyArray_ITER_DATA(iter), dim, stride); _cubic_spline_transform1d(PyArray_ITER_DATA(iter), work, dim, stride, 1); PyArray_ITER_NEXT(iter); } /* Free local structures */ Py_DECREF(iter); return; } void cubic_spline_transform(PyArrayObject* res, const PyArrayObject* src) { double* work; unsigned int axis, aux=0, dimmax=0; /* Copy src into res */ PyArray_CastTo(res, (PyArrayObject*)src); /* Compute the maximum array dimension over axes */ for(axis=0; axisnd; axis++) { aux = PyArray_DIM(res, axis); if (aux > dimmax) dimmax = aux; } /* Allocate auxiliary buffer */ work = (double*)malloc(sizeof(double)*dimmax); /* Apply separable cubic spline transforms */ for(axis=0; axisnd; axis++) _cubic_spline_transform(res, axis, work); /* Free auxiliary buffer */ free(work); return; } double cubic_spline_sample1d (double x, const PyArrayObject* Coef, int mode) { unsigned int ddim = PyArray_DIM(Coef, 0) - 1; unsigned int offset = PyArray_STRIDE(Coef, 0)/sizeof(double); double *coef = PyArray_DATA(Coef); double *buf; int nx, px, xx; double s; double bspx[4]; int posx[4]; double *buf_bspx; int *buf_posx; double w = 1; APPLY_BOUNDARY_CONDITIONS(mode, x, w, ddim); COMPUTE_NEIGHBORS(x, ddim, nx, px); /* Compute the B-spline values as well as the image positions where to find the B-spline coefficients (including mirror conditions) */ buf_bspx = (double*)bspx; buf_posx = (int*)posx; for (xx = nx; xx <= px; xx ++, buf_bspx ++, buf_posx ++) { *buf_bspx = cubic_spline_basis(x-(double)xx); *buf_posx = _mirrored_position(xx, ddim); } /* Compute the interpolated value incrementally */ s = 0.0; buf_bspx = (double*)bspx; buf_posx = (int*)posx; for (xx = nx; xx <= px; xx ++, buf_bspx ++, buf_posx ++) { /* Point towards the coefficient value at position xx */ buf = coef + (*buf_posx)*offset; /* Update signal value */ s += (*buf) * (*buf_bspx); } return w*s; } double cubic_spline_sample2d (double x, double y, const PyArrayObject* Coef, int mode_x, int mode_y) { unsigned int ddimX = PyArray_DIM(Coef, 0) - 1; unsigned int ddimY = PyArray_DIM(Coef, 1) - 1; unsigned int offX = PyArray_STRIDE(Coef, 0)/sizeof(double); unsigned int offY = PyArray_STRIDE(Coef, 1)/sizeof(double); double *coef = PyArray_DATA(Coef); double *buf; int nx, ny, px, py, xx, yy; double s, aux; double bspx[4], bspy[4]; int posx[4], posy[4]; double *buf_bspx, *buf_bspy; int *buf_posx, *buf_posy; int shfty; double wx = 1, wy = 1; APPLY_BOUNDARY_CONDITIONS(mode_x, x, wx, ddimX); COMPUTE_NEIGHBORS(x, ddimX, nx, px); APPLY_BOUNDARY_CONDITIONS(mode_y, y, wy, ddimY); COMPUTE_NEIGHBORS(y, ddimY, ny, py); /* Compute the B-spline values as well as the image positions where to find the B-spline coefficients (including mirror conditions) */ buf_bspx = (double*)bspx; buf_posx = 
(int*)posx; for (xx = nx; xx <= px; xx ++, buf_bspx ++, buf_posx ++) { *buf_bspx = cubic_spline_basis(x-(double)xx); *buf_posx = _mirrored_position(xx, ddimX); } buf_bspy = (double*)bspy; buf_posy = (int*)posy; for (yy = ny; yy <= py; yy ++, buf_bspy ++, buf_posy ++) { *buf_bspy = cubic_spline_basis(y-(double)yy); *buf_posy = _mirrored_position(yy, ddimY); } /* Compute the interpolated value incrementally */ s = 0.0; buf_bspy = (double*)bspy; buf_posy = (int*)posy; for (yy = ny; yy <= py; yy ++, buf_bspy ++, buf_posy ++) { aux = 0.0; buf_bspx = (double*)bspx; buf_posx = (int*)posx; shfty = offY*(*buf_posy); for (xx = nx; xx <= px; xx ++, buf_bspx ++, buf_posx ++) { /* Point towards the coefficient value at position (xx, yy, zz) */ buf = coef + offX*(*buf_posx) + shfty; /* Update signal value */ aux += (*buf) * (*buf_bspx); } s += aux * (*buf_bspy); } return wx*wy*s; } double cubic_spline_sample3d (double x, double y, double z, const PyArrayObject* Coef, int mode_x, int mode_y, int mode_z) { unsigned int ddimX = PyArray_DIM(Coef, 0) - 1; unsigned int ddimY = PyArray_DIM(Coef, 1) - 1; unsigned int ddimZ = PyArray_DIM(Coef, 2) - 1; unsigned int offX = PyArray_STRIDE(Coef, 0)/sizeof(double); unsigned int offY = PyArray_STRIDE(Coef, 1)/sizeof(double); unsigned int offZ = PyArray_STRIDE(Coef, 2)/sizeof(double); double *coef = PyArray_DATA(Coef); double *buf; int nx, ny, nz, px, py, pz; int xx, yy, zz; double s, aux, aux2; double bspx[4], bspy[4], bspz[4]; int posx[4], posy[4], posz[4]; double *buf_bspx, *buf_bspy, *buf_bspz; int *buf_posx, *buf_posy, *buf_posz; int shftyz, shftz; double wx = 1, wy = 1, wz = 1; APPLY_BOUNDARY_CONDITIONS(mode_x, x, wx, ddimX); COMPUTE_NEIGHBORS(x, ddimX, nx, px); APPLY_BOUNDARY_CONDITIONS(mode_y, y, wy, ddimY); COMPUTE_NEIGHBORS(y, ddimY, ny, py); APPLY_BOUNDARY_CONDITIONS(mode_z, z, wz, ddimZ); COMPUTE_NEIGHBORS(z, ddimZ, nz, pz); /* Compute the B-spline values as well as the image positions where to find the B-spline coefficients (including mirror conditions) */ buf_bspx = (double*)bspx; buf_posx = (int*)posx; for (xx = nx; xx <= px; xx ++, buf_bspx ++, buf_posx ++) { *buf_bspx = cubic_spline_basis(x-(double)xx); *buf_posx = _mirrored_position(xx, ddimX); } buf_bspy = (double*)bspy; buf_posy = (int*)posy; for (yy = ny; yy <= py; yy ++, buf_bspy ++, buf_posy ++) { *buf_bspy = cubic_spline_basis(y-(double)yy); *buf_posy = _mirrored_position(yy, ddimY); } buf_bspz = (double*)bspz; buf_posz = (int*)posz; for (zz = nz; zz <= pz; zz ++, buf_bspz ++, buf_posz ++) { *buf_bspz = cubic_spline_basis(z-(double)zz); *buf_posz = _mirrored_position(zz, ddimZ); } /* Compute the interpolated value incrementally */ s = 0.0; buf_bspz = (double*)bspz; buf_posz = (int*)posz; for (zz = nz; zz <= pz; zz ++, buf_bspz ++, buf_posz ++) { aux2 = 0.0; buf_bspy = (double*)bspy; buf_posy = (int*)posy; shftz = offZ*(*buf_posz); for (yy = ny; yy <= py; yy ++, buf_bspy ++, buf_posy ++) { aux = 0.0; buf_bspx = (double*)bspx; buf_posx = (int*)posx; shftyz = offY*(*buf_posy) + shftz; for (xx = nx; xx <= px; xx ++, buf_bspx ++, buf_posx ++) { /* Point towards the coefficient value at position (xx, yy, zz) */ buf = coef + offX*(*buf_posx) + shftyz; /* Update signal value */ aux += (*buf) * (*buf_bspx); } /* end loop on x */ aux2 += aux * (*buf_bspy); } /* end loop on y */ s += aux2 * (*buf_bspz); } /* end loop on z */ return wx*wy*wz*s; } double cubic_spline_sample4d (double x, double y, double z, double t, const PyArrayObject* Coef, int mode_x, int mode_y, int mode_z, int mode_t) { unsigned int 
ddimX = PyArray_DIM(Coef, 0) - 1; unsigned int ddimY = PyArray_DIM(Coef, 1) - 1; unsigned int ddimZ = PyArray_DIM(Coef, 2) - 1; unsigned int ddimT = PyArray_DIM(Coef, 3) - 1; unsigned int offX = PyArray_STRIDE(Coef, 0)/sizeof(double); unsigned int offY = PyArray_STRIDE(Coef, 1)/sizeof(double); unsigned int offZ = PyArray_STRIDE(Coef, 2)/sizeof(double); unsigned int offT = PyArray_STRIDE(Coef, 3)/sizeof(double); double *coef = PyArray_DATA(Coef); double *buf; int nx, ny, nz, nt, px, py, pz, pt; int xx, yy, zz, tt; double s, aux, aux2, aux3; double bspx[4], bspy[4], bspz[4], bspt[4]; int posx[4], posy[4], posz[4], post[4]; double *buf_bspx, *buf_bspy, *buf_bspz, *buf_bspt; int *buf_posx, *buf_posy, *buf_posz, *buf_post; int shftyzt, shftzt, shftt; double wx = 1, wy = 1, wz = 1, wt = 1; APPLY_BOUNDARY_CONDITIONS(mode_x, x, wx, ddimX); COMPUTE_NEIGHBORS(x, ddimX, nx, px); APPLY_BOUNDARY_CONDITIONS(mode_y, y, wy, ddimY); COMPUTE_NEIGHBORS(y, ddimY, ny, py); APPLY_BOUNDARY_CONDITIONS(mode_z, z, wz, ddimZ); COMPUTE_NEIGHBORS(z, ddimZ, nz, pz); APPLY_BOUNDARY_CONDITIONS(mode_t, t, wt, ddimT); COMPUTE_NEIGHBORS(t, ddimT, nt, pt); /* Compute the B-spline values as well as the image positions where to find the B-spline coefficients (including mirror conditions) */ buf_bspx = (double*)bspx; buf_posx = (int*)posx; for (xx = nx; xx <= px; xx ++, buf_bspx ++, buf_posx ++) { *buf_bspx = cubic_spline_basis(x-(double)xx); *buf_posx = _mirrored_position(xx, ddimX); } buf_bspy = (double*)bspy; buf_posy = (int*)posy; for (yy = ny; yy <= py; yy ++, buf_bspy ++, buf_posy ++) { *buf_bspy = cubic_spline_basis(y-(double)yy); *buf_posy = _mirrored_position(yy, ddimY); } buf_bspz = (double*)bspz; buf_posz = (int*)posz; for (zz = nz; zz <= pz; zz ++, buf_bspz ++, buf_posz ++) { *buf_bspz = cubic_spline_basis(z-(double)zz); *buf_posz = _mirrored_position(zz, ddimZ); } buf_bspt = (double*)bspt; buf_post = (int*)post; for (tt = nt; tt <= pt; tt ++, buf_bspt ++, buf_post ++) { *buf_bspt = cubic_spline_basis(t-(double)tt); *buf_post = _mirrored_position(tt, ddimT); } /* Compute the interpolated value incrementally by visiting the neighbors in turn */ s = 0.0; buf_bspt = (double*)bspt; buf_post = (int*)post; for (tt = nt; tt <= pt; tt ++, buf_bspt ++, buf_post ++) { aux3 = 0.0; buf_bspz = (double*)bspz; buf_posz = (int*)posz; shftt = offT*(*buf_post); for (zz = nz; zz <= pz; zz ++, buf_bspz ++, buf_posz ++) { aux2 = 0.0; buf_bspy = (double*)bspy; buf_posy = (int*)posy; shftzt = offZ*(*buf_posz) + shftt; for (yy = ny; yy <= py; yy ++, buf_bspy ++, buf_posy ++) { aux = 0.0; buf_bspx = (double*)bspx; buf_posx = (int*)posx; shftyzt = offY*(*buf_posy) + shftzt; for (xx = nx; xx <= px; xx ++, buf_bspx ++, buf_posx ++) { /* Point towards the coefficient value at position (xx, yy, zz, tt) */ buf = coef + offX*(*buf_posx) + shftyzt; /* Update signal value */ aux += (*buf) * (*buf_bspx); } /* end loop on x */ aux2 += aux * (*buf_bspy); } /* end loop on y */ aux3 += aux2 * (*buf_bspz); } /* end loop on z */ s += aux3 * (*buf_bspt); } /* end loop on t */ return wx*wy*wz*wt*s; } /* Resample a 3d image submitted to an affine transformation. Tvox is the voxel transformation from the image to the destination grid. 
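   Tvox points to the first three rows of a 4x4 voxel transform stored in
   row-major (C) order. For each voxel (x, y, z) of the destination grid,
   the corresponding source coordinates are computed by
   _apply_affine_transform() and the spline coefficient image is sampled
   there with cubic_spline_sample3d(). When cast_integer is non-zero the
   interpolated value is rounded, and it is additionally clipped to
   non-negative values when cast_integer == 2 (unsigned integer output).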
*/ void cubic_spline_resample3d(PyArrayObject* im_resampled, const PyArrayObject* im, const double* Tvox, int cast_integer, int mode_x, int mode_y, int mode_z) { double i1; PyObject* py_i1; PyArrayObject* im_spline_coeff; PyArrayIterObject* imIter = (PyArrayIterObject*)PyArray_IterNew((PyObject*)im_resampled); unsigned int x, y, z; unsigned dimX = PyArray_DIM(im, 0); unsigned dimY = PyArray_DIM(im, 1); unsigned dimZ = PyArray_DIM(im, 2); npy_intp dims[3] = {dimX, dimY, dimZ}; double Tx, Ty, Tz; /* Compute the spline coefficient image */ im_spline_coeff = (PyArrayObject*)PyArray_SimpleNew(3, dims, NPY_DOUBLE); cubic_spline_transform(im_spline_coeff, im); /* Force iterator coordinates to be updated */ UPDATE_ITERATOR_COORDS(imIter); /* Resampling loop */ while(imIter->index < imIter->size) { x = imIter->coordinates[0]; y = imIter->coordinates[1]; z = imIter->coordinates[2]; _apply_affine_transform(&Tx, &Ty, &Tz, Tvox, x, y, z); i1 = cubic_spline_sample3d(Tx, Ty, Tz, im_spline_coeff, mode_x, mode_y, mode_z); if (cast_integer) { i1 = ROUND(i1); if (cast_integer == 2) if (i1 < 0) i1 = 0; } /* Copy interpolated value into numpy array */ py_i1 = PyFloat_FromDouble(i1); PyArray_SETITEM(im_resampled, PyArray_ITER_DATA(imIter), py_i1); Py_DECREF(py_i1); /* Increment iterator */ PyArray_ITER_NEXT(imIter); } /* Free memory */ Py_DECREF(imIter); Py_DECREF(im_spline_coeff); return; } static inline void _apply_affine_transform(double* Tx, double* Ty, double* Tz, const double* Tvox, size_t x, size_t y, size_t z) { double* bufTvox = (double*)Tvox; *Tx = (*bufTvox)*x; bufTvox++; *Tx += (*bufTvox)*y; bufTvox++; *Tx += (*bufTvox)*z; bufTvox++; *Tx += *bufTvox; bufTvox++; *Ty = (*bufTvox)*x; bufTvox++; *Ty += (*bufTvox)*y; bufTvox++; *Ty += (*bufTvox)*z; bufTvox++; *Ty += *bufTvox; bufTvox++; *Tz = (*bufTvox)*x; bufTvox++; *Tz += (*bufTvox)*y; bufTvox++; *Tz += (*bufTvox)*z; bufTvox++; *Tz += *bufTvox; return; } /* Convert an input grid coordinate x into another grid coordinate within [0, ddim], possibly using a reflection. This function implicitely assumes that -ddim < x < 2*ddim */ static inline int _mirrored_position(int x, unsigned int ddim) { if (x < 0) return -x; else if (x > ddim) return 2 * ddim - x; else return x; } /* Depending on the chosen mode, mirror the position and set the weight. */ static inline int _apply_boundary_conditions(int mode, unsigned int ddim, double* x, double* w) { int ok = 1; unsigned int dim = ddim + 1; int neg_ddim; unsigned int two_ddim; if (mode == 0) { if (*x < -1) ok = 0; else if (*x < 0) { *w = 1 + *x; *x = 0; } else if (*x > dim) ok = 0; else if (*x > ddim) { *w = dim - *x; *x = ddim; } } else if (mode == 1) { if (*x < 0) *x = 0; else if (*x > ddim) *x = ddim; } else{ /* mode==2 */ neg_ddim = -ddim; two_ddim = 2 * ddim; if ((*x < neg_ddim) || (*x > two_ddim)) ok = 0; } return ok; } /* Compute left and right cubic spline neighbors in the image grid mirrored once on each side. Returns 0 if no neighbor can be found. */ static inline int _mirror_grid_neighbors(double x, unsigned int ddim, int* nx, int* px) { int ok = 0; *px = (int)(x+ddim+2); if ((*px>=3) && (*px<=3*ddim)) { ok = 1; *px = *px-ddim; *nx = *px-3; } return ok; } static inline int _neighbors_zero_outside(double x, unsigned int ddim, int* nx, int* px, double* weight) { int ok = 0, aux; unsigned int dim = ddim+1; *weight = 1; if ((x>-1) && (xdim) { /* ddim<=x #include /* Numpy import */ extern void cubic_spline_import_array(void); /*! 
\brief Cubic spline basis function \param x input value */ extern double cubic_spline_basis(double x); /*! \brief Cubic spline transform of a one-dimensional signal \param src input signal \param res output signal (same size) */ extern void cubic_spline_transform(PyArrayObject* res, const PyArrayObject* src); extern double cubic_spline_sample1d(double x, const PyArrayObject* coef, int mode); extern double cubic_spline_sample2d(double x, double y, const PyArrayObject* coef, int mode_x, int mode_y); extern double cubic_spline_sample3d(double x, double y, double z, const PyArrayObject* coef, int mode_x, int mode_y, int mode_z); extern double cubic_spline_sample4d(double x, double y, double z, double t, const PyArrayObject* coef, int mode_x, int mode_y, int mode_z, int mode_t); extern void cubic_spline_resample3d(PyArrayObject* im_resampled, const PyArrayObject* im, const double* Tvox, int cast_integer, int mode_x, int mode_y, int mode_z); #ifdef __cplusplus } #endif #endif nipy-0.3.0/nipy/algorithms/registration/groupwise_registration.py000066400000000000000000000726641210344137400255410ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import warnings import numpy as np from nibabel.affines import apply_affine from ...fixes.nibabel import io_orientation from ...core.image.image_spaces import (make_xyz_image, xyz_affine, as_xyz_image) from .optimizer import configure_optimizer, use_derivatives from .affine import Rigid from ._registration import (_cspline_transform, _cspline_sample3d, _cspline_sample4d) # Module globals VERBOSE = True # enables online print statements SLICE_ORDER = 'ascending' INTERLEAVED = None OPTIMIZER = 'ncg' XTOL = 1e-5 FTOL = 1e-5 GTOL = 1e-5 STEPSIZE = 1e-6 SMALL = 1e-20 MAXITER = 64 MAXFUN = None BORDERS = 1, 1, 1 REFSCAN = 0 EXTRAPOLATE_SPACE = 'reflect' EXTRAPOLATE_TIME = 'reflect' LOOPS = 5 # loops within each run BETWEEN_LOOPS = 5 # loops used to realign different runs SPEEDUP = 5 # image sub-sampling factor for speeding up """ # How to tune those parameters for a multi-resolution implementation LOOPS = 5, 1 BETWEEN_LOOPS = 5, 1 SPEEDUP = 5, 2 """ def interp_slice_order(Z, slice_order): Z = np.asarray(Z) nslices = len(slice_order) aux = np.asarray(list(slice_order) + [slice_order[0] + nslices]) Zf = np.floor(Z).astype('int') w = Z - Zf Zal = Zf % nslices Za = Zal + w ret = (1 - w) * aux[Zal] + w * aux[Zal + 1] ret += (Z - Za) return ret def scanner_coords(xyz, affine, from_world, to_world): Tv = np.dot(from_world, np.dot(affine, to_world)) XYZ = apply_affine(Tv, xyz) return XYZ[:, 0], XYZ[:, 1], XYZ[:, 2] def make_grid(dims, subsampling=(1, 1, 1), borders=(0, 0, 0)): slices = [slice(b, d - b, s)\ for d, s, b in zip(dims, subsampling, borders)] xyz = np.mgrid[slices] xyz = np.rollaxis(xyz, 0, 4) xyz = np.reshape(xyz, [np.prod(xyz.shape[0:-1]), 3]) return xyz class Image4d(object): """ Class to represent a sequence of 3d scans (possibly acquired on a slice-by-slice basis). Object remains empty until the data array is actually loaded in memory. Parameters ---------- data : nd array or proxy (function that actually gets the array) """ def __init__(self, data, affine, tr, tr_slices=None, start=0.0, slice_order=SLICE_ORDER, interleaved=INTERLEAVED, slice_info=None): """ Configure fMRI acquisition time parameters. 
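        If `tr_slices` is None, it defaults to `tr` divided by the number of
        slices (continuous acquisition). `slice_order` may be 'ascending',
        'descending' or an explicit array of slice indexes; when given as a
        string with `interleaved` set to True, even-indexed slices are
        assumed to be acquired before odd-indexed ones. The slice axis and
        direction are guessed from the affine unless `slice_info` is
        provided.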
""" self.affine = np.asarray(affine) self.tr = float(tr) self.start = float(start) self.interleaved = bool(interleaved) # guess the slice axis and direction (z-axis) if slice_info == None: orient = io_orientation(self.affine) self.slice_axis = int(np.where(orient[:, 0] == 2)[0]) self.slice_direction = int(orient[self.slice_axis, 1]) else: self.slice_axis = int(slice_info[0]) self.slice_direction = int(slice_info[1]) # unformatted parameters self._tr_slices = tr_slices self._slice_order = slice_order if isinstance(data, np.ndarray): self._data = data self._shape = data.shape self._get_data = None self._init_timing_parameters() else: self._data = None self._shape = None self._get_data = data def _load_data(self): self._data = self._get_data() self._shape = self._data.shape self._init_timing_parameters() def get_data(self): if self._data == None: self._load_data() return self._data def get_shape(self): if self._shape == None: self._load_data() return self._shape def _init_timing_parameters(self): # Number of slices nslices = self.get_shape()[self.slice_axis] self.nslices = nslices # Default slice repetition time (no silence) if self._tr_slices == None: self.tr_slices = self.tr / float(nslices) else: self.tr_slices = float(self._tr_slices) # Set slice order if isinstance(self._slice_order, str): if not self.interleaved: aux = range(nslices) else: aux = range(nslices)[0::2] + range(nslices)[1::2] if self._slice_order == 'descending': aux.reverse() self.slice_order = np.array(aux) else: # Verify correctness of provided slice indexes provided_slices = np.array(sorted(self._slice_order)) if np.any(provided_slices != np.arange(nslices)): raise ValueError( "Incorrect slice indexes were provided. There are %d " "slices in the volume, indexes should start from 0 and " "list all slices. " "Provided slice_order: %s" % (nslices, self._slice_order)) self.slice_order = np.asarray(self._slice_order) def z_to_slice(self, z): """ Account for the fact that slices may be stored in reverse order wrt the scanner coordinate system convention (slice 0 == bottom of the head) """ if self.slice_direction < 0: return self.nslices - 1 - z else: return z def scanner_time(self, zv, t): """ tv = scanner_time(zv, t) zv, tv are grid coordinates; t is an actual time value. 
""" corr = self.tr_slices * interp_slice_order(self.z_to_slice(zv), self.slice_order) return (t - self.start - corr) / self.tr def free_data(self): if not self._get_data == None: self._data = None class Realign4dAlgorithm(object): def __init__(self, im4d, affine_class=Rigid, transforms=None, time_interp=True, subsampling=(1, 1, 1), borders=BORDERS, optimizer=OPTIMIZER, optimize_template=True, xtol=XTOL, ftol=FTOL, gtol=GTOL, stepsize=STEPSIZE, maxiter=MAXITER, maxfun=MAXFUN, refscan=REFSCAN): self.dims = im4d.get_shape() self.nscans = self.dims[3] self.xyz = make_grid(self.dims[0:3], subsampling, borders) masksize = self.xyz.shape[0] self.data = np.zeros([masksize, self.nscans], dtype='double') # Initialize space/time transformation parameters self.affine = im4d.affine self.inv_affine = np.linalg.inv(self.affine) if transforms == None: self.transforms = [affine_class() for scan in range(self.nscans)] else: self.transforms = transforms self.scanner_time = im4d.scanner_time self.timestamps = im4d.tr * np.arange(self.nscans) # Compute the 4d cubic spline transform self.time_interp = time_interp if time_interp: self.cbspline = _cspline_transform(im4d.get_data()) else: self.cbspline = np.zeros(self.dims, dtype='double') for t in range(self.dims[3]): self.cbspline[:, :, :, t] =\ _cspline_transform(im4d.get_data()[:, :, :, t]) # The reference scan conventionally defines the head # coordinate system self.optimize_template = optimize_template if not optimize_template and refscan == None: self.refscan = REFSCAN else: self.refscan = refscan # Set the minimization method self.set_fmin(optimizer, stepsize, xtol=xtol, ftol=ftol, gtol=gtol, maxiter=maxiter, maxfun=maxfun) # Auxiliary array for realignment estimation self._res = np.zeros(masksize, dtype='double') self._res0 = np.zeros(masksize, dtype='double') self._aux = np.zeros(masksize, dtype='double') self.A = np.zeros((masksize, self.transforms[0].param.size), dtype='double') self._pc = None def resample(self, t): """ Resample a particular time frame on the (sub-sampled) working grid. 
x,y,z,t are "head" grid coordinates X,Y,Z,T are "scanner" grid coordinates """ X, Y, Z = scanner_coords(self.xyz, self.transforms[t].as_affine(), self.inv_affine, self.affine) if self.time_interp: T = self.scanner_time(Z, self.timestamps[t]) _cspline_sample4d(self.data[:, t], self.cbspline, X, Y, Z, T, mx=EXTRAPOLATE_SPACE, my=EXTRAPOLATE_SPACE, mz=EXTRAPOLATE_SPACE, mt=EXTRAPOLATE_TIME) else: _cspline_sample3d(self.data[:, t], self.cbspline[:, :, :, t], X, Y, Z, mx=EXTRAPOLATE_SPACE, my=EXTRAPOLATE_SPACE, mz=EXTRAPOLATE_SPACE) def resample_full_data(self): if VERBOSE: print('Gridding...') xyz = make_grid(self.dims[0:3]) res = np.zeros(self.dims) for t in range(self.nscans): if VERBOSE: print('Fully resampling scan %d/%d' % (t + 1, self.nscans)) X, Y, Z = scanner_coords(xyz, self.transforms[t].as_affine(), self.inv_affine, self.affine) if self.time_interp: T = self.scanner_time(Z, self.timestamps[t]) _cspline_sample4d(res[:, :, :, t], self.cbspline, X, Y, Z, T, mt='nearest') else: _cspline_sample3d(res[:, :, :, t], self.cbspline[:, :, :, t], X, Y, Z) return res def set_fmin(self, optimizer, stepsize, **kwargs): """ Return the minimization function """ self.stepsize = stepsize self.optimizer = optimizer self.optimizer_kwargs = kwargs self.optimizer_kwargs.setdefault('xtol', XTOL) self.optimizer_kwargs.setdefault('ftol', FTOL) self.optimizer_kwargs.setdefault('gtol', GTOL) self.optimizer_kwargs.setdefault('maxiter', MAXITER) self.optimizer_kwargs.setdefault('maxfun', MAXFUN) self.use_derivatives = use_derivatives(self.optimizer) def init_instant_motion(self, t): """ Pre-compute and cache some constants (at fixed time) for repeated computations of the alignment energy. The idea is to decompose the average temporal variance via: V = (n-1)/n V* + (n-1)/n^2 (x-m*)^2 with x the considered volume at time t, and m* the mean of all resampled volumes but x. Only the second term is variable when one volumes while the others are fixed. 
A similar decomposition is used for the global variance, so we end up with: V/V0 = [nV* + (x-m*)^2] / [nV0* + (x-m0*)^2] """ fixed = range(self.nscans) fixed.remove(t) aux = self.data[:, fixed] if self.optimize_template: self.mu = np.mean(aux, 1) self.offset = self.nscans * np.mean((aux.T - self.mu) ** 2) self.mu0 = np.mean(aux) self.offset0 = self.nscans * np.mean((aux - self.mu0) ** 2) self._t = t self._pc = None def set_transform(self, t, pc): self.transforms[t].param = pc self.resample(t) def _init_energy(self, pc): if pc is self._pc: return self.set_transform(self._t, pc) self._pc = pc self._res[:] = self.data[:, self._t] - self.mu[:] self._V = np.maximum(self.offset + np.mean(self._res ** 2), SMALL) self._res0[:] = self.data[:, self._t] - self.mu0 self._V0 = np.maximum(self.offset0 + np.mean(self._res0 ** 2), SMALL) if self.use_derivatives: # linearize the data wrt the transform parameters # use the auxiliary array to save the current resampled data self._aux[:] = self.data[:, self._t] basis = np.eye(6) for j in range(pc.size): self.set_transform(self._t, pc + self.stepsize * basis[j]) self.A[:, j] = (self.data[:, self._t] - self._aux)\ / self.stepsize self.transforms[self._t].param = pc self.data[:, self._t] = self._aux[:] # pre-compute gradient and hessian of numerator and # denominator c = 2 / float(self.data.shape[0]) self._dV = c * np.dot(self.A.T, self._res) self._dV0 = c * np.dot(self.A.T, self._res0) self._H = c * np.dot(self.A.T, self.A) def _energy(self): """ The alignment energy is defined as the log-ratio between the average temporal variance in the sequence and the global spatio-temporal variance. """ return np.log(self._V / self._V0) def _energy_gradient(self): return self._dV / self._V - self._dV0 / self._V0 def _energy_hessian(self): return (1 / self._V - 1 / self._V0) * self._H\ - np.dot(self._dV, self._dV.T) / np.maximum(self._V ** 2, SMALL)\ + np.dot(self._dV0, self._dV0.T) / np.maximum(self._V0 ** 2, SMALL) def estimate_instant_motion(self, t): """ Estimate motion parameters at a particular time. """ if VERBOSE: print('Estimating motion at time frame %d/%d...' % (t + 1, self.nscans)) def f(pc): self._init_energy(pc) return self._energy() def fprime(pc): self._init_energy(pc) return self._energy_gradient() def fhess(pc): self._init_energy(pc) return self._energy_hessian() self.init_instant_motion(t) fmin, args, kwargs =\ configure_optimizer(self.optimizer, fprime=fprime, fhess=fhess, **self.optimizer_kwargs) # With scipy >= 0.9, some scipy minimization functions like # fmin_bfgs may crash due to the subroutine # `scalar_search_armijo` returning None as a stepsize when # unhappy about the objective function. This seems to have the # potential to occur in groupwise registration when using # strong image subsampling, i.e. at the coarser levels of the # multiscale pyramid. To avoid crashes, we insert a try/catch # instruction. try: pc = fmin(f, self.transforms[t].param, disp=VERBOSE, *args, **kwargs) self.set_transform(t, pc) except: warnings.warn('Minimization failed') def estimate_motion(self): """ Optimize motion parameters for the whole sequence. All the time frames are initially resampled according to the current space/time transformation, the parameters of which are further optimized sequentially. 
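        The template is initialized to the reference scan (`refscan`) and,
        when `optimize_template` is True, is re-estimated as the mean of the
        other resampled volumes during optimization; each time frame is then
        registered to it in turn via `estimate_instant_motion`.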
""" for t in range(self.nscans): if VERBOSE: print('Resampling scan %d/%d' % (t + 1, self.nscans)) self.resample(t) # Set the template as the reference scan (will be overwritten # if template is to be optimized) if not hasattr(self, 'template'): self.mu = self.data[:, self.refscan].copy() for t in range(self.nscans): self.estimate_instant_motion(t) if VERBOSE: print(self.transforms[t]) def align_to_refscan(self): """ The `motion_estimate` method aligns scans with an online template so that spatial transforms map some average head space to the scanner space. To conventionally redefine the head space as being aligned with some reference scan, we need to right compose each head_average-to-scanner transform with the refscan's 'to head_average' transform. """ if self.refscan == None: return Tref_inv = self.transforms[self.refscan].inv() for t in range(self.nscans): self.transforms[t] = (self.transforms[t]).compose(Tref_inv) def resample4d(im4d, transforms, time_interp=True): """ Resample a 4D image according to the specified sequence of spatial transforms, using either 4D interpolation if `time_interp` is True and 3D interpolation otherwise. """ r = Realign4dAlgorithm(im4d, transforms=transforms, time_interp=time_interp) res = r.resample_full_data() im4d.free_data() return res def adjust_subsampling(speedup, dims): dims = np.array(dims) aux = np.maximum(speedup * dims / np.prod(dims) ** (1 / 3.), [1, 1, 1]) return aux.astype('int') def single_run_realign4d(im4d, affine_class=Rigid, time_interp=True, loops=LOOPS, speedup=SPEEDUP, borders=BORDERS, optimizer=OPTIMIZER, xtol=XTOL, ftol=FTOL, gtol=GTOL, stepsize=STEPSIZE, maxiter=MAXITER, maxfun=MAXFUN, refscan=REFSCAN): """ Realign a single run in space and time. Parameters ---------- im4d : Image4d instance speedup : int or sequence If a sequence, implement a multi-scale """ if not type(loops) in (list, tuple, np.array): loops = [loops] repeats = len(loops) def format_arg(x): if not type(x) in (list, tuple, np.array): x = [x for i in range(repeats)] else: if not len(x) == repeats: raise ValueError('inconsistent length in arguments') return x speedup = format_arg(speedup) optimizer = format_arg(optimizer) xtol = format_arg(xtol) ftol = format_arg(ftol) gtol = format_arg(gtol) stepsize = format_arg(stepsize) maxiter = format_arg(maxiter) maxfun = format_arg(maxfun) transforms = None opt_params = zip(loops, speedup, optimizer, xtol, ftol, gtol, stepsize, maxiter, maxfun) for loops_, speedup_, optimizer_, xtol_, ftol_, gtol_,\ stepsize_, maxiter_, maxfun_ in opt_params: subsampling = adjust_subsampling(speedup_, im4d.get_shape()[0:3]) r = Realign4dAlgorithm(im4d, transforms=transforms, affine_class=affine_class, time_interp=time_interp, subsampling=subsampling, borders=borders, refscan=refscan, optimizer=optimizer_, xtol=xtol_, ftol=ftol_, gtol=gtol_, stepsize=stepsize_, maxiter=maxiter_, maxfun=maxfun_) for loop in range(loops_): r.estimate_motion() r.align_to_refscan() transforms = r.transforms im4d.free_data() return transforms def realign4d(runs, affine_class=Rigid, time_interp=True, align_runs=True, loops=LOOPS, between_loops=BETWEEN_LOOPS, speedup=SPEEDUP, borders=BORDERS, optimizer=OPTIMIZER, xtol=XTOL, ftol=FTOL, gtol=GTOL, stepsize=STEPSIZE, maxiter=MAXITER, maxfun=MAXFUN, refscan=REFSCAN): """ Parameters ---------- runs : list of Image4d objects Returns ------- transforms : list nested list of rigid transformations transforms map an 'ideal' 4d grid (conventionally aligned with the first scan of the first run) to the 'acquisition' 4d grid for 
each run """ # Single-session case if not type(runs) in (list, tuple, np.array): runs = [runs] nruns = len(runs) if nruns == 1: align_runs = False # Correct motion and slice timing in each sequence separately transforms = [single_run_realign4d(run, affine_class=affine_class, time_interp=time_interp, loops=loops, speedup=speedup, borders=borders, optimizer=optimizer, xtol=xtol, ftol=ftol, gtol=gtol, stepsize=stepsize, maxiter=maxiter, maxfun=maxfun, refscan=refscan) for run in runs] if not align_runs: return transforms, transforms, None # Correct between-session motion using the mean image of each # corrected run, and creating a fake time series with no temporal # smoothness ## FIXME: check that all runs have the same to-world transform mean_img_shape = list(runs[0].get_shape()[0:3]) + [nruns] mean_img_data = np.zeros(mean_img_shape) for i in range(nruns): corr_run = resample4d(runs[i], transforms=transforms[i], time_interp=time_interp) mean_img_data[..., i] = corr_run.mean(3) del corr_run mean_img = Image4d(mean_img_data, affine=runs[0].affine, tr=1.0, tr_slices=0.0) transfo_mean = single_run_realign4d(mean_img, affine_class=affine_class, time_interp=False, loops=between_loops, speedup=speedup, borders=borders, optimizer=optimizer, xtol=xtol, ftol=ftol, gtol=gtol, stepsize=stepsize, maxiter=maxiter, maxfun=maxfun) # Compose transformations for each run ctransforms = [None for i in range(nruns)] for i in range(nruns): ctransforms[i] = [t.compose(transfo_mean[i]) for t in transforms[i]] return ctransforms, transforms, transfo_mean class Realign4d(object): def __init__(self, images, affine_class=Rigid): self._generic_init(images, affine_class, SLICE_ORDER, INTERLEAVED, 1.0, 0.0, 0.0, False, None) def _generic_init(self, images, affine_class, slice_order, interleaved, tr, tr_slices, start, time_interp, slice_info): if slice_order == None: slice_order = SLICE_ORDER if time_interp: raise ValueError('Slice order is requested' + ' with time interpolation switched on') time_interp = False if tr == None: raise ValueError('Repetition time cannot be None') if not type(images) in (list, tuple, np.array): images = [images] self._runs = [] self.affine_class = affine_class for im in images: xyz_img = as_xyz_image(im) self._runs.append(Image4d(xyz_img.get_data, xyz_affine(xyz_img), tr=tr, tr_slices=tr_slices, start=start, slice_order=slice_order, interleaved=interleaved, slice_info=slice_info)) self._transforms = [None for run in self._runs] self._within_run_transforms = [None for run in self._runs] self._mean_transforms = [None for run in self._runs] self._time_interp = time_interp def estimate(self, loops=LOOPS, between_loops=None, align_runs=True, speedup=SPEEDUP, borders=BORDERS, optimizer=OPTIMIZER, xtol=XTOL, ftol=FTOL, gtol=GTOL, stepsize=STEPSIZE, maxiter=MAXITER, maxfun=MAXFUN, refscan=REFSCAN): if between_loops == None: between_loops = loops t = realign4d(self._runs, affine_class=self.affine_class, time_interp=self._time_interp, align_runs=align_runs, loops=loops, between_loops=between_loops, speedup=speedup, borders=borders, optimizer=optimizer, xtol=xtol, ftol=ftol, gtol=gtol, stepsize=stepsize, maxiter=maxiter, maxfun=maxfun, refscan=refscan) self._transforms, self._within_run_transforms,\ self._mean_transforms = t def resample(self, r=None, align_runs=True): """ Return the resampled run number r as a 4d nipy-like image. Returns all runs as a list of images if r == None. 
""" if align_runs: transforms = self._transforms else: transforms = self._within_run_transforms runs = range(len(self._runs)) if r == None: data = [resample4d(self._runs[r], transforms=transforms[r], time_interp=self._time_interp) for r in runs] return [make_xyz_image(data[r], self._runs[r].affine, 'scanner') for r in runs] else: data = resample4d(self._runs[r], transforms=transforms[r], time_interp=self._time_interp) return make_xyz_image(data, self._runs[r].affine, 'scanner') class FmriRealign4d(Realign4d): def __init__(self, images, slice_order, interleaved=None, tr=1.0, tr_slices=None, start=0.0, time_interp=True, affine_class=Rigid, slice_info=None): """ Spatiotemporal realignment class for fMRI series. Parameters ---------- images : image or list of images Single or multiple input 4d images representing one or several fMRI runs. tr : float Inter-scan repetition time, i.e. the time elapsed between two consecutive scans. The unit in which `tr` is given is arbitrary although it needs to be consistent with the `tr_slices` and `start` arguments. tr_slices : float Inter-slice repetition time, same as tr for slices. If None, acquisition is assumed continuous and `tr_slices` is set to `tr` divided by the number of slices. start : float Starting acquisition time respective to the implicit time origin. slice_order : str or array-like If str, one of {'ascending', 'descending'}. If array-like, then the order in which the slices were collected in time. For instance, the following represents an ascending contiguous sequence: slice_order = [0, 1, 2, ...] interleaved : bool Deprecated. Whether slice acquisition order is interleaved. Ignored if `slice_order` is array-like. If slice_order=='ascending' and interleaved==True, the assumed slice order is: [0, 2, 4, ..., 1, 3, 5, ...] If slice_order=='descending' and interleaved==True, the assumed slice order is: [N-1, N-3, N-5, ..., N-2, N-4, N-6] Given that there exist other types of interleaved acquisitions depending on scanner settings and manufacturers, it is strongly recommended to input the slice_order as an array unless you are sure what you are doing. slice_info : None or tuple, optional None, or a tuple with slice axis as the first element and direction as the second, for instance (2, 1). If None, then guess the slice axis, and direction, as the closest to the z axis, as estimated from the affine. """ if not interleaved == None: warnings.warn('interleaved keyword is deprecated. 
Please input explicit slice order instead.') self._generic_init(images, affine_class, slice_order, interleaved, tr, tr_slices, start, time_interp, slice_info) nipy-0.3.0/nipy/algorithms/registration/histogram_registration.py000066400000000000000000000343221210344137400254770ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Intensity-based image registration """ import numpy as np from ...core.image.image_spaces import (make_xyz_image, as_xyz_image, xyz_affine) from .optimizer import configure_optimizer from .affine import inverse_affine, subgrid_affine, affine_transforms from .chain_transform import ChainTransform from .similarity_measures import similarity_measures as _sms from ._registration import _joint_histogram MAX_INT = np.iinfo(np.intp).max # Module globals VERBOSE = True # enables online print statements OPTIMIZER = 'powell' XTOL = 1e-2 FTOL = 1e-2 GTOL = 1e-3 MAXITER = 25 MAXFUN = None CLAMP_DTYPE = 'short' # do not edit BINS = 256 SIMILARITY = 'crl1' INTERP = 'pv' NPOINTS = 64 ** 3 # Dictionary of interpolation methods (partial volume, trilinear, # random) interp_methods = {'pv': 0, 'tri': 1, 'rand': -1} class HistogramRegistration(object): """ A class to reprensent a generic intensity-based image registration algorithm. """ def __init__(self, from_img, to_img, from_bins=BINS, to_bins=None, from_mask=None, to_mask=None, similarity=SIMILARITY, interp=INTERP, **kwargs): """ Creates a new histogram registration object. Parameters ---------- from_img : nipy-like image `From` image to_img : nipy-like image `To` image from_bins : integer Number of histogram bins to represent the `from` image to_bins : integer Number of histogram bins to represent the `to` image from_mask : array-like Mask to apply to the `from` image to_mask : array-like Mask to apply to the `to` image similarity : str or callable Cost-function for assessing image similarity. If a string, one of 'cc': correlation coefficient, 'cr': correlation ratio, 'crl1': L1-norm based correlation ratio, 'mi': mutual information, 'nmi': normalized mutual information, 'slr': supervised log-likelihood ratio. If a callable, it should take a two-dimensional array representing the image joint histogram as an input and return a float. interp : str Interpolation method. One of 'pv': Partial volume, 'tri': Trilinear, 'rand': Random interpolation. See ``joint_histogram.c`` """ # Function assumes xyx_affine for inputs from_img = as_xyz_image(from_img) to_img = as_xyz_image(to_img) # Binning sizes if to_bins is None: to_bins = from_bins # Clamping of the `from` image. The number of bins may be # overriden if unnecessarily large. data, from_bins = clamp(from_img.get_data(), bins=from_bins, mask=from_mask) self._from_img = make_xyz_image(data, xyz_affine(from_img), 'scanner') # Set field of view in the `from` image with potential # subsampling for faster similarity evaluation. 
This also sets # the _from_data and _vox_coords attributes if from_mask is None: self.subsample(npoints=NPOINTS) else: corner, size = smallest_bounding_box(from_mask) self.set_fov(corner=corner, size=size, npoints=NPOINTS) # Clamping of the `to` image including padding with -1 data, to_bins = clamp(to_img.get_data(), bins=to_bins, mask=to_mask) self._to_data = -np.ones(np.array(to_img.shape) + 2, dtype=CLAMP_DTYPE) self._to_data[1:-1, 1:-1, 1:-1] = data self._to_inv_affine = inverse_affine(xyz_affine(to_img)) # Joint histogram: must be double contiguous as it will be # passed to C routines which assume so self._joint_hist = np.zeros([from_bins, to_bins], dtype='double') # Set default registration parameters self._set_interp(interp) self._set_similarity(similarity, **kwargs) def _get_interp(self): return interp_methods.keys()\ [interp_methods.values().index(self._interp)] def _set_interp(self, interp): self._interp = interp_methods[interp] interp = property(_get_interp, _set_interp) def set_fov(self, spacing=None, corner=(0, 0, 0), size=None, npoints=None): """ Defines a subset of the `from` image to restrict joint histogram computation. Parameters ---------- spacing : sequence (3,) of positive integers Subsampling of image in voxels, where None (default) results in the subsampling to be automatically adjusted to roughly match a cubic grid with `npoints` voxels corner : sequence (3,) of positive integers Bounding box origin in voxel coordinates size : sequence (3,) of positive integers Desired bounding box size npoints : positive integer Desired number of voxels in the bounding box. If a `spacing` argument is provided, then `npoints` is ignored. """ if spacing is None and npoints is None: spacing = [1, 1, 1] if size is None: size = self._from_img.shape slicer = lambda c, s, sp:\ tuple([slice(c[i], s[i] + c[i], sp[i]) for i in range(3)]) # Adjust spacing to match desired field of view size if not spacing is None: fov_data = self._from_img.get_data()[slicer(corner, size, spacing)] else: fov_data = self._from_img.get_data()[slicer(corner, size, [1, 1, 1])] spacing = ideal_spacing(fov_data, npoints=npoints) fov_data = self._from_img.get_data()[slicer(corner, size, spacing)] self._from_data = fov_data self._from_npoints = (fov_data >= 0).sum() self._from_affine = subgrid_affine(xyz_affine(self._from_img), slicer(corner, size, spacing)) # We cache the voxel coordinates of the clamped image self._vox_coords =\ np.indices(self._from_data.shape).transpose((1, 2, 3, 0)) def subsample(self, spacing=None, npoints=None): self.set_fov(spacing=spacing, npoints=npoints) def _set_similarity(self, similarity='cr', **kwargs): if similarity in _sms: self._similarity = similarity self._similarity_call =\ _sms[similarity](self._joint_hist.shape, **kwargs) else: if not hasattr(similarity, '__call__'): raise ValueError('similarity should be callable') self._similarity = 'custom' self._similarity_call = similarity def _get_similarity(self): return self._similarity similarity = property(_get_similarity, _set_similarity) def eval(self, T): """ Evaluate similarity function given a world-to-world transform. Parameters ---------- T : Transform Transform object implementing ``apply`` method """ Tv = ChainTransform(T, pre=self._from_affine, post=self._to_inv_affine) return self._eval(Tv) def _eval(self, Tv): """ Evaluate similarity function given a voxel-to-voxel transform. 
Parameters ---------- Tv : Transform Transform object implementing ``apply`` method Should map voxel space to voxel space """ # trans_vox_coords needs be C-contiguous trans_vox_coords = Tv.apply(self._vox_coords) interp = self._interp if self._interp < 0: interp = - np.random.randint(MAX_INT) _joint_histogram(self._joint_hist, self._from_data.flat, # array iterator self._to_data, trans_vox_coords, interp) return self._similarity_call(self._joint_hist) def optimize(self, T, optimizer=OPTIMIZER, **kwargs): """ Optimize transform `T` with respect to similarity measure. The input object `T` will change as a result of the optimization. Parameters ---------- T : object or str An object representing a transformation that should implement ``apply`` method and ``param`` attribute or property. If a string, one of 'rigid', 'similarity', or 'affine'. The corresponding transformation class is then initialized by default. optimizer : str Name of optimization function (one of 'powell', 'steepest', 'cg', 'bfgs', 'simplex') **kwargs : dict keyword arguments to pass to optimizer """ # Replace T if a string is passed if T in affine_transforms: T = affine_transforms[T]() # Pull callback out of keyword arguments, if present callback = kwargs.pop('callback', None) # Create transform chain object with T generating params Tv = ChainTransform(T, pre=self._from_affine, post=self._to_inv_affine) tc0 = Tv.param # Cost function to minimize def cost(tc): # This is where the similarity function is calculcated Tv.param = tc return -self._eval(Tv) # Callback during optimization if callback is None and VERBOSE: def callback(tc): Tv.param = tc print(Tv.optimizable) print(str(self.similarity) + ' = %s' % self._eval(Tv)) print('') # Switching to the appropriate optimizer if VERBOSE: print('Initial guess...') print(Tv.optimizable) kwargs.setdefault('xtol', XTOL) kwargs.setdefault('ftol', FTOL) kwargs.setdefault('gtol', GTOL) kwargs.setdefault('maxiter', MAXITER) kwargs.setdefault('maxfun', MAXFUN) fmin, args, kwargs = configure_optimizer(optimizer, fprime=None, fhess=None, **kwargs) # Output if VERBOSE: print ('Optimizing using %s' % fmin.__name__) kwargs['callback'] = callback Tv.param = fmin(cost, tc0, *args, **kwargs) return Tv.optimizable def explore(self, T0, *args): """ Evaluate the similarity at the transformations specified by sequences of parameter values. For instance: explore(T0, (0, [-1,0,1]), (4, [-2.,2])) """ nparams = T0.param.size deltas = [[0] for i in range(nparams)] for a in args: deltas[a[0]] = a[1] grids = np.mgrid[[slice(0, len(d)) for d in deltas]] ntrials = np.prod(grids.shape[1:]) Deltas = [np.asarray(deltas[i])[grids[i, :]].ravel()\ for i in range(nparams)] simis = np.zeros(ntrials) params = np.zeros([nparams, ntrials]) Tv = ChainTransform(T0, pre=self._from_affine, post=self._to_inv_affine) param0 = Tv.param for i in range(ntrials): param = param0 + np.array([D[i] for D in Deltas]) Tv.param = param simis[i] = self._eval(Tv) params[:, i] = param return simis, params def _clamp(x, y, bins=BINS): # Threshold dmaxmax = 2 ** (8 * y.dtype.itemsize - 1) - 1 dmax = bins - 1 # default output maximum value if dmax > dmaxmax: raise ValueError('Excess number of bins') xmin = float(x.min()) xmax = float(x.max()) d = xmax - xmin """ If the image dynamic is small, no need for compression: just downshift image values and re-estimate the dynamic range (hence xmax is translated to xmax-tth casted to the appropriate dtype. 
Otherwise, compress after downshifting image values (values equal to the threshold are reset to zero). """ if issubclass(x.dtype.type, np.integer) and d <= dmax: y[:] = x - xmin bins = int(d) + 1 else: a = dmax / d y[:] = np.round(a * (x - xmin)) return y, bins def clamp(x, bins=BINS, mask=None): """ Clamp array values that fall within a given mask in the range [0..bins-1] and reset masked values to -1. Parameters ---------- x : ndarray The input array bins : number Desired number of bins mask : ndarray, tuple or slice Anything such that x[mask] is an array. Returns ------- y : ndarray Clamped array, masked items are assigned -1 bins : number Adjusted number of bins """ if bins > np.iinfo(np.short).max: raise ValueError('Too large a bin size') y = -np.ones(x.shape, dtype=CLAMP_DTYPE) if mask is None: y, bins = _clamp(x, y, bins) else: ym = y[mask] xm = x[mask] ym, bins = _clamp(xm, ym, bins) y[mask] = ym return y, bins def ideal_spacing(data, npoints): """ Tune spacing factors so that the number of voxels in the output block matches a given number. Parameters ---------- data : ndarray or sequence Data image to subsample npoints : number Target number of voxels (negative values will be ignored) Returns ------- spacing: ndarray Spacing factors """ dims = data.shape actual_npoints = (data >= 0).sum() spacing = np.ones(3, dtype='uint') while actual_npoints > npoints: # Subsample the direction with the highest number of samples ddims = dims / spacing if ddims[0] >= ddims[1] and ddims[0] >= ddims[2]: dir = 0 elif ddims[1] > ddims[0] and ddims[1] >= ddims[2]: dir = 1 else: dir = 2 spacing[dir] += 1 subdata = data[::spacing[0], ::spacing[1], ::spacing[2]] actual_npoints = (subdata >= 0).sum() return spacing def smallest_bounding_box(msk): """ Extract the smallest bounding box from a mask """ x, y, z = np.where(msk > 0) corner = [x.min(), y.min(), z.min()] size = [x.max() + 1, y.max() + 1, z.max() + 1] return corner, size nipy-0.3.0/nipy/algorithms/registration/joint_histogram.c000066400000000000000000000244751210344137400237120ustar00rootroot00000000000000#include "joint_histogram.h" #include "wichmann_prng.h" #include #include #include #define SQR(a) ((a)*(a)) #define FLOOR(a)((a)>0.0 ? (int)(a):(((int)(a)-a)!= 0.0 ? (int)(a)-1 : (int)(a))) #define UROUND(a) ((int)(a+0.5)) #define ROUND(a)(FLOOR(a+0.5)) #ifdef _MSC_VER #define inline __inline #endif static inline void _pv_interpolation(unsigned int i, double* H, unsigned int clampJ, const signed short* J, const double* W, int nn, void* params); static inline void _tri_interpolation(unsigned int i, double* H, unsigned int clampJ, const signed short* J, const double* W, int nn, void* params); static inline void _rand_interpolation(unsigned int i, double* H, unsigned int clampJ, const signed short* J, const double* W, int nn, void* params); /* Numpy import */ void joint_histogram_import_array(void) { import_array(); return; } /* JOINT HISTOGRAM COMPUTATION. iterI : assumed to iterate over a signed short encoded, possibly non-contiguous array. imJ_padded : assumed C-contiguous (last index varies faster) & signed short encoded. H : assumed C-contiguous. Tvox : assumed C-contiguous: either a 3x4=12-sized array (or bigger) for an affine transformation or a 3xN array for a pre-computed transformation, with N equal to the size of the array corresponding to iterI (no checking done) Negative intensities are ignored. 
*/ #define APPEND_NEIGHBOR(q, w) \ j = J[q]; \ if (j>=0) { \ *bufJnn = j; bufJnn ++; \ *bufW = w; bufW ++; \ nn ++; } int joint_histogram(PyArrayObject* JH, unsigned int clampI, unsigned int clampJ, PyArrayIterObject* iterI, const PyArrayObject* imJ_padded, const PyArrayObject* Tvox, long interp) { const signed short* J=(signed short*)imJ_padded->data; size_t dimJX=imJ_padded->dimensions[0]-2; size_t dimJY=imJ_padded->dimensions[1]-2; size_t dimJZ=imJ_padded->dimensions[2]-2; signed short Jnn[8]; double W[8]; signed short *bufI, *bufJnn; double *bufW; signed short i, j; size_t off; size_t u2 = imJ_padded->dimensions[2]; size_t u3 = u2+1; size_t u4 = imJ_padded->dimensions[1]*u2; size_t u5 = u4+1; size_t u6 = u4+u2; size_t u7 = u6+1; double wx, wy, wz, wxwy, wxwz, wywz; double W0, W2, W3, W4; int nn, nx, ny, nz; double *H = (double*)PyArray_DATA(JH); double Tx, Ty, Tz; double *tvox = (double*)PyArray_DATA(Tvox); void (*interpolate)(unsigned int, double*, unsigned int, const signed short*, const double*, int, void*); void* interp_params = NULL; prng_state rng; /* Check assumptions regarding input arrays. If it fails, the function will return -1 without doing anything else. iterI : assumed to iterate over a signed short encoded, possibly non-contiguous array. imJ_padded : assumed C-contiguous (last index varies faster) & signed short encoded. H : assumed C-contiguous. Tvox : assumed C-contiguous: either a 3x4=12-sized array (or bigger) for an affine transformation or a 3xN array for a pre-computed transformation, with N equal to the size of the array corresponding to iterI (no checking done) */ if (PyArray_TYPE(iterI->ao) != NPY_SHORT) { fprintf(stderr, "Invalid type for the array iterator\n"); return -1; } if ( (!PyArray_ISCONTIGUOUS(imJ_padded)) || (!PyArray_ISCONTIGUOUS(JH)) || (!PyArray_ISCONTIGUOUS(Tvox)) ) { fprintf(stderr, "Some non-contiguous arrays\n"); return -1; } /* Reset the source image iterator */ PyArray_ITER_RESET(iterI); /* Set interpolation method */ if (interp==0) interpolate = &_pv_interpolation; else if (interp>0) interpolate = &_tri_interpolation; else { /* interp < 0 */ interpolate = &_rand_interpolation; prng_seed(-interp, &rng); interp_params = (void*)(&rng); } /* Re-initialize joint histogram */ memset((void*)H, 0, clampI*clampJ*sizeof(double)); /* Looop over source voxels */ while(iterI->index < iterI->size) { /* Source voxel intensity */ bufI = (signed short*)PyArray_ITER_DATA(iterI); i = bufI[0]; /* Compute the transformed grid coordinates of current voxel */ Tx = *tvox; tvox++; Ty = *tvox; tvox++; Tz = *tvox; tvox++; /* Test whether the current voxel is below the intensity threshold, or the transformed point is completly outside the reference grid */ if ((i>=0) && (Tx>-1) && (Tx-1) && (Ty-1) && (Tz x */ /*** Trilinear interpolation weights. 
Note: wx = nnx + 1 - Tx, where nnx is the location in the NON-PADDED grid */ wx = nx - Tx; wy = ny - Ty; wz = nz - Tz; wxwy = wx*wy; wxwz = wx*wz; wywz = wy*wz; /*** Prepare buffers */ bufJnn = Jnn; bufW = W; /*** Initialize neighbor list */ off = nx*u4 + ny*u2 + nz; nn = 0; /*** Neighbor 0: (0,0,0) */ W0 = wxwy*wz; APPEND_NEIGHBOR(off, W0); /*** Neighbor 1: (0,0,1) */ APPEND_NEIGHBOR(off+1, wxwy-W0); /*** Neighbor 2: (0,1,0) */ W2 = wxwz-W0; APPEND_NEIGHBOR(off+u2, W2); /*** Neightbor 3: (0,1,1) */ W3 = wx-wxwy-W2; APPEND_NEIGHBOR(off+u3, W3); /*** Neighbor 4: (1,0,0) */ W4 = wywz-W0; APPEND_NEIGHBOR(off+u4, W4); /*** Neighbor 5: (1,0,1) */ APPEND_NEIGHBOR(off+u5, wy-wxwy-W4); /*** Neighbor 6: (1,1,0) */ APPEND_NEIGHBOR(off+u6, wz-wxwz-W4); /*** Neighbor 7: (1,1,1) */ APPEND_NEIGHBOR(off+u7, 1-W3-wy-wz+wywz); /* Update the joint histogram using the desired interpolation technique */ interpolate(i, H, clampJ, Jnn, W, nn, interp_params); } /* End of IF TRANSFORMS INSIDE */ /* Update source index */ PyArray_ITER_NEXT(iterI); } /* End of loop over voxels */ return 0; } /* Partial Volume interpolation. See Maes et al, IEEE TMI, 2007. */ static inline void _pv_interpolation(unsigned int i, double* H, unsigned int clampJ, const signed short* J, const double* W, int nn, void* params) { int k; unsigned int clampJ_i = clampJ*i; const signed short *bufJ = J; const double *bufW = W; for(k=0; k 0.0) { jm /= sumW; H[UROUND(jm)+clampJ_i] += 1; } return; } /* Random interpolation. */ static inline void _rand_interpolation(unsigned int i, double* H, unsigned int clampJ, const signed short* J, const double* W, int nn, void* params) { prng_state* rng = (prng_state*)params; int k; unsigned int clampJ_i = clampJ*i; const double *bufW; double sumW, draw; for(k=0, bufW=W, sumW=0.0; k draw) break; } H[J[k]+clampJ_i] += 1; return; } /* A function to compute the weighted median in one-dimensional histogram. */ int L1_moments(double* n_, double* median_, double* dev_, const PyArrayObject* H) { int i, med; double median, dev, n, cpdf, lim; const double *buf; const double* h; unsigned int size; unsigned int offset; if (PyArray_TYPE(H) != NPY_DOUBLE) { fprintf(stderr, "Input array should be double\n"); return -1; } /* Initialize */ h = (const double*)PyArray_DATA(H); size = PyArray_DIM(H, 0); offset = PyArray_STRIDE(H, 0)/sizeof(double); n = median = dev = 0; cpdf = 0; buf = h; for (i=0; i= n/2 */ if (n > 0) { lim = 0.5*n; i = 0; buf = h; cpdf = *buf; dev = 0; while (cpdf < lim) { i ++; buf += offset; cpdf += *buf; dev += - i*(*buf); } /* We then have: i-1 < med < i and choose i as the median (alternatively, an interpolation between i-1 and i could be performed by linearly approximating the cumulative function). The L1 deviation reads: sum*E(|X-med|) = - sum_{i<=med} i h(i) [1] + sum_{i>med} i h(i) [2] + med * [2*cpdf(med) - sum] [3] Term [1] is currently equal to `dev` variable. */ median = (double)i; dev += (2*cpdf - n)*median; med = i+1; /* Complete computation of the L1 deviation by computing the truncated mean [2]) */ if (med < size) { buf = h + med*offset; for (i=med; i #include /* Numpy import */ extern void joint_histogram_import_array(void); /* Update a pre-allocated joint histogram. Important notice: in all computations, H will be assumed C-contiguous. This means that it is contiguous and that, in C convention (row-major order, i.e. 
column indices are fastest): i (source intensities) are row indices j (target intensities) are column indices interp: 0 - PV interpolation 1 - TRILINEAR interpolation <0 - RANDOM interpolation with seed=-interp */ extern int joint_histogram(PyArrayObject* H, unsigned int clampI, unsigned int clampJ, PyArrayIterObject* iterI, const PyArrayObject* imJ_padded, const PyArrayObject* Tvox, long interp); extern int L1_moments(double* n_, double* median_, double* dev_, const PyArrayObject* H); #ifdef __cplusplus } #endif #endif nipy-0.3.0/nipy/algorithms/registration/optimizer.py000066400000000000000000000027161210344137400227340ustar00rootroot00000000000000from scipy.optimize import (fmin as fmin_simplex, fmin_powell, fmin_cg, fmin_bfgs, fmin_ncg) from ..optimize import fmin_steepest def subdict(dic, keys): sdic = {} for k in keys: sdic[k] = dic[k] return sdic def configure_optimizer(optimizer, fprime=None, fhess=None, **kwargs): """ Return the minimization function """ args = [] kwargs['fprime'] = fprime kwargs['fhess'] = fhess kwargs['avextol'] = kwargs['xtol'] if optimizer == 'simplex': keys = ('xtol', 'ftol', 'maxiter', 'maxfun') fmin = fmin_simplex elif optimizer == 'powell': keys = ('xtol', 'ftol', 'maxiter', 'maxfun') fmin = fmin_powell elif optimizer == 'cg': keys = ('gtol', 'maxiter', 'fprime') fmin = fmin_cg elif optimizer == 'bfgs': keys = ('gtol', 'maxiter', 'fprime') fmin = fmin_bfgs elif optimizer == 'ncg': args = [fprime] keys = ('avextol', 'maxiter', 'fhess') fmin = fmin_ncg elif optimizer == 'steepest': keys = ('xtol', 'ftol', 'maxiter', 'fprime') fmin = fmin_steepest else: raise ValueError('unknown optimizer: %s' % optimizer) return fmin, args, subdict(kwargs, keys) def use_derivatives(optimizer): if optimizer in ('simplex', 'powell'): return False else: return True nipy-0.3.0/nipy/algorithms/registration/polyaffine.c000066400000000000000000000052561210344137400226420ustar00rootroot00000000000000#include "polyaffine.h" #include #include #define TINY 1e-200 /* Numpy import */ void polyaffine_import_array(void) { import_array(); return; } static double _gaussian(double* xyz, double* center, double* sigma) { double aux, d2 = 0.0; int i; for (i=0; i<3; i++) { aux = xyz[i] - center[i]; aux /= sigma[i]; d2 += aux*aux; } return exp(-.5*d2); } /* Compute: y += w*x */ static void _add_weighted_affine(double* y, const double* x, double w) { int i; for (i=0; i<12; i++) y[i] += w*x[i]; return; } /* Compute: y = mat*x */ static void _apply_affine(double *y, const double* mat, const double* x, double W) { y[0] = mat[0]*x[0]+mat[1]*x[1]+mat[2]*x[2]+mat[3]; y[1] = mat[4]*x[0]+mat[5]*x[1]+mat[6]*x[2]+mat[7]; y[2] = mat[8]*x[0]+mat[9]*x[1]+mat[10]*x[2]+mat[11]; if (Windex < iter_xyz->size) { xyz = PyArray_ITER_DATA(iter_xyz); PyArray_ITER_RESET(iter_centers); PyArray_ITER_RESET(iter_affines); memset((void*)mat, 0, bytes_mat); W = 0.0; /* Loop over centers */ while(iter_centers->index < iter_centers->size) { center = PyArray_ITER_DATA(iter_centers); affine = PyArray_ITER_DATA(iter_affines); w = _gaussian(xyz, center, sigma); W += w; _add_weighted_affine(mat, affine, w); PyArray_ITER_NEXT(iter_centers); PyArray_ITER_NEXT(iter_affines); } /* Apply matrix */ _apply_affine(t_xyz, mat, xyz, W); memcpy((void*)xyz, (void*)t_xyz, bytes_xyz); /* Update xyz iterator */ PyArray_ITER_NEXT(iter_xyz); } /* Free memory */ Py_XDECREF(iter_xyz); Py_XDECREF(iter_centers); Py_XDECREF(iter_affines); return; } 
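/* Summary of apply_polyaffine: for every point in XYZ, a Gaussian weight is
   evaluated for each center (_gaussian), the per-center 3x4 affines are
   accumulated with those weights (_add_weighted_affine), and the blended
   affine is applied to the point with normalization by the total weight
   (_apply_affine), the result overwriting XYZ in place. The corresponding
   formula and calling convention are documented in the PolyAffine class in
   polyaffine.py. */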
nipy-0.3.0/nipy/algorithms/registration/polyaffine.h000066400000000000000000000006051210344137400226400ustar00rootroot00000000000000#ifndef POLYAFFINE #define POLYAFFINE #ifdef __cplusplus extern "C" { #endif #include #include extern void polyaffine_import_array(void); extern void apply_polyaffine(PyArrayObject* XYZ, const PyArrayObject* Centers, const PyArrayObject* Affines, const PyArrayObject* Sigma); #ifdef __cplusplus } #endif #endif nipy-0.3.0/nipy/algorithms/registration/polyaffine.py000066400000000000000000000072261210344137400230470ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import numpy as np from .transform import Transform from .affine import apply_affine from ._registration import _apply_polyaffine TINY_SIGMA = 1e-200 class PolyAffine(Transform): def __init__(self, centers, affines, sigma, glob_affine=None): """ centers: N times 3 array We are given a set of affine transforms T_i with centers x_i, all in homogeneous coordinates. The polyaffine transform is defined, up to a right composition with a global affine, as: T(x) = sum_i w_i(x) T_i x where w_i(x) = g(x-x_i)/Z(x) are normalized Gaussian weights that sum up to one for every x. """ # Format input arguments self.centers = np.asarray(centers, dtype='double', order='C') self.sigma = np.zeros(3) self.sigma[:] = np.maximum(TINY_SIGMA, sigma) if hasattr(affines[0], 'as_affine'): affines = np.array([a.as_affine() for a in affines]) else: affines = np.asarray(affines) if hasattr(glob_affine, 'as_affine'): self.glob_affine = glob_affine.as_affine() else: self.glob_affine = glob_affine # Cache a (N, 12) matrix containing the affines coefficients, # should be C-contiguous double. self._affines = np.zeros((len(self.centers), 12)) self._affines[:] = np.reshape(affines[:, 0:3, :], (len(self.centers), 12)) def affine(self, i): aff = np.eye(4) aff[0:3, :] = self._affines[i].reshape(3, 4) return aff def affines(self): return [self.affine(i) for i in range(len(self.centers))] def apply(self, xyz): """ xyz is an (N, 3) array """ # txyz should be double C-contiguous for the the cython # routine _apply_polyaffine if self.glob_affine == None: txyz = np.array(xyz, copy=True, dtype='double', order='C') else: txyz = apply_affine(self.glob_affine, xyz) _apply_polyaffine(txyz, self.centers, self._affines, self.sigma) return txyz def compose(self, other): """ Compose this transform onto another Parameters ---------- other : Transform transform that we compose onto Returns ------- composed_transform : Transform a transform implementing the composition of self on `other` """ # If other is not an Affine, use the generic compose method if not hasattr(other, 'as_affine'): return Transform(self.apply).compose(other) # Affine case: the result is a polyaffine transform with same # local affines if self.glob_affine == None: glob_affine = other.as_affine() else: glob_affine = np.dot(self.glob_affine, other.as_affine()) return self.__class__(self.centers, self.affines(), self.sigma, glob_affine=glob_affine) def left_compose(self, other): # If other is not an Affine, use the generic compose method if not hasattr(other, 'as_affine'): return Transform(other.apply).compose(self) # Affine case: the result is a polyaffine transform with same # global affine other_affine = other.as_affine() affines = [np.dot(other_affine, self.affine(i)) \ for i in range(len(self.centers))] return self.__class__(self.centers, affines, self.sigma, glob_affine=self.glob_affine) 
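# A rough pure-NumPy sketch of the weighting scheme described in the
# PolyAffine docstring above: T(x) = sum_i w_i(x) T_i x, with w_i(x)
# normalized Gaussian weights centered at the x_i. It only illustrates the
# math for a single point -- PolyAffine.apply delegates the real work to the
# compiled _apply_polyaffine routine and also handles the optional
# glob_affine, which this sketch ignores. The helper name is illustrative,
# not part of the module API.

def _polyaffine_point_sketch(x, centers, affines, sigma):
    """Blend the 3x4 affines at point `x` (shape (3,)) with Gaussian weights.

    `centers` is (N, 3), `affines` is (N, 3, 4) or (N, 4, 4), `sigma` is a
    3-vector of Gaussian widths.
    """
    x = np.asarray(x, dtype=float)
    centers = np.asarray(centers, dtype=float)
    affines = np.asarray(affines, dtype=float)[:, :3, :]   # keep 3x4 parts
    d = (x - centers) / np.asarray(sigma, dtype=float)
    w = np.exp(-0.5 * (d ** 2).sum(axis=1))                # g(x - x_i)
    total = w.sum()
    if total < 1e-200:
        # degenerate case in this sketch: far from every center, keep x
        return x.copy()
    w = w / total                                          # normalized weights
    blended = (w[:, None, None] * affines).sum(axis=0)     # weighted 3x4 blend
    return blended[:, :3].dot(x) + blended[:, 3]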
nipy-0.3.0/nipy/algorithms/registration/resample.py000066400000000000000000000115231210344137400225160ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import numpy as np from scipy.ndimage import affine_transform, map_coordinates from ...core.image.image_spaces import (make_xyz_image, as_xyz_image, xyz_affine) from .affine import inverse_affine, Affine from ._registration import (_cspline_transform, _cspline_sample3d, _cspline_resample3d) INTERP_ORDER = 3 def resample(moving, transform=None, reference=None, mov_voxel_coords=False, ref_voxel_coords=False, dtype=None, interp_order=INTERP_ORDER): """ Resample `movimg` into voxel space of `reference` using `transform` Apply a transformation to the image considered as 'moving' to bring it into the same grid as a given `reference` image. The transformation usually maps world space in `reference` to world space in `movimg`, but can also be a voxel to voxel mapping (see parameters below). This function uses scipy.ndimage except for the case `interp_order==3`, where a fast cubic spline implementation is used. Parameters ---------- moving: nipy-like image Image to be resampled. transform: transform object or None Represents a transform that goes from the `reference` image to the `moving` image. None means an identity transform. Otherwise, it should have either an `apply` method, or an `as_affine` method. By default, `transform` maps between the output (world) space of `reference` and the output (world) space of `moving`. If `mov_voxel_coords` is True, maps to the *voxel* space of `moving` and if `ref_vox_coords` is True, maps from the *voxel* space of `reference`. reference : None or nipy-like image or tuple, optional The reference image defines the image dimensions and xyz affine to which to resample. It can be input as a nipy-like image or as a tuple (shape, affine). If None, use `movimg` to define these. mov_voxel_coords : boolean, optional True if the transform maps to voxel coordinates, False if it maps to world coordinates. ref_voxel_coords : boolean, optional True if the transform maps from voxel coordinates, False if it maps from world coordinates. interp_order: int, optional Spline interpolation order, defaults to 3. Returns ------- aligned_img : Image Image resliced to `reference` with reference-to-movimg transform `transform` """ # Function assumes xyz_affine for inputs moving = as_xyz_image(moving) mov_aff = xyz_affine(moving) if reference == None: reference = moving if isinstance(reference, (tuple, list)): ref_shape, ref_aff = reference else: # Expecting image. 
Must be an image that can make an xyz_affine reference = as_xyz_image(reference) ref_shape = reference.shape ref_aff = xyz_affine(reference) if not len(ref_shape) == 3 or not ref_aff.shape == (4, 4): raise ValueError('Input image should be 3D') data = moving.get_data() if dtype == None: dtype = data.dtype # Assume identity transform by default if transform == None: transform = Affine() # Case: affine transform if hasattr(transform, 'as_affine'): Tv = transform.as_affine() if not ref_voxel_coords: Tv = np.dot(Tv, ref_aff) if not mov_voxel_coords: Tv = np.dot(inverse_affine(mov_aff), Tv) if interp_order == 3: output = _cspline_resample3d(data, ref_shape, Tv, dtype=dtype) output = output.astype(dtype) else: output = np.zeros(ref_shape, dtype=dtype) affine_transform(data, Tv[0:3, 0:3], offset=Tv[0:3, 3], order=interp_order, cval=0, output_shape=ref_shape, output=output) # Case: non-affine transform else: Tv = transform if not ref_voxel_coords: Tv = Tv.compose(Affine(ref_aff)) if not mov_voxel_coords: Tv = Affine(inverse_affine(mov_aff)).compose(Tv) coords = np.indices(ref_shape).transpose((1, 2, 3, 0)) coords = np.reshape(coords, (np.prod(ref_shape), 3)) coords = Tv.apply(coords).T if interp_order == 3: cbspline = _cspline_transform(data) output = np.zeros(ref_shape, dtype='double') output = _cspline_sample3d(output, cbspline, *coords) output = output.astype(dtype) else: output = map_coordinates(data, coords, order=interp_order, cval=0, output=dtype) return make_xyz_image(output, ref_aff, 'scanner') nipy-0.3.0/nipy/algorithms/registration/setup.py000066400000000000000000000013361210344137400220470ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import os def configuration(parent_package='', top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('registration', parent_package, top_path) config.add_subpackage('tests') config.add_include_dirs(config.name.replace('.', os.sep)) config.add_extension( '_registration', sources=['_registration.pyx', 'joint_histogram.c', 'wichmann_prng.c', 'cubic_spline.c', 'polyaffine.c']) return config if __name__ == '__main__': print('This is the wrong setup.py file to run') nipy-0.3.0/nipy/algorithms/registration/similarity_measures.py000066400000000000000000000121151210344137400247760ustar00rootroot00000000000000from ._registration import _L1_moments import numpy as np from scipy.ndimage import gaussian_filter TINY = float(np.finfo(np.double).tiny) SIGMA_FACTOR = 0.05 def nonzero(x): """ Force strictly positive values. 
""" return np.maximum(x, TINY) def dist2loss(dist, margI=None, margJ=None): L = dist LT = L.T if margI == None: margI = L.sum(0) if margJ == None: margJ = L.sum(1) L /= nonzero(margI) LT /= nonzero(margJ) return -np.log(nonzero(L)) class SimilarityMeasure(object): def __init__(self, shape, **kwargs): self.shape = shape self.J, self.I = np.indices(shape) for key in kwargs.keys(): self.__setattr__(key, kwargs[key]) def loss(self, H): return np.zeros(H.shape) def npoints(self, H): return H.sum() def overall_loss(self, H): return np.sum(H * self.loss(H)) def averaged_loss(self, H): return np.sum(H * self.loss(H)) / nonzero(self.npoints(H)) def __call__(self, H): return -self.averaged_loss(H) class SupervisedLikelihoodRatio(SimilarityMeasure): def loss(self, H): if not hasattr(self, 'L'): self.L = dist2loss(self.dist) return self.L class MutualInformation(SimilarityMeasure): def loss(self, H): return dist2loss(H / nonzero(self.npoints(H))) class ParzenMutualInformation(SimilarityMeasure): def loss(self, H): if not hasattr(self, 'sigma'): self.sigma = SIGMA_FACTOR * np.array(H.shape) npts = nonzero(self.npoints(H)) Hs = H / npts gaussian_filter(Hs, sigma=self.sigma, mode='constant', output=Hs) return dist2loss(Hs) class DiscreteParzenMutualInformation(SimilarityMeasure): def loss(self, H): if not hasattr(self, 'sigma'): self.sigma = SIGMA_FACTOR * np.array(H.shape) Hs = gaussian_filter(H, sigma=self.sigma, mode='constant') Hs /= nonzero(Hs.sum()) return dist2loss(Hs) class NormalizedMutualInformation(SimilarityMeasure): """ NMI = 2*(1 - H(I,J)/[H(I)+H(J)]) = 2*MI/[H(I)+H(J)]) """ def loss(self, H): L = H / nonzero(self.npoints(H)) lI = L.sum(0) lJ = L.sum(1) self.hI = lI self.hJ = lJ return -np.log(nonzero(L)) def __call__(self, H): HIJ = self.averaged_loss(H) HI = -np.sum(self.hI * np.log(nonzero(self.hI))) HJ = -np.sum(self.hJ * np.log(nonzero(self.hJ))) return 2 * (1 - HIJ / nonzero(HI + HJ)) class CorrelationCoefficient(SimilarityMeasure): def loss(self, H): rho2 = self(H) I = (self.I - self.mI) / np.sqrt(nonzero(self.vI)) J = (self.J - self.mJ) / np.sqrt(nonzero(self.vJ)) L = rho2 * I ** 2 + rho2 * J ** 2 - 2 * self.rho * I * J tmp = nonzero(1. - rho2) L *= .5 / tmp L += .5 * np.log(tmp) return L def averaged_loss(self, H): return .5 * np.log(nonzero(1 - self(H))) def __call__(self, H): npts = nonzero(self.npoints(H)) self.mI = np.sum(H * self.I) / npts self.mJ = np.sum(H * self.J) / npts self.vI = np.sum(H * (self.I) ** 2) / npts - self.mI ** 2 self.vJ = np.sum(H * (self.J) ** 2) / npts - self.mJ ** 2 self.cIJ = np.sum(H * self.J * self.I) / npts - self.mI * self.mJ self.rho = self.cIJ / nonzero(np.sqrt(self.vI * self.vJ)) return self.rho ** 2 class CorrelationRatio(SimilarityMeasure): def loss(self, H): print('Sorry, not implemented yet...') return def averaged_loss(self, H): return .5 * np.log(nonzero(1. - self(H))) def __call__(self, H): self.npts_J = np.sum(H, 1) tmp = nonzero(self.npts_J) self.mI_J = np.sum(H * self.I, 1) / tmp self.vI_J = np.sum(H * (self.I) ** 2, 1) / tmp - self.mI_J ** 2 self.npts = np.sum(self.npts_J) tmp = nonzero(self.npts) hI = np.sum(H, 0) hJ = np.sum(H, 1) self.mI = np.sum(hI * self.I[0, :]) / tmp self.vI = np.sum(hI * self.I[0, :] ** 2) / tmp - self.mI ** 2 mean_vI_J = np.sum(hJ * self.vI_J) / tmp return 1. - mean_vI_J / nonzero(self.vI) class CorrelationRatioL1(SimilarityMeasure): def loss(self, H): print('Sorry, not implemented yet...') return def averaged_loss(self, H): return np.log(nonzero(1. 
- self(H))) def __call__(self, H): tmp = np.array([_L1_moments(H[j, :]) for j in range(H.shape[0])]) self.npts_J, self.mI_J, self.sI_J = tmp[:, 0], tmp[:, 1], tmp[:, 2] hI = np.sum(H, 0) hJ = np.sum(H, 1) self.npts, self.mI, self.sI = _L1_moments(hI) mean_sI_J = np.sum(hJ * self.sI_J) / nonzero(self.npts) return 1. - mean_sI_J / nonzero(self.sI) similarity_measures = { 'slr': SupervisedLikelihoodRatio, 'mi': MutualInformation, 'nmi': NormalizedMutualInformation, 'pmi': ParzenMutualInformation, 'dpmi': DiscreteParzenMutualInformation, 'cc': CorrelationCoefficient, 'cr': CorrelationRatio, 'crl1': CorrelationRatioL1} nipy-0.3.0/nipy/algorithms/registration/tests/000077500000000000000000000000001210344137400214745ustar00rootroot00000000000000nipy-0.3.0/nipy/algorithms/registration/tests/__init__.py000066400000000000000000000000501210344137400236000ustar00rootroot00000000000000# Init to make test directory a package nipy-0.3.0/nipy/algorithms/registration/tests/test_affine.py000066400000000000000000000127731210344137400243470ustar00rootroot00000000000000#!/usr/bin/env python import numpy as np from ..affine import (Affine, Affine2D, Rigid, Rigid2D, Similarity, Similarity2D, rotation_mat2vec, subgrid_affine, slices2aff) from nose.tools import assert_true, assert_false, assert_raises from numpy.testing import assert_array_equal, assert_array_almost_equal from ....testing import assert_almost_equal def random_vec12(subtype='affine'): v = np.array([0,0,0,0.0,0,0,1,1,1,0,0,0]) v[0:3] = 20*np.random.rand(3) v[3:6] = np.random.rand(3) if subtype == 'similarity': v[6:9] = np.random.rand() elif subtype == 'affine': v[6:9] = np.random.rand(3) v[9:12] = np.random.rand(3) return v """ def test_rigid_compose(): T1 = Affine(random_vec12('rigid')) T2 = Affine(random_vec12('rigid')) T = T1*T2 assert_almost_equal(T.as_affine(), np.dot(T1.as_affine(), T2.as_affine())) def test_compose(): T1 = Affine(random_vec12('affine')) T2 = Affine(random_vec12('similarity')) T = T1*T2 assert_almost_equal(T.as_affine(), np.dot(T1.as_affine(), T2.as_affine())) """ def test_mat2vec(): mat = np.eye(4) tmp = np.random.rand(3,3) U, s, Vt = np.linalg.svd(tmp) U /= np.linalg.det(U) Vt /= np.linalg.det(Vt) mat[0:3,0:3] = np.dot(np.dot(U, np.diag(s)), Vt) T = Affine(mat) assert_almost_equal(T.as_affine(), mat) def test_rotation_mat2vec(): r = rotation_mat2vec(np.diag([-1,1,-1])) assert_false(np.isnan(r).max()) def test_composed_affines(): aff1 = np.diag([2, 3, 4, 1]) aff2 = np.eye(4) aff2[:3,3] = (10, 11, 12) comped = np.dot(aff2, aff1) comped_obj = Affine(comped) assert_array_almost_equal(comped_obj.as_affine(), comped) aff1_obj = Affine(aff1) aff2_obj = Affine(aff2) re_comped = aff2_obj.compose(aff1_obj) assert_array_almost_equal(re_comped.as_affine(), comped) # Crazy, crazy, crazy aff1_remixed = aff1_obj.as_affine() aff2_remixed = aff2_obj.as_affine() comped_remixed = np.dot(aff2_remixed, aff1_remixed) assert_array_almost_equal(comped_remixed, Affine(comped_remixed).as_affine()) def test_affine_types(): pts = np.random.normal(size=(10,3)) for klass, n_params in ((Affine, 12), (Affine2D, 6), (Rigid, 6), (Rigid2D, 3), (Similarity, 7), (Similarity2D, 4), ): obj = klass() assert_array_equal(obj.param, np.zeros((n_params,))) obj.param = np.ones((n_params,)) assert_array_equal(obj.param, np.ones((n_params,))) # Check that round trip works orig_aff = obj.as_affine() obj2 = klass(orig_aff) assert_array_almost_equal(obj2.as_affine(), orig_aff) # Check inverse inv_obj = obj.inv() # Check points transform and invert pts_dash = obj.apply(pts) 
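        # pts_dash are the forward-transformed points; applying inv_obj below
        # should map them back onto pts.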
assert_array_almost_equal(pts, inv_obj.apply(pts_dash)) # Check composition with inverse gives identity with_inv = inv_obj.compose(obj) assert_array_almost_equal(with_inv.as_affine(), np.eye(4)) # Just check that str works without error s = str(obj) # Check default parameter input obj = klass(np.zeros((12,))) assert_array_equal(obj.param, np.zeros((n_params,))) obj = klass(list(np.zeros((12,)))) assert_array_equal(obj.param, np.zeros((n_params,))) def test_indirect_affines(): T = np.eye(4) A = np.random.rand(3,3) if np.linalg.det(A) > 0: A = -A T[:3,:3] = A obj = Affine(T) assert_false(obj.is_direct) assert_array_almost_equal(T, obj.as_affine()) def test_slices2aff(): # Take a series of slices, return equivalent affine for N in range(1, 5): slices = [slice(None) for n in range(N)] aff = np.eye(N+1) assert_array_equal(slices2aff(slices), aff) slices = [slice(2) for n in range(N)] assert_array_equal(slices2aff(slices), aff) slices = [slice(2, 4) for n in range(N)] aff2 = aff.copy() aff2[:-1,-1] = [2] * N assert_array_equal(slices2aff(slices), aff2) slices = [slice(2, 4, 5) for n in range(N)] aff3 = np.diag([5] * N + [1]) aff3[:-1,-1] = [2] * N assert_array_equal(slices2aff(slices), aff3) slices = [slice(2.1, 11, 4.9), slice(3.2, 11, 5.8), slice(4.3, 11, 6.7)] assert_array_equal(slices2aff(slices), [[4.9, 0, 0, 2.1], [0, 5.8, 0, 3.2], [0, 0, 6.7, 4.3], [0, 0, 0, 1]]) def test_subgrid_affine(): # Takes an affine and a series of slices, creates affine from slices, # returns dot(affine, affine_from_slices) slices = [slice(2, 11, 4), slice(3, 11, 5), slice(4, 11, 6)] assert_array_equal(subgrid_affine(np.eye(4), slices), [[4, 0, 0, 2], [0, 5, 0, 3], [0, 0, 6, 4], [0, 0, 0, 1]]) assert_array_equal(subgrid_affine(np.diag([2, 3, 4, 1]), slices), [[8, 0, 0, 4], [0, 15, 0, 9], [0, 0, 24, 16], [0, 0, 0, 1]]) # Raises error for non-integer slice arguments slices[0] = slice(2.1, 11, 4) assert_raises(ValueError, subgrid_affine, np.eye(4), slices) nipy-0.3.0/nipy/algorithms/registration/tests/test_chain_transforms.py000066400000000000000000000105211210344137400264440ustar00rootroot00000000000000""" Testing combined transformation objects The combined transform object associates a spatial transformation with the parameters of that transformation, for use in an optimizer. The combined transform object does several things. 
First, it can transform a coordinate array with:: transformed_pts = obj.apply(pts) Second, the transform can phrase itself as a vector of parameters that are suitable for optimization:: vec = obj.get_params() Third, the transform can be modified by setting from the optimization parameters:: obj.set_params(new_vec) new_transformed_pts = obj.apply(pts) """ import numpy as np import numpy.linalg as npl from nibabel.affines import apply_affine from ..chain_transform import ChainTransform from ..affine import Affine from numpy.testing import (assert_array_almost_equal, assert_array_equal) from nose.tools import assert_true, assert_equal, assert_raises AFF1 = np.diag([2, 3, 4, 1]) AFF2 = np.eye(4) AFF2[:3,3] = (10, 11, 12) # generate a random affine with a positive determinant AFF3 = np.eye(4) AFF3[:3,3] = np.random.normal(size=(3,)) tmp = np.random.normal(size=(3,3)) AFF3[:3,:3] = np.sign(npl.det(tmp))*tmp POINTS = np.arange(12).reshape(4,3) # Make affine objects AFF1_OBJ, AFF2_OBJ, AFF3_OBJ = [Affine(a) for a in [AFF1, AFF2.copy(), AFF3]] def test_creation(): # This is the simplest possible example, where there is a thing we are # optimizing, and an optional pre and post transform # Reset the aff2 object aff2_obj = Affine(AFF2.copy()) ct = ChainTransform(aff2_obj) # Check apply gives expected result assert_array_equal(ct.apply(POINTS), apply_affine(AFF2, POINTS)) # Check that result is changed by setting params assert_array_equal(ct.param, aff2_obj.param) ct.param = np.zeros((12,)) assert_array_almost_equal(ct.apply(POINTS), POINTS) # Does changing params in chain object change components passed in? assert_array_almost_equal(aff2_obj.param, np.zeros((12,))) # Reset the aff2 object aff2_obj = Affine(AFF2.copy()) # Check apply gives the expected results ct = ChainTransform(aff2_obj, pre=AFF1_OBJ) assert_array_almost_equal(AFF1_OBJ.as_affine(), AFF1) assert_array_almost_equal(aff2_obj.as_affine(), AFF2) tmp = np.dot(AFF2, AFF1) assert_array_almost_equal(ct.apply(POINTS), apply_affine(np.dot(AFF2, AFF1), POINTS)) # Check that result is changed by setting params assert_array_almost_equal(ct.param, aff2_obj.param) ct.param = np.zeros((12,)) assert_array_almost_equal(ct.apply(POINTS), apply_affine(AFF1, POINTS)) # Does changing params in chain object change components passed in? assert_array_almost_equal(aff2_obj.param, np.zeros((12,))) # Reset the aff2 object aff2_obj = Affine(AFF2.copy()) ct = ChainTransform(aff2_obj, pre=AFF1_OBJ, post=AFF3_OBJ) assert_array_almost_equal(ct.apply(POINTS), apply_affine(np.dot(AFF3, np.dot(AFF2, AFF1)), POINTS)) # Check that result is changed by setting params assert_array_equal(ct.param, aff2_obj.param) ct.param = np.zeros((12,)) assert_array_almost_equal(ct.apply(POINTS), apply_affine(np.dot(AFF3, AFF1), POINTS)) # Does changing params in chain object change components passed in? assert_array_equal(aff2_obj.param, np.zeros((12,))) # disabling this test because ChainTransform now returns an error if # it doesn't get an optimizable transform. 
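# The triple-quoted block that follows is the disabled test body, kept around
# as a module-level string for reference.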
""" def test_inputs(): # Check that we can pass arrays or None as pre and post assert_array_almost_equal(ChainTransform(AFF2).apply(POINTS), ChainTransform(AFF2_OBJ).apply(POINTS)) assert_array_almost_equal(ChainTransform(AFF2, pre=AFF1).apply(POINTS), ChainTransform(AFF2_OBJ, pre=AFF1_OBJ).apply(POINTS)) assert_array_almost_equal(ChainTransform(AFF2, pre=AFF1, post=AFF3).apply(POINTS), ChainTransform(AFF2_OBJ, pre=AFF1_OBJ, post=AFF3_OBJ).apply(POINTS)) assert_array_almost_equal(ChainTransform(AFF2, pre=None).apply(POINTS), ChainTransform(AFF2_OBJ).apply(POINTS)) assert_array_almost_equal(ChainTransform(AFF2, pre=None, post=None).apply(POINTS), ChainTransform(AFF2_OBJ).apply(POINTS)) """ nipy-0.3.0/nipy/algorithms/registration/tests/test_cubic_spline.py000066400000000000000000000016731210344137400255530ustar00rootroot00000000000000""" Testing """ import numpy as np from .._registration import (_cspline_transform, _cspline_sample1d, _cspline_sample4d) from numpy.testing import assert_array_almost_equal from nose.tools import assert_true, assert_equal, assert_raises def test_sample1d(): a = np.random.rand(100) c = _cspline_transform(a) x = np.arange(100) b = np.zeros(100) b = _cspline_sample1d(b, c, x) assert_array_almost_equal(a, b) b = _cspline_sample1d(b, c, x, mode='nearest') assert_array_almost_equal(a, b) def test_sample4d(): a = np.random.rand(4, 5, 6, 7) c = _cspline_transform(a) x = np.mgrid[0:4, 0:5, 0:6, 0:7] b = np.zeros(a.shape) args = list(x) b = _cspline_sample4d(b, c, *args) assert_array_almost_equal(a, b) args = list(x) + ['nearest' for i in range(4)] b = _cspline_sample4d(b, c, *args) assert_array_almost_equal(a, b) nipy-0.3.0/nipy/algorithms/registration/tests/test_fmri_realign4d.py000066400000000000000000000111421210344137400257720ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from nose.tools import assert_equal, assert_raises from numpy.testing import assert_array_almost_equal, assert_array_equal import numpy as np from .... import load_image from ....testing import funcfile from ....fixes.nibabel import io_orientation from ..groupwise_registration import Image4d, resample4d, FmriRealign4d from ..affine import Rigid im = load_image(funcfile) def test_scanner_time(): im4d = Image4d(im.get_data(), im.affine, tr=2., slice_order='ascending', interleaved=False) assert_equal(im4d.scanner_time(0, 0), 0.) assert_equal(im4d.scanner_time(0, im4d.tr), 1.) assert_equal(im4d.scanner_time(1, im4d.tr_slices), 0.) 
def test_slice_info(): im4d = Image4d(im.get_data(), im.affine, tr=2., slice_info=(1, -1)) assert_equal(im4d.slice_axis, 1) assert_equal(im4d.slice_direction, -1) def _test_image4d_init(nslices): data = np.zeros((3, 4, nslices, 6)) aff = np.eye(4) tr = 2.0 img4d = Image4d(data, aff, tr) assert_array_equal(img4d.slice_order, range(nslices)) img4d = Image4d(data, aff, tr, slice_order='ascending') assert_array_equal(img4d.slice_order, range(nslices)) img4d = Image4d(data, aff, tr, slice_order='descending') assert_array_equal(img4d.slice_order, range(nslices)[::-1]) # test interleaved slice order slice_order = range(nslices)[::2] + range(nslices)[1::2] img4d = Image4d(data, aff, tr, slice_order='ascending', interleaved=True) assert_array_equal(img4d.slice_order, slice_order) slice_order.reverse() img4d = Image4d(data, aff, tr, slice_order='descending', interleaved=True) assert_array_equal(img4d.slice_order, slice_order) # can pass array img4d = Image4d(data, aff, tr, slice_order=np.arange(nslices)) assert_array_equal(img4d.slice_order, range(nslices)) # or list img4d = Image4d(data, aff, tr, slice_order=range(nslices)) assert_array_equal(img4d.slice_order, range(nslices)) # but raises exception in case of the incorrect slice indexes for bad_slice_order in ( [0], # insufficient np.arange(nslices)-1, # negative etc np.arange(nslices) + 0.1, # floats range(nslices//2)*2, # twice the same (would match in length for even nslices) ): assert_raises(ValueError, Image4d, data, aff, tr, slice_order=bad_slice_order) def test_image4d_init_5slices(): _test_image4d_init(5) def test_image4d_init_6slices(): _test_image4d_init(6) def test_slice_timing(): affine = np.eye(4) affine[0:3, 0:3] = im.affine[0:3, 0:3] im4d = Image4d(im.get_data(), affine, tr=2., tr_slices=0.0) x = resample4d(im4d, [Rigid() for i in range(im.shape[3])]) assert_array_almost_equal(im4d.get_data(), x) def test_realign4d_no_time_interp(): runs = [im, im] R = FmriRealign4d(runs, slice_order=None, time_interp=False) def test_realign4d(): """ This tests whether realign4d yields the same results depending on whether the slice order is input explicitely or as slice_order='ascending'. Due to the very small size of the image used for testing (only 3 slices), optimization is numerically unstable. It seems to make the default optimizer, namely scipy.fmin.fmin_ncg, adopt a random behavior. To work around the resulting inconsistency in results, we use nipy.optimize.fmin_steepest as the optimizer, although it's generally not recommended in practice. 
""" runs = [im, im] orient = io_orientation(im.affine) slice_axis = int(np.where(orient[:, 0] == 2)[0]) R1 = FmriRealign4d(runs, tr=2., slice_order='ascending') R1.estimate(refscan=None, loops=1, between_loops=1, optimizer='steepest') R2 = FmriRealign4d(runs, tr=2., slice_order=range(im.shape[slice_axis])) R2.estimate(refscan=None, loops=1, between_loops=1, optimizer='steepest') for r in range(2): for i in range(im.shape[3]): assert_array_almost_equal(R1._transforms[r][i].translation, R2._transforms[r][i].translation) assert_array_almost_equal(R1._transforms[r][i].rotation, R2._transforms[r][i].rotation) for i in range(im.shape[3]): assert_array_almost_equal(R1._mean_transforms[r].translation, R2._mean_transforms[r].translation) assert_array_almost_equal(R1._mean_transforms[r].rotation, R2._mean_transforms[r].rotation) nipy-0.3.0/nipy/algorithms/registration/tests/test_histogram_registration.py000066400000000000000000000137431210344137400277040ustar00rootroot00000000000000#!/usr/bin/env python import numpy as np from ....core.image.image_spaces import make_xyz_image from ..affine import Affine from ..histogram_registration import HistogramRegistration from .._registration import _joint_histogram from numpy.testing import assert_array_equal from ....testing import assert_equal, assert_almost_equal, assert_raises dummy_affine = np.eye(4) def make_data_bool(dx=100, dy=100, dz=50): return (np.random.rand(dx, dy, dz) - np.random.rand()) > 0 def make_data_uint8(dx=100, dy=100, dz=50): return (256 * (np.random.rand(dx, dy, dz) - np.random.rand())).astype('uint8') def make_data_int16(dx=100, dy=100, dz=50): return (256 * (np.random.rand(dx, dy, dz) - np.random.rand())).astype('int16') def make_data_float64(dx=100, dy=100, dz=50): return (256 * (np.random.rand(dx, dy, dz) - np.random.rand())).astype('float64') def _test_clamping(I, thI=0.0, clI=256, mask=None): R = HistogramRegistration(I, I, from_bins=clI, from_mask=mask, to_mask=mask) R.subsample(spacing=[1, 1, 1]) Ic = R._from_data Ic2 = R._to_data[1:-1, 1:-1, 1:-1] assert_equal(Ic, Ic2) dyn = Ic.max() + 1 assert_equal(dyn, R._joint_hist.shape[0]) assert_equal(dyn, R._joint_hist.shape[1]) return Ic, Ic2 def test_clamping_uint8(): I = make_xyz_image(make_data_uint8(), dummy_affine, 'scanner') _test_clamping(I) def test_clamping_uint8_nonstd(): I = make_xyz_image(make_data_uint8(), dummy_affine, 'scanner') _test_clamping(I, 10, 165) def test_clamping_int16(): I = make_xyz_image(make_data_int16(), dummy_affine, 'scanner') _test_clamping(I) def test_masked_clamping_int16(): I = make_xyz_image(make_data_int16(), dummy_affine, 'scanner') _test_clamping(I, mask=make_data_bool()) def test_clamping_int16_nonstd(): I = make_xyz_image(make_data_int16(), dummy_affine, 'scanner') _test_clamping(I, 10, 165) def test_clamping_float64(): I = make_xyz_image(make_data_float64(), dummy_affine, 'scanner') _test_clamping(I) def test_clamping_float64_nonstd(): I = make_xyz_image(make_data_float64(), dummy_affine, 'scanner') _test_clamping(I, 10, 165) def _test_similarity_measure(simi, val): I = make_xyz_image(make_data_int16(), dummy_affine, 'scanner') J = make_xyz_image(I.get_data().copy(), dummy_affine, 'scanner') R = HistogramRegistration(I, J) R.subsample(spacing=[2, 1, 3]) R.similarity = simi assert_almost_equal(R.eval(Affine()), val) def test_correlation_coefficient(): _test_similarity_measure('cc', 1.0) def test_correlation_ratio(): _test_similarity_measure('cr', 1.0) def test_correlation_ratio_L1(): _test_similarity_measure('crl1', 1.0) def 
test_normalized_mutual_information(): _test_similarity_measure('nmi', 1.0) def test_joint_hist_eval(): I = make_xyz_image(make_data_int16(), dummy_affine, 'scanner') J = make_xyz_image(I.get_data().copy(), dummy_affine, 'scanner') # Obviously the data should be the same assert_array_equal(I.get_data(), J.get_data()) # Instantiate default thing R = HistogramRegistration(I, J) R.similarity = 'cc' null_affine = Affine() val = R.eval(null_affine) assert_almost_equal(val, 1.0) # Try with what should be identity R.subsample(spacing=[1, 1, 1]) assert_array_equal(R._from_data.shape, I.shape) val = R.eval(null_affine) assert_almost_equal(val, 1.0) def test_joint_hist_raw(): # Set up call to joint histogram jh_arr = np.zeros((10, 10), dtype=np.double) data_shape = (2, 3, 4) data = np.random.randint(size=data_shape, low=0, high=10).astype(np.short) data2 = np.zeros(np.array(data_shape) + 2, dtype=np.short) data2[:] = -1 data2[1:-1, 1:-1, 1:-1] = data.copy() vox_coords = np.indices(data_shape).transpose((1, 2, 3, 0)) vox_coords = np.ascontiguousarray(vox_coords.astype(np.double)) _joint_histogram(jh_arr, data.flat, data2, vox_coords, 0) assert_almost_equal(np.diag(np.diag(jh_arr)), jh_arr) def test_explore(): I = make_xyz_image(make_data_int16(), dummy_affine, 'scanner') J = make_xyz_image(make_data_int16(), dummy_affine, 'scanner') R = HistogramRegistration(I, J) T = Affine() simi, params = R.explore(T, (0, [-1, 0, 1]), (1, [-1, 0, 1])) def test_histogram_registration(): """ Test the histogram registration class. """ I = make_xyz_image(make_data_int16(), dummy_affine, 'scanner') J = make_xyz_image(I.get_data().copy(), dummy_affine, 'scanner') R = HistogramRegistration(I, J) assert_raises(ValueError, R.subsample, spacing=[0, 1, 3]) def test_set_fov(): I = make_xyz_image(make_data_int16(), dummy_affine, 'scanner') J = make_xyz_image(I.get_data().copy(), dummy_affine, 'scanner') R = HistogramRegistration(I, J) R.set_fov(npoints=np.prod(I.shape)) assert_equal(R._from_data.shape, I.shape) half_shape = tuple([I.shape[i] / 2 for i in range(3)]) R.set_fov(spacing=(2, 2, 2)) assert_equal(R._from_data.shape, half_shape) R.set_fov(corner=half_shape) assert_equal(R._from_data.shape, half_shape) R.set_fov(size=half_shape) assert_equal(R._from_data.shape, half_shape) def test_histogram_masked_registration(): """ Test the histogram registration class. 
""" I = make_xyz_image(make_data_int16(dx=100, dy=100, dz=50), dummy_affine, 'scanner') J = make_xyz_image(make_data_int16(dx=100, dy=100, dz=50), dummy_affine, 'scanner') mask = (np.zeros((100,100,50)) == 1) mask[10:20,10:20,10:20] = True R = HistogramRegistration(I, J, to_mask=mask, from_mask=mask) sim1 = R.eval(Affine()) I = make_xyz_image(I.get_data()[mask].reshape(10,10,10), dummy_affine, 'scanner') J = make_xyz_image(J.get_data()[mask].reshape(10,10,10), dummy_affine, 'scanner') R = HistogramRegistration(I, J) sim2 = R.eval(Affine()) assert_equal(sim1, sim2) if __name__ == "__main__": import nose nose.run(argv=['', __file__]) nipy-0.3.0/nipy/algorithms/registration/tests/test_polyaffine.py000066400000000000000000000017221210344137400252430ustar00rootroot00000000000000import numpy as np from ..polyaffine import PolyAffine from ..affine import Affine def random_affine(): T = np.eye(4) T[0:3, 0:4] = np.random.rand(3, 4) return T def id_affine(): return np.eye(4) NCENTERS = 5 NPTS = 100 centers = [np.random.rand(3) for i in range(NCENTERS)] raf = random_affine() affines = [raf for i in range(NCENTERS)] #affines = [id_affine() for i in range(NCENTERS)] sigma = 1.0 xyz = np.random.rand(NPTS, 3) # test 1: crach test create polyaffine transform T = PolyAffine(centers, affines, sigma) # test 2: crash test apply method t = T.apply(xyz) # test 3: check apply does nice job c = np.array(centers) tc = T.apply(c) qc = np.array([np.dot(a[0:3, 0:3], b) + a[0:3, 3]\ for a, b in zip(affines, centers)]) # test 4: crash test compose method A = Affine(random_affine()) TA = T.compose(A) # test 5: crash test left compose method AT = A.compose(T) z = AT.apply(xyz) za = A.compose(Affine(raf)).apply(xyz) nipy-0.3.0/nipy/algorithms/registration/tests/test_register.py000066400000000000000000000025351210344137400247360ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import numpy as np from .... 
nipy-0.3.0/nipy/algorithms/registration/tests/test_register.py000066400000000000000000000025351210344137400247360ustar00rootroot00000000000000
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import numpy as np

from .... import load_image
from ....testing import anatfile
from ..histogram_registration import HistogramRegistration

from numpy.testing import assert_array_almost_equal

anat_img = load_image(anatfile)


def test_registers():
    # Test registration to self returns identity
    for cost, interp, affine_type in (('cc', 'pv', 'rigid'),
                                      ('cc', 'tri', 'rigid'),
                                      ('cc', 'rand', 'rigid'),
                                      ('cc', 'pv', 'similarity'),
                                      ('cc', 'pv', 'affine'),
                                      ('cr', 'pv', 'rigid'),
                                      ('cr', 'pv', 'rigid'),
                                      ('crl1', 'pv', 'rigid'),
                                      ('mi', 'pv', 'rigid'),
                                      ('nmi', 'pv', 'rigid'),
                                      ):
        R = HistogramRegistration(anat_img, anat_img, similarity=cost,
                                  interp=interp)
        R.subsample([2,2,2])
        affine = R.optimize(affine_type)
        yield assert_array_almost_equal, affine.as_affine(), np.eye(4), 2
nipy-0.3.0/nipy/algorithms/registration/tests/test_resample.py000066400000000000000000000025171210344137400247220ustar00rootroot00000000000000
""" Testing resample function
"""

import numpy as np

from ....core.image.image_spaces import (as_xyz_image, xyz_affine)
from ....core.api import Image, vox2mni
from ..resample import resample
from ..affine import Affine

from numpy.testing import assert_array_almost_equal


def _test_resample(arr, interp_orders):
    # Check basic cases of resampling
    img = Image(arr, vox2mni(np.eye(4)))
    T = Affine()
    for i in interp_orders:
        img2 = resample(img, T, interp_order=i)
        assert_array_almost_equal(img2.get_data(), img.get_data())
        img_aff = as_xyz_image(img)
        img2 = resample(img, T,
                        reference=(img_aff.shape, xyz_affine(img_aff)),
                        interp_order=i)
        assert_array_almost_equal(img2.get_data(), img.get_data())


def test_resample_float_data():
    arr = np.random.rand(10, 11, 12)
    _test_resample(arr, (0, 1, 3, 5))


def test_resample_int_data():
    arr = np.random.randint(100, size=(10, 11, 12)) - 50
    _test_resample(arr, (3,))


def test_resample_uint_data():
    arr = np.random.randint(100, size=(10, 11, 12)).astype('uint8')
    img = Image(arr, vox2mni(np.eye(4)))
    T = Affine((.5, .5, .5, .1, .1, .1, 0, 0, 0, 0, 0, 0))
    img2 = resample(img, T)
    assert(np.min(img2.get_data()) >= 0)
    assert(np.max(img2.get_data()) < 255)
nipy-0.3.0/nipy/algorithms/registration/tests/test_transform.py000066400000000000000000000011241210344137400251160ustar00rootroot00000000000000
""" Testing """

import numpy as np

from ..transform import Transform

from numpy.testing import (assert_array_almost_equal,
                           assert_array_equal)
from nose.tools import assert_true, assert_equal, assert_raises


def test_transform():
    t = Transform(lambda x : x+1)
    pts = np.random.normal(size=(10,3))
    assert_array_equal(t.apply(pts), pts+1)
    assert_raises(AttributeError, getattr, t, 'param')
    tm1 = Transform(lambda x : x-1)
    assert_array_equal(tm1.apply(pts), pts-1)
    tctm1 = t.compose(tm1)
    assert_array_almost_equal(tctm1.apply(pts), pts)
nipy-0.3.0/nipy/algorithms/registration/transform.py000066400000000000000000000023221210344137400227160ustar00rootroot00000000000000
""" Generic transform class

This implementation specifies an API.  We've done our best to avoid checking
instances, so any class implementing this API should be valid in the places
(like registration routines) that use transforms.  If that isn't true, it's a
bug.
"""


class Transform(object):
    """ A default transformation class

    This class specifies the tiny API.  That is, the class should implement:

    * obj.param - the transformation exposed as a set of parameters.  Changing
      param should change the transformation
    * obj.apply(pts) - accepts (N,3) array-like of points in 3 dimensions,
      returns an (N, 3) array of transformed points
    * obj.compose(xform) - accepts another object implementing ``apply``, and
      returns a new transformation object, where the resulting transformation
      is the composition of the ``obj`` transform onto the ``xform`` transform.
    """
    def __init__(self, func):
        self.func = func

    def apply(self, pts):
        return self.func(pts)

    def compose(self, other):
        return self.__class__(
            lambda pts : self.func(other.apply(pts)))

    @property
    def param(self):
        raise AttributeError('No param for generic transform')
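
# A minimal usage sketch for the API documented above (an illustration, not
# part of the original module): wrap plain functions in Transform, apply them
# to points, and compose them.  Note that t.compose(other) applies ``other``
# first and then ``t``, exactly as in the lambda above.
if __name__ == '__main__':
    import numpy as np
    shift = Transform(lambda pts: pts + 1)
    scale = Transform(lambda pts: 2 * pts)
    pts = np.ones((4, 3))
    # shift(scale(pts)) == 2 * pts + 1 == 3 for an array of ones
    composed = shift.compose(scale)
    assert np.allclose(composed.apply(pts), 3)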
nipy-0.3.0/nipy/algorithms/registration/wichmann_prng.c000066400000000000000000000022461210344137400233340ustar00rootroot00000000000000
#include "wichmann_prng.h"

#include <stdlib.h>

/* Assumption to be verified: ix, iy, iz, it should be set to values
   between 1 and 400000 */

void prng_seed(int seed, prng_state* rng)
{
  double r, rmax=(double)RAND_MAX;
  int imax = 400000;

  srand(seed);
  r = (double)rand()/rmax;
  rng->ix = (int)(imax*r);
  r = (double)rand()/rmax;
  rng->iy = (int)(imax*r);
  r = (double)rand()/rmax;
  rng->iz = (int)(imax*r);
  r = (double)rand()/rmax;
  rng->it = (int)(imax*r);

  return;
}

double prng_double(prng_state* rng)
{
  double W;

  rng->ix = 11600 * (rng->ix % 185127) - 10379 * (rng->ix / 185127);
  rng->iy = 47003 * (rng->iy % 45688) - 10479 * (rng->iy / 45688);
  rng->iz = 23000 * (rng->iz % 93368) - 19423 * (rng->iz / 93368);
  rng->it = 33000 * (rng->it % 65075) - 8123 * (rng->it / 65075);

  if (rng->ix < 0)
    rng->ix = rng->ix + 2147483579;
  if (rng->iy < 0)
    rng->iy = rng->iy + 2147483543;
  if (rng->iz < 0)
    rng->iz = rng->iz + 2147483423;
  if (rng->it < 0)
    rng->it = rng->it + 2147483123;

  W = rng->ix/2147483579. + rng->iy/2147483543. +
      rng->iz/2147483423. + rng->it/2147483123.;

  return W - (int)W;
}
nipy-0.3.0/nipy/algorithms/registration/wichmann_prng.h000066400000000000000000000010461210344137400233360ustar00rootroot00000000000000
#ifndef WICHMANN_PRNG
#define WICHMANN_PRNG

#ifdef __cplusplus
extern "C" {
#endif

/*
  B.A. Wichmann, I.D. Hill, Generating good pseudo-random numbers,
  Computational Statistics & Data Analysis, Volume 51, Issue 3,
  1 December 2006, Pages 1614-1622, ISSN 0167-9473,
  DOI: 10.1016/j.csda.2006.05.019.
*/

typedef struct {
  int ix;
  int iy;
  int iz;
  int it;
} prng_state;

extern void prng_seed(int seed, prng_state* rng);
extern double prng_double(prng_state* prng);

#ifdef __cplusplus
}
#endif

#endif
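
# A Python transcription of prng_double() above, for illustration only: the
# prng_state struct becomes a 4-element list and the constants are copied
# verbatim from the C source.  It shows the four Wichmann-Hill (2006)
# recurrences and the fractional-part combination of the normalized states.
# The function name wichmann_double is new; it is not part of the library.
def wichmann_double(state):
    """`state` is a list [ix, iy, iz, it] of positive ints; returns a float
    in [0, 1) and updates `state` in place."""
    mults = ((11600, 185127, 10379),
             (47003, 45688, 10479),
             (23000, 93368, 19423),
             (33000, 65075, 8123))
    mods = (2147483579, 2147483543, 2147483423, 2147483123)
    for k, ((a, q, r), m) in enumerate(zip(mults, mods)):
        s = a * (state[k] % q) - r * (state[k] // q)
        state[k] = s if s >= 0 else s + m
    w = sum(float(s) / m for s, m in zip(state, mods))
    return w - int(w)

# Example: wichmann_double([1234, 5678, 91011, 121314]) returns a value in
# [0, 1) and advances the four-component state, mirroring the C routine.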
nipy-0.3.0/nipy/algorithms/resample.py000066400000000000000000000115721210344137400200070ustar00rootroot00000000000000
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
""" Some simple examples and utility functions for resampling.
"""

import copy

import numpy as np

from scipy.ndimage import affine_transform

from nibabel.affines import from_matvec, to_matvec

from .interpolation import ImageInterpolator
from ..core.api import (Image, CoordinateMap, AffineTransform, ArrayCoordMap,
                        compose)


def resample_img2img(source, target, order=3):
    """ Resample `source` image to space of `target` image

    This wraps the resample function to resample one image onto another.
    The output of the function will give an image with shape of the target
    and data from the source.

    Parameters
    ----------
    source : ``Image``
       Image instance that is to be resampled
    target : ``Image``
       Image instance to which source is resampled.  The output image will
       have the same shape as the target, and the same coordmap.
    order : ``int``, optional
       What order of interpolation to use in `scipy.ndimage`

    Returns
    -------
    output : ``Image``
       Image with interpolated data and output.coordmap == target.coordmap

    Examples
    --------
    >>> from nipy.testing import funcfile, anatfile
    >>> from nipy.io.api import load_image
    >>> aimg_source = load_image(anatfile)
    >>> aimg_target = aimg_source
    >>> # in this case, we resample aimg to itself
    >>> resimg = resample_img2img(aimg_source, aimg_target)
    """
    sip, sop = source.coordmap.ndims
    tip, top = target.coordmap.ndims
    #print sip, sop, tip, top
    if sop != top:
        raise ValueError("source coordmap output dimension not equal "
                         "to target coordmap output dimension")
    mapping = np.eye(sop+1) # this would usually be 3+1
    resimg = resample(source, target.coordmap, mapping, target.shape,
                      order=order)
    return resimg


def resample(image, target, mapping, shape, order=3):
    """ Resample `image` to `target` CoordinateMap

    Use a "world-to-world" mapping `mapping` and spline interpolation of a
    given `order`.

    Here, "world-to-world" refers to the fact that mapping should be a
    callable that takes a physical coordinate in "target" and gives a
    physical coordinate in "image".

    Parameters
    ----------
    image : Image instance
       image that is to be resampled
    target : CoordinateMap
       coordinate map for output image
    mapping : callable or tuple or array
       transformation from target.function_range to
       image.coordmap.function_range, i.e. 'world-to-world mapping'.  Can be
       specified in three ways: a callable, a tuple (A, b) representing the
       mapping y=dot(A,x)+b or a representation of this mapping as an affine
       array, in homogeneous coordinates.
    shape : sequence of int
       shape of output array, in target.function_domain
    order : int, optional
       what order of interpolation to use in `scipy.ndimage`

    Returns
    -------
    output : Image instance
       with interpolated data and output.coordmap == target
    """
    if not callable(mapping):
        if type(mapping) is type(()):
            mapping = from_matvec(*mapping)
        # image world to target world mapping
        TW2IW = AffineTransform(target.function_range,
                                image.coordmap.function_range,
                                mapping)
    else:
        if isinstance(mapping, AffineTransform):
            TW2IW = mapping
        else:
            TW2IW = CoordinateMap(target.function_range,
                                  image.coordmap.function_range,
                                  mapping)
    # target voxel to image world mapping
    TV2IW = compose(TW2IW, target)
    # CoordinateMap describing mapping from target voxel to
    # image world coordinates
    if not isinstance(TV2IW, AffineTransform):
        # interpolator evaluates image at values image.coordmap.function_range,
        # i.e. physical coordinates rather than voxel coordinates
        grid = ArrayCoordMap.from_shape(TV2IW, shape)
        interp = ImageInterpolator(image, order=order)
        idata = interp.evaluate(grid.transposed_values)
        del(interp)
    else:
        # it is an affine transform, but, what if we compose?
TV2IV = compose(image.coordmap.inverse(), TV2IW) if isinstance(TV2IV, AffineTransform): # still affine A, b = to_matvec(TV2IV.affine) idata = affine_transform(image.get_data(), A, offset=b, output_shape=shape, order=order) else: # not affine anymore interp = ImageInterpolator(image, order=order) grid = ArrayCoordMap.from_shape(TV2IV, shape) idata = interp.evaluate(grid.values) del(interp) return Image(idata, copy.copy(target)) nipy-0.3.0/nipy/algorithms/segmentation/000077500000000000000000000000001210344137400203155ustar00rootroot00000000000000nipy-0.3.0/nipy/algorithms/segmentation/__init__.py000066400000000000000000000002711210344137400224260ustar00rootroot00000000000000from .brain_segmentation import BrainT1Segmentation from .segmentation import Segmentation, moment_matching from nipy.testing import Tester test = Tester().test bench = Tester().bench nipy-0.3.0/nipy/algorithms/segmentation/_segmentation.c000066400000000000000000007630761210344137400233400ustar00rootroot00000000000000/* Generated by Cython 0.17.4 on Sat Jan 12 17:27:28 2013 */ #define PY_SSIZE_T_CLEAN #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02040000 #error Cython requires Python 2.4+. #else #include /* For offsetof */ #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #endif #if PY_VERSION_HEX < 0x02050000 typedef int Py_ssize_t; #define PY_SSIZE_T_MAX INT_MAX #define PY_SSIZE_T_MIN INT_MIN #define PY_FORMAT_SIZE_T "" #define CYTHON_FORMAT_SSIZE_T "" #define PyInt_FromSsize_t(z) PyInt_FromLong(z) #define PyInt_AsSsize_t(o) __Pyx_PyInt_AsInt(o) #define PyNumber_Index(o) ((PyNumber_Check(o) && !PyFloat_Check(o)) ? 
PyNumber_Int(o) : \ (PyErr_Format(PyExc_TypeError, \ "expected index value, got %.200s", Py_TYPE(o)->tp_name), \ (PyObject*)0)) #define __Pyx_PyIndex_Check(o) (PyNumber_Check(o) && !PyFloat_Check(o) && \ !PyComplex_Check(o)) #define PyIndex_Check __Pyx_PyIndex_Check #define PyErr_WarnEx(category, message, stacklevel) PyErr_Warn(category, message) #define __PYX_BUILD_PY_SSIZE_T "i" #else #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #define __Pyx_PyIndex_Check PyIndex_Check #endif #if PY_VERSION_HEX < 0x02060000 #define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt) #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type) #define Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size) #define PyVarObject_HEAD_INIT(type, size) \ PyObject_HEAD_INIT(type) size, #define PyType_Modified(t) typedef struct { void *buf; PyObject *obj; Py_ssize_t len; Py_ssize_t itemsize; int readonly; int ndim; char *format; Py_ssize_t *shape; Py_ssize_t *strides; Py_ssize_t *suboffsets; void *internal; } Py_buffer; #define PyBUF_SIMPLE 0 #define PyBUF_WRITABLE 0x0001 #define PyBUF_FORMAT 0x0004 #define PyBUF_ND 0x0008 #define PyBUF_STRIDES (0x0010 | PyBUF_ND) #define PyBUF_C_CONTIGUOUS (0x0020 | PyBUF_STRIDES) #define PyBUF_F_CONTIGUOUS (0x0040 | PyBUF_STRIDES) #define PyBUF_ANY_CONTIGUOUS (0x0080 | PyBUF_STRIDES) #define PyBUF_INDIRECT (0x0100 | PyBUF_STRIDES) #define PyBUF_RECORDS (PyBUF_STRIDES | PyBUF_FORMAT | PyBUF_WRITABLE) #define PyBUF_FULL (PyBUF_INDIRECT | PyBUF_FORMAT | PyBUF_WRITABLE) typedef int (*getbufferproc)(PyObject *, Py_buffer *, int); typedef void (*releasebufferproc)(PyObject *, Py_buffer *); #endif #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \ PyCode_New(a, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #endif #if PY_MAJOR_VERSION < 3 && PY_MINOR_VERSION < 6 #define PyUnicode_FromString(s) PyUnicode_Decode(s, strlen(s), "UTF-8", "strict") #endif #if PY_MAJOR_VERSION >= 3 #define Py_TPFLAGS_CHECKTYPES 0 #define Py_TPFLAGS_HAVE_INDEX 0 #endif #if (PY_VERSION_HEX < 0x02060000) || (PY_MAJOR_VERSION >= 3) #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ? 
\ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #else #define CYTHON_PEP393_ENABLED 0 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_READ(k, d, i) ((k=k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #endif #if PY_VERSION_HEX < 0x02060000 #define PyBytesObject PyStringObject #define PyBytes_Type PyString_Type #define PyBytes_Check PyString_Check #define PyBytes_CheckExact PyString_CheckExact #define PyBytes_FromString PyString_FromString #define PyBytes_FromStringAndSize PyString_FromStringAndSize #define PyBytes_FromFormat PyString_FromFormat #define PyBytes_DecodeEscape PyString_DecodeEscape #define PyBytes_AsString PyString_AsString #define PyBytes_AsStringAndSize PyString_AsStringAndSize #define PyBytes_Size PyString_Size #define PyBytes_AS_STRING PyString_AS_STRING #define PyBytes_GET_SIZE PyString_GET_SIZE #define PyBytes_Repr PyString_Repr #define PyBytes_Concat PyString_Concat #define PyBytes_ConcatAndDel PyString_ConcatAndDel #endif #if PY_VERSION_HEX < 0x02060000 #define PySet_Check(obj) PyObject_TypeCheck(obj, &PySet_Type) #define PyFrozenSet_Check(obj) PyObject_TypeCheck(obj, &PyFrozenSet_Type) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_VERSION_HEX < 0x03020000 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if (PY_MAJOR_VERSION < 3) || (PY_VERSION_HEX >= 0x03010300) #define __Pyx_PySequence_GetSlice(obj, a, b) PySequence_GetSlice(obj, a, b) #define __Pyx_PySequence_SetSlice(obj, a, b, value) PySequence_SetSlice(obj, a, b, value) #define __Pyx_PySequence_DelSlice(obj, a, b) PySequence_DelSlice(obj, a, b) #else #define __Pyx_PySequence_GetSlice(obj, a, b) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), (PyObject*)0) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_GetSlice(obj, a, b)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object is unsliceable", (obj)->ob_type->tp_name), (PyObject*)0))) #define __Pyx_PySequence_SetSlice(obj, a, b, value) (unlikely(!(obj)) ? 
\ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_SetSlice(obj, a, b, value)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice assignment", (obj)->ob_type->tp_name), -1))) #define __Pyx_PySequence_DelSlice(obj, a, b) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_DelSlice(obj, a, b)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice deletion", (obj)->ob_type->tp_name), -1))) #endif #if PY_MAJOR_VERSION >= 3 #define PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func)) #endif #if PY_VERSION_HEX < 0x02050000 #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),((char *)(n))) #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),((char *)(n)),(a)) #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),((char *)(n))) #else #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),(n)) #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),(n),(a)) #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),(n)) #endif #if PY_VERSION_HEX < 0x02050000 #define __Pyx_NAMESTR(n) ((char *)(n)) #define __Pyx_DOCSTR(n) ((char *)(n)) #else #define __Pyx_NAMESTR(n) (n) #define __Pyx_DOCSTR(n) (n) #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include #define __PYX_HAVE__nipy__algorithms__segmentation___segmentation #define __PYX_HAVE_API__nipy__algorithms__segmentation___segmentation #include "stdio.h" #include "stdlib.h" #include "numpy/arrayobject.h" #include "numpy/ufuncobject.h" #include "mrf.h" #ifdef _OPENMP #include #endif /* _OPENMP */ #ifdef PYREX_WITHOUT_ASSERTIONS #define CYTHON_WITHOUT_ASSERTIONS #endif /* inline attribute */ #ifndef CYTHON_INLINE #if defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif /* unused attribute */ #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif typedef struct {PyObject **p; char *s; const long n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; /*proto*/ /* Type Conversion Predeclarations */ #define __Pyx_PyBytes_FromUString(s) PyBytes_FromString((char*)s) #define __Pyx_PyBytes_AsUString(s) ((unsigned char*) PyBytes_AsString(s)) #define __Pyx_Owned_Py_None(b) (Py_INCREF(Py_None), Py_None) #define __Pyx_PyBool_FromLong(b) ((b) ? 
(Py_INCREF(Py_True), Py_True) : (Py_INCREF(Py_False), Py_False)) static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x); static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject*); #if CYTHON_COMPILING_IN_CPYTHON #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #ifdef __GNUC__ /* Test for GCC > 2.95 */ #if __GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* __GNUC__ > 2 ... */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ > 2 ... */ #else /* __GNUC__ */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static PyObject *__pyx_m; static PyObject *__pyx_b; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= __FILE__; static const char *__pyx_filename; #if !defined(CYTHON_CCOMPLEX) #if defined(__cplusplus) #define CYTHON_CCOMPLEX 1 #elif defined(_Complex_I) #define CYTHON_CCOMPLEX 1 #else #define CYTHON_CCOMPLEX 0 #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus #include #else #include #endif #endif #if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__) #undef _Complex_I #define _Complex_I 1.0fj #endif static const char *__pyx_f[] = { "_segmentation.pyx", "numpy.pxd", "type.pxd", }; /* "numpy.pxd":723 * # in Cython to enable them only on the right systems. 
* * ctypedef npy_int8 int8_t # <<<<<<<<<<<<<< * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t */ typedef npy_int8 __pyx_t_5numpy_int8_t; /* "numpy.pxd":724 * * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t # <<<<<<<<<<<<<< * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t */ typedef npy_int16 __pyx_t_5numpy_int16_t; /* "numpy.pxd":725 * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t # <<<<<<<<<<<<<< * ctypedef npy_int64 int64_t * #ctypedef npy_int96 int96_t */ typedef npy_int32 __pyx_t_5numpy_int32_t; /* "numpy.pxd":726 * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t # <<<<<<<<<<<<<< * #ctypedef npy_int96 int96_t * #ctypedef npy_int128 int128_t */ typedef npy_int64 __pyx_t_5numpy_int64_t; /* "numpy.pxd":730 * #ctypedef npy_int128 int128_t * * ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<< * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t */ typedef npy_uint8 __pyx_t_5numpy_uint8_t; /* "numpy.pxd":731 * * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<< * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t */ typedef npy_uint16 __pyx_t_5numpy_uint16_t; /* "numpy.pxd":732 * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<< * ctypedef npy_uint64 uint64_t * #ctypedef npy_uint96 uint96_t */ typedef npy_uint32 __pyx_t_5numpy_uint32_t; /* "numpy.pxd":733 * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<< * #ctypedef npy_uint96 uint96_t * #ctypedef npy_uint128 uint128_t */ typedef npy_uint64 __pyx_t_5numpy_uint64_t; /* "numpy.pxd":737 * #ctypedef npy_uint128 uint128_t * * ctypedef npy_float32 float32_t # <<<<<<<<<<<<<< * ctypedef npy_float64 float64_t * #ctypedef npy_float80 float80_t */ typedef npy_float32 __pyx_t_5numpy_float32_t; /* "numpy.pxd":738 * * ctypedef npy_float32 float32_t * ctypedef npy_float64 float64_t # <<<<<<<<<<<<<< * #ctypedef npy_float80 float80_t * #ctypedef npy_float128 float128_t */ typedef npy_float64 __pyx_t_5numpy_float64_t; /* "numpy.pxd":747 * # The int types are mapped a bit surprising -- * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t # <<<<<<<<<<<<<< * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t */ typedef npy_long __pyx_t_5numpy_int_t; /* "numpy.pxd":748 * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t * ctypedef npy_longlong long_t # <<<<<<<<<<<<<< * ctypedef npy_longlong longlong_t * */ typedef npy_longlong __pyx_t_5numpy_long_t; /* "numpy.pxd":749 * ctypedef npy_long int_t * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<< * * ctypedef npy_ulong uint_t */ typedef npy_longlong __pyx_t_5numpy_longlong_t; /* "numpy.pxd":751 * ctypedef npy_longlong longlong_t * * ctypedef npy_ulong uint_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t */ typedef npy_ulong __pyx_t_5numpy_uint_t; /* "numpy.pxd":752 * * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulonglong_t * */ typedef npy_ulonglong __pyx_t_5numpy_ulong_t; /* "numpy.pxd":753 * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<< * * ctypedef npy_intp intp_t */ typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t; /* "numpy.pxd":755 * ctypedef npy_ulonglong ulonglong_t * * ctypedef npy_intp intp_t # 
<<<<<<<<<<<<<< * ctypedef npy_uintp uintp_t * */ typedef npy_intp __pyx_t_5numpy_intp_t; /* "numpy.pxd":756 * * ctypedef npy_intp intp_t * ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<< * * ctypedef npy_double float_t */ typedef npy_uintp __pyx_t_5numpy_uintp_t; /* "numpy.pxd":758 * ctypedef npy_uintp uintp_t * * ctypedef npy_double float_t # <<<<<<<<<<<<<< * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t */ typedef npy_double __pyx_t_5numpy_float_t; /* "numpy.pxd":759 * * ctypedef npy_double float_t * ctypedef npy_double double_t # <<<<<<<<<<<<<< * ctypedef npy_longdouble longdouble_t * */ typedef npy_double __pyx_t_5numpy_double_t; /* "numpy.pxd":760 * ctypedef npy_double float_t * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cfloat cfloat_t */ typedef npy_longdouble __pyx_t_5numpy_longdouble_t; #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< float > __pyx_t_float_complex; #else typedef float _Complex __pyx_t_float_complex; #endif #else typedef struct { float real, imag; } __pyx_t_float_complex; #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< double > __pyx_t_double_complex; #else typedef double _Complex __pyx_t_double_complex; #endif #else typedef struct { double real, imag; } __pyx_t_double_complex; #endif /*--- Type declarations ---*/ /* "numpy.pxd":762 * ctypedef npy_longdouble longdouble_t * * ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<< * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t */ typedef npy_cfloat __pyx_t_5numpy_cfloat_t; /* "numpy.pxd":763 * * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<< * ctypedef npy_clongdouble clongdouble_t * */ typedef npy_cdouble __pyx_t_5numpy_cdouble_t; /* "numpy.pxd":764 * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cdouble complex_t */ typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t; /* "numpy.pxd":766 * ctypedef npy_clongdouble clongdouble_t * * ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew1(a): */ typedef npy_cdouble __pyx_t_5numpy_complex_t; #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); /*proto*/ #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define __Pyx_RefNannySetupContext(name, acquire_gil) \ if (acquire_gil) { \ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \ PyGILState_Release(__pyx_gilstate_save); \ } else { \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil) \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext() \ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) 
__Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif /* CYTHON_REFNANNY */ #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name); /*proto*/ static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /*proto*/ static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /*proto*/ static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[], \ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, \ const char* function_name); /*proto*/ static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb); /*proto*/ static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb); /*proto*/ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /*proto*/ static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { PyObject *r; if (!j) return NULL; r = PyObject_GetItem(o, j); Py_DECREF(j); return r; } #define __Pyx_GetItemInt_List(o, i, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? \ __Pyx_GetItemInt_List_Fast(o, i) : \ __Pyx_GetItemInt_Generic(o, to_py_func(i))) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i) { #if CYTHON_COMPILING_IN_CPYTHON if (likely((0 <= i) & (i < PyList_GET_SIZE(o)))) { PyObject *r = PyList_GET_ITEM(o, i); Py_INCREF(r); return r; } else if ((-PyList_GET_SIZE(o) <= i) & (i < 0)) { PyObject *r = PyList_GET_ITEM(o, PyList_GET_SIZE(o) + i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } #define __Pyx_GetItemInt_Tuple(o, i, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? \ __Pyx_GetItemInt_Tuple_Fast(o, i) : \ __Pyx_GetItemInt_Generic(o, to_py_func(i))) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i) { #if CYTHON_COMPILING_IN_CPYTHON if (likely((0 <= i) & (i < PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, i); Py_INCREF(r); return r; } else if ((-PyTuple_GET_SIZE(o) <= i) & (i < 0)) { PyObject *r = PyTuple_GET_ITEM(o, PyTuple_GET_SIZE(o) + i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } #define __Pyx_GetItemInt(o, i, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? 
\ __Pyx_GetItemInt_Fast(o, i) : \ __Pyx_GetItemInt_Generic(o, to_py_func(i))) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i) { #if CYTHON_COMPILING_IN_CPYTHON if (PyList_CheckExact(o)) { Py_ssize_t n = (likely(i >= 0)) ? i : i + PyList_GET_SIZE(o); if (likely((n >= 0) & (n < PyList_GET_SIZE(o)))) { PyObject *r = PyList_GET_ITEM(o, n); Py_INCREF(r); return r; } } else if (PyTuple_CheckExact(o)) { Py_ssize_t n = (likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o); if (likely((n >= 0) & (n < PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, n); Py_INCREF(r); return r; } } else { /* inlined PySequence_GetItem() */ PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; if (likely(m && m->sq_item)) { if (unlikely(i < 0) && likely(m->sq_length)) { Py_ssize_t l = m->sq_length(o); if (unlikely(l < 0)) return NULL; i += l; } return m->sq_item(o, i); } } #else if (PySequence_Check(o)) { return PySequence_GetItem(o, i); } #endif return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); } static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /*proto*/ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); static CYTHON_INLINE int __Pyx_IterFinish(void); /*proto*/ static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected); /*proto*/ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level); /*proto*/ #if CYTHON_CCOMPLEX #ifdef __cplusplus #define __Pyx_CREAL(z) ((z).real()) #define __Pyx_CIMAG(z) ((z).imag()) #else #define __Pyx_CREAL(z) (__real__(z)) #define __Pyx_CIMAG(z) (__imag__(z)) #endif #else #define __Pyx_CREAL(z) ((z).real) #define __Pyx_CIMAG(z) ((z).imag) #endif #if defined(_WIN32) && defined(__cplusplus) && CYTHON_CCOMPLEX #define __Pyx_SET_CREAL(z,x) ((z).real(x)) #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) #else #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) #endif static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float); #if CYTHON_CCOMPLEX #define __Pyx_c_eqf(a, b) ((a)==(b)) #define __Pyx_c_sumf(a, b) ((a)+(b)) #define __Pyx_c_difff(a, b) ((a)-(b)) #define __Pyx_c_prodf(a, b) ((a)*(b)) #define __Pyx_c_quotf(a, b) ((a)/(b)) #define __Pyx_c_negf(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zerof(z) ((z)==(float)0) #define __Pyx_c_conjf(z) (::std::conj(z)) #if 1 #define __Pyx_c_absf(z) (::std::abs(z)) #define __Pyx_c_powf(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zerof(z) ((z)==0) #define __Pyx_c_conjf(z) (conjf(z)) #if 1 #define __Pyx_c_absf(z) (cabsf(z)) #define __Pyx_c_powf(a, b) (cpowf(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex); static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex); #if 
1 static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex, __pyx_t_float_complex); #endif #endif static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double); #if CYTHON_CCOMPLEX #define __Pyx_c_eq(a, b) ((a)==(b)) #define __Pyx_c_sum(a, b) ((a)+(b)) #define __Pyx_c_diff(a, b) ((a)-(b)) #define __Pyx_c_prod(a, b) ((a)*(b)) #define __Pyx_c_quot(a, b) ((a)/(b)) #define __Pyx_c_neg(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zero(z) ((z)==(double)0) #define __Pyx_c_conj(z) (::std::conj(z)) #if 1 #define __Pyx_c_abs(z) (::std::abs(z)) #define __Pyx_c_pow(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zero(z) ((z)==0) #define __Pyx_c_conj(z) (conj(z)) #if 1 #define __Pyx_c_abs(z) (cabs(z)) #define __Pyx_c_pow(a, b) (cpow(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex); static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex); #if 1 static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex, __pyx_t_double_complex); #endif #endif static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject *); static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject *); static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject *); static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject *); static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject *); static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject *); static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject *); static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject *); static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject *); static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject *); static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject *); static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject *); static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject *); static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject *); static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject *); static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject *); static int __Pyx_check_binary_version(void); #if !defined(__Pyx_PyIdentifier_FromString) #if PY_MAJOR_VERSION < 3 #define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s) #else #define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s) #endif #endif static PyObject *__Pyx_ImportModule(const char *name); /*proto*/ static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict); /*proto*/ typedef struct { int code_line; PyCodeObject* code_object; } __Pyx_CodeObjectCacheEntry; struct __Pyx_CodeObjectCache { int count; int max_count; 
__Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); /*proto*/ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/ /* Module declarations from 'cpython.buffer' */ /* Module declarations from 'cpython.ref' */ /* Module declarations from 'libc.stdio' */ /* Module declarations from 'cpython.object' */ /* Module declarations from '__builtin__' */ /* Module declarations from 'cpython.type' */ static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0; /* Module declarations from 'libc.stdlib' */ /* Module declarations from 'numpy' */ /* Module declarations from 'numpy' */ static PyTypeObject *__pyx_ptype_5numpy_dtype = 0; static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0; static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0; static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0; static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0; static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/ /* Module declarations from 'nipy.algorithms.segmentation._segmentation' */ #define __Pyx_MODULE_NAME "nipy.algorithms.segmentation._segmentation" int __pyx_module_is_main_nipy__algorithms__segmentation___segmentation = 0; /* Implementation of 'nipy.algorithms.segmentation._segmentation' */ static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_range; static PyObject *__pyx_builtin_RuntimeError; static PyObject *__pyx_pf_4nipy_10algorithms_12segmentation_13_segmentation__ve_step(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_ppm, PyObject *__pyx_v_ref, PyObject *__pyx_v_XYZ, PyObject *__pyx_v_U, int __pyx_v_ngb_size, double __pyx_v_beta); /* proto */ static PyObject *__pyx_pf_4nipy_10algorithms_12segmentation_13_segmentation_2_make_edges(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_mask, int __pyx_v_ngb_size); /* proto */ static PyObject *__pyx_pf_4nipy_10algorithms_12segmentation_13_segmentation_4_interaction_energy(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_ppm, PyObject *__pyx_v_XYZ, PyObject *__pyx_v_U, int __pyx_v_ngb_size); /* proto */ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */ static char __pyx_k_1[] = "ppm array should be double C-contiguous"; static char __pyx_k_3[] = "ref array should be double C-contiguous"; static char __pyx_k_5[] = "XYZ array should be intp C-contiguous"; static char __pyx_k_7[] = "XYZ array should be 3D"; static char __pyx_k_9[] = "U array should be double C-contiguous"; static char __pyx_k_11[] = "Inconsistent shapes for ppm and ref arrays"; static char __pyx_k_13[] = "mask array should be intp and C-contiguous"; static char __pyx_k_19[] = "ndarray is not C contiguous"; static char __pyx_k_21[] = "ndarray is not Fortran contiguous"; static char __pyx_k_23[] = "Non-native byte order not supported"; static char __pyx_k_25[] = "unknown dtype code in numpy.pxd (%d)"; static char __pyx_k_26[] = "Format string allocated too short, see comment in numpy.pxd"; static char 
__pyx_k_29[] = "Format string allocated too short."; static char __pyx_k_31[] = "\nMarkov random field utils. \n\nAuthor: Alexis Roche, 2010.\n"; static char __pyx_k_32[] = "0.2"; static char __pyx_k_35[] = "/Users/mb312/dev_trees/nipy/nipy/algorithms/segmentation/_segmentation.pyx"; static char __pyx_k_36[] = "nipy.algorithms.segmentation._segmentation"; static char __pyx_k__B[] = "B"; static char __pyx_k__H[] = "H"; static char __pyx_k__I[] = "I"; static char __pyx_k__L[] = "L"; static char __pyx_k__O[] = "O"; static char __pyx_k__Q[] = "Q"; static char __pyx_k__U[] = "U"; static char __pyx_k__b[] = "b"; static char __pyx_k__d[] = "d"; static char __pyx_k__f[] = "f"; static char __pyx_k__g[] = "g"; static char __pyx_k__h[] = "h"; static char __pyx_k__i[] = "i"; static char __pyx_k__l[] = "l"; static char __pyx_k__q[] = "q"; static char __pyx_k__Zd[] = "Zd"; static char __pyx_k__Zf[] = "Zf"; static char __pyx_k__Zg[] = "Zg"; static char __pyx_k__np[] = "np"; static char __pyx_k__XYZ[] = "XYZ"; static char __pyx_k__ppm[] = "ppm"; static char __pyx_k__ref[] = "ref"; static char __pyx_k__beta[] = "beta"; static char __pyx_k__intp[] = "intp"; static char __pyx_k__mask[] = "mask"; static char __pyx_k__dtype[] = "dtype"; static char __pyx_k__flags[] = "flags"; static char __pyx_k__numpy[] = "numpy"; static char __pyx_k__range[] = "range"; static char __pyx_k__shape[] = "shape"; static char __pyx_k__double[] = "double"; static char __pyx_k____main__[] = "__main__"; static char __pyx_k____test__[] = "__test__"; static char __pyx_k___ve_step[] = "_ve_step"; static char __pyx_k__ngb_size[] = "ngb_size"; static char __pyx_k__ValueError[] = "ValueError"; static char __pyx_k____version__[] = "__version__"; static char __pyx_k___make_edges[] = "_make_edges"; static char __pyx_k__C_CONTIGUOUS[] = "C_CONTIGUOUS"; static char __pyx_k__RuntimeError[] = "RuntimeError"; static char __pyx_k___interaction_energy[] = "_interaction_energy"; static PyObject *__pyx_kp_s_1; static PyObject *__pyx_kp_s_11; static PyObject *__pyx_kp_s_13; static PyObject *__pyx_kp_u_19; static PyObject *__pyx_kp_u_21; static PyObject *__pyx_kp_u_23; static PyObject *__pyx_kp_u_25; static PyObject *__pyx_kp_u_26; static PyObject *__pyx_kp_u_29; static PyObject *__pyx_kp_s_3; static PyObject *__pyx_kp_s_32; static PyObject *__pyx_kp_s_35; static PyObject *__pyx_n_s_36; static PyObject *__pyx_kp_s_5; static PyObject *__pyx_kp_s_7; static PyObject *__pyx_kp_s_9; static PyObject *__pyx_n_s__C_CONTIGUOUS; static PyObject *__pyx_n_s__RuntimeError; static PyObject *__pyx_n_s__U; static PyObject *__pyx_n_s__ValueError; static PyObject *__pyx_n_s__XYZ; static PyObject *__pyx_n_s____main__; static PyObject *__pyx_n_s____test__; static PyObject *__pyx_n_s____version__; static PyObject *__pyx_n_s___interaction_energy; static PyObject *__pyx_n_s___make_edges; static PyObject *__pyx_n_s___ve_step; static PyObject *__pyx_n_s__beta; static PyObject *__pyx_n_s__double; static PyObject *__pyx_n_s__dtype; static PyObject *__pyx_n_s__flags; static PyObject *__pyx_n_s__intp; static PyObject *__pyx_n_s__mask; static PyObject *__pyx_n_s__ngb_size; static PyObject *__pyx_n_s__np; static PyObject *__pyx_n_s__numpy; static PyObject *__pyx_n_s__ppm; static PyObject *__pyx_n_s__range; static PyObject *__pyx_n_s__ref; static PyObject *__pyx_n_s__shape; static PyObject *__pyx_int_3; static PyObject *__pyx_int_15; static PyObject *__pyx_k_tuple_2; static PyObject *__pyx_k_tuple_4; static PyObject *__pyx_k_tuple_6; static PyObject *__pyx_k_tuple_8; static PyObject 
*__pyx_k_tuple_10; static PyObject *__pyx_k_tuple_12; static PyObject *__pyx_k_tuple_14; static PyObject *__pyx_k_tuple_15; static PyObject *__pyx_k_tuple_16; static PyObject *__pyx_k_tuple_17; static PyObject *__pyx_k_tuple_18; static PyObject *__pyx_k_tuple_20; static PyObject *__pyx_k_tuple_22; static PyObject *__pyx_k_tuple_24; static PyObject *__pyx_k_tuple_27; static PyObject *__pyx_k_tuple_28; static PyObject *__pyx_k_tuple_30; static PyObject *__pyx_k_tuple_33; static PyObject *__pyx_k_tuple_37; static PyObject *__pyx_k_tuple_39; static PyObject *__pyx_k_codeobj_34; static PyObject *__pyx_k_codeobj_38; static PyObject *__pyx_k_codeobj_40; /* Python wrapper */ static PyObject *__pyx_pw_4nipy_10algorithms_12segmentation_13_segmentation_1_ve_step(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_4nipy_10algorithms_12segmentation_13_segmentation_1_ve_step = {__Pyx_NAMESTR("_ve_step"), (PyCFunction)__pyx_pw_4nipy_10algorithms_12segmentation_13_segmentation_1_ve_step, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(0)}; static PyObject *__pyx_pw_4nipy_10algorithms_12segmentation_13_segmentation_1_ve_step(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_ppm = 0; PyObject *__pyx_v_ref = 0; PyObject *__pyx_v_XYZ = 0; PyObject *__pyx_v_U = 0; int __pyx_v_ngb_size; double __pyx_v_beta; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("_ve_step (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__ppm,&__pyx_n_s__ref,&__pyx_n_s__XYZ,&__pyx_n_s__U,&__pyx_n_s__ngb_size,&__pyx_n_s__beta,0}; PyObject* values[6] = {0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__ppm)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__ref)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_ve_step", 1, 6, 6, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 39; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__XYZ)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_ve_step", 1, 6, 6, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 39; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__U)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_ve_step", 1, 6, 6, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 39; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__ngb_size)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_ve_step", 1, 6, 6, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 39; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__beta)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_ve_step", 1, 6, 6, 5); {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 39; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "_ve_step") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 39; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 6) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); } __pyx_v_ppm = values[0]; __pyx_v_ref = values[1]; __pyx_v_XYZ = values[2]; __pyx_v_U = values[3]; __pyx_v_ngb_size = __Pyx_PyInt_AsInt(values[4]); if (unlikely((__pyx_v_ngb_size == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 39; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_beta = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_beta == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 39; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("_ve_step", 1, 6, 6, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 39; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.algorithms.segmentation._segmentation._ve_step", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4nipy_10algorithms_12segmentation_13_segmentation__ve_step(__pyx_self, __pyx_v_ppm, __pyx_v_ref, __pyx_v_XYZ, __pyx_v_U, __pyx_v_ngb_size, __pyx_v_beta); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/algorithms/segmentation/_segmentation.pyx":39 * * * def _ve_step(ppm, ref, XYZ, U, int ngb_size, double beta): # <<<<<<<<<<<<<< * * if not ppm.flags['C_CONTIGUOUS'] or not ppm.dtype=='double': */ static PyObject *__pyx_pf_4nipy_10algorithms_12segmentation_13_segmentation__ve_step(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_ppm, PyObject *__pyx_v_ref, PyObject *__pyx_v_XYZ, PyObject *__pyx_v_U, int __pyx_v_ngb_size, double __pyx_v_beta) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_t_3; int __pyx_t_4; int __pyx_t_5; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_ve_step", 0); /* "nipy/algorithms/segmentation/_segmentation.pyx":41 * def _ve_step(ppm, ref, XYZ, U, int ngb_size, double beta): * * if not ppm.flags['C_CONTIGUOUS'] or not ppm.dtype=='double': # <<<<<<<<<<<<<< * raise ValueError('ppm array should be double C-contiguous') * if not ref.flags['C_CONTIGUOUS'] or not ref.dtype=='double': */ __pyx_t_1 = PyObject_GetAttr(__pyx_v_ppm, __pyx_n_s__flags); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 41; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_GetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__C_CONTIGUOUS)); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 41; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_3 < 0)) {__pyx_filename = __pyx_f[0]; 
__pyx_lineno = 41; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_4 = (!__pyx_t_3); if (!__pyx_t_4) { __pyx_t_2 = PyObject_GetAttr(__pyx_v_ppm, __pyx_n_s__dtype); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 41; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = PyObject_RichCompare(__pyx_t_2, ((PyObject *)__pyx_n_s__double), Py_EQ); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 41; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_3 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 41; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_5 = (!__pyx_t_3); __pyx_t_3 = __pyx_t_5; } else { __pyx_t_3 = __pyx_t_4; } if (__pyx_t_3) { /* "nipy/algorithms/segmentation/_segmentation.pyx":42 * * if not ppm.flags['C_CONTIGUOUS'] or not ppm.dtype=='double': * raise ValueError('ppm array should be double C-contiguous') # <<<<<<<<<<<<<< * if not ref.flags['C_CONTIGUOUS'] or not ref.dtype=='double': * raise ValueError('ref array should be double C-contiguous') */ __pyx_t_1 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_2), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 42; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; {__pyx_filename = __pyx_f[0]; __pyx_lineno = 42; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L3; } __pyx_L3:; /* "nipy/algorithms/segmentation/_segmentation.pyx":43 * if not ppm.flags['C_CONTIGUOUS'] or not ppm.dtype=='double': * raise ValueError('ppm array should be double C-contiguous') * if not ref.flags['C_CONTIGUOUS'] or not ref.dtype=='double': # <<<<<<<<<<<<<< * raise ValueError('ref array should be double C-contiguous') * if not XYZ.flags['C_CONTIGUOUS'] or not XYZ.dtype=='intp': */ __pyx_t_1 = PyObject_GetAttr(__pyx_v_ref, __pyx_n_s__flags); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 43; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_GetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__C_CONTIGUOUS)); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 43; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_3 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 43; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_4 = (!__pyx_t_3); if (!__pyx_t_4) { __pyx_t_2 = PyObject_GetAttr(__pyx_v_ref, __pyx_n_s__dtype); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 43; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = PyObject_RichCompare(__pyx_t_2, ((PyObject *)__pyx_n_s__double), Py_EQ); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 43; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_3 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 43; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_5 = (!__pyx_t_3); __pyx_t_3 = 
__pyx_t_5; } else { __pyx_t_3 = __pyx_t_4; } if (__pyx_t_3) { /* "nipy/algorithms/segmentation/_segmentation.pyx":44 * raise ValueError('ppm array should be double C-contiguous') * if not ref.flags['C_CONTIGUOUS'] or not ref.dtype=='double': * raise ValueError('ref array should be double C-contiguous') # <<<<<<<<<<<<<< * if not XYZ.flags['C_CONTIGUOUS'] or not XYZ.dtype=='intp': * raise ValueError('XYZ array should be intp C-contiguous') */ __pyx_t_1 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_4), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 44; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; {__pyx_filename = __pyx_f[0]; __pyx_lineno = 44; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L4; } __pyx_L4:; /* "nipy/algorithms/segmentation/_segmentation.pyx":45 * if not ref.flags['C_CONTIGUOUS'] or not ref.dtype=='double': * raise ValueError('ref array should be double C-contiguous') * if not XYZ.flags['C_CONTIGUOUS'] or not XYZ.dtype=='intp': # <<<<<<<<<<<<<< * raise ValueError('XYZ array should be intp C-contiguous') * if not XYZ.shape[1] == 3: */ __pyx_t_1 = PyObject_GetAttr(__pyx_v_XYZ, __pyx_n_s__flags); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 45; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_GetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__C_CONTIGUOUS)); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 45; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_3 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 45; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_4 = (!__pyx_t_3); if (!__pyx_t_4) { __pyx_t_2 = PyObject_GetAttr(__pyx_v_XYZ, __pyx_n_s__dtype); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 45; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = PyObject_RichCompare(__pyx_t_2, ((PyObject *)__pyx_n_s__intp), Py_EQ); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 45; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_3 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 45; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_5 = (!__pyx_t_3); __pyx_t_3 = __pyx_t_5; } else { __pyx_t_3 = __pyx_t_4; } if (__pyx_t_3) { /* "nipy/algorithms/segmentation/_segmentation.pyx":46 * raise ValueError('ref array should be double C-contiguous') * if not XYZ.flags['C_CONTIGUOUS'] or not XYZ.dtype=='intp': * raise ValueError('XYZ array should be intp C-contiguous') # <<<<<<<<<<<<<< * if not XYZ.shape[1] == 3: * raise ValueError('XYZ array should be 3D') */ __pyx_t_1 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_6), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 46; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; {__pyx_filename = __pyx_f[0]; __pyx_lineno = 46; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L5; } __pyx_L5:; /* 
"nipy/algorithms/segmentation/_segmentation.pyx":47 * if not XYZ.flags['C_CONTIGUOUS'] or not XYZ.dtype=='intp': * raise ValueError('XYZ array should be intp C-contiguous') * if not XYZ.shape[1] == 3: # <<<<<<<<<<<<<< * raise ValueError('XYZ array should be 3D') * if not U.flags['C_CONTIGUOUS'] or not U.dtype=='double': */ __pyx_t_1 = PyObject_GetAttr(__pyx_v_XYZ, __pyx_n_s__shape); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 47; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_GetItemInt(__pyx_t_1, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 47; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyObject_RichCompare(__pyx_t_2, __pyx_int_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 47; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_3 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 47; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_4 = (!__pyx_t_3); if (__pyx_t_4) { /* "nipy/algorithms/segmentation/_segmentation.pyx":48 * raise ValueError('XYZ array should be intp C-contiguous') * if not XYZ.shape[1] == 3: * raise ValueError('XYZ array should be 3D') # <<<<<<<<<<<<<< * if not U.flags['C_CONTIGUOUS'] or not U.dtype=='double': * raise ValueError('U array should be double C-contiguous') */ __pyx_t_1 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_8), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 48; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; {__pyx_filename = __pyx_f[0]; __pyx_lineno = 48; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L6; } __pyx_L6:; /* "nipy/algorithms/segmentation/_segmentation.pyx":49 * if not XYZ.shape[1] == 3: * raise ValueError('XYZ array should be 3D') * if not U.flags['C_CONTIGUOUS'] or not U.dtype=='double': # <<<<<<<<<<<<<< * raise ValueError('U array should be double C-contiguous') * if not ppm.shape[-1] == ref.shape[-1]: */ __pyx_t_1 = PyObject_GetAttr(__pyx_v_U, __pyx_n_s__flags); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_GetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__C_CONTIGUOUS)); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_3 = (!__pyx_t_4); if (!__pyx_t_3) { __pyx_t_2 = PyObject_GetAttr(__pyx_v_U, __pyx_n_s__dtype); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = PyObject_RichCompare(__pyx_t_2, ((PyObject *)__pyx_n_s__double), Py_EQ); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); 
__pyx_t_2 = 0; __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_5 = (!__pyx_t_4); __pyx_t_4 = __pyx_t_5; } else { __pyx_t_4 = __pyx_t_3; } if (__pyx_t_4) { /* "nipy/algorithms/segmentation/_segmentation.pyx":50 * raise ValueError('XYZ array should be 3D') * if not U.flags['C_CONTIGUOUS'] or not U.dtype=='double': * raise ValueError('U array should be double C-contiguous') # <<<<<<<<<<<<<< * if not ppm.shape[-1] == ref.shape[-1]: * raise ValueError('Inconsistent shapes for ppm and ref arrays') */ __pyx_t_1 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_10), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L7; } __pyx_L7:; /* "nipy/algorithms/segmentation/_segmentation.pyx":51 * if not U.flags['C_CONTIGUOUS'] or not U.dtype=='double': * raise ValueError('U array should be double C-contiguous') * if not ppm.shape[-1] == ref.shape[-1]: # <<<<<<<<<<<<<< * raise ValueError('Inconsistent shapes for ppm and ref arrays') * */ __pyx_t_1 = PyObject_GetAttr(__pyx_v_ppm, __pyx_n_s__shape); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 51; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_GetItemInt(__pyx_t_1, -1, sizeof(long), PyInt_FromLong); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 51; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyObject_GetAttr(__pyx_v_ref, __pyx_n_s__shape); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 51; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_6 = __Pyx_GetItemInt(__pyx_t_1, -1, sizeof(long), PyInt_FromLong); if (!__pyx_t_6) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 51; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyObject_RichCompare(__pyx_t_2, __pyx_t_6, Py_EQ); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 51; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 51; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_3 = (!__pyx_t_4); if (__pyx_t_3) { /* "nipy/algorithms/segmentation/_segmentation.pyx":52 * raise ValueError('U array should be double C-contiguous') * if not ppm.shape[-1] == ref.shape[-1]: * raise ValueError('Inconsistent shapes for ppm and ref arrays') # <<<<<<<<<<<<<< * * ve_step(ppm, ref, XYZ, U, */ __pyx_t_1 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_12), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 52; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; {__pyx_filename = __pyx_f[0]; __pyx_lineno = 52; __pyx_clineno = __LINE__; goto 
__pyx_L1_error;} goto __pyx_L8; } __pyx_L8:; /* "nipy/algorithms/segmentation/_segmentation.pyx":55 * * ve_step(ppm, ref, XYZ, U, * ngb_size, beta) # <<<<<<<<<<<<<< * return ppm * */ ve_step(((PyArrayObject *)__pyx_v_ppm), ((PyArrayObject *)__pyx_v_ref), ((PyArrayObject *)__pyx_v_XYZ), ((PyArrayObject *)__pyx_v_U), __pyx_v_ngb_size, __pyx_v_beta); /* "nipy/algorithms/segmentation/_segmentation.pyx":56 * ve_step(ppm, ref, XYZ, U, * ngb_size, beta) * return ppm # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_ppm); __pyx_r = __pyx_v_ppm; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("nipy.algorithms.segmentation._segmentation._ve_step", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_10algorithms_12segmentation_13_segmentation_3_make_edges(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_4nipy_10algorithms_12segmentation_13_segmentation_3_make_edges = {__Pyx_NAMESTR("_make_edges"), (PyCFunction)__pyx_pw_4nipy_10algorithms_12segmentation_13_segmentation_3_make_edges, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(0)}; static PyObject *__pyx_pw_4nipy_10algorithms_12segmentation_13_segmentation_3_make_edges(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_mask = 0; int __pyx_v_ngb_size; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("_make_edges (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__mask,&__pyx_n_s__ngb_size,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__mask)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__ngb_size)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_make_edges", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 59; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "_make_edges") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 59; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_mask = values[0]; __pyx_v_ngb_size = __Pyx_PyInt_AsInt(values[1]); if (unlikely((__pyx_v_ngb_size == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 59; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("_make_edges", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 59; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.algorithms.segmentation._segmentation._make_edges", 
__pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4nipy_10algorithms_12segmentation_13_segmentation_2_make_edges(__pyx_self, __pyx_v_mask, __pyx_v_ngb_size); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/algorithms/segmentation/_segmentation.pyx":59 * * * def _make_edges(mask, int ngb_size): # <<<<<<<<<<<<<< * * if not mask.flags['C_CONTIGUOUS'] or not mask.dtype=='intp': */ static PyObject *__pyx_pf_4nipy_10algorithms_12segmentation_13_segmentation_2_make_edges(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_mask, int __pyx_v_ngb_size) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_t_3; int __pyx_t_4; int __pyx_t_5; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_make_edges", 0); /* "nipy/algorithms/segmentation/_segmentation.pyx":61 * def _make_edges(mask, int ngb_size): * * if not mask.flags['C_CONTIGUOUS'] or not mask.dtype=='intp': # <<<<<<<<<<<<<< * raise ValueError('mask array should be intp and C-contiguous') * */ __pyx_t_1 = PyObject_GetAttr(__pyx_v_mask, __pyx_n_s__flags); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 61; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_GetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__C_CONTIGUOUS)); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 61; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_3 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 61; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_4 = (!__pyx_t_3); if (!__pyx_t_4) { __pyx_t_2 = PyObject_GetAttr(__pyx_v_mask, __pyx_n_s__dtype); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 61; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = PyObject_RichCompare(__pyx_t_2, ((PyObject *)__pyx_n_s__intp), Py_EQ); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 61; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_3 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 61; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_5 = (!__pyx_t_3); __pyx_t_3 = __pyx_t_5; } else { __pyx_t_3 = __pyx_t_4; } if (__pyx_t_3) { /* "nipy/algorithms/segmentation/_segmentation.pyx":62 * * if not mask.flags['C_CONTIGUOUS'] or not mask.dtype=='intp': * raise ValueError('mask array should be intp and C-contiguous') # <<<<<<<<<<<<<< * * return make_edges(mask, ngb_size) */ __pyx_t_1 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_14), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 62; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; {__pyx_filename = __pyx_f[0]; __pyx_lineno = 62; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L3; } __pyx_L3:; /* "nipy/algorithms/segmentation/_segmentation.pyx":64 * raise ValueError('mask array should be intp and C-contiguous') * * return make_edges(mask, ngb_size) # 
<<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); if (!(likely(((__pyx_v_mask) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_mask, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 64; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = __pyx_v_mask; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = ((PyObject *)make_edges(((PyArrayObject *)__pyx_t_1), __pyx_v_ngb_size)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 64; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("nipy.algorithms.segmentation._segmentation._make_edges", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_10algorithms_12segmentation_13_segmentation_5_interaction_energy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_4nipy_10algorithms_12segmentation_13_segmentation_5_interaction_energy = {__Pyx_NAMESTR("_interaction_energy"), (PyCFunction)__pyx_pw_4nipy_10algorithms_12segmentation_13_segmentation_5_interaction_energy, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(0)}; static PyObject *__pyx_pw_4nipy_10algorithms_12segmentation_13_segmentation_5_interaction_energy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_ppm = 0; PyObject *__pyx_v_XYZ = 0; PyObject *__pyx_v_U = 0; int __pyx_v_ngb_size; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("_interaction_energy (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__ppm,&__pyx_n_s__XYZ,&__pyx_n_s__U,&__pyx_n_s__ngb_size,0}; PyObject* values[4] = {0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__ppm)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__XYZ)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_interaction_energy", 1, 4, 4, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 67; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__U)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_interaction_energy", 1, 4, 4, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 67; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__ngb_size)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_interaction_energy", 1, 4, 4, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 67; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "_interaction_energy") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 67; 
__pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 4) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); } __pyx_v_ppm = values[0]; __pyx_v_XYZ = values[1]; __pyx_v_U = values[2]; __pyx_v_ngb_size = __Pyx_PyInt_AsInt(values[3]); if (unlikely((__pyx_v_ngb_size == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 67; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("_interaction_energy", 1, 4, 4, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 67; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.algorithms.segmentation._segmentation._interaction_energy", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4nipy_10algorithms_12segmentation_13_segmentation_4_interaction_energy(__pyx_self, __pyx_v_ppm, __pyx_v_XYZ, __pyx_v_U, __pyx_v_ngb_size); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/algorithms/segmentation/_segmentation.pyx":67 * * * def _interaction_energy(ppm, XYZ, U, int ngb_size): # <<<<<<<<<<<<<< * * if not ppm.flags['C_CONTIGUOUS'] or not ppm.dtype=='double': */ static PyObject *__pyx_pf_4nipy_10algorithms_12segmentation_13_segmentation_4_interaction_energy(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_ppm, PyObject *__pyx_v_XYZ, PyObject *__pyx_v_U, int __pyx_v_ngb_size) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_t_3; int __pyx_t_4; int __pyx_t_5; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_interaction_energy", 0); /* "nipy/algorithms/segmentation/_segmentation.pyx":69 * def _interaction_energy(ppm, XYZ, U, int ngb_size): * * if not ppm.flags['C_CONTIGUOUS'] or not ppm.dtype=='double': # <<<<<<<<<<<<<< * raise ValueError('ppm array should be double C-contiguous') * if not XYZ.flags['C_CONTIGUOUS'] or not XYZ.dtype=='intp': */ __pyx_t_1 = PyObject_GetAttr(__pyx_v_ppm, __pyx_n_s__flags); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_GetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__C_CONTIGUOUS)); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_3 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_4 = (!__pyx_t_3); if (!__pyx_t_4) { __pyx_t_2 = PyObject_GetAttr(__pyx_v_ppm, __pyx_n_s__dtype); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = PyObject_RichCompare(__pyx_t_2, ((PyObject *)__pyx_n_s__double), Py_EQ); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_3 = 
__Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_3 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_5 = (!__pyx_t_3); __pyx_t_3 = __pyx_t_5; } else { __pyx_t_3 = __pyx_t_4; } if (__pyx_t_3) { /* "nipy/algorithms/segmentation/_segmentation.pyx":70 * * if not ppm.flags['C_CONTIGUOUS'] or not ppm.dtype=='double': * raise ValueError('ppm array should be double C-contiguous') # <<<<<<<<<<<<<< * if not XYZ.flags['C_CONTIGUOUS'] or not XYZ.dtype=='intp': * raise ValueError('XYZ array should be intp C-contiguous') */ __pyx_t_1 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_15), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 70; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; {__pyx_filename = __pyx_f[0]; __pyx_lineno = 70; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L3; } __pyx_L3:; /* "nipy/algorithms/segmentation/_segmentation.pyx":71 * if not ppm.flags['C_CONTIGUOUS'] or not ppm.dtype=='double': * raise ValueError('ppm array should be double C-contiguous') * if not XYZ.flags['C_CONTIGUOUS'] or not XYZ.dtype=='intp': # <<<<<<<<<<<<<< * raise ValueError('XYZ array should be intp C-contiguous') * if not XYZ.shape[1] == 3: */ __pyx_t_1 = PyObject_GetAttr(__pyx_v_XYZ, __pyx_n_s__flags); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 71; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_GetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__C_CONTIGUOUS)); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 71; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_3 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 71; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_4 = (!__pyx_t_3); if (!__pyx_t_4) { __pyx_t_2 = PyObject_GetAttr(__pyx_v_XYZ, __pyx_n_s__dtype); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 71; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = PyObject_RichCompare(__pyx_t_2, ((PyObject *)__pyx_n_s__intp), Py_EQ); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 71; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_3 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 71; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_5 = (!__pyx_t_3); __pyx_t_3 = __pyx_t_5; } else { __pyx_t_3 = __pyx_t_4; } if (__pyx_t_3) { /* "nipy/algorithms/segmentation/_segmentation.pyx":72 * raise ValueError('ppm array should be double C-contiguous') * if not XYZ.flags['C_CONTIGUOUS'] or not XYZ.dtype=='intp': * raise ValueError('XYZ array should be intp C-contiguous') # <<<<<<<<<<<<<< * if not XYZ.shape[1] == 3: * raise ValueError('XYZ array should be 3D') */ __pyx_t_1 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_16), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 72; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); 
__pyx_t_1 = 0; {__pyx_filename = __pyx_f[0]; __pyx_lineno = 72; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L4; } __pyx_L4:; /* "nipy/algorithms/segmentation/_segmentation.pyx":73 * if not XYZ.flags['C_CONTIGUOUS'] or not XYZ.dtype=='intp': * raise ValueError('XYZ array should be intp C-contiguous') * if not XYZ.shape[1] == 3: # <<<<<<<<<<<<<< * raise ValueError('XYZ array should be 3D') * if not U.flags['C_CONTIGUOUS'] or not U.dtype=='double': */ __pyx_t_1 = PyObject_GetAttr(__pyx_v_XYZ, __pyx_n_s__shape); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_GetItemInt(__pyx_t_1, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyObject_RichCompare(__pyx_t_2, __pyx_int_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_3 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_4 = (!__pyx_t_3); if (__pyx_t_4) { /* "nipy/algorithms/segmentation/_segmentation.pyx":74 * raise ValueError('XYZ array should be intp C-contiguous') * if not XYZ.shape[1] == 3: * raise ValueError('XYZ array should be 3D') # <<<<<<<<<<<<<< * if not U.flags['C_CONTIGUOUS'] or not U.dtype=='double': * raise ValueError('U array should be double C-contiguous') */ __pyx_t_1 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_17), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L5; } __pyx_L5:; /* "nipy/algorithms/segmentation/_segmentation.pyx":75 * if not XYZ.shape[1] == 3: * raise ValueError('XYZ array should be 3D') * if not U.flags['C_CONTIGUOUS'] or not U.dtype=='double': # <<<<<<<<<<<<<< * raise ValueError('U array should be double C-contiguous') * */ __pyx_t_1 = PyObject_GetAttr(__pyx_v_U, __pyx_n_s__flags); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 75; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_GetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__C_CONTIGUOUS)); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 75; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 75; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_3 = (!__pyx_t_4); if (!__pyx_t_3) { __pyx_t_2 = PyObject_GetAttr(__pyx_v_U, __pyx_n_s__dtype); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 75; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = PyObject_RichCompare(__pyx_t_2, ((PyObject *)__pyx_n_s__double), Py_EQ); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 75; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 75; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_5 = (!__pyx_t_4); __pyx_t_4 = __pyx_t_5; } else { __pyx_t_4 = __pyx_t_3; } if (__pyx_t_4) { /* "nipy/algorithms/segmentation/_segmentation.pyx":76 * raise ValueError('XYZ array should be 3D') * if not U.flags['C_CONTIGUOUS'] or not U.dtype=='double': * raise ValueError('U array should be double C-contiguous') # <<<<<<<<<<<<<< * * return interaction_energy(ppm, XYZ, U, */ __pyx_t_1 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_18), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 76; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; {__pyx_filename = __pyx_f[0]; __pyx_lineno = 76; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L6; } __pyx_L6:; /* "nipy/algorithms/segmentation/_segmentation.pyx":78 * raise ValueError('U array should be double C-contiguous') * * return interaction_energy(ppm, XYZ, U, # <<<<<<<<<<<<<< * ngb_size) */ __Pyx_XDECREF(__pyx_r); /* "nipy/algorithms/segmentation/_segmentation.pyx":79 * * return interaction_energy(ppm, XYZ, U, * ngb_size) # <<<<<<<<<<<<<< */ __pyx_t_1 = PyFloat_FromDouble(interaction_energy(((PyArrayObject *)__pyx_v_ppm), ((PyArrayObject *)__pyx_v_XYZ), ((PyArrayObject *)__pyx_v_U), __pyx_v_ngb_size)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("nipy.algorithms.segmentation._segmentation._interaction_energy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_pf_5numpy_7ndarray___getbuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":194 * # experimental exception made for __getbuffer__ and __releasebuffer__ * # -- the details of this may change. * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< * # This implementation of getbuffer is geared towards Cython * # requirements, and does not yet fullfill the PEP. 
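 * NOTE (summary of the generated implementation that follows; descriptive only, not
 * part of the quoted numpy.pxd source): __getbuffer__ fills the Py_buffer with the
 * ndarray's data pointer, ndim, shape, strides, itemsize and a struct-style format
 * string, and raises ValueError when a C- or Fortran-contiguous buffer is requested
 * for a non-contiguous array or when the dtype uses a non-native byte order.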
*/ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_v_copy_shape; int __pyx_v_i; int __pyx_v_ndim; int __pyx_v_endian_detector; int __pyx_v_little_endian; int __pyx_v_t; char *__pyx_v_f; PyArray_Descr *__pyx_v_descr = 0; int __pyx_v_offset; int __pyx_v_hasfields; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; char *__pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getbuffer__", 0); if (__pyx_v_info != NULL) { __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); } /* "numpy.pxd":200 * # of flags * * if info == NULL: return # <<<<<<<<<<<<<< * * cdef int copy_shape, i, ndim */ __pyx_t_1 = (__pyx_v_info == NULL); if (__pyx_t_1) { __pyx_r = 0; goto __pyx_L0; goto __pyx_L3; } __pyx_L3:; /* "numpy.pxd":203 * * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((&endian_detector)[0] != 0) * */ __pyx_v_endian_detector = 1; /* "numpy.pxd":204 * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * * ndim = PyArray_NDIM(self) */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "numpy.pxd":206 * cdef bint little_endian = ((&endian_detector)[0] != 0) * * ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<< * * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_v_ndim = PyArray_NDIM(__pyx_v_self); /* "numpy.pxd":208 * ndim = PyArray_NDIM(self) * * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * copy_shape = 1 * else: */ __pyx_t_1 = ((sizeof(npy_intp)) != (sizeof(Py_ssize_t))); if (__pyx_t_1) { /* "numpy.pxd":209 * * if sizeof(npy_intp) != sizeof(Py_ssize_t): * copy_shape = 1 # <<<<<<<<<<<<<< * else: * copy_shape = 0 */ __pyx_v_copy_shape = 1; goto __pyx_L4; } /*else*/ { /* "numpy.pxd":211 * copy_shape = 1 * else: * copy_shape = 0 # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) */ __pyx_v_copy_shape = 0; } __pyx_L4:; /* "numpy.pxd":213 * copy_shape = 0 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS); if (__pyx_t_1) { /* "numpy.pxd":214 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not C contiguous") * */ __pyx_t_2 = (!PyArray_CHKFLAGS(__pyx_v_self, NPY_C_CONTIGUOUS)); __pyx_t_3 = __pyx_t_2; } else { __pyx_t_3 = __pyx_t_1; } if (__pyx_t_3) { /* "numpy.pxd":215 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_20), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; 
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L5; } __pyx_L5:; /* "numpy.pxd":217 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ __pyx_t_3 = ((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS); if (__pyx_t_3) { /* "numpy.pxd":218 * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not Fortran contiguous") * */ __pyx_t_1 = (!PyArray_CHKFLAGS(__pyx_v_self, NPY_F_CONTIGUOUS)); __pyx_t_2 = __pyx_t_1; } else { __pyx_t_2 = __pyx_t_3; } if (__pyx_t_2) { /* "numpy.pxd":219 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_22), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L6; } __pyx_L6:; /* "numpy.pxd":221 * raise ValueError(u"ndarray is not Fortran contiguous") * * info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<< * info.ndim = ndim * if copy_shape: */ __pyx_v_info->buf = PyArray_DATA(__pyx_v_self); /* "numpy.pxd":222 * * info.buf = PyArray_DATA(self) * info.ndim = ndim # <<<<<<<<<<<<<< * if copy_shape: * # Allocate new buffer for strides and shape info. */ __pyx_v_info->ndim = __pyx_v_ndim; /* "numpy.pxd":223 * info.buf = PyArray_DATA(self) * info.ndim = ndim * if copy_shape: # <<<<<<<<<<<<<< * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. */ if (__pyx_v_copy_shape) { /* "numpy.pxd":226 * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. * info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) # <<<<<<<<<<<<<< * info.shape = info.strides + ndim * for i in range(ndim): */ __pyx_v_info->strides = ((Py_ssize_t *)malloc((((sizeof(Py_ssize_t)) * ((size_t)__pyx_v_ndim)) * 2))); /* "numpy.pxd":227 * # This is allocated as one block, strides first. 
* info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) * info.shape = info.strides + ndim # <<<<<<<<<<<<<< * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] */ __pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim); /* "numpy.pxd":228 * info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) * info.shape = info.strides + ndim * for i in range(ndim): # <<<<<<<<<<<<<< * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] */ __pyx_t_5 = __pyx_v_ndim; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "numpy.pxd":229 * info.shape = info.strides + ndim * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<< * info.shape[i] = PyArray_DIMS(self)[i] * else: */ (__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]); /* "numpy.pxd":230 * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<< * else: * info.strides = PyArray_STRIDES(self) */ (__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]); } goto __pyx_L7; } /*else*/ { /* "numpy.pxd":232 * info.shape[i] = PyArray_DIMS(self)[i] * else: * info.strides = PyArray_STRIDES(self) # <<<<<<<<<<<<<< * info.shape = PyArray_DIMS(self) * info.suboffsets = NULL */ __pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self)); /* "numpy.pxd":233 * else: * info.strides = PyArray_STRIDES(self) * info.shape = PyArray_DIMS(self) # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) */ __pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(__pyx_v_self)); } __pyx_L7:; /* "numpy.pxd":234 * info.strides = PyArray_STRIDES(self) * info.shape = PyArray_DIMS(self) * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) */ __pyx_v_info->suboffsets = NULL; /* "numpy.pxd":235 * info.shape = PyArray_DIMS(self) * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<< * info.readonly = not PyArray_ISWRITEABLE(self) * */ __pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self); /* "numpy.pxd":236 * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<< * * cdef int t */ __pyx_v_info->readonly = (!PyArray_ISWRITEABLE(__pyx_v_self)); /* "numpy.pxd":239 * * cdef int t * cdef char* f = NULL # <<<<<<<<<<<<<< * cdef dtype descr = self.descr * cdef list stack */ __pyx_v_f = NULL; /* "numpy.pxd":240 * cdef int t * cdef char* f = NULL * cdef dtype descr = self.descr # <<<<<<<<<<<<<< * cdef list stack * cdef int offset */ __pyx_t_4 = ((PyObject *)__pyx_v_self->descr); __Pyx_INCREF(__pyx_t_4); __pyx_v_descr = ((PyArray_Descr *)__pyx_t_4); __pyx_t_4 = 0; /* "numpy.pxd":244 * cdef int offset * * cdef bint hasfields = PyDataType_HASFIELDS(descr) # <<<<<<<<<<<<<< * * if not hasfields and not copy_shape: */ __pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr); /* "numpy.pxd":246 * cdef bint hasfields = PyDataType_HASFIELDS(descr) * * if not hasfields and not copy_shape: # <<<<<<<<<<<<<< * # do not call releasebuffer * info.obj = None */ __pyx_t_2 = (!__pyx_v_hasfields); if (__pyx_t_2) { __pyx_t_3 = (!__pyx_v_copy_shape); __pyx_t_1 = __pyx_t_3; } else { __pyx_t_1 = __pyx_t_2; } if (__pyx_t_1) { /* "numpy.pxd":248 * if not hasfields and not copy_shape: * # do not call releasebuffer * info.obj = None # <<<<<<<<<<<<<< * else: * # need to call 
releasebuffer */ __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = Py_None; goto __pyx_L10; } /*else*/ { /* "numpy.pxd":251 * else: * # need to call releasebuffer * info.obj = self # <<<<<<<<<<<<<< * * if not hasfields: */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); } __pyx_L10:; /* "numpy.pxd":253 * info.obj = self * * if not hasfields: # <<<<<<<<<<<<<< * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or */ __pyx_t_1 = (!__pyx_v_hasfields); if (__pyx_t_1) { /* "numpy.pxd":254 * * if not hasfields: * t = descr.type_num # <<<<<<<<<<<<<< * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): */ __pyx_t_5 = __pyx_v_descr->type_num; __pyx_v_t = __pyx_t_5; /* "numpy.pxd":255 * if not hasfields: * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_1 = (__pyx_v_descr->byteorder == '>'); if (__pyx_t_1) { __pyx_t_2 = __pyx_v_little_endian; } else { __pyx_t_2 = __pyx_t_1; } if (!__pyx_t_2) { /* "numpy.pxd":256 * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" */ __pyx_t_1 = (__pyx_v_descr->byteorder == '<'); if (__pyx_t_1) { __pyx_t_3 = (!__pyx_v_little_endian); __pyx_t_7 = __pyx_t_3; } else { __pyx_t_7 = __pyx_t_1; } __pyx_t_1 = __pyx_t_7; } else { __pyx_t_1 = __pyx_t_2; } if (__pyx_t_1) { /* "numpy.pxd":257 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_24), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L12; } __pyx_L12:; /* "numpy.pxd":258 * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" */ __pyx_t_1 = (__pyx_v_t == NPY_BYTE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__b; goto __pyx_L13; } /* "numpy.pxd":259 * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" */ __pyx_t_1 = (__pyx_v_t == NPY_UBYTE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__B; goto __pyx_L13; } /* "numpy.pxd":260 * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" */ __pyx_t_1 = (__pyx_v_t == NPY_SHORT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__h; goto __pyx_L13; } /* "numpy.pxd":261 * elif t == NPY_UBYTE: f = "B" * elif t == 
NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" */ __pyx_t_1 = (__pyx_v_t == NPY_USHORT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__H; goto __pyx_L13; } /* "numpy.pxd":262 * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" */ __pyx_t_1 = (__pyx_v_t == NPY_INT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__i; goto __pyx_L13; } /* "numpy.pxd":263 * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" */ __pyx_t_1 = (__pyx_v_t == NPY_UINT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__I; goto __pyx_L13; } /* "numpy.pxd":264 * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" */ __pyx_t_1 = (__pyx_v_t == NPY_LONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__l; goto __pyx_L13; } /* "numpy.pxd":265 * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" */ __pyx_t_1 = (__pyx_v_t == NPY_ULONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__L; goto __pyx_L13; } /* "numpy.pxd":266 * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" */ __pyx_t_1 = (__pyx_v_t == NPY_LONGLONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__q; goto __pyx_L13; } /* "numpy.pxd":267 * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" */ __pyx_t_1 = (__pyx_v_t == NPY_ULONGLONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Q; goto __pyx_L13; } /* "numpy.pxd":268 * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" */ __pyx_t_1 = (__pyx_v_t == NPY_FLOAT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__f; goto __pyx_L13; } /* "numpy.pxd":269 * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" */ __pyx_t_1 = (__pyx_v_t == NPY_DOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__d; goto __pyx_L13; } /* "numpy.pxd":270 * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" */ __pyx_t_1 = (__pyx_v_t == NPY_LONGDOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__g; goto __pyx_L13; } /* "numpy.pxd":271 * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" */ __pyx_t_1 = (__pyx_v_t == NPY_CFLOAT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Zf; goto __pyx_L13; } /* "numpy.pxd":272 * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" */ __pyx_t_1 = (__pyx_v_t == NPY_CDOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Zd; goto __pyx_L13; } /* "numpy.pxd":273 * elif t 
== NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f = "O" * else: */ __pyx_t_1 = (__pyx_v_t == NPY_CLONGDOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Zg; goto __pyx_L13; } /* "numpy.pxd":274 * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ __pyx_t_1 = (__pyx_v_t == NPY_OBJECT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__O; goto __pyx_L13; } /*else*/ { /* "numpy.pxd":276 * elif t == NPY_OBJECT: f = "O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * info.format = f * return */ __pyx_t_4 = PyInt_FromLong(__pyx_v_t); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_8 = PyNumber_Remainder(((PyObject *)__pyx_kp_u_25), __pyx_t_4); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_8)); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_t_8)); __Pyx_GIVEREF(((PyObject *)__pyx_t_8)); __pyx_t_8 = 0; __pyx_t_8 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_8, 0, 0, 0); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L13:; /* "numpy.pxd":277 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f # <<<<<<<<<<<<<< * return * else: */ __pyx_v_info->format = __pyx_v_f; /* "numpy.pxd":278 * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f * return # <<<<<<<<<<<<<< * else: * info.format = stdlib.malloc(_buffer_format_string_len) */ __pyx_r = 0; goto __pyx_L0; goto __pyx_L11; } /*else*/ { /* "numpy.pxd":280 * return * else: * info.format = stdlib.malloc(_buffer_format_string_len) # <<<<<<<<<<<<<< * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 */ __pyx_v_info->format = ((char *)malloc(255)); /* "numpy.pxd":281 * else: * info.format = stdlib.malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment # <<<<<<<<<<<<<< * offset = 0 * f = _util_dtypestring(descr, info.format + 1, */ (__pyx_v_info->format[0]) = '^'; /* "numpy.pxd":282 * info.format = stdlib.malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 # <<<<<<<<<<<<<< * f = _util_dtypestring(descr, info.format + 1, * info.format + _buffer_format_string_len, */ __pyx_v_offset = 0; /* "numpy.pxd":285 * f = _util_dtypestring(descr, info.format + 1, * info.format + _buffer_format_string_len, * &offset) # <<<<<<<<<<<<<< * f[0] = c'\0' # Terminate format string * */ __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 255), (&__pyx_v_offset)); if (unlikely(__pyx_t_9 == NULL)) 
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 283; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_f = __pyx_t_9; /* "numpy.pxd":286 * info.format + _buffer_format_string_len, * &offset) * f[0] = c'\0' # Terminate format string # <<<<<<<<<<<<<< * * def __releasebuffer__(ndarray self, Py_buffer* info): */ (__pyx_v_f[0]) = '\x00'; } __pyx_L11:; __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { __Pyx_GOTREF(Py_None); __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; } __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_descr); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__releasebuffer__ (wrapper)", 0); __pyx_pf_5numpy_7ndarray_2__releasebuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info)); __Pyx_RefNannyFinishContext(); } /* "numpy.pxd":288 * f[0] = c'\0' # Terminate format string * * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< * if PyArray_HASFIELDS(self): * stdlib.free(info.format) */ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__releasebuffer__", 0); /* "numpy.pxd":289 * * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_t_1 = PyArray_HASFIELDS(__pyx_v_self); if (__pyx_t_1) { /* "numpy.pxd":290 * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): * stdlib.free(info.format) # <<<<<<<<<<<<<< * if sizeof(npy_intp) != sizeof(Py_ssize_t): * stdlib.free(info.strides) */ free(__pyx_v_info->format); goto __pyx_L3; } __pyx_L3:; /* "numpy.pxd":291 * if PyArray_HASFIELDS(self): * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * stdlib.free(info.strides) * # info.shape was stored after info.strides in the same block */ __pyx_t_1 = ((sizeof(npy_intp)) != (sizeof(Py_ssize_t))); if (__pyx_t_1) { /* "numpy.pxd":292 * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): * stdlib.free(info.strides) # <<<<<<<<<<<<<< * # info.shape was stored after info.strides in the same block * */ free(__pyx_v_info->strides); goto __pyx_L4; } __pyx_L4:; __Pyx_RefNannyFinishContext(); } /* "numpy.pxd":768 * ctypedef npy_cdouble complex_t * * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(1, a) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0); /* "numpy.pxd":769 * * cdef inline object PyArray_MultiIterNew1(a): * 
return PyArray_MultiIterNew(1, a) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew2(a, b): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 769; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":771 * return PyArray_MultiIterNew(1, a) * * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(2, a, b) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0); /* "numpy.pxd":772 * * cdef inline object PyArray_MultiIterNew2(a, b): * return PyArray_MultiIterNew(2, a, b) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew3(a, b, c): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 772; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":774 * return PyArray_MultiIterNew(2, a, b) * * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(3, a, b, c) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0); /* "numpy.pxd":775 * * cdef inline object PyArray_MultiIterNew3(a, b, c): * return PyArray_MultiIterNew(3, a, b, c) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 775; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":777 * return PyArray_MultiIterNew(3, a, b, c) * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(4, a, b, c, d) * */ static CYTHON_INLINE PyObject 
*__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0); /* "numpy.pxd":778 * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): * return PyArray_MultiIterNew(4, a, b, c, d) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 778; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":780 * return PyArray_MultiIterNew(4, a, b, c, d) * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(5, a, b, c, d, e) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0); /* "numpy.pxd":781 * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): * return PyArray_MultiIterNew(5, a, b, c, d, e) # <<<<<<<<<<<<<< * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 781; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":783 * return PyArray_MultiIterNew(5, a, b, c, d, e) * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< * # Recursive utility function used in __getbuffer__ to get format * # string. The new location in the format string is returned. 
*/ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) { PyArray_Descr *__pyx_v_child = 0; int __pyx_v_endian_detector; int __pyx_v_little_endian; PyObject *__pyx_v_fields = 0; PyObject *__pyx_v_childname = NULL; PyObject *__pyx_v_new_offset = NULL; PyObject *__pyx_v_t = NULL; char *__pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *(*__pyx_t_6)(PyObject *); int __pyx_t_7; int __pyx_t_8; int __pyx_t_9; int __pyx_t_10; long __pyx_t_11; char *__pyx_t_12; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_util_dtypestring", 0); /* "numpy.pxd":790 * cdef int delta_offset * cdef tuple i * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((&endian_detector)[0] != 0) * cdef tuple fields */ __pyx_v_endian_detector = 1; /* "numpy.pxd":791 * cdef tuple i * cdef int endian_detector = 1 * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * cdef tuple fields * */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "numpy.pxd":794 * cdef tuple fields * * for childname in descr.names: # <<<<<<<<<<<<<< * fields = descr.fields[childname] * child, new_offset = fields */ if (unlikely(((PyObject *)__pyx_v_descr->names) == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_1 = ((PyObject *)__pyx_v_descr->names); __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; for (;;) { if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif __Pyx_XDECREF(__pyx_v_childname); __pyx_v_childname = __pyx_t_3; __pyx_t_3 = 0; /* "numpy.pxd":795 * * for childname in descr.names: * fields = descr.fields[childname] # <<<<<<<<<<<<<< * child, new_offset = fields * */ __pyx_t_3 = PyObject_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (!__pyx_t_3) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected tuple, got %.200s", Py_TYPE(__pyx_t_3)->tp_name), 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_XDECREF(((PyObject *)__pyx_v_fields)); __pyx_v_fields = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "numpy.pxd":796 * for childname in descr.names: * fields = descr.fields[childname] * child, new_offset = fields # <<<<<<<<<<<<<< * * if (end - f) - (new_offset - offset[0]) < 15: */ if (likely(PyTuple_CheckExact(((PyObject *)__pyx_v_fields)))) { PyObject* sequence = ((PyObject *)__pyx_v_fields); #if CYTHON_COMPILING_IN_CPYTHON Py_ssize_t size = Py_SIZE(sequence); #else Py_ssize_t size = PySequence_Size(sequence); #endif if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else 
if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); #else __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else if (1) { __Pyx_RaiseNoneNotIterableError(); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } else { Py_ssize_t index = -1; __pyx_t_5 = PyObject_GetIter(((PyObject *)__pyx_v_fields)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = Py_TYPE(__pyx_t_5)->tp_iternext; index = 0; __pyx_t_3 = __pyx_t_6(__pyx_t_5); if (unlikely(!__pyx_t_3)) goto __pyx_L5_unpacking_failed; __Pyx_GOTREF(__pyx_t_3); index = 1; __pyx_t_4 = __pyx_t_6(__pyx_t_5); if (unlikely(!__pyx_t_4)) goto __pyx_L5_unpacking_failed; __Pyx_GOTREF(__pyx_t_4); if (__Pyx_IternextUnpackEndCheck(__pyx_t_6(__pyx_t_5), 2) < 0) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_6 = NULL; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L6_unpacking_done; __pyx_L5_unpacking_failed:; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_6 = NULL; if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_L6_unpacking_done:; } if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_XDECREF(((PyObject *)__pyx_v_child)); __pyx_v_child = ((PyArray_Descr *)__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_v_new_offset); __pyx_v_new_offset = __pyx_t_4; __pyx_t_4 = 0; /* "numpy.pxd":798 * child, new_offset = fields * * if (end - f) - (new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * */ __pyx_t_4 = PyInt_FromLong((__pyx_v_end - __pyx_v_f)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyInt_FromLong((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_3); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyNumber_Subtract(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyObject_RichCompare(__pyx_t_3, __pyx_int_15, Py_LT); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = 
__pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { /* "numpy.pxd":799 * * if (end - f) - (new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_t_5 = PyObject_Call(__pyx_builtin_RuntimeError, ((PyObject *)__pyx_k_tuple_27), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L7; } __pyx_L7:; /* "numpy.pxd":801 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_7 = (__pyx_v_child->byteorder == '>'); if (__pyx_t_7) { __pyx_t_8 = __pyx_v_little_endian; } else { __pyx_t_8 = __pyx_t_7; } if (!__pyx_t_8) { /* "numpy.pxd":802 * * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * # One could encode it in the format string and have Cython */ __pyx_t_7 = (__pyx_v_child->byteorder == '<'); if (__pyx_t_7) { __pyx_t_9 = (!__pyx_v_little_endian); __pyx_t_10 = __pyx_t_9; } else { __pyx_t_10 = __pyx_t_7; } __pyx_t_7 = __pyx_t_10; } else { __pyx_t_7 = __pyx_t_8; } if (__pyx_t_7) { /* "numpy.pxd":803 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_28), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L8; } __pyx_L8:; /* "numpy.pxd":813 * * # Output padding bytes * while offset[0] < new_offset: # <<<<<<<<<<<<<< * f[0] = 120 # "x"; pad byte * f += 1 */ while (1) { __pyx_t_5 = PyInt_FromLong((__pyx_v_offset[0])); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_t_5, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if 
(!__pyx_t_7) break; /* "numpy.pxd":814 * # Output padding bytes * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<< * f += 1 * offset[0] += 1 */ (__pyx_v_f[0]) = 120; /* "numpy.pxd":815 * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte * f += 1 # <<<<<<<<<<<<<< * offset[0] += 1 * */ __pyx_v_f = (__pyx_v_f + 1); /* "numpy.pxd":816 * f[0] = 120 # "x"; pad byte * f += 1 * offset[0] += 1 # <<<<<<<<<<<<<< * * offset[0] += child.itemsize */ __pyx_t_11 = 0; (__pyx_v_offset[__pyx_t_11]) = ((__pyx_v_offset[__pyx_t_11]) + 1); } /* "numpy.pxd":818 * offset[0] += 1 * * offset[0] += child.itemsize # <<<<<<<<<<<<<< * * if not PyDataType_HASFIELDS(child): */ __pyx_t_11 = 0; (__pyx_v_offset[__pyx_t_11]) = ((__pyx_v_offset[__pyx_t_11]) + __pyx_v_child->elsize); /* "numpy.pxd":820 * offset[0] += child.itemsize * * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< * t = child.type_num * if end - f < 5: */ __pyx_t_7 = (!PyDataType_HASFIELDS(__pyx_v_child)); if (__pyx_t_7) { /* "numpy.pxd":821 * * if not PyDataType_HASFIELDS(child): * t = child.type_num # <<<<<<<<<<<<<< * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") */ __pyx_t_3 = PyInt_FromLong(__pyx_v_child->type_num); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 821; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_XDECREF(__pyx_v_t); __pyx_v_t = __pyx_t_3; __pyx_t_3 = 0; /* "numpy.pxd":822 * if not PyDataType_HASFIELDS(child): * t = child.type_num * if end - f < 5: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short.") * */ __pyx_t_7 = ((__pyx_v_end - __pyx_v_f) < 5); if (__pyx_t_7) { /* "numpy.pxd":823 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_t_3 = PyObject_Call(__pyx_builtin_RuntimeError, ((PyObject *)__pyx_k_tuple_30), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L12; } __pyx_L12:; /* "numpy.pxd":826 * * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" */ __pyx_t_3 = PyInt_FromLong(NPY_BYTE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 98; goto __pyx_L13; } /* "numpy.pxd":827 * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" */ __pyx_t_5 = PyInt_FromLong(NPY_UBYTE); 
if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 66; goto __pyx_L13; } /* "numpy.pxd":828 * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" */ __pyx_t_3 = PyInt_FromLong(NPY_SHORT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 104; goto __pyx_L13; } /* "numpy.pxd":829 * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" */ __pyx_t_5 = PyInt_FromLong(NPY_USHORT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 72; goto __pyx_L13; } /* "numpy.pxd":830 * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" */ __pyx_t_3 = PyInt_FromLong(NPY_INT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 105; goto __pyx_L13; } /* "numpy.pxd":831 * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t 
== NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" */ __pyx_t_5 = PyInt_FromLong(NPY_UINT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 73; goto __pyx_L13; } /* "numpy.pxd":832 * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" */ __pyx_t_3 = PyInt_FromLong(NPY_LONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 108; goto __pyx_L13; } /* "numpy.pxd":833 * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" */ __pyx_t_5 = PyInt_FromLong(NPY_ULONG); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 76; goto __pyx_L13; } /* "numpy.pxd":834 * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" */ __pyx_t_3 = PyInt_FromLong(NPY_LONGLONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; 
__pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 113; goto __pyx_L13; } /* "numpy.pxd":835 * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" */ __pyx_t_5 = PyInt_FromLong(NPY_ULONGLONG); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 81; goto __pyx_L13; } /* "numpy.pxd":836 * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" */ __pyx_t_3 = PyInt_FromLong(NPY_FLOAT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 102; goto __pyx_L13; } /* "numpy.pxd":837 * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf */ __pyx_t_5 = PyInt_FromLong(NPY_DOUBLE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 100; goto __pyx_L13; } /* "numpy.pxd":838 * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd */ __pyx_t_3 = PyInt_FromLong(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = 
PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 103; goto __pyx_L13; } /* "numpy.pxd":839 * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg */ __pyx_t_5 = PyInt_FromLong(NPY_CFLOAT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 102; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L13; } /* "numpy.pxd":840 * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" */ __pyx_t_3 = PyInt_FromLong(NPY_CDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 100; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L13; } /* "numpy.pxd":841 * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: */ __pyx_t_5 = PyInt_FromLong(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 103; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L13; } /* "numpy.pxd":842 * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ __pyx_t_3 = PyInt_FromLong(NPY_OBJECT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 79; goto __pyx_L13; } /*else*/ { /* "numpy.pxd":844 * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * f += 1 * else: */ __pyx_t_5 = PyNumber_Remainder(((PyObject *)__pyx_kp_u_25), __pyx_v_t); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_5)); __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_t_5)); __Pyx_GIVEREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L13:; /* "numpy.pxd":845 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * f += 1 # <<<<<<<<<<<<<< * else: * # Cython ignores struct boundary information ("T{...}"), */ __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L11; } /*else*/ { /* "numpy.pxd":849 * # Cython ignores struct boundary information ("T{...}"), * # so don't output it * f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<< * return f * */ __pyx_t_12 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_12 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 849; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_f = __pyx_t_12; } __pyx_L11:; } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "numpy.pxd":850 * # so don't output it * f = _util_dtypestring(child, f, end, offset) * return f # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_f; goto __pyx_L0; __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_child); __Pyx_XDECREF(__pyx_v_fields); 
__Pyx_XDECREF(__pyx_v_childname); __Pyx_XDECREF(__pyx_v_new_offset); __Pyx_XDECREF(__pyx_v_t); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":965 * * * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< * cdef PyObject* baseptr * if base is None: */ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) { PyObject *__pyx_v_baseptr; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("set_array_base", 0); /* "numpy.pxd":967 * cdef inline void set_array_base(ndarray arr, object base): * cdef PyObject* baseptr * if base is None: # <<<<<<<<<<<<<< * baseptr = NULL * else: */ __pyx_t_1 = (__pyx_v_base == Py_None); if (__pyx_t_1) { /* "numpy.pxd":968 * cdef PyObject* baseptr * if base is None: * baseptr = NULL # <<<<<<<<<<<<<< * else: * Py_INCREF(base) # important to do this before decref below! */ __pyx_v_baseptr = NULL; goto __pyx_L3; } /*else*/ { /* "numpy.pxd":970 * baseptr = NULL * else: * Py_INCREF(base) # important to do this before decref below! # <<<<<<<<<<<<<< * baseptr = base * Py_XDECREF(arr.base) */ Py_INCREF(__pyx_v_base); /* "numpy.pxd":971 * else: * Py_INCREF(base) # important to do this before decref below! * baseptr = base # <<<<<<<<<<<<<< * Py_XDECREF(arr.base) * arr.base = baseptr */ __pyx_v_baseptr = ((PyObject *)__pyx_v_base); } __pyx_L3:; /* "numpy.pxd":972 * Py_INCREF(base) # important to do this before decref below! * baseptr = base * Py_XDECREF(arr.base) # <<<<<<<<<<<<<< * arr.base = baseptr * */ Py_XDECREF(__pyx_v_arr->base); /* "numpy.pxd":973 * baseptr = base * Py_XDECREF(arr.base) * arr.base = baseptr # <<<<<<<<<<<<<< * * cdef inline object get_array_base(ndarray arr): */ __pyx_v_arr->base = __pyx_v_baseptr; __Pyx_RefNannyFinishContext(); } /* "numpy.pxd":975 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("get_array_base", 0); /* "numpy.pxd":976 * * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: # <<<<<<<<<<<<<< * return None * else: */ __pyx_t_1 = (__pyx_v_arr->base == NULL); if (__pyx_t_1) { /* "numpy.pxd":977 * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: * return None # <<<<<<<<<<<<<< * else: * return arr.base */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_None); __pyx_r = Py_None; goto __pyx_L0; goto __pyx_L3; } /*else*/ { /* "numpy.pxd":979 * return None * else: * return arr.base # <<<<<<<<<<<<<< */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_arr->base)); __pyx_r = ((PyObject *)__pyx_v_arr->base); goto __pyx_L0; } __pyx_L3:; __pyx_r = Py_None; __Pyx_INCREF(Py_None); __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyMethodDef __pyx_methods[] = { {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 static struct PyModuleDef __pyx_moduledef = { PyModuleDef_HEAD_INIT, __Pyx_NAMESTR("_segmentation"), __Pyx_DOCSTR(__pyx_k_31), /* m_doc */ -1, /* m_size */ __pyx_methods /* m_methods */, NULL, /* m_reload */ NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_kp_s_1, __pyx_k_1, sizeof(__pyx_k_1), 0, 0, 1, 0}, {&__pyx_kp_s_11, __pyx_k_11, sizeof(__pyx_k_11), 0, 0, 1, 0}, {&__pyx_kp_s_13, __pyx_k_13, sizeof(__pyx_k_13), 0, 0, 1, 
0}, {&__pyx_kp_u_19, __pyx_k_19, sizeof(__pyx_k_19), 0, 1, 0, 0}, {&__pyx_kp_u_21, __pyx_k_21, sizeof(__pyx_k_21), 0, 1, 0, 0}, {&__pyx_kp_u_23, __pyx_k_23, sizeof(__pyx_k_23), 0, 1, 0, 0}, {&__pyx_kp_u_25, __pyx_k_25, sizeof(__pyx_k_25), 0, 1, 0, 0}, {&__pyx_kp_u_26, __pyx_k_26, sizeof(__pyx_k_26), 0, 1, 0, 0}, {&__pyx_kp_u_29, __pyx_k_29, sizeof(__pyx_k_29), 0, 1, 0, 0}, {&__pyx_kp_s_3, __pyx_k_3, sizeof(__pyx_k_3), 0, 0, 1, 0}, {&__pyx_kp_s_32, __pyx_k_32, sizeof(__pyx_k_32), 0, 0, 1, 0}, {&__pyx_kp_s_35, __pyx_k_35, sizeof(__pyx_k_35), 0, 0, 1, 0}, {&__pyx_n_s_36, __pyx_k_36, sizeof(__pyx_k_36), 0, 0, 1, 1}, {&__pyx_kp_s_5, __pyx_k_5, sizeof(__pyx_k_5), 0, 0, 1, 0}, {&__pyx_kp_s_7, __pyx_k_7, sizeof(__pyx_k_7), 0, 0, 1, 0}, {&__pyx_kp_s_9, __pyx_k_9, sizeof(__pyx_k_9), 0, 0, 1, 0}, {&__pyx_n_s__C_CONTIGUOUS, __pyx_k__C_CONTIGUOUS, sizeof(__pyx_k__C_CONTIGUOUS), 0, 0, 1, 1}, {&__pyx_n_s__RuntimeError, __pyx_k__RuntimeError, sizeof(__pyx_k__RuntimeError), 0, 0, 1, 1}, {&__pyx_n_s__U, __pyx_k__U, sizeof(__pyx_k__U), 0, 0, 1, 1}, {&__pyx_n_s__ValueError, __pyx_k__ValueError, sizeof(__pyx_k__ValueError), 0, 0, 1, 1}, {&__pyx_n_s__XYZ, __pyx_k__XYZ, sizeof(__pyx_k__XYZ), 0, 0, 1, 1}, {&__pyx_n_s____main__, __pyx_k____main__, sizeof(__pyx_k____main__), 0, 0, 1, 1}, {&__pyx_n_s____test__, __pyx_k____test__, sizeof(__pyx_k____test__), 0, 0, 1, 1}, {&__pyx_n_s____version__, __pyx_k____version__, sizeof(__pyx_k____version__), 0, 0, 1, 1}, {&__pyx_n_s___interaction_energy, __pyx_k___interaction_energy, sizeof(__pyx_k___interaction_energy), 0, 0, 1, 1}, {&__pyx_n_s___make_edges, __pyx_k___make_edges, sizeof(__pyx_k___make_edges), 0, 0, 1, 1}, {&__pyx_n_s___ve_step, __pyx_k___ve_step, sizeof(__pyx_k___ve_step), 0, 0, 1, 1}, {&__pyx_n_s__beta, __pyx_k__beta, sizeof(__pyx_k__beta), 0, 0, 1, 1}, {&__pyx_n_s__double, __pyx_k__double, sizeof(__pyx_k__double), 0, 0, 1, 1}, {&__pyx_n_s__dtype, __pyx_k__dtype, sizeof(__pyx_k__dtype), 0, 0, 1, 1}, {&__pyx_n_s__flags, __pyx_k__flags, sizeof(__pyx_k__flags), 0, 0, 1, 1}, {&__pyx_n_s__intp, __pyx_k__intp, sizeof(__pyx_k__intp), 0, 0, 1, 1}, {&__pyx_n_s__mask, __pyx_k__mask, sizeof(__pyx_k__mask), 0, 0, 1, 1}, {&__pyx_n_s__ngb_size, __pyx_k__ngb_size, sizeof(__pyx_k__ngb_size), 0, 0, 1, 1}, {&__pyx_n_s__np, __pyx_k__np, sizeof(__pyx_k__np), 0, 0, 1, 1}, {&__pyx_n_s__numpy, __pyx_k__numpy, sizeof(__pyx_k__numpy), 0, 0, 1, 1}, {&__pyx_n_s__ppm, __pyx_k__ppm, sizeof(__pyx_k__ppm), 0, 0, 1, 1}, {&__pyx_n_s__range, __pyx_k__range, sizeof(__pyx_k__range), 0, 0, 1, 1}, {&__pyx_n_s__ref, __pyx_k__ref, sizeof(__pyx_k__ref), 0, 0, 1, 1}, {&__pyx_n_s__shape, __pyx_k__shape, sizeof(__pyx_k__shape), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_ValueError = __Pyx_GetName(__pyx_b, __pyx_n_s__ValueError); if (!__pyx_builtin_ValueError) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 42; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_range = __Pyx_GetName(__pyx_b, __pyx_n_s__range); if (!__pyx_builtin_range) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 228; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_RuntimeError = __Pyx_GetName(__pyx_b, __pyx_n_s__RuntimeError); if (!__pyx_builtin_RuntimeError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} return 0; __pyx_L1_error:; return -1; } static int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* 
"nipy/algorithms/segmentation/_segmentation.pyx":42 * * if not ppm.flags['C_CONTIGUOUS'] or not ppm.dtype=='double': * raise ValueError('ppm array should be double C-contiguous') # <<<<<<<<<<<<<< * if not ref.flags['C_CONTIGUOUS'] or not ref.dtype=='double': * raise ValueError('ref array should be double C-contiguous') */ __pyx_k_tuple_2 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 42; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_2); __Pyx_INCREF(((PyObject *)__pyx_kp_s_1)); PyTuple_SET_ITEM(__pyx_k_tuple_2, 0, ((PyObject *)__pyx_kp_s_1)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_1)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_2)); /* "nipy/algorithms/segmentation/_segmentation.pyx":44 * raise ValueError('ppm array should be double C-contiguous') * if not ref.flags['C_CONTIGUOUS'] or not ref.dtype=='double': * raise ValueError('ref array should be double C-contiguous') # <<<<<<<<<<<<<< * if not XYZ.flags['C_CONTIGUOUS'] or not XYZ.dtype=='intp': * raise ValueError('XYZ array should be intp C-contiguous') */ __pyx_k_tuple_4 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 44; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_4); __Pyx_INCREF(((PyObject *)__pyx_kp_s_3)); PyTuple_SET_ITEM(__pyx_k_tuple_4, 0, ((PyObject *)__pyx_kp_s_3)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_3)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_4)); /* "nipy/algorithms/segmentation/_segmentation.pyx":46 * raise ValueError('ref array should be double C-contiguous') * if not XYZ.flags['C_CONTIGUOUS'] or not XYZ.dtype=='intp': * raise ValueError('XYZ array should be intp C-contiguous') # <<<<<<<<<<<<<< * if not XYZ.shape[1] == 3: * raise ValueError('XYZ array should be 3D') */ __pyx_k_tuple_6 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 46; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_6); __Pyx_INCREF(((PyObject *)__pyx_kp_s_5)); PyTuple_SET_ITEM(__pyx_k_tuple_6, 0, ((PyObject *)__pyx_kp_s_5)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_5)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_6)); /* "nipy/algorithms/segmentation/_segmentation.pyx":48 * raise ValueError('XYZ array should be intp C-contiguous') * if not XYZ.shape[1] == 3: * raise ValueError('XYZ array should be 3D') # <<<<<<<<<<<<<< * if not U.flags['C_CONTIGUOUS'] or not U.dtype=='double': * raise ValueError('U array should be double C-contiguous') */ __pyx_k_tuple_8 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 48; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_8); __Pyx_INCREF(((PyObject *)__pyx_kp_s_7)); PyTuple_SET_ITEM(__pyx_k_tuple_8, 0, ((PyObject *)__pyx_kp_s_7)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_7)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_8)); /* "nipy/algorithms/segmentation/_segmentation.pyx":50 * raise ValueError('XYZ array should be 3D') * if not U.flags['C_CONTIGUOUS'] or not U.dtype=='double': * raise ValueError('U array should be double C-contiguous') # <<<<<<<<<<<<<< * if not ppm.shape[-1] == ref.shape[-1]: * raise ValueError('Inconsistent shapes for ppm and ref arrays') */ __pyx_k_tuple_10 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_10); __Pyx_INCREF(((PyObject *)__pyx_kp_s_9)); PyTuple_SET_ITEM(__pyx_k_tuple_10, 0, 
((PyObject *)__pyx_kp_s_9)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_9)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_10)); /* "nipy/algorithms/segmentation/_segmentation.pyx":52 * raise ValueError('U array should be double C-contiguous') * if not ppm.shape[-1] == ref.shape[-1]: * raise ValueError('Inconsistent shapes for ppm and ref arrays') # <<<<<<<<<<<<<< * * ve_step(ppm, ref, XYZ, U, */ __pyx_k_tuple_12 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 52; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_12); __Pyx_INCREF(((PyObject *)__pyx_kp_s_11)); PyTuple_SET_ITEM(__pyx_k_tuple_12, 0, ((PyObject *)__pyx_kp_s_11)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_11)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_12)); /* "nipy/algorithms/segmentation/_segmentation.pyx":62 * * if not mask.flags['C_CONTIGUOUS'] or not mask.dtype=='intp': * raise ValueError('mask array should be intp and C-contiguous') # <<<<<<<<<<<<<< * * return make_edges(mask, ngb_size) */ __pyx_k_tuple_14 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_14)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 62; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_14); __Pyx_INCREF(((PyObject *)__pyx_kp_s_13)); PyTuple_SET_ITEM(__pyx_k_tuple_14, 0, ((PyObject *)__pyx_kp_s_13)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_13)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_14)); /* "nipy/algorithms/segmentation/_segmentation.pyx":70 * * if not ppm.flags['C_CONTIGUOUS'] or not ppm.dtype=='double': * raise ValueError('ppm array should be double C-contiguous') # <<<<<<<<<<<<<< * if not XYZ.flags['C_CONTIGUOUS'] or not XYZ.dtype=='intp': * raise ValueError('XYZ array should be intp C-contiguous') */ __pyx_k_tuple_15 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 70; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_15); __Pyx_INCREF(((PyObject *)__pyx_kp_s_1)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 0, ((PyObject *)__pyx_kp_s_1)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_1)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_15)); /* "nipy/algorithms/segmentation/_segmentation.pyx":72 * raise ValueError('ppm array should be double C-contiguous') * if not XYZ.flags['C_CONTIGUOUS'] or not XYZ.dtype=='intp': * raise ValueError('XYZ array should be intp C-contiguous') # <<<<<<<<<<<<<< * if not XYZ.shape[1] == 3: * raise ValueError('XYZ array should be 3D') */ __pyx_k_tuple_16 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_16)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 72; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_16); __Pyx_INCREF(((PyObject *)__pyx_kp_s_5)); PyTuple_SET_ITEM(__pyx_k_tuple_16, 0, ((PyObject *)__pyx_kp_s_5)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_5)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_16)); /* "nipy/algorithms/segmentation/_segmentation.pyx":74 * raise ValueError('XYZ array should be intp C-contiguous') * if not XYZ.shape[1] == 3: * raise ValueError('XYZ array should be 3D') # <<<<<<<<<<<<<< * if not U.flags['C_CONTIGUOUS'] or not U.dtype=='double': * raise ValueError('U array should be double C-contiguous') */ __pyx_k_tuple_17 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_17)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_17); __Pyx_INCREF(((PyObject *)__pyx_kp_s_7)); PyTuple_SET_ITEM(__pyx_k_tuple_17, 0, ((PyObject *)__pyx_kp_s_7)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_7)); 
__Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_17)); /* "nipy/algorithms/segmentation/_segmentation.pyx":76 * raise ValueError('XYZ array should be 3D') * if not U.flags['C_CONTIGUOUS'] or not U.dtype=='double': * raise ValueError('U array should be double C-contiguous') # <<<<<<<<<<<<<< * * return interaction_energy(ppm, XYZ, U, */ __pyx_k_tuple_18 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_18)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 76; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_18); __Pyx_INCREF(((PyObject *)__pyx_kp_s_9)); PyTuple_SET_ITEM(__pyx_k_tuple_18, 0, ((PyObject *)__pyx_kp_s_9)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_9)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_18)); /* "numpy.pxd":215 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_k_tuple_20 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_20)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_20); __Pyx_INCREF(((PyObject *)__pyx_kp_u_19)); PyTuple_SET_ITEM(__pyx_k_tuple_20, 0, ((PyObject *)__pyx_kp_u_19)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_19)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_20)); /* "numpy.pxd":219 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_k_tuple_22 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_22)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_22); __Pyx_INCREF(((PyObject *)__pyx_kp_u_21)); PyTuple_SET_ITEM(__pyx_k_tuple_22, 0, ((PyObject *)__pyx_kp_u_21)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_21)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_22)); /* "numpy.pxd":257 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_k_tuple_24 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_24)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_24); __Pyx_INCREF(((PyObject *)__pyx_kp_u_23)); PyTuple_SET_ITEM(__pyx_k_tuple_24, 0, ((PyObject *)__pyx_kp_u_23)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_23)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_24)); /* "numpy.pxd":799 * * if (end - f) - (new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_k_tuple_27 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_27)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_27); __Pyx_INCREF(((PyObject *)__pyx_kp_u_26)); PyTuple_SET_ITEM(__pyx_k_tuple_27, 0, ((PyObject *)__pyx_kp_u_26)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_26)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_27)); /* "numpy.pxd":803 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # 
One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_k_tuple_28 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_28)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_28); __Pyx_INCREF(((PyObject *)__pyx_kp_u_23)); PyTuple_SET_ITEM(__pyx_k_tuple_28, 0, ((PyObject *)__pyx_kp_u_23)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_23)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_28)); /* "numpy.pxd":823 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_k_tuple_30 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_30)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_30); __Pyx_INCREF(((PyObject *)__pyx_kp_u_29)); PyTuple_SET_ITEM(__pyx_k_tuple_30, 0, ((PyObject *)__pyx_kp_u_29)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_29)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_30)); /* "nipy/algorithms/segmentation/_segmentation.pyx":39 * * * def _ve_step(ppm, ref, XYZ, U, int ngb_size, double beta): # <<<<<<<<<<<<<< * * if not ppm.flags['C_CONTIGUOUS'] or not ppm.dtype=='double': */ __pyx_k_tuple_33 = PyTuple_New(6); if (unlikely(!__pyx_k_tuple_33)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 39; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_33); __Pyx_INCREF(((PyObject *)__pyx_n_s__ppm)); PyTuple_SET_ITEM(__pyx_k_tuple_33, 0, ((PyObject *)__pyx_n_s__ppm)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__ppm)); __Pyx_INCREF(((PyObject *)__pyx_n_s__ref)); PyTuple_SET_ITEM(__pyx_k_tuple_33, 1, ((PyObject *)__pyx_n_s__ref)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__ref)); __Pyx_INCREF(((PyObject *)__pyx_n_s__XYZ)); PyTuple_SET_ITEM(__pyx_k_tuple_33, 2, ((PyObject *)__pyx_n_s__XYZ)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__XYZ)); __Pyx_INCREF(((PyObject *)__pyx_n_s__U)); PyTuple_SET_ITEM(__pyx_k_tuple_33, 3, ((PyObject *)__pyx_n_s__U)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__U)); __Pyx_INCREF(((PyObject *)__pyx_n_s__ngb_size)); PyTuple_SET_ITEM(__pyx_k_tuple_33, 4, ((PyObject *)__pyx_n_s__ngb_size)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__ngb_size)); __Pyx_INCREF(((PyObject *)__pyx_n_s__beta)); PyTuple_SET_ITEM(__pyx_k_tuple_33, 5, ((PyObject *)__pyx_n_s__beta)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__beta)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_33)); __pyx_k_codeobj_34 = (PyObject*)__Pyx_PyCode_New(6, 0, 6, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_33, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_35, __pyx_n_s___ve_step, 39, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_34)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 39; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/algorithms/segmentation/_segmentation.pyx":59 * * * def _make_edges(mask, int ngb_size): # <<<<<<<<<<<<<< * * if not mask.flags['C_CONTIGUOUS'] or not mask.dtype=='intp': */ __pyx_k_tuple_37 = PyTuple_New(2); if (unlikely(!__pyx_k_tuple_37)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 59; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_37); __Pyx_INCREF(((PyObject *)__pyx_n_s__mask)); PyTuple_SET_ITEM(__pyx_k_tuple_37, 0, ((PyObject *)__pyx_n_s__mask)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__mask)); __Pyx_INCREF(((PyObject *)__pyx_n_s__ngb_size)); PyTuple_SET_ITEM(__pyx_k_tuple_37, 1, ((PyObject 
*)__pyx_n_s__ngb_size)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__ngb_size)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_37)); __pyx_k_codeobj_38 = (PyObject*)__Pyx_PyCode_New(2, 0, 2, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_37, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_35, __pyx_n_s___make_edges, 59, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_38)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 59; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/algorithms/segmentation/_segmentation.pyx":67 * * * def _interaction_energy(ppm, XYZ, U, int ngb_size): # <<<<<<<<<<<<<< * * if not ppm.flags['C_CONTIGUOUS'] or not ppm.dtype=='double': */ __pyx_k_tuple_39 = PyTuple_New(4); if (unlikely(!__pyx_k_tuple_39)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 67; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_39); __Pyx_INCREF(((PyObject *)__pyx_n_s__ppm)); PyTuple_SET_ITEM(__pyx_k_tuple_39, 0, ((PyObject *)__pyx_n_s__ppm)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__ppm)); __Pyx_INCREF(((PyObject *)__pyx_n_s__XYZ)); PyTuple_SET_ITEM(__pyx_k_tuple_39, 1, ((PyObject *)__pyx_n_s__XYZ)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__XYZ)); __Pyx_INCREF(((PyObject *)__pyx_n_s__U)); PyTuple_SET_ITEM(__pyx_k_tuple_39, 2, ((PyObject *)__pyx_n_s__U)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__U)); __Pyx_INCREF(((PyObject *)__pyx_n_s__ngb_size)); PyTuple_SET_ITEM(__pyx_k_tuple_39, 3, ((PyObject *)__pyx_n_s__ngb_size)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__ngb_size)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_39)); __pyx_k_codeobj_40 = (PyObject*)__Pyx_PyCode_New(4, 0, 4, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_39, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_35, __pyx_n_s___interaction_energy, 67, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_40)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 67; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_InitGlobals(void) { if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __pyx_int_3 = PyInt_FromLong(3); if (unlikely(!__pyx_int_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __pyx_int_15 = PyInt_FromLong(15); if (unlikely(!__pyx_int_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; return 0; __pyx_L1_error:; return -1; } #if PY_MAJOR_VERSION < 3 PyMODINIT_FUNC init_segmentation(void); /*proto*/ PyMODINIT_FUNC init_segmentation(void) #else PyMODINIT_FUNC PyInit__segmentation(void); /*proto*/ PyMODINIT_FUNC PyInit__segmentation(void) #endif { PyObject *__pyx_t_1 = NULL; __Pyx_RefNannyDeclarations #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit__segmentation(void)", 0); if ( __Pyx_check_binary_version() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_empty_bytes = 
PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #ifdef __Pyx_CyFunction_USED if (__Pyx_CyFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? */ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4(__Pyx_NAMESTR("_segmentation"), __pyx_methods, __Pyx_DOCSTR(__pyx_k_31), 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (!PyDict_GetItemString(modules, "nipy.algorithms.segmentation._segmentation")) { if (unlikely(PyDict_SetItemString(modules, "nipy.algorithms.segmentation._segmentation", __pyx_m) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } } #endif __pyx_b = PyImport_AddModule(__Pyx_NAMESTR(__Pyx_BUILTIN_MODULE_NAME)); if (unlikely(!__pyx_b)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #if CYTHON_COMPILING_IN_PYPY Py_INCREF(__pyx_b); #endif if (__Pyx_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; /*--- Initialize various global constants etc. 
---*/ if (unlikely(__Pyx_InitGlobals() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (__pyx_module_is_main_nipy__algorithms__segmentation___segmentation) { if (__Pyx_SetAttrString(__pyx_m, "__name__", __pyx_n_s____main__) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; } /*--- Builtin init code ---*/ if (unlikely(__Pyx_InitCachedBuiltins() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Constants init code ---*/ if (unlikely(__Pyx_InitCachedConstants() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Global init code ---*/ /*--- Variable export code ---*/ /*--- Function export code ---*/ /*--- Type init code ---*/ /*--- Type import code ---*/ __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "type", #if CYTHON_COMPILING_IN_PYPY sizeof(PyTypeObject), #else sizeof(PyHeapTypeObject), #endif 0); if (unlikely(!__pyx_ptype_7cpython_4type_type)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_5numpy_dtype)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 155; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 169; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 178; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 861; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Variable import code ---*/ /*--- Function import code ---*/ /*--- Execution code ---*/ /* "nipy/algorithms/segmentation/_segmentation.pyx":9 * """ * * __version__ = '0.2' # <<<<<<<<<<<<<< * * # Includes */ if (PyObject_SetAttr(__pyx_m, __pyx_n_s____version__, ((PyObject *)__pyx_kp_s_32)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/algorithms/segmentation/_segmentation.pyx":33 * * # Initialize numpy * mrf_import_array() # <<<<<<<<<<<<<< * import_array() * import numpy as np */ mrf_import_array(); /* "nipy/algorithms/segmentation/_segmentation.pyx":34 * # Initialize numpy * mrf_import_array() * import_array() # <<<<<<<<<<<<<< * import numpy as np * */ import_array(); /* "nipy/algorithms/segmentation/_segmentation.pyx":35 * mrf_import_array() * import_array() * import numpy as np # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_Import(((PyObject *)__pyx_n_s__numpy), 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 35; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__np, __pyx_t_1) < 0) 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 35; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/segmentation/_segmentation.pyx":39 * * * def _ve_step(ppm, ref, XYZ, U, int ngb_size, double beta): # <<<<<<<<<<<<<< * * if not ppm.flags['C_CONTIGUOUS'] or not ppm.dtype=='double': */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_10algorithms_12segmentation_13_segmentation_1_ve_step, NULL, __pyx_n_s_36); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 39; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s___ve_step, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 39; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/segmentation/_segmentation.pyx":59 * * * def _make_edges(mask, int ngb_size): # <<<<<<<<<<<<<< * * if not mask.flags['C_CONTIGUOUS'] or not mask.dtype=='intp': */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_10algorithms_12segmentation_13_segmentation_3_make_edges, NULL, __pyx_n_s_36); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 59; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s___make_edges, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 59; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/segmentation/_segmentation.pyx":67 * * * def _interaction_energy(ppm, XYZ, U, int ngb_size): # <<<<<<<<<<<<<< * * if not ppm.flags['C_CONTIGUOUS'] or not ppm.dtype=='double': */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_10algorithms_12segmentation_13_segmentation_5_interaction_energy, NULL, __pyx_n_s_36); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 67; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s___interaction_energy, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 67; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/segmentation/_segmentation.pyx":1 * # -*- Mode: Python -*- # <<<<<<<<<<<<<< * * """ */ __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_1)); if (PyObject_SetAttr(__pyx_m, __pyx_n_s____test__, ((PyObject *)__pyx_t_1)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; /* "numpy.pxd":975 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); if (__pyx_m) { __Pyx_AddTraceback("init nipy.algorithms.segmentation._segmentation", __pyx_clineno, __pyx_lineno, __pyx_filename); Py_DECREF(__pyx_m); __pyx_m = 0; } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init nipy.algorithms.segmentation._segmentation"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if PY_MAJOR_VERSION < 3 return; #else return __pyx_m; #endif } /* Runtime support code */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule((char *)modname); if (!m) goto end; p = 
PyObject_GetAttrString(m, (char *)"RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* CYTHON_REFNANNY */ static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name) { PyObject *result; result = PyObject_GetAttr(dict, name); if (!result) { if (dict != __pyx_b) { PyErr_Clear(); result = PyObject_GetAttr(__pyx_b, name); } if (!result) { PyErr_SetObject(PyExc_NameError, name); } } return result; } static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%s() takes %s %" CYTHON_FORMAT_SSIZE_T "d positional argument%s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? "" : "s", num_found); } static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 
1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%s() got an unexpected keyword argument '%s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb) { #if CYTHON_COMPILING_IN_CPYTHON PyObject *tmp_type, *tmp_value, *tmp_tb; PyThreadState *tstate = PyThreadState_GET(); tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_Restore(type, value, tb); #endif } static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb) { #if CYTHON_COMPILING_IN_CPYTHON PyThreadState *tstate = PyThreadState_GET(); *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(type, value, tb); #endif } #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } #if PY_VERSION_HEX < 0x02050000 if (PyClass_Check(type)) { #else if (PyType_Check(type)) { #endif #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; #if PY_VERSION_HEX < 0x02050000 if (PyInstance_Check(type)) { type = (PyObject*) ((PyInstanceObject*)type)->in_class; Py_INCREF(type); } else { type = 0; PyErr_SetString(PyExc_TypeError, "raise: exception must be an old-style class or instance"); goto raise_error; } #else type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } #endif } __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else /* Python 3+ */ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate 
value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyEval_CallObject(type, args); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } if (cause && cause != Py_None) { PyObject *fixed_cause; if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { PyThreadState *tstate = PyThreadState_GET(); PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } } bad: Py_XDECREF(owned_instance); return; } #endif static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { if (unlikely(!type)) { PyErr_Format(PyExc_SystemError, "Missing type object"); return 0; } if (likely(PyObject_TypeCheck(obj, type))) return 1; PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", Py_TYPE(obj)->tp_name, type->tp_name); return 0; } static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); } static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more than %" CYTHON_FORMAT_SSIZE_T "d value%s to unpack", index, (index == 1) ? 
"" : "s"); } static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); } static CYTHON_INLINE int __Pyx_IterFinish(void) { #if CYTHON_COMPILING_IN_CPYTHON PyThreadState *tstate = PyThreadState_GET(); PyObject* exc_type = tstate->curexc_type; if (unlikely(exc_type)) { if (likely(exc_type == PyExc_StopIteration) || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration)) { PyObject *exc_value, *exc_tb; exc_value = tstate->curexc_value; exc_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; Py_DECREF(exc_type); Py_XDECREF(exc_value); Py_XDECREF(exc_tb); return 0; } else { return -1; } } return 0; #else if (unlikely(PyErr_Occurred())) { if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) { PyErr_Clear(); return 0; } else { return -1; } } return 0; #endif } static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected) { if (unlikely(retval)) { Py_DECREF(retval); __Pyx_RaiseTooManyValuesError(expected); return -1; } else { return __Pyx_IterFinish(); } return 0; } static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level) { PyObject *py_import = 0; PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; py_import = __Pyx_GetAttrString(__pyx_b, "__import__"); if (!py_import) goto bad; if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; #if PY_VERSION_HEX >= 0x02050000 { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if (strchr(__Pyx_MODULE_NAME, '.')) { /* try package relative import first */ PyObject *py_level = PyInt_FromLong(1); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; /* try absolute import on failure */ } #endif if (!module) { PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); } } #else if (level>0) { PyErr_SetString(PyExc_RuntimeError, "Relative import is not supported for Python <=2.4."); goto bad; } module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, NULL); #endif bad: Py_XDECREF(empty_list); Py_XDECREF(py_import); Py_XDECREF(empty_dict); return module; } #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return ::std::complex< float >(x, y); } #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return x + y*(__pyx_t_float_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { __pyx_t_float_complex z; z.real = x; z.imag = y; return z; } #endif #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex a, __pyx_t_float_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real + b.real; z.imag = a.imag + 
b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float denom = b.real * b.real + b.imag * b.imag; z.real = (a.real * b.real + a.imag * b.imag) / denom; z.imag = (a.imag * b.real - a.real * b.imag) / denom; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrtf(z.real*z.real + z.imag*z.imag); #else return hypotf(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { float denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(a, a); case 3: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(z, a); case 4: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } r = a.real; theta = 0; } else { r = __Pyx_c_absf(a); theta = atan2f(a.imag, a.real); } lnr = logf(r); z_r = expf(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cosf(z_theta); z.imag = z_r * sinf(z_theta); return z; } #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return ::std::complex< double >(x, y); } #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return x + y*(__pyx_t_double_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { __pyx_t_double_complex z; z.real = x; z.imag = y; return z; } #endif #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex a, __pyx_t_double_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; 
z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double denom = b.real * b.real + b.imag * b.imag; z.real = (a.real * b.real + a.imag * b.imag) / denom; z.imag = (a.imag * b.real - a.real * b.imag) / denom; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrt(z.real*z.real + z.imag*z.imag); #else return hypot(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { double denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(a, a); case 3: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(z, a); case 4: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } r = a.real; theta = 0; } else { r = __Pyx_c_abs(a); theta = atan2(a.imag, a.real); } lnr = log(r); z_r = exp(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cos(z_theta); z.imag = z_r * sin(z_theta); return z; } #endif #endif static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject* x) { const unsigned char neg_one = (unsigned char)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(unsigned char) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(unsigned char)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to unsigned char" : "value too large to convert to unsigned char"); } return (unsigned char)-1; } return (unsigned char)val; } return (unsigned char)__Pyx_PyInt_AsUnsignedLong(x); } static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject* x) { const unsigned short neg_one = (unsigned short)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(unsigned short) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(unsigned short)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? 
"can't convert negative value to unsigned short" : "value too large to convert to unsigned short"); } return (unsigned short)-1; } return (unsigned short)val; } return (unsigned short)__Pyx_PyInt_AsUnsignedLong(x); } static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject* x) { const unsigned int neg_one = (unsigned int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(unsigned int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(unsigned int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to unsigned int" : "value too large to convert to unsigned int"); } return (unsigned int)-1; } return (unsigned int)val; } return (unsigned int)__Pyx_PyInt_AsUnsignedLong(x); } static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject* x) { const char neg_one = (char)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(char) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(char)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to char" : "value too large to convert to char"); } return (char)-1; } return (char)val; } return (char)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject* x) { const short neg_one = (short)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(short) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(short)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to short" : "value too large to convert to short"); } return (short)-1; } return (short)val; } return (short)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject* x) { const int neg_one = (int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to int" : "value too large to convert to int"); } return (int)-1; } return (int)val; } return (int)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject* x) { const signed char neg_one = (signed char)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(signed char) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(signed char)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to signed char" : "value too large to convert to signed char"); } return (signed char)-1; } return (signed char)val; } return (signed char)__Pyx_PyInt_AsSignedLong(x); } static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject* x) { const signed short neg_one = (signed short)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(signed short) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(signed short)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? 
"can't convert negative value to signed short" : "value too large to convert to signed short"); } return (signed short)-1; } return (signed short)val; } return (signed short)__Pyx_PyInt_AsSignedLong(x); } static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject* x) { const signed int neg_one = (signed int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(signed int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(signed int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to signed int" : "value too large to convert to signed int"); } return (signed int)-1; } return (signed int)val; } return (signed int)__Pyx_PyInt_AsSignedLong(x); } static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject* x) { const int neg_one = (int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to int" : "value too large to convert to int"); } return (int)-1; } return (int)val; } return (int)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject* x) { const unsigned long neg_one = (unsigned long)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned long"); return (unsigned long)-1; } return (unsigned long)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned long"); return (unsigned long)-1; } return (unsigned long)PyLong_AsUnsignedLong(x); } else { return (unsigned long)PyLong_AsLong(x); } } else { unsigned long val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (unsigned long)-1; val = __Pyx_PyInt_AsUnsignedLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject* x) { const unsigned PY_LONG_LONG neg_one = (unsigned PY_LONG_LONG)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned PY_LONG_LONG"); return (unsigned PY_LONG_LONG)-1; } return (unsigned PY_LONG_LONG)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned PY_LONG_LONG"); return (unsigned PY_LONG_LONG)-1; } return (unsigned PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); } else { return (unsigned PY_LONG_LONG)PyLong_AsLongLong(x); } } else { unsigned PY_LONG_LONG val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (unsigned PY_LONG_LONG)-1; val = __Pyx_PyInt_AsUnsignedLongLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject* x) { const long neg_one = (long)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if 
(likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long)-1; } return (long)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long)-1; } return (long)PyLong_AsUnsignedLong(x); } else { return (long)PyLong_AsLong(x); } } else { long val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (long)-1; val = __Pyx_PyInt_AsLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject* x) { const PY_LONG_LONG neg_one = (PY_LONG_LONG)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to PY_LONG_LONG"); return (PY_LONG_LONG)-1; } return (PY_LONG_LONG)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to PY_LONG_LONG"); return (PY_LONG_LONG)-1; } return (PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); } else { return (PY_LONG_LONG)PyLong_AsLongLong(x); } } else { PY_LONG_LONG val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (PY_LONG_LONG)-1; val = __Pyx_PyInt_AsLongLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject* x) { const signed long neg_one = (signed long)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed long"); return (signed long)-1; } return (signed long)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed long"); return (signed long)-1; } return (signed long)PyLong_AsUnsignedLong(x); } else { return (signed long)PyLong_AsLong(x); } } else { signed long val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (signed long)-1; val = __Pyx_PyInt_AsSignedLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject* x) { const signed PY_LONG_LONG neg_one = (signed PY_LONG_LONG)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed PY_LONG_LONG"); return (signed PY_LONG_LONG)-1; } return (signed PY_LONG_LONG)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed PY_LONG_LONG"); return (signed PY_LONG_LONG)-1; } return (signed PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); } else { return (signed PY_LONG_LONG)PyLong_AsLongLong(x); } } else { signed PY_LONG_LONG val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (signed PY_LONG_LONG)-1; val = __Pyx_PyInt_AsSignedLongLong(tmp); Py_DECREF(tmp); return val; } } static int 
__Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); #if PY_VERSION_HEX < 0x02050000 return PyErr_Warn(NULL, message); #else return PyErr_WarnEx(NULL, message, 1); #endif } return 0; } #ifndef __PYX_HAVE_RT_ImportModule #define __PYX_HAVE_RT_ImportModule static PyObject *__Pyx_ImportModule(const char *name) { PyObject *py_name = 0; PyObject *py_module = 0; py_name = __Pyx_PyIdentifier_FromString(name); if (!py_name) goto bad; py_module = PyImport_Import(py_name); Py_DECREF(py_name); return py_module; bad: Py_XDECREF(py_name); return 0; } #endif #ifndef __PYX_HAVE_RT_ImportType #define __PYX_HAVE_RT_ImportType static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict) { PyObject *py_module = 0; PyObject *result = 0; PyObject *py_name = 0; char warning[200]; py_module = __Pyx_ImportModule(module_name); if (!py_module) goto bad; py_name = __Pyx_PyIdentifier_FromString(class_name); if (!py_name) goto bad; result = PyObject_GetAttr(py_module, py_name); Py_DECREF(py_name); py_name = 0; Py_DECREF(py_module); py_module = 0; if (!result) goto bad; if (!PyType_Check(result)) { PyErr_Format(PyExc_TypeError, "%s.%s is not a type object", module_name, class_name); goto bad; } if (!strict && (size_t)((PyTypeObject *)result)->tp_basicsize > size) { PyOS_snprintf(warning, sizeof(warning), "%s.%s size changed, may indicate binary incompatibility", module_name, class_name); #if PY_VERSION_HEX < 0x02050000 if (PyErr_Warn(NULL, warning) < 0) goto bad; #else if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; #endif } else if ((size_t)((PyTypeObject *)result)->tp_basicsize != size) { PyErr_Format(PyExc_ValueError, "%s.%s has the wrong size, try recompiling", module_name, class_name); goto bad; } return (PyTypeObject *)result; bad: Py_XDECREF(py_module); Py_XDECREF(result); return NULL; } #endif static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = (start + end) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); 
if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, new_max*sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, /*int argcount,*/ 0, /*int kwonlyargcount,*/ 0, /*int nlocals,*/ 0, /*int stacksize,*/ 0, /*int flags,*/ __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, /*int firstlineno,*/ __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_globals = 0; PyFrameObject *py_frame = 0; py_code = __pyx_find_code_object(c_line ? c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? 
c_line : py_line, py_code); } py_globals = PyModule_GetDict(__pyx_m); if (!py_globals) goto bad; py_frame = PyFrame_New( PyThreadState_GET(), /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ py_globals, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; py_frame->f_lineno = py_line; PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else /* Python 3+ has unicode identifiers */ if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; ++t; } return 0; } /* Type Conversion Functions */ static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x) { PyNumberMethods *m; const char *name = NULL; PyObject *res = NULL; #if PY_VERSION_HEX < 0x03000000 if (PyInt_Check(x) || PyLong_Check(x)) #else if (PyLong_Check(x)) #endif return Py_INCREF(x), x; m = Py_TYPE(x)->tp_as_number; #if PY_VERSION_HEX < 0x03000000 if (m && m->nb_int) { name = "int"; res = PyNumber_Int(x); } else if (m && m->nb_long) { name = "long"; res = PyNumber_Long(x); } #else if (m && m->nb_int) { name = "int"; res = PyNumber_Long(x); } #endif if (res) { #if PY_VERSION_HEX < 0x03000000 if (!PyInt_Check(res) && !PyLong_Check(res)) { #else if (!PyLong_Check(res)) { #endif PyErr_Format(PyExc_TypeError, "__%s__ returned non-%s (type %.200s)", name, name, Py_TYPE(res)->tp_name); Py_DECREF(res); return NULL; } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject* x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { #if PY_VERSION_HEX < 0x02050000 if (ival <= LONG_MAX) return PyInt_FromLong((long)ival); else { unsigned char *bytes = (unsigned char *) &ival; int one = 1; int little = (int)*(unsigned char*)&one; return _PyLong_FromByteArray(bytes, sizeof(size_t), little, 0); } #else return PyInt_FromSize_t(ival); #endif } static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject* x) { unsigned PY_LONG_LONG val = __Pyx_PyInt_AsUnsignedLongLong(x); if (unlikely(val == (unsigned PY_LONG_LONG)-1 && PyErr_Occurred())) { return (size_t)-1; } else if (unlikely(val != (unsigned PY_LONG_LONG)(size_t)val)) { PyErr_SetString(PyExc_OverflowError, "value too large to convert to size_t"); return (size_t)-1; } return (size_t)val; } #endif /* Py_PYTHON_H */ nipy-0.3.0/nipy/algorithms/segmentation/_segmentation.pyx000066400000000000000000000046661210344137400237270ustar00rootroot00000000000000# -*- Mode: Python -*- """ Markov random field utils. Author: Alexis Roche, 2010. 
""" __version__ = '0.2' # Includes from numpy cimport import_array, ndarray # Externals cdef extern from "mrf.h": void mrf_import_array() void ve_step(ndarray ppm, ndarray ref, ndarray XYZ, ndarray U, int ngb_size, double beta) ndarray make_edges(ndarray mask, int ngb_size) double interaction_energy(ndarray ppm, ndarray XYZ, ndarray U, int ngb_size) # Initialize numpy mrf_import_array() import_array() import numpy as np def _ve_step(ppm, ref, XYZ, U, int ngb_size, double beta): if not ppm.flags['C_CONTIGUOUS'] or not ppm.dtype=='double': raise ValueError('ppm array should be double C-contiguous') if not ref.flags['C_CONTIGUOUS'] or not ref.dtype=='double': raise ValueError('ref array should be double C-contiguous') if not XYZ.flags['C_CONTIGUOUS'] or not XYZ.dtype=='intp': raise ValueError('XYZ array should be intp C-contiguous') if not XYZ.shape[1] == 3: raise ValueError('XYZ array should be 3D') if not U.flags['C_CONTIGUOUS'] or not U.dtype=='double': raise ValueError('U array should be double C-contiguous') if not ppm.shape[-1] == ref.shape[-1]: raise ValueError('Inconsistent shapes for ppm and ref arrays') ve_step(ppm, ref, XYZ, U, ngb_size, beta) return ppm def _make_edges(mask, int ngb_size): if not mask.flags['C_CONTIGUOUS'] or not mask.dtype=='intp': raise ValueError('mask array should be intp and C-contiguous') return make_edges(mask, ngb_size) def _interaction_energy(ppm, XYZ, U, int ngb_size): if not ppm.flags['C_CONTIGUOUS'] or not ppm.dtype=='double': raise ValueError('ppm array should be double C-contiguous') if not XYZ.flags['C_CONTIGUOUS'] or not XYZ.dtype=='intp': raise ValueError('XYZ array should be intp C-contiguous') if not XYZ.shape[1] == 3: raise ValueError('XYZ array should be 3D') if not U.flags['C_CONTIGUOUS'] or not U.dtype=='double': raise ValueError('U array should be double C-contiguous') return interaction_energy(ppm, XYZ, U, ngb_size) nipy-0.3.0/nipy/algorithms/segmentation/brain_segmentation.py000066400000000000000000000101621210344137400245370ustar00rootroot00000000000000import numpy as np from .segmentation import (Segmentation, moment_matching, map_from_ppm) T1_ref_params = {} T1_ref_params['glob_mu'] = 1643.2 T1_ref_params['glob_sigma'] = 252772.3 T1_ref_params['3k'] = { 'mu': np.array([813.9, 1628.3, 2155.8]), 'sigma': np.array([46499.0, 30233.4, 17130.0])} T1_ref_params['4k'] = { 'mu': np.array([816.1, 1613.7, 1912.3, 2169.3]), 'sigma': np.array([47117.6, 27053.8, 8302.2, 14970.8])} T1_ref_params['5k'] = { 'mu': np.array([724.2, 1169.3, 1631.5, 1917.0, 2169.2]), 'sigma': np.array([22554.8, 21368.9, 20560.1, 7302.6, 14962.1])} class BrainT1Segmentation(object): def __init__(self, data, mask=None, model='3k', niters=25, ngb_size=6, beta=0.5, ref_params=None, init_params=None, convert=True): self.labels = ('CSF', 'GM', 'WM') self.data = data self.mask = mask mixmat = np.asarray(model) if mixmat.ndim == 2: nclasses = mixmat.shape[0] if nclasses < 3: raise ValueError('at least 3 classes required') if not mixmat.shape[1] == 3: raise ValueError('mixing matrix should have 3 rows') self.mixmat = mixmat elif model == '3k': self.mixmat = np.eye(3) elif model == '4k': self.mixmat = np.array([[1., 0., 0.], [0., 1., 0.], [0., 1., 0.], [0., 0., 1.]]) elif model == '5k': self.mixmat = np.array([[1., 0., 0.], [1., 0., 0.], [0., 1., 0.], [0., 1., 0.], [0., 0., 1.]]) else: raise ValueError('unknown brain segmentation model') self.niters = int(niters) self.beta = float(beta) self.ngb_size = int(ngb_size) # Class parameter initialization if init_params == None: if 
ref_params == None: ref_params = T1_ref_params self.init_mu, self.init_sigma = self._init_parameters(ref_params) else: self.init_mu = np.array(init_params[0], dtype='double') self.init_sigma = np.array(init_params[1], dtype='double') if not len(self.init_mu) == self.mixmat.shape[0]\ or not len(self.init_sigma) == self.mixmat.shape[0]: raise ValueError('Inconsistent initial parameter estimates') self._run() if convert: self.convert() else: self.label = map_from_ppm(self.ppm, self.mask) def _init_parameters(self, ref_params): if not self.mask == None: data = self.data[self.mask] else: data = self.data nclasses = self.mixmat.shape[0] if nclasses <= 5: key = str(self.mixmat.shape[0]) + 'k' ref_mu = ref_params[key]['mu'] ref_sigma = ref_params[key]['sigma'] else: ref_mu = np.linspace(ref_params['3k']['mu'][0], ref_params['3k']['mu'][-1], num=nclasses) ref_sigma = np.linspace(ref_params['3k']['sigma'][0], ref_params['3k']['sigma'][-1], num=nclasses) return moment_matching(data, ref_mu, ref_sigma, ref_params['glob_mu'], ref_params['glob_sigma']) def _run(self): S = Segmentation(self.data, mask=self.mask, mu=self.init_mu, sigma=self.init_sigma, ngb_size=self.ngb_size, beta=self.beta) S.run(niters=self.niters) self.mu = S.mu self.sigma = S.sigma self.ppm = S.ppm def convert(self): if self.ppm.shape[-1] == self.mixmat.shape[0]: self.ppm = np.dot(self.ppm, self.mixmat) self.label = map_from_ppm(self.ppm, self.mask) nipy-0.3.0/nipy/algorithms/segmentation/mrf.c000066400000000000000000000207551210344137400212560ustar00rootroot00000000000000#include "mrf.h" #include #include #ifdef _MSC_VER #define inline __inline #endif /* Numpy import */ void mrf_import_array(void) { import_array(); return; } /* Encode neighborhood systems using static arrays */ int ngb6 [] = {1,0,0, -1,0,0, 0,1,0, 0,-1,0, 0,0,1, 0,0,-1}; int ngb26 [] = {1,0,0, -1,0,0, 0,1,0, 0,-1,0, 1,1,0, -1,-1,0, 1,-1,0, -1,1,0, 1,0,1, -1,0,1, 0,1,1, 0,-1,1, 1,1,1, -1,-1,1, 1,-1,1, -1,1,1, 1,0,-1, -1,0,-1, 0,1,-1, 0,-1,-1, 1,1,-1, -1,-1,-1, 1,-1,-1, -1,1,-1, 0,0,1, 0,0,-1}; static int* _select_neighborhood_system(int ngb_size) { if (ngb_size == 6) return ngb6; else if (ngb_size == 26) return ngb26; else { fprintf(stderr, "Unknown neighborhood system\n"); return NULL; } } /* Perform the VE-step of a VEM algorithm for a general Markov random field segmentation model. Compute exp[-2 * beta * SUM_j (U * qj)] for a given voxel, where the sum is on the neighbors. 
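   Roughly, for a voxel i with neighborhood ngb(i) and class posteriors q_j
   over K classes, the update sketched by the code below is:

       p_i(k) = ref_i(k) * exp( -2 * beta * sum_{j in ngb(i)} [U q_j](k) )
       q_i(k) = p_i(k) / sum_l p_i(l)      (normalization skipped when the
                                            sum falls below TINY)

   where ref holds the normalized external field computed on the Python
   side.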
ppm assumed C-contiguous double (X, Y, Z, K) ref assumed C-contiguous double (NPTS, K) XYZ assumed C-contiguous npy_intp (NPTS, 3) */ #define TINY 1e-300 /* Compute neighborhood 'agreement' term required by the VE-step at a particular voxel */ static void _ngb_integrate(double* res, const PyArrayObject* ppm, npy_intp x, npy_intp y, npy_intp z, const double* U, const int* ngb, npy_intp ngb_size) { npy_intp xn, yn, zn, pos, ngb_idx, k, kk; const int* buf_ngb; const double* ppm_data = (double*)ppm->data; double *buf, *buf_ppm, *q, *buf_U; npy_intp K = ppm->dimensions[3]; npy_intp u2 = ppm->dimensions[2]*K; npy_intp u1 = ppm->dimensions[1]*u2; npy_intp posmax = ppm->dimensions[0]*u1 - K; /* Re-initialize output array */ memset((void*)res, 0, K*sizeof(double)); /* Loop over neighbors */ buf_ngb = ngb; for (ngb_idx=0; ngb_idx posmax)) continue; /* Compute U*q */ buf_ppm = (double*)ppm_data + pos; for (k=0, buf=res, buf_U=(double*)U; kdimensions[3]; npy_intp u2 = ppm->dimensions[2]*K; npy_intp u1 = ppm->dimensions[1]*u2; const double* ref_data = (double*)ref->data; const double* U_data = (double*)U->data; npy_intp* xyz; int* ngb; /* Neighborhood system */ ngb = _select_neighborhood_system(ngb_size); /* Pointer to the data array */ ppm_data = (double*)ppm->data; /* Allocate auxiliary vectors */ p = (double*)calloc(K, sizeof(double)); /* Loop over points */ iter = (PyArrayIterObject*)PyArray_IterAllButAxis((PyObject*)XYZ, &axis); while(iter->index < iter->size) { /* Integrate the energy over the neighborhood */ xyz = PyArray_ITER_DATA(iter); x = xyz[0]; y = xyz[1]; z = xyz[2]; _ngb_integrate(p, ppm, x, y, z, U_data, (const int*)ngb, ngb_size); /* Apply exponential transform, multiply with reference and compute normalization constant */ psum = 0.0; for (k=0, pos=(iter->index)*K, buf=p; k TINY) for (k=0, buf=p; kdimensions[2]; npy_intp u1 = idx->dimensions[1]*u2; npy_intp u0 = idx->dimensions[0]*u1; npy_intp mask_size = 0, n_edges = 0; npy_intp idx_i; npy_intp *buf_idx; npy_intp *edges_data, *buf_edges; npy_intp ngb_idx; npy_intp pos; PyArrayObject* edges; npy_intp dim[2] = {0, 2}; /* First loop over the input array to determine the mask size */ while(iter->index < iter->size) { buf_idx = (npy_intp*)PyArray_ITER_DATA(iter); if (*buf_idx >= 0) mask_size ++; PyArray_ITER_NEXT(iter); } /* Allocate the array of edges using an upper bound of the required memory space */ edges_data = (npy_intp*)malloc(2 * ngb_size * mask_size * sizeof(npy_intp)); /* Second loop over the input array */ PyArray_ITER_RESET(iter); iter->contiguous = 0; /* To force coordinates to be updated */ buf_edges = edges_data; while(iter->index < iter->size) { xi = iter->coordinates[0]; yi = iter->coordinates[1]; zi = iter->coordinates[2]; buf_idx = (npy_intp*)PyArray_ITER_DATA(iter); idx_i = *buf_idx; /* Loop over neighbors if current point is within the mask */ if (idx_i >= 0) { buf_ngb = ngb; for (ngb_idx=0; ngb_idx= u0)) continue; buf_idx = (npy_intp*)idx->data + pos; if (*buf_idx < 0) continue; buf_edges[0] = idx_i; buf_edges[1] = *buf_idx; n_edges ++; buf_edges += 2; } } /* Increment iterator */ PyArray_ITER_NEXT(iter); } /* Reallocate edges array to account for connections suppressed due to masking */ edges_data = realloc((void *)edges_data, 2 * n_edges * sizeof(npy_intp)); dim[0] = n_edges; edges = (PyArrayObject*) PyArray_SimpleNewFromData(2, dim, NPY_INTP, (void*)edges_data); /* Transfer ownership to python (to avoid memory leaks!) 
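   In outline, the pattern used in the lines that follow is:

       buf = malloc(2 * n_edges * sizeof(npy_intp));            raw buffer
       arr = PyArray_SimpleNewFromData(2, dim, NPY_INTP, buf);  wraps buf, no copy
       arr->flags |= NPY_OWNDATA;                               numpy frees buf with arr

   PyArray_SimpleNewFromData neither copies the memory nor takes ownership
   of it, so without the NPY_OWNDATA flag the malloc'ed buffer would be
   leaked once the returned array is garbage collected.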
*/ edges->flags = (edges->flags) | NPY_OWNDATA; /* Free memory */ Py_XDECREF(iter); return edges; } /* Compute the interaction energy: sum_i,j qi^T U qj = sum_i qi^T sum_j U qj */ double interaction_energy(PyArrayObject* ppm, const PyArrayObject* XYZ, const PyArrayObject* U, int ngb_size) { npy_intp k, x, y, z, pos; double *p, *buf; double res = 0.0, tmp; PyArrayIterObject* iter; int axis = 1; double* ppm_data; npy_intp K = ppm->dimensions[3]; npy_intp u2 = ppm->dimensions[2]*K; npy_intp u1 = ppm->dimensions[1]*u2; npy_intp* xyz; const double* U_data = (double*)U->data; int* ngb; /* Neighborhood system */ ngb = _select_neighborhood_system(ngb_size); /* Pointer to ppm array */ ppm_data = (double*)ppm->data; /* Allocate auxiliary vector */ p = (double*)calloc(K, sizeof(double)); /* Loop over points */ iter = (PyArrayIterObject*)PyArray_IterAllButAxis((PyObject*)XYZ, &axis); while(iter->index < iter->size) { /* Compute the average ppm in the neighborhood */ xyz = PyArray_ITER_DATA(iter); x = xyz[0]; y = xyz[1]; z = xyz[2]; _ngb_integrate(p, ppm, x, y, z, U_data, (const int*)ngb, ngb_size); /* Calculate the dot product qi^T p where qi is the local posterior */ tmp = 0.0; pos = x*u1 + y*u2 + z*K; for (k=0, buf=p; k #include extern void mrf_import_array(void); extern void ve_step(PyArrayObject* ppm, const PyArrayObject* ref, const PyArrayObject* XYZ, const PyArrayObject* U, int ngb_size, double beta); extern PyArrayObject* make_edges(const PyArrayObject* mask, int ngb_size); extern double interaction_energy(PyArrayObject* ppm, const PyArrayObject* XYZ, const PyArrayObject* U, int ngb_size); #ifdef __cplusplus } #endif #endif nipy-0.3.0/nipy/algorithms/segmentation/segmentation.py000066400000000000000000000201521210344137400233640ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import numpy as np from ._segmentation import _ve_step, _interaction_energy NITERS = 10 NGB_SIZE = 26 BETA = 0.1 nonzero = lambda x: np.maximum(x, 1e-50) log = lambda x: np.log(nonzero(x)) class Segmentation(object): def __init__(self, data, mask=None, mu=None, sigma=None, ppm=None, prior=None, U=None, ngb_size=NGB_SIZE, beta=BETA): """ Class for multichannel Markov random field image segmentation using the variational EM algorithm. For details regarding the underlying algorithm, see: Roche et al, 2011. On the convergence of EM-like algorithms for image segmentation using Markov random fields. Medical Image Analysis (DOI: 10.1016/j.media.2011.05.002). Parameters ---------- data : array-like Input image array mask : array-like or tuple of array Input mask to restrict the segmentation beta : float Markov regularization parameter mu : array-like Initial class-specific means sigma : array-like Initial class-specific variances """ data = data.squeeze() if not len(data.shape) in (3, 4): raise ValueError('Invalid input image') if len(data.shape) == 3: nchannels = 1 space_shape = data.shape else: nchannels = data.shape[-1] space_shape = data.shape[0:-1] self.nchannels = nchannels # Make default mask (required by MRF regularization). This wil # be passed to the _ve_step C-routine, which assumes a # contiguous int array and raise an error otherwise. Voxels on # the image borders are further rejected to avoid segmentation # faults. 
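        # Illustrative usage sketch (explanatory comment; the synthetic data,
        # the two-class means/variances and the beta value below are made-up
        # examples, not library defaults). Driving this class end to end
        # looks roughly like:
        #
        #     import numpy as np
        #     from nipy.algorithms.segmentation.segmentation import Segmentation
        #
        #     data = np.random.random((30, 30, 20))      # single-channel 3D volume
        #     S = Segmentation(data, mu=[0.25, 0.75],    # two classes
        #                      sigma=[1., 1.], beta=0.2) # MRF regularization weight
        #     S.run(niters=10)                           # alternate VE and VM steps
        #     label = S.map()                            # MAP label volume (uint8)
        #     energy = S.free_energy()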
if mask == None: mask = np.ones(space_shape, dtype=bool) X, Y, Z = np.where(mask) XYZ = np.zeros((X.shape[0], 3), dtype='intp') XYZ[:, 0], XYZ[:, 1], XYZ[:, 2] = X, Y, Z self.XYZ = XYZ self.mask = mask self.data = data[mask] if nchannels == 1: self.data = np.reshape(self.data, (self.data.shape[0], 1)) # By default, the ppm is initialized as a collection of # uniform distributions if ppm == None: nclasses = len(mu) self.ppm = np.zeros(list(space_shape) + [nclasses]) self.ppm[mask] = 1. / nclasses self.is_ppm = False self.mu = np.array(mu, dtype='double').reshape(\ (nclasses, nchannels)) self.sigma = np.array(sigma, dtype='double').reshape(\ (nclasses, nchannels, nchannels)) elif mu == None: nclasses = ppm.shape[-1] self.ppm = np.asarray(ppm) self.is_ppm = True self.mu = np.zeros((nclasses, nchannels)) self.sigma = np.zeros((nclasses, nchannels, nchannels)) else: raise ValueError('missing information') self.nclasses = nclasses if not prior == None: self.prior = np.asarray(prior)[self.mask].reshape(\ [self.data.shape[0], nclasses]) else: self.prior = None self.ngb_size = int(ngb_size) self.set_markov_prior(beta, U=U) def set_markov_prior(self, beta, U=None): if not U == None: # make sure it's C-contiguous self.U = np.asarray(U).copy() else: # Potts model U = np.ones((self.nclasses, self.nclasses)) U[_diag_indices(self.nclasses)] = 0 self.U = U self.beta = float(beta) def vm_step(self, freeze=()): classes = range(self.nclasses) for i in freeze: classes.remove(i) for i in classes: P = self.ppm[..., i][self.mask].ravel() Z = nonzero(P.sum()) tmp = self.data.T * P.T mu = tmp.sum(1) / Z mu_ = mu.reshape((len(mu), 1)) sigma = np.dot(tmp, self.data) / Z - np.dot(mu_, mu_.T) self.mu[i] = mu self.sigma[i] = sigma def log_external_field(self): """ Compute the logarithm of the external field, where the external field is defined as the likelihood times the first-order component of the prior. """ lef = np.zeros([self.data.shape[0], self.nclasses]) for i in range(self.nclasses): centered_data = self.data - self.mu[i] if self.nchannels == 1: inv_sigma = 1. / nonzero(self.sigma[i]) norm_factor = np.sqrt(inv_sigma.squeeze()) else: inv_sigma = np.linalg.inv(self.sigma[i]) norm_factor = 1. / np.sqrt(\ nonzero(np.linalg.det(self.sigma[i]))) maha_dist = np.sum(centered_data * np.dot(inv_sigma, centered_data.T).T, 1) lef[:, i] = -.5 * maha_dist lef[:, i] += log(norm_factor) if not self.prior == None: lef += log(self.prior) return lef def normalized_external_field(self): f = self.log_external_field().T f -= np.max(f, 0) np.exp(f, f) f /= f.sum(0) return f.T def ve_step(self): nef = self.normalized_external_field() if self.beta == 0: self.ppm[self.mask] = np.reshape(\ nef, self.ppm[self.mask].shape) else: self.ppm = _ve_step(self.ppm, nef, self.XYZ, self.U, self.ngb_size, self.beta) def run(self, niters=NITERS, freeze=()): if self.is_ppm: self.vm_step(freeze=freeze) for i in range(niters): self.ve_step() self.vm_step(freeze=freeze) self.is_ppm = True def map(self): """ Return the maximum a posterior label map """ return map_from_ppm(self.ppm, self.mask) def free_energy(self, ppm=None): """ Compute the free energy defined as: F(q, theta) = int q(x) log q(x)/p(x,y/theta) dx associated with input parameters mu, sigma and beta (up to an ignored constant). 
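        Roughly, the code below evaluates this as the sum of two terms,

            F = sum_i q_i . (log q_i - lef_i)
                + beta * sum_i q_i . ( sum_{j in ngb(i)} U q_j )

        where q is the posterior restricted to the mask, lef is the log
        external field, and the second (interaction) term is delegated to
        the compiled _interaction_energy routine and dropped when beta is
        zero.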
""" if ppm == None: ppm = self.ppm q = ppm[self.mask] # Entropy term lef = self.log_external_field() f1 = np.sum(q * (log(q) - lef)) # Interaction term if self.beta > 0.0: f2 = self.beta * _interaction_energy(ppm, self.XYZ, self.U, self.ngb_size) else: f2 = 0.0 return f1 + f2 def _diag_indices(n, ndim=2): # diag_indices function present in numpy 1.4 and later. This for # compatibility with numpy < 1.4 idx = np.arange(n) return (idx,) * ndim def moment_matching(dat, mu, sigma, glob_mu, glob_sigma): """ Moment matching strategy for parameter initialization to feed a segmentation algorithm. Parameters ---------- data: array Image data. mu : array Template class-specific intensity means sigma : array Template class-specific intensity variances glob_mu : float Template global intensity mean glob_sigma : float Template global intensity variance Returns ------- dat_mu: array Guess of class-specific intensity means dat_sigma: array Guess of class-specific intensity variances """ dat_glob_mu = float(np.mean(dat)) dat_glob_sigma = float(np.var(dat)) a = np.sqrt(dat_glob_sigma / glob_sigma) b = dat_glob_mu - a * glob_mu dat_mu = a * mu + b dat_sigma = (a ** 2) * sigma return dat_mu, dat_sigma def map_from_ppm(ppm, mask=None): x = np.zeros(ppm.shape[0:-1], dtype='uint8') if mask == None: mask = ppm == 0 x[mask] = ppm[mask].argmax(-1) + 1 return x def binarize_ppm(q): """ Assume input ppm is masked (ndim==2) """ bin_q = np.zeros(q.shape) bin_q[(range(q.shape[0]), np.argmax(q, axis=1))] = 1. return bin_q nipy-0.3.0/nipy/algorithms/segmentation/setup.py000066400000000000000000000011771210344137400220350ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import os def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('segmentation', parent_package, top_path) config.add_subpackage('tests') config.add_include_dirs(config.name.replace('.', os.sep)) config.add_extension('_segmentation', sources=['_segmentation.pyx', 'mrf.c']) return config if __name__ == '__main__': print('This is the wrong setup.py file to run') nipy-0.3.0/nipy/algorithms/segmentation/tests/000077500000000000000000000000001210344137400214575ustar00rootroot00000000000000nipy-0.3.0/nipy/algorithms/segmentation/tests/__init__.py000066400000000000000000000000501210344137400235630ustar00rootroot00000000000000# Init to make test directory a package nipy-0.3.0/nipy/algorithms/segmentation/tests/test_segmentation.py000066400000000000000000000076601210344137400255760ustar00rootroot00000000000000""" Testing brain segmentation module """ import numpy as np from nose.tools import assert_equal, assert_almost_equal from numpy.testing import assert_array_almost_equal from ..segmentation import Segmentation from ..brain_segmentation import BrainT1Segmentation from ....io.files import load as load_image from ....testing import anatfile anat_img = load_image(anatfile) anat_mask = anat_img.get_data() > 0 DIMS = (30, 30, 20) def _check_dims(x, ndim, shape): if isinstance(shape, int): shape = (shape, ) for i in range(ndim): assert_equal(x.shape[i], shape[i]) def _test_brain_seg(model, niters=3, beta=0, ngb_size=6, init_params=None, convert=True): S = BrainT1Segmentation(anat_img.get_data(), mask=anat_mask, model=model, niters=niters, beta=beta, ngb_size=ngb_size, init_params=init_params, convert=convert) shape = anat_img.shape if convert: nclasses = 3 else: nclasses = S.mixmat.shape[0] # Check that 
the class attributes have appropriate dimensions _check_dims(S.ppm, 4, list(shape) + [nclasses]) _check_dims(S.label, 3, shape) _check_dims(S.mu, 1, S.mixmat.shape[0]) _check_dims(S.sigma, 1, S.mixmat.shape[0]) # Check that probabilities are zero outside the mask and sum up to # one inside the mask assert_almost_equal(S.ppm[True - S.mask].sum(-1).max(), 0) assert_almost_equal(S.ppm[S.mask].sum(-1).min(), 1) # Check that labels are zero outside the mask and > 1 inside the # mask assert_almost_equal(S.label[True - S.mask].max(), 0) assert_almost_equal(S.label[S.mask].min(), 1) def test_brain_seg1(): _test_brain_seg('3k', niters=3, beta=0.0, ngb_size=6) def test_brain_seg2(): _test_brain_seg('3k', niters=3, beta=0.5, ngb_size=6) def test_brain_seg3(): _test_brain_seg('4k', niters=3, beta=0.5, ngb_size=6) def test_brain_seg4(): _test_brain_seg('4k', niters=3, beta=0.5, ngb_size=26) def test_brain_seg5(): _test_brain_seg(np.array([[1., 0., 0.], [1., 0., 0.], [0., 1., 0.], [0., 1., 0.], [0., 0., 1.]]), niters=3, beta=0.5, ngb_size=6) def test_brain_seg6(): _test_brain_seg('3k', niters=3, beta=0.5, ngb_size=6, convert=False) def test_brain_seg7(): mu = np.array([0, 50, 100]) sigma = np.array([1000, 2000, 3000]) _test_brain_seg('3k', niters=3, beta=0.5, ngb_size=6, init_params=(mu, sigma)) def _test_segmentation(S, nchannels=1): assert_equal(S.nchannels, nchannels) nef = S.normalized_external_field() assert_array_almost_equal(nef.sum(-1), np.ones(nef.shape[0])) S.run(niters=5) label = S.map() assert_equal(label.ndim, 3) assert_equal(label.dtype, 'uint8') assert isinstance(S.free_energy(), float) def test_segmentation_3d(): data = np.random.random(DIMS) _test_segmentation(Segmentation(data, mu=[0.25, 0.75], sigma=[1, 1])) def test_segmentation_3d_with_MRF(): data = np.random.random(DIMS) _test_segmentation(Segmentation(data, mu=[0.25, 0.75], sigma=[1, 1], beta=.2)) def test_segmentation_3d_with_mask(): data = np.random.random(DIMS) mask = data > .1 if mask[0].size < 1: return _test_segmentation(Segmentation(data, mu=[0.25, 0.75], sigma=[1, 1], mask=mask)) def test_segmentation_3d_multichannel(): data = np.random.random(list(DIMS) + [2]) mask = data[..., 0] > .1 if mask[0].size < 1: return _test_segmentation(Segmentation(data, mu=[[0.25, 0.25], [0.75, 0.75]], sigma=[np.eye(2), np.eye(2)], mask=mask), nchannels=2) nipy-0.3.0/nipy/algorithms/setup.py000066400000000000000000000013461210344137400173360ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: def configuration(parent_package='', top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('algorithms', parent_package, top_path) config.add_subpackage('tests') config.add_subpackage('registration') config.add_subpackage('segmentation') config.add_subpackage('statistics') config.add_subpackage('diagnostics') config.add_subpackage('clustering') config.add_subpackage('utils') config.add_subpackage('graph') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) nipy-0.3.0/nipy/algorithms/statistics/000077500000000000000000000000001210344137400200125ustar00rootroot00000000000000nipy-0.3.0/nipy/algorithms/statistics/__init__.py000066400000000000000000000005361210344137400221270ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ TODO """ __docformat__ = 'restructuredtext' 
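# A rough, made-up usage sketch of the quantile and median helpers imported
# just below (they wrap a partial-sorting C routine); the array shape, the
# ratio and the axis are arbitrary values chosen only for illustration:
#
#     import numpy as np
#     from nipy.algorithms.statistics import quantile, median
#
#     X = np.random.random((10, 1000))
#     q90 = quantile(X, ratio=0.9, interp=True, axis=1)  # quantiles along axis 1
#     med = median(X, axis=1)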
from . import intvol, rft, onesample, formula from ._quantile import _quantile as quantile, _median as median from nipy.testing import Tester test = Tester().test bench = Tester().bench nipy-0.3.0/nipy/algorithms/statistics/_quantile.c000066400000000000000000006674001210344137400221540ustar00rootroot00000000000000/* Generated by Cython 0.17.4 on Sat Jan 12 17:27:29 2013 */ #define PY_SSIZE_T_CLEAN #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02040000 #error Cython requires Python 2.4+. #else #include /* For offsetof */ #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #endif #if PY_VERSION_HEX < 0x02050000 typedef int Py_ssize_t; #define PY_SSIZE_T_MAX INT_MAX #define PY_SSIZE_T_MIN INT_MIN #define PY_FORMAT_SIZE_T "" #define CYTHON_FORMAT_SSIZE_T "" #define PyInt_FromSsize_t(z) PyInt_FromLong(z) #define PyInt_AsSsize_t(o) __Pyx_PyInt_AsInt(o) #define PyNumber_Index(o) ((PyNumber_Check(o) && !PyFloat_Check(o)) ? PyNumber_Int(o) : \ (PyErr_Format(PyExc_TypeError, \ "expected index value, got %.200s", Py_TYPE(o)->tp_name), \ (PyObject*)0)) #define __Pyx_PyIndex_Check(o) (PyNumber_Check(o) && !PyFloat_Check(o) && \ !PyComplex_Check(o)) #define PyIndex_Check __Pyx_PyIndex_Check #define PyErr_WarnEx(category, message, stacklevel) PyErr_Warn(category, message) #define __PYX_BUILD_PY_SSIZE_T "i" #else #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #define __Pyx_PyIndex_Check PyIndex_Check #endif #if PY_VERSION_HEX < 0x02060000 #define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt) #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type) #define Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size) #define PyVarObject_HEAD_INIT(type, size) \ PyObject_HEAD_INIT(type) size, #define PyType_Modified(t) typedef struct { void *buf; PyObject *obj; Py_ssize_t len; Py_ssize_t itemsize; int readonly; int ndim; char *format; Py_ssize_t *shape; Py_ssize_t *strides; Py_ssize_t *suboffsets; void *internal; } Py_buffer; #define PyBUF_SIMPLE 0 #define PyBUF_WRITABLE 0x0001 #define PyBUF_FORMAT 0x0004 #define PyBUF_ND 0x0008 #define PyBUF_STRIDES (0x0010 | PyBUF_ND) #define PyBUF_C_CONTIGUOUS (0x0020 | PyBUF_STRIDES) #define PyBUF_F_CONTIGUOUS (0x0040 | PyBUF_STRIDES) #define PyBUF_ANY_CONTIGUOUS (0x0080 | PyBUF_STRIDES) #define PyBUF_INDIRECT (0x0100 | PyBUF_STRIDES) #define PyBUF_RECORDS (PyBUF_STRIDES | PyBUF_FORMAT | PyBUF_WRITABLE) #define PyBUF_FULL (PyBUF_INDIRECT | PyBUF_FORMAT | PyBUF_WRITABLE) typedef int (*getbufferproc)(PyObject *, Py_buffer *, int); typedef void (*releasebufferproc)(PyObject *, Py_buffer *); #endif #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \ PyCode_New(a, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #else #define __Pyx_BUILTIN_MODULE_NAME 
"builtins" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #endif #if PY_MAJOR_VERSION < 3 && PY_MINOR_VERSION < 6 #define PyUnicode_FromString(s) PyUnicode_Decode(s, strlen(s), "UTF-8", "strict") #endif #if PY_MAJOR_VERSION >= 3 #define Py_TPFLAGS_CHECKTYPES 0 #define Py_TPFLAGS_HAVE_INDEX 0 #endif #if (PY_VERSION_HEX < 0x02060000) || (PY_MAJOR_VERSION >= 3) #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ? \ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #else #define CYTHON_PEP393_ENABLED 0 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_READ(k, d, i) ((k=k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #endif #if PY_VERSION_HEX < 0x02060000 #define PyBytesObject PyStringObject #define PyBytes_Type PyString_Type #define PyBytes_Check PyString_Check #define PyBytes_CheckExact PyString_CheckExact #define PyBytes_FromString PyString_FromString #define PyBytes_FromStringAndSize PyString_FromStringAndSize #define PyBytes_FromFormat PyString_FromFormat #define PyBytes_DecodeEscape PyString_DecodeEscape #define PyBytes_AsString PyString_AsString #define PyBytes_AsStringAndSize PyString_AsStringAndSize #define PyBytes_Size PyString_Size #define PyBytes_AS_STRING PyString_AS_STRING #define PyBytes_GET_SIZE PyString_GET_SIZE #define PyBytes_Repr PyString_Repr #define PyBytes_Concat PyString_Concat #define PyBytes_ConcatAndDel PyString_ConcatAndDel #endif #if PY_VERSION_HEX < 0x02060000 #define PySet_Check(obj) PyObject_TypeCheck(obj, &PySet_Type) #define PyFrozenSet_Check(obj) PyObject_TypeCheck(obj, &PyFrozenSet_Type) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_VERSION_HEX < 0x03020000 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if (PY_MAJOR_VERSION < 3) || (PY_VERSION_HEX 
>= 0x03010300) #define __Pyx_PySequence_GetSlice(obj, a, b) PySequence_GetSlice(obj, a, b) #define __Pyx_PySequence_SetSlice(obj, a, b, value) PySequence_SetSlice(obj, a, b, value) #define __Pyx_PySequence_DelSlice(obj, a, b) PySequence_DelSlice(obj, a, b) #else #define __Pyx_PySequence_GetSlice(obj, a, b) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), (PyObject*)0) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_GetSlice(obj, a, b)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object is unsliceable", (obj)->ob_type->tp_name), (PyObject*)0))) #define __Pyx_PySequence_SetSlice(obj, a, b, value) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_SetSlice(obj, a, b, value)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice assignment", (obj)->ob_type->tp_name), -1))) #define __Pyx_PySequence_DelSlice(obj, a, b) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_DelSlice(obj, a, b)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice deletion", (obj)->ob_type->tp_name), -1))) #endif #if PY_MAJOR_VERSION >= 3 #define PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func)) #endif #if PY_VERSION_HEX < 0x02050000 #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),((char *)(n))) #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),((char *)(n)),(a)) #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),((char *)(n))) #else #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),(n)) #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),(n),(a)) #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),(n)) #endif #if PY_VERSION_HEX < 0x02050000 #define __Pyx_NAMESTR(n) ((char *)(n)) #define __Pyx_DOCSTR(n) ((char *)(n)) #else #define __Pyx_NAMESTR(n) (n) #define __Pyx_DOCSTR(n) (n) #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include #define __PYX_HAVE__nipy__algorithms__statistics___quantile #define __PYX_HAVE_API__nipy__algorithms__statistics___quantile #include "stdio.h" #include "stdlib.h" #include "numpy/arrayobject.h" #include "numpy/ufuncobject.h" #include "quantile.h" #ifdef _OPENMP #include #endif /* _OPENMP */ #ifdef PYREX_WITHOUT_ASSERTIONS #define CYTHON_WITHOUT_ASSERTIONS #endif /* inline attribute */ #ifndef CYTHON_INLINE #if defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif /* unused attribute */ #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || 
(defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif typedef struct {PyObject **p; char *s; const long n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; /*proto*/ /* Type Conversion Predeclarations */ #define __Pyx_PyBytes_FromUString(s) PyBytes_FromString((char*)s) #define __Pyx_PyBytes_AsUString(s) ((unsigned char*) PyBytes_AsString(s)) #define __Pyx_Owned_Py_None(b) (Py_INCREF(Py_None), Py_None) #define __Pyx_PyBool_FromLong(b) ((b) ? (Py_INCREF(Py_True), Py_True) : (Py_INCREF(Py_False), Py_False)) static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x); static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject*); #if CYTHON_COMPILING_IN_CPYTHON #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #ifdef __GNUC__ /* Test for GCC > 2.95 */ #if __GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* __GNUC__ > 2 ... */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ > 2 ... */ #else /* __GNUC__ */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static PyObject *__pyx_m; static PyObject *__pyx_b; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= __FILE__; static const char *__pyx_filename; #if !defined(CYTHON_CCOMPLEX) #if defined(__cplusplus) #define CYTHON_CCOMPLEX 1 #elif defined(_Complex_I) #define CYTHON_CCOMPLEX 1 #else #define CYTHON_CCOMPLEX 0 #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus #include #else #include #endif #endif #if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__) #undef _Complex_I #define _Complex_I 1.0fj #endif static const char *__pyx_f[] = { "_quantile.pyx", "numpy.pxd", "type.pxd", }; /* "numpy.pxd":723 * # in Cython to enable them only on the right systems. 
* * ctypedef npy_int8 int8_t # <<<<<<<<<<<<<< * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t */ typedef npy_int8 __pyx_t_5numpy_int8_t; /* "numpy.pxd":724 * * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t # <<<<<<<<<<<<<< * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t */ typedef npy_int16 __pyx_t_5numpy_int16_t; /* "numpy.pxd":725 * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t # <<<<<<<<<<<<<< * ctypedef npy_int64 int64_t * #ctypedef npy_int96 int96_t */ typedef npy_int32 __pyx_t_5numpy_int32_t; /* "numpy.pxd":726 * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t # <<<<<<<<<<<<<< * #ctypedef npy_int96 int96_t * #ctypedef npy_int128 int128_t */ typedef npy_int64 __pyx_t_5numpy_int64_t; /* "numpy.pxd":730 * #ctypedef npy_int128 int128_t * * ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<< * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t */ typedef npy_uint8 __pyx_t_5numpy_uint8_t; /* "numpy.pxd":731 * * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<< * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t */ typedef npy_uint16 __pyx_t_5numpy_uint16_t; /* "numpy.pxd":732 * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<< * ctypedef npy_uint64 uint64_t * #ctypedef npy_uint96 uint96_t */ typedef npy_uint32 __pyx_t_5numpy_uint32_t; /* "numpy.pxd":733 * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<< * #ctypedef npy_uint96 uint96_t * #ctypedef npy_uint128 uint128_t */ typedef npy_uint64 __pyx_t_5numpy_uint64_t; /* "numpy.pxd":737 * #ctypedef npy_uint128 uint128_t * * ctypedef npy_float32 float32_t # <<<<<<<<<<<<<< * ctypedef npy_float64 float64_t * #ctypedef npy_float80 float80_t */ typedef npy_float32 __pyx_t_5numpy_float32_t; /* "numpy.pxd":738 * * ctypedef npy_float32 float32_t * ctypedef npy_float64 float64_t # <<<<<<<<<<<<<< * #ctypedef npy_float80 float80_t * #ctypedef npy_float128 float128_t */ typedef npy_float64 __pyx_t_5numpy_float64_t; /* "numpy.pxd":747 * # The int types are mapped a bit surprising -- * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t # <<<<<<<<<<<<<< * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t */ typedef npy_long __pyx_t_5numpy_int_t; /* "numpy.pxd":748 * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t * ctypedef npy_longlong long_t # <<<<<<<<<<<<<< * ctypedef npy_longlong longlong_t * */ typedef npy_longlong __pyx_t_5numpy_long_t; /* "numpy.pxd":749 * ctypedef npy_long int_t * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<< * * ctypedef npy_ulong uint_t */ typedef npy_longlong __pyx_t_5numpy_longlong_t; /* "numpy.pxd":751 * ctypedef npy_longlong longlong_t * * ctypedef npy_ulong uint_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t */ typedef npy_ulong __pyx_t_5numpy_uint_t; /* "numpy.pxd":752 * * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulonglong_t * */ typedef npy_ulonglong __pyx_t_5numpy_ulong_t; /* "numpy.pxd":753 * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<< * * ctypedef npy_intp intp_t */ typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t; /* "numpy.pxd":755 * ctypedef npy_ulonglong ulonglong_t * * ctypedef npy_intp intp_t # 
<<<<<<<<<<<<<< * ctypedef npy_uintp uintp_t * */ typedef npy_intp __pyx_t_5numpy_intp_t; /* "numpy.pxd":756 * * ctypedef npy_intp intp_t * ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<< * * ctypedef npy_double float_t */ typedef npy_uintp __pyx_t_5numpy_uintp_t; /* "numpy.pxd":758 * ctypedef npy_uintp uintp_t * * ctypedef npy_double float_t # <<<<<<<<<<<<<< * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t */ typedef npy_double __pyx_t_5numpy_float_t; /* "numpy.pxd":759 * * ctypedef npy_double float_t * ctypedef npy_double double_t # <<<<<<<<<<<<<< * ctypedef npy_longdouble longdouble_t * */ typedef npy_double __pyx_t_5numpy_double_t; /* "numpy.pxd":760 * ctypedef npy_double float_t * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cfloat cfloat_t */ typedef npy_longdouble __pyx_t_5numpy_longdouble_t; #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< float > __pyx_t_float_complex; #else typedef float _Complex __pyx_t_float_complex; #endif #else typedef struct { float real, imag; } __pyx_t_float_complex; #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< double > __pyx_t_double_complex; #else typedef double _Complex __pyx_t_double_complex; #endif #else typedef struct { double real, imag; } __pyx_t_double_complex; #endif /*--- Type declarations ---*/ /* "numpy.pxd":762 * ctypedef npy_longdouble longdouble_t * * ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<< * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t */ typedef npy_cfloat __pyx_t_5numpy_cfloat_t; /* "numpy.pxd":763 * * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<< * ctypedef npy_clongdouble clongdouble_t * */ typedef npy_cdouble __pyx_t_5numpy_cdouble_t; /* "numpy.pxd":764 * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cdouble complex_t */ typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t; /* "numpy.pxd":766 * ctypedef npy_clongdouble clongdouble_t * * ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew1(a): */ typedef npy_cdouble __pyx_t_5numpy_complex_t; #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); /*proto*/ #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define __Pyx_RefNannySetupContext(name, acquire_gil) \ if (acquire_gil) { \ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \ PyGILState_Release(__pyx_gilstate_save); \ } else { \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil) \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext() \ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) 
__Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif /* CYTHON_REFNANNY */ #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name); /*proto*/ static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /*proto*/ static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /*proto*/ static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[], \ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, \ const char* function_name); /*proto*/ static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb); /*proto*/ static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb); /*proto*/ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /*proto*/ #define __Pyx_SetItemInt(o, i, v, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? \ __Pyx_SetItemInt_Fast(o, i, v) : \ __Pyx_SetItemInt_Generic(o, to_py_func(i), v)) static CYTHON_INLINE int __Pyx_SetItemInt_Generic(PyObject *o, PyObject *j, PyObject *v) { int r; if (!j) return -1; r = PyObject_SetItem(o, j, v); Py_DECREF(j); return r; } static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObject *v) { #if CYTHON_COMPILING_IN_CPYTHON if (PyList_CheckExact(o)) { Py_ssize_t n = (likely(i >= 0)) ? i : i + PyList_GET_SIZE(o); if (likely((n >= 0) & (n < PyList_GET_SIZE(o)))) { PyObject* old = PyList_GET_ITEM(o, n); Py_INCREF(v); PyList_SET_ITEM(o, n, v); Py_DECREF(old); return 1; } } else { /* inlined PySequence_SetItem() */ PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; if (likely(m && m->sq_ass_item)) { if (unlikely(i < 0) && likely(m->sq_length)) { Py_ssize_t l = m->sq_length(o); if (unlikely(l < 0)) return -1; i += l; } return m->sq_ass_item(o, i, v); } } #else #if CYTHON_COMPILING_IN_PYPY if (PySequence_Check(o) && !PyDict_Check(o)) { #else if (PySequence_Check(o)) { #endif return PySequence_SetItem(o, i, v); } #endif return __Pyx_SetItemInt_Generic(o, PyInt_FromSsize_t(i), v); } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { PyObject *r; if (!j) return NULL; r = PyObject_GetItem(o, j); Py_DECREF(j); return r; } #define __Pyx_GetItemInt_List(o, i, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? 
\ __Pyx_GetItemInt_List_Fast(o, i) : \ __Pyx_GetItemInt_Generic(o, to_py_func(i))) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i) { #if CYTHON_COMPILING_IN_CPYTHON if (likely((0 <= i) & (i < PyList_GET_SIZE(o)))) { PyObject *r = PyList_GET_ITEM(o, i); Py_INCREF(r); return r; } else if ((-PyList_GET_SIZE(o) <= i) & (i < 0)) { PyObject *r = PyList_GET_ITEM(o, PyList_GET_SIZE(o) + i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } #define __Pyx_GetItemInt_Tuple(o, i, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? \ __Pyx_GetItemInt_Tuple_Fast(o, i) : \ __Pyx_GetItemInt_Generic(o, to_py_func(i))) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i) { #if CYTHON_COMPILING_IN_CPYTHON if (likely((0 <= i) & (i < PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, i); Py_INCREF(r); return r; } else if ((-PyTuple_GET_SIZE(o) <= i) & (i < 0)) { PyObject *r = PyTuple_GET_ITEM(o, PyTuple_GET_SIZE(o) + i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } #define __Pyx_GetItemInt(o, i, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? \ __Pyx_GetItemInt_Fast(o, i) : \ __Pyx_GetItemInt_Generic(o, to_py_func(i))) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i) { #if CYTHON_COMPILING_IN_CPYTHON if (PyList_CheckExact(o)) { Py_ssize_t n = (likely(i >= 0)) ? i : i + PyList_GET_SIZE(o); if (likely((n >= 0) & (n < PyList_GET_SIZE(o)))) { PyObject *r = PyList_GET_ITEM(o, n); Py_INCREF(r); return r; } } else if (PyTuple_CheckExact(o)) { Py_ssize_t n = (likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o); if (likely((n >= 0) & (n < PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, n); Py_INCREF(r); return r; } } else { /* inlined PySequence_GetItem() */ PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; if (likely(m && m->sq_item)) { if (unlikely(i < 0) && likely(m->sq_length)) { Py_ssize_t l = m->sq_length(o); if (unlikely(l < 0)) return NULL; i += l; } return m->sq_item(o, i); } } #else if (PySequence_Check(o)) { return PySequence_GetItem(o, i); } #endif return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); } static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /*proto*/ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); static CYTHON_INLINE int __Pyx_IterFinish(void); /*proto*/ static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected); /*proto*/ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level); /*proto*/ #if CYTHON_CCOMPLEX #ifdef __cplusplus #define __Pyx_CREAL(z) ((z).real()) #define __Pyx_CIMAG(z) ((z).imag()) #else #define __Pyx_CREAL(z) (__real__(z)) #define __Pyx_CIMAG(z) (__imag__(z)) #endif #else #define __Pyx_CREAL(z) ((z).real) #define __Pyx_CIMAG(z) ((z).imag) #endif #if defined(_WIN32) && defined(__cplusplus) && CYTHON_CCOMPLEX #define __Pyx_SET_CREAL(z,x) ((z).real(x)) #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) #else #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) #endif static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float); #if CYTHON_CCOMPLEX #define __Pyx_c_eqf(a, b) ((a)==(b)) #define 
__Pyx_c_sumf(a, b) ((a)+(b)) #define __Pyx_c_difff(a, b) ((a)-(b)) #define __Pyx_c_prodf(a, b) ((a)*(b)) #define __Pyx_c_quotf(a, b) ((a)/(b)) #define __Pyx_c_negf(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zerof(z) ((z)==(float)0) #define __Pyx_c_conjf(z) (::std::conj(z)) #if 1 #define __Pyx_c_absf(z) (::std::abs(z)) #define __Pyx_c_powf(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zerof(z) ((z)==0) #define __Pyx_c_conjf(z) (conjf(z)) #if 1 #define __Pyx_c_absf(z) (cabsf(z)) #define __Pyx_c_powf(a, b) (cpowf(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex); static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex); #if 1 static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex, __pyx_t_float_complex); #endif #endif static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double); #if CYTHON_CCOMPLEX #define __Pyx_c_eq(a, b) ((a)==(b)) #define __Pyx_c_sum(a, b) ((a)+(b)) #define __Pyx_c_diff(a, b) ((a)-(b)) #define __Pyx_c_prod(a, b) ((a)*(b)) #define __Pyx_c_quot(a, b) ((a)/(b)) #define __Pyx_c_neg(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zero(z) ((z)==(double)0) #define __Pyx_c_conj(z) (::std::conj(z)) #if 1 #define __Pyx_c_abs(z) (::std::abs(z)) #define __Pyx_c_pow(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zero(z) ((z)==0) #define __Pyx_c_conj(z) (conj(z)) #if 1 #define __Pyx_c_abs(z) (cabs(z)) #define __Pyx_c_pow(a, b) (cpow(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex); static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex); #if 1 static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex, __pyx_t_double_complex); #endif #endif static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject *); static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject *); static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject *); static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject *); static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject *); static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject *); static CYTHON_INLINE signed char 
__Pyx_PyInt_AsSignedChar(PyObject *); static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject *); static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject *); static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject *); static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject *); static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject *); static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject *); static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject *); static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject *); static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject *); static int __Pyx_check_binary_version(void); #if !defined(__Pyx_PyIdentifier_FromString) #if PY_MAJOR_VERSION < 3 #define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s) #else #define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s) #endif #endif static PyObject *__Pyx_ImportModule(const char *name); /*proto*/ static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict); /*proto*/ typedef struct { int code_line; PyCodeObject* code_object; } __Pyx_CodeObjectCacheEntry; struct __Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); /*proto*/ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/ /* Module declarations from 'cpython.buffer' */ /* Module declarations from 'cpython.ref' */ /* Module declarations from 'libc.stdio' */ /* Module declarations from 'cpython.object' */ /* Module declarations from '__builtin__' */ /* Module declarations from 'cpython.type' */ static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0; /* Module declarations from 'libc.stdlib' */ /* Module declarations from 'numpy' */ /* Module declarations from 'numpy' */ static PyTypeObject *__pyx_ptype_5numpy_dtype = 0; static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0; static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0; static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0; static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0; static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/ /* Module declarations from 'nipy.algorithms.statistics._quantile' */ #define __Pyx_MODULE_NAME "nipy.algorithms.statistics._quantile" int __pyx_module_is_main_nipy__algorithms__statistics___quantile = 0; /* Implementation of 'nipy.algorithms.statistics._quantile' */ static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_range; static PyObject *__pyx_builtin_RuntimeError; static PyObject *__pyx_pf_4nipy_10algorithms_10statistics_9_quantile__quantile(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_X, double __pyx_v_ratio, int __pyx_v_interp, int __pyx_v_axis); /* proto */ static PyObject *__pyx_pf_4nipy_10algorithms_10statistics_9_quantile_2_median(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_X, PyObject *__pyx_v_axis); /* proto */ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ 
static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */ static char __pyx_k_1[] = "ratio must be in range 0..1"; static char __pyx_k_3[] = "ndarray is not C contiguous"; static char __pyx_k_5[] = "ndarray is not Fortran contiguous"; static char __pyx_k_7[] = "Non-native byte order not supported"; static char __pyx_k_9[] = "unknown dtype code in numpy.pxd (%d)"; static char __pyx_k_10[] = "Format string allocated too short, see comment in numpy.pxd"; static char __pyx_k_13[] = "Format string allocated too short."; static char __pyx_k_15[] = "\nVery fast quantile computation using partial sorting.\nAuthor: Alexis Roche.\n"; static char __pyx_k_16[] = "0.1"; static char __pyx_k_19[] = "/Users/mb312/dev_trees/nipy/nipy/algorithms/statistics/_quantile.pyx"; static char __pyx_k_20[] = "nipy.algorithms.statistics._quantile"; static char __pyx_k__B[] = "B"; static char __pyx_k__H[] = "H"; static char __pyx_k__I[] = "I"; static char __pyx_k__L[] = "L"; static char __pyx_k__O[] = "O"; static char __pyx_k__Q[] = "Q"; static char __pyx_k__X[] = "X"; static char __pyx_k__Y[] = "Y"; static char __pyx_k__b[] = "b"; static char __pyx_k__d[] = "d"; static char __pyx_k__f[] = "f"; static char __pyx_k__g[] = "g"; static char __pyx_k__h[] = "h"; static char __pyx_k__i[] = "i"; static char __pyx_k__l[] = "l"; static char __pyx_k__q[] = "q"; static char __pyx_k__x[] = "x"; static char __pyx_k__y[] = "y"; static char __pyx_k__Zd[] = "Zd"; static char __pyx_k__Zf[] = "Zf"; static char __pyx_k__Zg[] = "Zg"; static char __pyx_k__np[] = "np"; static char __pyx_k__itX[] = "itX"; static char __pyx_k__itY[] = "itY"; static char __pyx_k__axis[] = "axis"; static char __pyx_k__dims[] = "dims"; static char __pyx_k__size[] = "size"; static char __pyx_k__dtype[] = "dtype"; static char __pyx_k__numpy[] = "numpy"; static char __pyx_k__range[] = "range"; static char __pyx_k__ratio[] = "ratio"; static char __pyx_k__shape[] = "shape"; static char __pyx_k__zeros[] = "zeros"; static char __pyx_k__double[] = "double"; static char __pyx_k__interp[] = "interp"; static char __pyx_k__stride[] = "stride"; static char __pyx_k___median[] = "_median"; static char __pyx_k__asarray[] = "asarray"; static char __pyx_k__strides[] = "strides"; static char __pyx_k____main__[] = "__main__"; static char __pyx_k____test__[] = "__test__"; static char __pyx_k___quantile[] = "_quantile"; static char __pyx_k__ValueError[] = "ValueError"; static char __pyx_k____version__[] = "__version__"; static char __pyx_k__RuntimeError[] = "RuntimeError"; static PyObject *__pyx_kp_s_1; static PyObject *__pyx_kp_u_10; static PyObject *__pyx_kp_u_13; static PyObject *__pyx_kp_s_16; static PyObject *__pyx_kp_s_19; static PyObject *__pyx_n_s_20; static PyObject *__pyx_kp_u_3; static PyObject *__pyx_kp_u_5; static PyObject *__pyx_kp_u_7; static PyObject *__pyx_kp_u_9; static PyObject *__pyx_n_s__RuntimeError; static PyObject *__pyx_n_s__ValueError; static PyObject *__pyx_n_s__X; static PyObject *__pyx_n_s__Y; static PyObject *__pyx_n_s____main__; static PyObject *__pyx_n_s____test__; static PyObject *__pyx_n_s____version__; static PyObject *__pyx_n_s___median; static PyObject *__pyx_n_s___quantile; static PyObject *__pyx_n_s__asarray; static PyObject *__pyx_n_s__axis; static PyObject *__pyx_n_s__dims; static PyObject *__pyx_n_s__double; static PyObject *__pyx_n_s__dtype; static PyObject *__pyx_n_s__interp; static PyObject *__pyx_n_s__itX; static PyObject *__pyx_n_s__itY; static PyObject *__pyx_n_s__np; 
static PyObject *__pyx_n_s__numpy; static PyObject *__pyx_n_s__range; static PyObject *__pyx_n_s__ratio; static PyObject *__pyx_n_s__shape; static PyObject *__pyx_n_s__size; static PyObject *__pyx_n_s__stride; static PyObject *__pyx_n_s__strides; static PyObject *__pyx_n_s__x; static PyObject *__pyx_n_s__y; static PyObject *__pyx_n_s__zeros; static PyObject *__pyx_int_0; static PyObject *__pyx_int_1; static PyObject *__pyx_int_15; static PyObject *__pyx_k_tuple_2; static PyObject *__pyx_k_tuple_4; static PyObject *__pyx_k_tuple_6; static PyObject *__pyx_k_tuple_8; static PyObject *__pyx_k_tuple_11; static PyObject *__pyx_k_tuple_12; static PyObject *__pyx_k_tuple_14; static PyObject *__pyx_k_tuple_17; static PyObject *__pyx_k_tuple_21; static PyObject *__pyx_k_codeobj_18; static PyObject *__pyx_k_codeobj_22; /* Python wrapper */ static PyObject *__pyx_pw_4nipy_10algorithms_10statistics_9_quantile_1_quantile(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_10algorithms_10statistics_9_quantile__quantile[] = "\n Fast quantile computation using partial sorting. This function has\n similar behavior to `scipy.percentile` but runs significantly\n faster for large arrays.\n\n Parameters\n ----------\n X : array\n Input array. Will be internally converted into an array of\n doubles if needed.\n\n ratio : float\n A value in range [0, 1] defining the desired quantiles (the\n higher the ratio, the higher the quantiles).\n\n interp : boolean\n Determine whether quantiles are interpolated.\n\n axis : int\n Axis along which quantiles are computed.\n\n Output\n ------\n Y : array\n Array of quantiles\n "; static PyMethodDef __pyx_mdef_4nipy_10algorithms_10statistics_9_quantile_1_quantile = {__Pyx_NAMESTR("_quantile"), (PyCFunction)__pyx_pw_4nipy_10algorithms_10statistics_9_quantile_1_quantile, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4nipy_10algorithms_10statistics_9_quantile__quantile)}; static PyObject *__pyx_pw_4nipy_10algorithms_10statistics_9_quantile_1_quantile(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_X = 0; double __pyx_v_ratio; int __pyx_v_interp; int __pyx_v_axis; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("_quantile (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__X,&__pyx_n_s__ratio,&__pyx_n_s__interp,&__pyx_n_s__axis,0}; PyObject* values[4] = {0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__X)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__ratio)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_quantile", 0, 2, 4, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__interp); if (value) { values[2] = value; kw_args--; } } case 3: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__axis); if (value) { values[3] = value; 
kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "_quantile") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_X = values[0]; __pyx_v_ratio = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_ratio == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; __pyx_clineno = __LINE__; goto __pyx_L3_error;} if (values[2]) { __pyx_v_interp = __Pyx_PyInt_AsInt(values[2]); if (unlikely((__pyx_v_interp == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else { /* "nipy/algorithms/statistics/_quantile.pyx":24 * # This is faster than scipy.stats.scoreatpercentile owing to partial * # sorting * def _quantile(X, double ratio, int interp=False, int axis=0): # <<<<<<<<<<<<<< * """ * Fast quantile computation using partial sorting. This function has */ __pyx_v_interp = ((int)0); } if (values[3]) { __pyx_v_axis = __Pyx_PyInt_AsInt(values[3]); if (unlikely((__pyx_v_axis == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else { __pyx_v_axis = ((int)0); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("_quantile", 0, 2, 4, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.algorithms.statistics._quantile._quantile", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4nipy_10algorithms_10statistics_9_quantile__quantile(__pyx_self, __pyx_v_X, __pyx_v_ratio, __pyx_v_interp, __pyx_v_axis); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4nipy_10algorithms_10statistics_9_quantile__quantile(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_X, double __pyx_v_ratio, int __pyx_v_interp, int __pyx_v_axis) { double *__pyx_v_x; double *__pyx_v_y; long __pyx_v_size; long __pyx_v_stride; PyArrayIterObject *__pyx_v_itX = 0; PyArrayIterObject *__pyx_v_itY = 0; PyObject *__pyx_v_dims = NULL; PyObject *__pyx_v_Y = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_t_7; long __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_quantile", 0); __Pyx_INCREF(__pyx_v_X); /* "nipy/algorithms/statistics/_quantile.pyx":56 * * # Convert the input array to double if needed * X = np.asarray(X, dtype='double') # <<<<<<<<<<<<<< * * # Check the input ratio is in range (0,1) */ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__asarray); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; 
__pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_X); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_X); __Pyx_GIVEREF(__pyx_v_X); __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_3)); if (PyDict_SetItem(__pyx_t_3, ((PyObject *)__pyx_n_s__dtype), ((PyObject *)__pyx_n_s__double)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_4 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_1), ((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_v_X); __pyx_v_X = __pyx_t_4; __pyx_t_4 = 0; /* "nipy/algorithms/statistics/_quantile.pyx":59 * * # Check the input ratio is in range (0,1) * if ratio < 0 or ratio > 1: # <<<<<<<<<<<<<< * raise ValueError('ratio must be in range 0..1') * */ __pyx_t_5 = (__pyx_v_ratio < 0.0); if (!__pyx_t_5) { __pyx_t_6 = (__pyx_v_ratio > 1.0); __pyx_t_7 = __pyx_t_6; } else { __pyx_t_7 = __pyx_t_5; } if (__pyx_t_7) { /* "nipy/algorithms/statistics/_quantile.pyx":60 * # Check the input ratio is in range (0,1) * if ratio < 0 or ratio > 1: * raise ValueError('ratio must be in range 0..1') # <<<<<<<<<<<<<< * * # Allocate output array Y */ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_2), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 60; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[0]; __pyx_lineno = 60; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L3; } __pyx_L3:; /* "nipy/algorithms/statistics/_quantile.pyx":63 * * # Allocate output array Y * dims = list(X.shape) # <<<<<<<<<<<<<< * dims[axis] = 1 * Y = np.zeros(dims) */ __pyx_t_4 = PyObject_GetAttr(__pyx_v_X, __pyx_n_s__shape); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 63; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 63; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyObject_Call(((PyObject *)((PyObject*)(&PyList_Type))), ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 63; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __pyx_v_dims = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; /* "nipy/algorithms/statistics/_quantile.pyx":64 * # Allocate output array Y * dims = list(X.shape) * dims[axis] = 1 # <<<<<<<<<<<<<< * Y = np.zeros(dims) * */ if (__Pyx_SetItemInt(((PyObject *)__pyx_v_dims), __pyx_v_axis, __pyx_int_1, sizeof(int), PyInt_FromLong) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 64; 
__pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/algorithms/statistics/_quantile.pyx":65 * dims = list(X.shape) * dims[axis] = 1 * Y = np.zeros(dims) # <<<<<<<<<<<<<< * * # Set size and stride along specified axis */ __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__zeros); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_INCREF(((PyObject *)__pyx_v_dims)); PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_v_dims)); __Pyx_GIVEREF(((PyObject *)__pyx_v_dims)); __pyx_t_1 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; __pyx_v_Y = __pyx_t_1; __pyx_t_1 = 0; /* "nipy/algorithms/statistics/_quantile.pyx":68 * * # Set size and stride along specified axis * size = X.shape[axis] # <<<<<<<<<<<<<< * stride = X.strides[axis] / sizeof(double) * */ __pyx_t_1 = PyObject_GetAttr(__pyx_v_X, __pyx_n_s__shape); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 68; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __Pyx_GetItemInt(__pyx_t_1, __pyx_v_axis, sizeof(int), PyInt_FromLong); if (!__pyx_t_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 68; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_8 = __Pyx_PyInt_AsLong(__pyx_t_4); if (unlikely((__pyx_t_8 == (long)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 68; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_v_size = __pyx_t_8; /* "nipy/algorithms/statistics/_quantile.pyx":69 * # Set size and stride along specified axis * size = X.shape[axis] * stride = X.strides[axis] / sizeof(double) # <<<<<<<<<<<<<< * * # Create array iterators */ __pyx_t_4 = PyObject_GetAttr(__pyx_v_X, __pyx_n_s__strides); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_1 = __Pyx_GetItemInt(__pyx_t_4, __pyx_v_axis, sizeof(int), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyInt_FromSize_t((sizeof(double))); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyNumber_Divide(__pyx_t_1, __pyx_t_4); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_8 = __Pyx_PyInt_AsLong(__pyx_t_3); if (unlikely((__pyx_t_8 == (long)-1) && PyErr_Occurred())) {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_stride = __pyx_t_8; /* "nipy/algorithms/statistics/_quantile.pyx":72 * * # Create array iterators * itX = np.PyArray_IterAllButAxis(X, &axis) # <<<<<<<<<<<<<< * itY = np.PyArray_IterAllButAxis(Y, &axis) * */ __pyx_t_3 = PyArray_IterAllButAxis(__pyx_v_X, (&__pyx_v_axis)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 72; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_flatiter))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 72; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_itX = ((PyArrayIterObject *)__pyx_t_3); __pyx_t_3 = 0; /* "nipy/algorithms/statistics/_quantile.pyx":73 * # Create array iterators * itX = np.PyArray_IterAllButAxis(X, &axis) * itY = np.PyArray_IterAllButAxis(Y, &axis) # <<<<<<<<<<<<<< * * # Loop */ __pyx_t_3 = PyArray_IterAllButAxis(__pyx_v_Y, (&__pyx_v_axis)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_flatiter))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_itY = ((PyArrayIterObject *)__pyx_t_3); __pyx_t_3 = 0; /* "nipy/algorithms/statistics/_quantile.pyx":76 * * # Loop * while np.PyArray_ITER_NOTDONE(itX): # <<<<<<<<<<<<<< * x = np.PyArray_ITER_DATA(itX) * y = np.PyArray_ITER_DATA(itY) */ while (1) { __pyx_t_7 = PyArray_ITER_NOTDONE(__pyx_v_itX); if (!__pyx_t_7) break; /* "nipy/algorithms/statistics/_quantile.pyx":77 * # Loop * while np.PyArray_ITER_NOTDONE(itX): * x = np.PyArray_ITER_DATA(itX) # <<<<<<<<<<<<<< * y = np.PyArray_ITER_DATA(itY) * y[0] = quantile(x, size, stride, ratio, interp) */ __pyx_v_x = ((double *)PyArray_ITER_DATA(__pyx_v_itX)); /* "nipy/algorithms/statistics/_quantile.pyx":78 * while np.PyArray_ITER_NOTDONE(itX): * x = np.PyArray_ITER_DATA(itX) * y = np.PyArray_ITER_DATA(itY) # <<<<<<<<<<<<<< * y[0] = quantile(x, size, stride, ratio, interp) * np.PyArray_ITER_NEXT(itX) */ __pyx_v_y = ((double *)PyArray_ITER_DATA(__pyx_v_itY)); /* "nipy/algorithms/statistics/_quantile.pyx":79 * x = np.PyArray_ITER_DATA(itX) * y = np.PyArray_ITER_DATA(itY) * y[0] = quantile(x, size, stride, ratio, interp) # <<<<<<<<<<<<<< * np.PyArray_ITER_NEXT(itX) * np.PyArray_ITER_NEXT(itY) */ (__pyx_v_y[0]) = quantile(__pyx_v_x, __pyx_v_size, __pyx_v_stride, __pyx_v_ratio, __pyx_v_interp); /* "nipy/algorithms/statistics/_quantile.pyx":80 * y = np.PyArray_ITER_DATA(itY) * y[0] = quantile(x, size, stride, ratio, interp) * np.PyArray_ITER_NEXT(itX) # <<<<<<<<<<<<<< * np.PyArray_ITER_NEXT(itY) * */ PyArray_ITER_NEXT(__pyx_v_itX); /* "nipy/algorithms/statistics/_quantile.pyx":81 * y[0] = quantile(x, size, stride, ratio, interp) * np.PyArray_ITER_NEXT(itX) * np.PyArray_ITER_NEXT(itY) # <<<<<<<<<<<<<< * * return Y */ PyArray_ITER_NEXT(__pyx_v_itY); } /* "nipy/algorithms/statistics/_quantile.pyx":83 * np.PyArray_ITER_NEXT(itY) * * return Y # <<<<<<<<<<<<<< * * # This is faster than numpy.stats */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_Y); __pyx_r = __pyx_v_Y; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); 
__Pyx_AddTraceback("nipy.algorithms.statistics._quantile._quantile", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_itX); __Pyx_XDECREF((PyObject *)__pyx_v_itY); __Pyx_XDECREF(__pyx_v_dims); __Pyx_XDECREF(__pyx_v_Y); __Pyx_XDECREF(__pyx_v_X); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_10algorithms_10statistics_9_quantile_3_median(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_10algorithms_10statistics_9_quantile_2_median[] = "\n Fast median computation using partial sorting. This function is\n similar to `numpy.median` but runs significantly faster for large\n arrays.\n\n Parameters\n ----------\n X : array\n Input array. Will be internally converted into an array of\n doubles if needed.\n\n axis : int\n Axis along which medians are computed.\n\n Output\n ------\n Y : array\n Array of medians\n "; static PyMethodDef __pyx_mdef_4nipy_10algorithms_10statistics_9_quantile_3_median = {__Pyx_NAMESTR("_median"), (PyCFunction)__pyx_pw_4nipy_10algorithms_10statistics_9_quantile_3_median, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4nipy_10algorithms_10statistics_9_quantile_2_median)}; static PyObject *__pyx_pw_4nipy_10algorithms_10statistics_9_quantile_3_median(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_X = 0; PyObject *__pyx_v_axis = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("_median (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__X,&__pyx_n_s__axis,0}; PyObject* values[2] = {0,0}; values[1] = ((PyObject *)__pyx_int_0); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__X)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__axis); if (value) { values[1] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "_median") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 88; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_X = values[0]; __pyx_v_axis = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("_median", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 88; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.algorithms.statistics._quantile._median", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4nipy_10algorithms_10statistics_9_quantile_2_median(__pyx_self, __pyx_v_X, __pyx_v_axis); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/algorithms/statistics/_quantile.pyx":88 * # due to the underlying algorithm that relies 
on * # partial sorting as opposed to full sorting. * def _median(X, axis=0): # <<<<<<<<<<<<<< * """ * Fast median computation using partial sorting. This function is */ static PyObject *__pyx_pf_4nipy_10algorithms_10statistics_9_quantile_2_median(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_X, PyObject *__pyx_v_axis) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_median", 0); /* "nipy/algorithms/statistics/_quantile.pyx":108 * Array of medians * """ * return _quantile(X, axis=axis, ratio=0.5, interp=True) # <<<<<<<<<<<<<< * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s___quantile); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 108; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 108; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_v_X); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_X); __Pyx_GIVEREF(__pyx_v_X); __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 108; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_3)); if (PyDict_SetItem(__pyx_t_3, ((PyObject *)__pyx_n_s__axis), __pyx_v_axis) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 108; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_4 = PyFloat_FromDouble(0.5); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 108; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); if (PyDict_SetItem(__pyx_t_3, ((PyObject *)__pyx_n_s__ratio), __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 108; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyBool_FromLong(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 108; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); if (PyDict_SetItem(__pyx_t_3, ((PyObject *)__pyx_n_s__interp), __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 108; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_2), ((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 108; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("nipy.algorithms.statistics._quantile._median", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, 
int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_pf_5numpy_7ndarray___getbuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":194 * # experimental exception made for __getbuffer__ and __releasebuffer__ * # -- the details of this may change. * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< * # This implementation of getbuffer is geared towards Cython * # requirements, and does not yet fullfill the PEP. */ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_v_copy_shape; int __pyx_v_i; int __pyx_v_ndim; int __pyx_v_endian_detector; int __pyx_v_little_endian; int __pyx_v_t; char *__pyx_v_f; PyArray_Descr *__pyx_v_descr = 0; int __pyx_v_offset; int __pyx_v_hasfields; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; char *__pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getbuffer__", 0); if (__pyx_v_info != NULL) { __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); } /* "numpy.pxd":200 * # of flags * * if info == NULL: return # <<<<<<<<<<<<<< * * cdef int copy_shape, i, ndim */ __pyx_t_1 = (__pyx_v_info == NULL); if (__pyx_t_1) { __pyx_r = 0; goto __pyx_L0; goto __pyx_L3; } __pyx_L3:; /* "numpy.pxd":203 * * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((&endian_detector)[0] != 0) * */ __pyx_v_endian_detector = 1; /* "numpy.pxd":204 * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * * ndim = PyArray_NDIM(self) */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "numpy.pxd":206 * cdef bint little_endian = ((&endian_detector)[0] != 0) * * ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<< * * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_v_ndim = PyArray_NDIM(__pyx_v_self); /* "numpy.pxd":208 * ndim = PyArray_NDIM(self) * * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * copy_shape = 1 * else: */ __pyx_t_1 = ((sizeof(npy_intp)) != (sizeof(Py_ssize_t))); if (__pyx_t_1) { /* "numpy.pxd":209 * * if sizeof(npy_intp) != sizeof(Py_ssize_t): * copy_shape = 1 # <<<<<<<<<<<<<< * else: * copy_shape = 0 */ __pyx_v_copy_shape = 1; goto __pyx_L4; } /*else*/ { /* "numpy.pxd":211 * copy_shape = 1 * else: * copy_shape = 0 # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) */ __pyx_v_copy_shape = 0; } __pyx_L4:; /* "numpy.pxd":213 * copy_shape = 0 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS); if (__pyx_t_1) { /* "numpy.pxd":214 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not C contiguous") * */ __pyx_t_2 = (!PyArray_CHKFLAGS(__pyx_v_self, NPY_C_CONTIGUOUS)); __pyx_t_3 = __pyx_t_2; } else { __pyx_t_3 = 
__pyx_t_1; } if (__pyx_t_3) { /* "numpy.pxd":215 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_4), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L5; } __pyx_L5:; /* "numpy.pxd":217 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ __pyx_t_3 = ((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS); if (__pyx_t_3) { /* "numpy.pxd":218 * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not Fortran contiguous") * */ __pyx_t_1 = (!PyArray_CHKFLAGS(__pyx_v_self, NPY_F_CONTIGUOUS)); __pyx_t_2 = __pyx_t_1; } else { __pyx_t_2 = __pyx_t_3; } if (__pyx_t_2) { /* "numpy.pxd":219 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_6), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L6; } __pyx_L6:; /* "numpy.pxd":221 * raise ValueError(u"ndarray is not Fortran contiguous") * * info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<< * info.ndim = ndim * if copy_shape: */ __pyx_v_info->buf = PyArray_DATA(__pyx_v_self); /* "numpy.pxd":222 * * info.buf = PyArray_DATA(self) * info.ndim = ndim # <<<<<<<<<<<<<< * if copy_shape: * # Allocate new buffer for strides and shape info. */ __pyx_v_info->ndim = __pyx_v_ndim; /* "numpy.pxd":223 * info.buf = PyArray_DATA(self) * info.ndim = ndim * if copy_shape: # <<<<<<<<<<<<<< * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. */ if (__pyx_v_copy_shape) { /* "numpy.pxd":226 * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. * info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) # <<<<<<<<<<<<<< * info.shape = info.strides + ndim * for i in range(ndim): */ __pyx_v_info->strides = ((Py_ssize_t *)malloc((((sizeof(Py_ssize_t)) * ((size_t)__pyx_v_ndim)) * 2))); /* "numpy.pxd":227 * # This is allocated as one block, strides first. 
* info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) * info.shape = info.strides + ndim # <<<<<<<<<<<<<< * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] */ __pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim); /* "numpy.pxd":228 * info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) * info.shape = info.strides + ndim * for i in range(ndim): # <<<<<<<<<<<<<< * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] */ __pyx_t_5 = __pyx_v_ndim; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "numpy.pxd":229 * info.shape = info.strides + ndim * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<< * info.shape[i] = PyArray_DIMS(self)[i] * else: */ (__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]); /* "numpy.pxd":230 * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<< * else: * info.strides = PyArray_STRIDES(self) */ (__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]); } goto __pyx_L7; } /*else*/ { /* "numpy.pxd":232 * info.shape[i] = PyArray_DIMS(self)[i] * else: * info.strides = PyArray_STRIDES(self) # <<<<<<<<<<<<<< * info.shape = PyArray_DIMS(self) * info.suboffsets = NULL */ __pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self)); /* "numpy.pxd":233 * else: * info.strides = PyArray_STRIDES(self) * info.shape = PyArray_DIMS(self) # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) */ __pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(__pyx_v_self)); } __pyx_L7:; /* "numpy.pxd":234 * info.strides = PyArray_STRIDES(self) * info.shape = PyArray_DIMS(self) * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) */ __pyx_v_info->suboffsets = NULL; /* "numpy.pxd":235 * info.shape = PyArray_DIMS(self) * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<< * info.readonly = not PyArray_ISWRITEABLE(self) * */ __pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self); /* "numpy.pxd":236 * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<< * * cdef int t */ __pyx_v_info->readonly = (!PyArray_ISWRITEABLE(__pyx_v_self)); /* "numpy.pxd":239 * * cdef int t * cdef char* f = NULL # <<<<<<<<<<<<<< * cdef dtype descr = self.descr * cdef list stack */ __pyx_v_f = NULL; /* "numpy.pxd":240 * cdef int t * cdef char* f = NULL * cdef dtype descr = self.descr # <<<<<<<<<<<<<< * cdef list stack * cdef int offset */ __pyx_t_4 = ((PyObject *)__pyx_v_self->descr); __Pyx_INCREF(__pyx_t_4); __pyx_v_descr = ((PyArray_Descr *)__pyx_t_4); __pyx_t_4 = 0; /* "numpy.pxd":244 * cdef int offset * * cdef bint hasfields = PyDataType_HASFIELDS(descr) # <<<<<<<<<<<<<< * * if not hasfields and not copy_shape: */ __pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr); /* "numpy.pxd":246 * cdef bint hasfields = PyDataType_HASFIELDS(descr) * * if not hasfields and not copy_shape: # <<<<<<<<<<<<<< * # do not call releasebuffer * info.obj = None */ __pyx_t_2 = (!__pyx_v_hasfields); if (__pyx_t_2) { __pyx_t_3 = (!__pyx_v_copy_shape); __pyx_t_1 = __pyx_t_3; } else { __pyx_t_1 = __pyx_t_2; } if (__pyx_t_1) { /* "numpy.pxd":248 * if not hasfields and not copy_shape: * # do not call releasebuffer * info.obj = None # <<<<<<<<<<<<<< * else: * # need to call 
releasebuffer */ __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = Py_None; goto __pyx_L10; } /*else*/ { /* "numpy.pxd":251 * else: * # need to call releasebuffer * info.obj = self # <<<<<<<<<<<<<< * * if not hasfields: */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); } __pyx_L10:; /* "numpy.pxd":253 * info.obj = self * * if not hasfields: # <<<<<<<<<<<<<< * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or */ __pyx_t_1 = (!__pyx_v_hasfields); if (__pyx_t_1) { /* "numpy.pxd":254 * * if not hasfields: * t = descr.type_num # <<<<<<<<<<<<<< * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): */ __pyx_t_5 = __pyx_v_descr->type_num; __pyx_v_t = __pyx_t_5; /* "numpy.pxd":255 * if not hasfields: * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_1 = (__pyx_v_descr->byteorder == '>'); if (__pyx_t_1) { __pyx_t_2 = __pyx_v_little_endian; } else { __pyx_t_2 = __pyx_t_1; } if (!__pyx_t_2) { /* "numpy.pxd":256 * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" */ __pyx_t_1 = (__pyx_v_descr->byteorder == '<'); if (__pyx_t_1) { __pyx_t_3 = (!__pyx_v_little_endian); __pyx_t_7 = __pyx_t_3; } else { __pyx_t_7 = __pyx_t_1; } __pyx_t_1 = __pyx_t_7; } else { __pyx_t_1 = __pyx_t_2; } if (__pyx_t_1) { /* "numpy.pxd":257 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_8), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L12; } __pyx_L12:; /* "numpy.pxd":258 * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" */ __pyx_t_1 = (__pyx_v_t == NPY_BYTE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__b; goto __pyx_L13; } /* "numpy.pxd":259 * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" */ __pyx_t_1 = (__pyx_v_t == NPY_UBYTE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__B; goto __pyx_L13; } /* "numpy.pxd":260 * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" */ __pyx_t_1 = (__pyx_v_t == NPY_SHORT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__h; goto __pyx_L13; } /* "numpy.pxd":261 * elif t == NPY_UBYTE: f = "B" * elif t == 
NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" */ __pyx_t_1 = (__pyx_v_t == NPY_USHORT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__H; goto __pyx_L13; } /* "numpy.pxd":262 * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" */ __pyx_t_1 = (__pyx_v_t == NPY_INT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__i; goto __pyx_L13; } /* "numpy.pxd":263 * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" */ __pyx_t_1 = (__pyx_v_t == NPY_UINT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__I; goto __pyx_L13; } /* "numpy.pxd":264 * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" */ __pyx_t_1 = (__pyx_v_t == NPY_LONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__l; goto __pyx_L13; } /* "numpy.pxd":265 * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" */ __pyx_t_1 = (__pyx_v_t == NPY_ULONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__L; goto __pyx_L13; } /* "numpy.pxd":266 * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" */ __pyx_t_1 = (__pyx_v_t == NPY_LONGLONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__q; goto __pyx_L13; } /* "numpy.pxd":267 * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" */ __pyx_t_1 = (__pyx_v_t == NPY_ULONGLONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Q; goto __pyx_L13; } /* "numpy.pxd":268 * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" */ __pyx_t_1 = (__pyx_v_t == NPY_FLOAT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__f; goto __pyx_L13; } /* "numpy.pxd":269 * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" */ __pyx_t_1 = (__pyx_v_t == NPY_DOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__d; goto __pyx_L13; } /* "numpy.pxd":270 * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" */ __pyx_t_1 = (__pyx_v_t == NPY_LONGDOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__g; goto __pyx_L13; } /* "numpy.pxd":271 * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" */ __pyx_t_1 = (__pyx_v_t == NPY_CFLOAT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Zf; goto __pyx_L13; } /* "numpy.pxd":272 * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" */ __pyx_t_1 = (__pyx_v_t == NPY_CDOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Zd; goto __pyx_L13; } /* "numpy.pxd":273 * elif t 
== NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f = "O" * else: */ __pyx_t_1 = (__pyx_v_t == NPY_CLONGDOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Zg; goto __pyx_L13; } /* "numpy.pxd":274 * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ __pyx_t_1 = (__pyx_v_t == NPY_OBJECT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__O; goto __pyx_L13; } /*else*/ { /* "numpy.pxd":276 * elif t == NPY_OBJECT: f = "O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * info.format = f * return */ __pyx_t_4 = PyInt_FromLong(__pyx_v_t); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_8 = PyNumber_Remainder(((PyObject *)__pyx_kp_u_9), __pyx_t_4); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_8)); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_t_8)); __Pyx_GIVEREF(((PyObject *)__pyx_t_8)); __pyx_t_8 = 0; __pyx_t_8 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_8, 0, 0, 0); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L13:; /* "numpy.pxd":277 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f # <<<<<<<<<<<<<< * return * else: */ __pyx_v_info->format = __pyx_v_f; /* "numpy.pxd":278 * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f * return # <<<<<<<<<<<<<< * else: * info.format = stdlib.malloc(_buffer_format_string_len) */ __pyx_r = 0; goto __pyx_L0; goto __pyx_L11; } /*else*/ { /* "numpy.pxd":280 * return * else: * info.format = stdlib.malloc(_buffer_format_string_len) # <<<<<<<<<<<<<< * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 */ __pyx_v_info->format = ((char *)malloc(255)); /* "numpy.pxd":281 * else: * info.format = stdlib.malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment # <<<<<<<<<<<<<< * offset = 0 * f = _util_dtypestring(descr, info.format + 1, */ (__pyx_v_info->format[0]) = '^'; /* "numpy.pxd":282 * info.format = stdlib.malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 # <<<<<<<<<<<<<< * f = _util_dtypestring(descr, info.format + 1, * info.format + _buffer_format_string_len, */ __pyx_v_offset = 0; /* "numpy.pxd":285 * f = _util_dtypestring(descr, info.format + 1, * info.format + _buffer_format_string_len, * &offset) # <<<<<<<<<<<<<< * f[0] = c'\0' # Terminate format string * */ __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 255), (&__pyx_v_offset)); if (unlikely(__pyx_t_9 == NULL)) 
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 283; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_f = __pyx_t_9; /* "numpy.pxd":286 * info.format + _buffer_format_string_len, * &offset) * f[0] = c'\0' # Terminate format string # <<<<<<<<<<<<<< * * def __releasebuffer__(ndarray self, Py_buffer* info): */ (__pyx_v_f[0]) = '\x00'; } __pyx_L11:; __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { __Pyx_GOTREF(Py_None); __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; } __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_descr); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__releasebuffer__ (wrapper)", 0); __pyx_pf_5numpy_7ndarray_2__releasebuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info)); __Pyx_RefNannyFinishContext(); } /* "numpy.pxd":288 * f[0] = c'\0' # Terminate format string * * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< * if PyArray_HASFIELDS(self): * stdlib.free(info.format) */ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__releasebuffer__", 0); /* "numpy.pxd":289 * * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_t_1 = PyArray_HASFIELDS(__pyx_v_self); if (__pyx_t_1) { /* "numpy.pxd":290 * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): * stdlib.free(info.format) # <<<<<<<<<<<<<< * if sizeof(npy_intp) != sizeof(Py_ssize_t): * stdlib.free(info.strides) */ free(__pyx_v_info->format); goto __pyx_L3; } __pyx_L3:; /* "numpy.pxd":291 * if PyArray_HASFIELDS(self): * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * stdlib.free(info.strides) * # info.shape was stored after info.strides in the same block */ __pyx_t_1 = ((sizeof(npy_intp)) != (sizeof(Py_ssize_t))); if (__pyx_t_1) { /* "numpy.pxd":292 * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): * stdlib.free(info.strides) # <<<<<<<<<<<<<< * # info.shape was stored after info.strides in the same block * */ free(__pyx_v_info->strides); goto __pyx_L4; } __pyx_L4:; __Pyx_RefNannyFinishContext(); } /* "numpy.pxd":768 * ctypedef npy_cdouble complex_t * * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(1, a) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0); /* "numpy.pxd":769 * * cdef inline object PyArray_MultiIterNew1(a): * 
return PyArray_MultiIterNew(1, a) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew2(a, b): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 769; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":771 * return PyArray_MultiIterNew(1, a) * * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(2, a, b) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0); /* "numpy.pxd":772 * * cdef inline object PyArray_MultiIterNew2(a, b): * return PyArray_MultiIterNew(2, a, b) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew3(a, b, c): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 772; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":774 * return PyArray_MultiIterNew(2, a, b) * * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(3, a, b, c) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0); /* "numpy.pxd":775 * * cdef inline object PyArray_MultiIterNew3(a, b, c): * return PyArray_MultiIterNew(3, a, b, c) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 775; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":777 * return PyArray_MultiIterNew(3, a, b, c) * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(4, a, b, c, d) * */ static CYTHON_INLINE PyObject 
*__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0); /* "numpy.pxd":778 * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): * return PyArray_MultiIterNew(4, a, b, c, d) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 778; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":780 * return PyArray_MultiIterNew(4, a, b, c, d) * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(5, a, b, c, d, e) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0); /* "numpy.pxd":781 * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): * return PyArray_MultiIterNew(5, a, b, c, d, e) # <<<<<<<<<<<<<< * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 781; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":783 * return PyArray_MultiIterNew(5, a, b, c, d, e) * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< * # Recursive utility function used in __getbuffer__ to get format * # string. The new location in the format string is returned. 
*/ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) { PyArray_Descr *__pyx_v_child = 0; int __pyx_v_endian_detector; int __pyx_v_little_endian; PyObject *__pyx_v_fields = 0; PyObject *__pyx_v_childname = NULL; PyObject *__pyx_v_new_offset = NULL; PyObject *__pyx_v_t = NULL; char *__pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *(*__pyx_t_6)(PyObject *); int __pyx_t_7; int __pyx_t_8; int __pyx_t_9; int __pyx_t_10; long __pyx_t_11; char *__pyx_t_12; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_util_dtypestring", 0); /* "numpy.pxd":790 * cdef int delta_offset * cdef tuple i * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((&endian_detector)[0] != 0) * cdef tuple fields */ __pyx_v_endian_detector = 1; /* "numpy.pxd":791 * cdef tuple i * cdef int endian_detector = 1 * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * cdef tuple fields * */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "numpy.pxd":794 * cdef tuple fields * * for childname in descr.names: # <<<<<<<<<<<<<< * fields = descr.fields[childname] * child, new_offset = fields */ if (unlikely(((PyObject *)__pyx_v_descr->names) == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_1 = ((PyObject *)__pyx_v_descr->names); __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; for (;;) { if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif __Pyx_XDECREF(__pyx_v_childname); __pyx_v_childname = __pyx_t_3; __pyx_t_3 = 0; /* "numpy.pxd":795 * * for childname in descr.names: * fields = descr.fields[childname] # <<<<<<<<<<<<<< * child, new_offset = fields * */ __pyx_t_3 = PyObject_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (!__pyx_t_3) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected tuple, got %.200s", Py_TYPE(__pyx_t_3)->tp_name), 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_XDECREF(((PyObject *)__pyx_v_fields)); __pyx_v_fields = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "numpy.pxd":796 * for childname in descr.names: * fields = descr.fields[childname] * child, new_offset = fields # <<<<<<<<<<<<<< * * if (end - f) - (new_offset - offset[0]) < 15: */ if (likely(PyTuple_CheckExact(((PyObject *)__pyx_v_fields)))) { PyObject* sequence = ((PyObject *)__pyx_v_fields); #if CYTHON_COMPILING_IN_CPYTHON Py_ssize_t size = Py_SIZE(sequence); #else Py_ssize_t size = PySequence_Size(sequence); #endif if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else 
if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); #else __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else if (1) { __Pyx_RaiseNoneNotIterableError(); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } else { Py_ssize_t index = -1; __pyx_t_5 = PyObject_GetIter(((PyObject *)__pyx_v_fields)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = Py_TYPE(__pyx_t_5)->tp_iternext; index = 0; __pyx_t_3 = __pyx_t_6(__pyx_t_5); if (unlikely(!__pyx_t_3)) goto __pyx_L5_unpacking_failed; __Pyx_GOTREF(__pyx_t_3); index = 1; __pyx_t_4 = __pyx_t_6(__pyx_t_5); if (unlikely(!__pyx_t_4)) goto __pyx_L5_unpacking_failed; __Pyx_GOTREF(__pyx_t_4); if (__Pyx_IternextUnpackEndCheck(__pyx_t_6(__pyx_t_5), 2) < 0) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_6 = NULL; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L6_unpacking_done; __pyx_L5_unpacking_failed:; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_6 = NULL; if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_L6_unpacking_done:; } if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_XDECREF(((PyObject *)__pyx_v_child)); __pyx_v_child = ((PyArray_Descr *)__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_v_new_offset); __pyx_v_new_offset = __pyx_t_4; __pyx_t_4 = 0; /* "numpy.pxd":798 * child, new_offset = fields * * if (end - f) - (new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * */ __pyx_t_4 = PyInt_FromLong((__pyx_v_end - __pyx_v_f)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyInt_FromLong((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_3); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyNumber_Subtract(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyObject_RichCompare(__pyx_t_3, __pyx_int_15, Py_LT); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = 
__pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { /* "numpy.pxd":799 * * if (end - f) - (new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_t_5 = PyObject_Call(__pyx_builtin_RuntimeError, ((PyObject *)__pyx_k_tuple_11), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L7; } __pyx_L7:; /* "numpy.pxd":801 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_7 = (__pyx_v_child->byteorder == '>'); if (__pyx_t_7) { __pyx_t_8 = __pyx_v_little_endian; } else { __pyx_t_8 = __pyx_t_7; } if (!__pyx_t_8) { /* "numpy.pxd":802 * * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * # One could encode it in the format string and have Cython */ __pyx_t_7 = (__pyx_v_child->byteorder == '<'); if (__pyx_t_7) { __pyx_t_9 = (!__pyx_v_little_endian); __pyx_t_10 = __pyx_t_9; } else { __pyx_t_10 = __pyx_t_7; } __pyx_t_7 = __pyx_t_10; } else { __pyx_t_7 = __pyx_t_8; } if (__pyx_t_7) { /* "numpy.pxd":803 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_12), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L8; } __pyx_L8:; /* "numpy.pxd":813 * * # Output padding bytes * while offset[0] < new_offset: # <<<<<<<<<<<<<< * f[0] = 120 # "x"; pad byte * f += 1 */ while (1) { __pyx_t_5 = PyInt_FromLong((__pyx_v_offset[0])); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_t_5, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if 
(!__pyx_t_7) break; /* "numpy.pxd":814 * # Output padding bytes * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<< * f += 1 * offset[0] += 1 */ (__pyx_v_f[0]) = 120; /* "numpy.pxd":815 * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte * f += 1 # <<<<<<<<<<<<<< * offset[0] += 1 * */ __pyx_v_f = (__pyx_v_f + 1); /* "numpy.pxd":816 * f[0] = 120 # "x"; pad byte * f += 1 * offset[0] += 1 # <<<<<<<<<<<<<< * * offset[0] += child.itemsize */ __pyx_t_11 = 0; (__pyx_v_offset[__pyx_t_11]) = ((__pyx_v_offset[__pyx_t_11]) + 1); } /* "numpy.pxd":818 * offset[0] += 1 * * offset[0] += child.itemsize # <<<<<<<<<<<<<< * * if not PyDataType_HASFIELDS(child): */ __pyx_t_11 = 0; (__pyx_v_offset[__pyx_t_11]) = ((__pyx_v_offset[__pyx_t_11]) + __pyx_v_child->elsize); /* "numpy.pxd":820 * offset[0] += child.itemsize * * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< * t = child.type_num * if end - f < 5: */ __pyx_t_7 = (!PyDataType_HASFIELDS(__pyx_v_child)); if (__pyx_t_7) { /* "numpy.pxd":821 * * if not PyDataType_HASFIELDS(child): * t = child.type_num # <<<<<<<<<<<<<< * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") */ __pyx_t_3 = PyInt_FromLong(__pyx_v_child->type_num); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 821; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_XDECREF(__pyx_v_t); __pyx_v_t = __pyx_t_3; __pyx_t_3 = 0; /* "numpy.pxd":822 * if not PyDataType_HASFIELDS(child): * t = child.type_num * if end - f < 5: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short.") * */ __pyx_t_7 = ((__pyx_v_end - __pyx_v_f) < 5); if (__pyx_t_7) { /* "numpy.pxd":823 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_t_3 = PyObject_Call(__pyx_builtin_RuntimeError, ((PyObject *)__pyx_k_tuple_14), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L12; } __pyx_L12:; /* "numpy.pxd":826 * * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" */ __pyx_t_3 = PyInt_FromLong(NPY_BYTE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 98; goto __pyx_L13; } /* "numpy.pxd":827 * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" */ __pyx_t_5 = PyInt_FromLong(NPY_UBYTE); 
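/* Note on the long if/elif chain around this point: each branch compares the
 * field's type number (t = child.type_num, held as a Python int in the
 * generated code, hence PyInt_FromLong + PyObject_RichCompare) against a
 * NumPy type constant (NPY_BYTE, NPY_UBYTE, ..., NPY_OBJECT) and, on a match,
 * writes the corresponding one-character struct-style format code into the
 * output buffer f, using the ASCII values shown in the excerpted .pxd
 * comments: 98 'b', 66 'B', 104 'h', 72 'H', 105 'i', 73 'I', 108 'l',
 * 76 'L', 113 'q', 81 'Q', 102 'f', 100 'd', 103 'g', 79 'O'. */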
if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 66; goto __pyx_L13; } /* "numpy.pxd":828 * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" */ __pyx_t_3 = PyInt_FromLong(NPY_SHORT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 104; goto __pyx_L13; } /* "numpy.pxd":829 * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" */ __pyx_t_5 = PyInt_FromLong(NPY_USHORT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 72; goto __pyx_L13; } /* "numpy.pxd":830 * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" */ __pyx_t_3 = PyInt_FromLong(NPY_INT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 105; goto __pyx_L13; } /* "numpy.pxd":831 * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t 
== NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" */ __pyx_t_5 = PyInt_FromLong(NPY_UINT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 73; goto __pyx_L13; } /* "numpy.pxd":832 * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" */ __pyx_t_3 = PyInt_FromLong(NPY_LONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 108; goto __pyx_L13; } /* "numpy.pxd":833 * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" */ __pyx_t_5 = PyInt_FromLong(NPY_ULONG); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 76; goto __pyx_L13; } /* "numpy.pxd":834 * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" */ __pyx_t_3 = PyInt_FromLong(NPY_LONGLONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; 
__pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 113; goto __pyx_L13; } /* "numpy.pxd":835 * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" */ __pyx_t_5 = PyInt_FromLong(NPY_ULONGLONG); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 81; goto __pyx_L13; } /* "numpy.pxd":836 * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" */ __pyx_t_3 = PyInt_FromLong(NPY_FLOAT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 102; goto __pyx_L13; } /* "numpy.pxd":837 * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf */ __pyx_t_5 = PyInt_FromLong(NPY_DOUBLE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 100; goto __pyx_L13; } /* "numpy.pxd":838 * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd */ __pyx_t_3 = PyInt_FromLong(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = 
PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 103; goto __pyx_L13; } /* "numpy.pxd":839 * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg */ __pyx_t_5 = PyInt_FromLong(NPY_CFLOAT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 102; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L13; } /* "numpy.pxd":840 * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" */ __pyx_t_3 = PyInt_FromLong(NPY_CDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 100; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L13; } /* "numpy.pxd":841 * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: */ __pyx_t_5 = PyInt_FromLong(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
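/* The complex branches (NPY_CFLOAT / NPY_CDOUBLE / NPY_CLONGDOUBLE) around
 * this point write two characters per field: 90 ('Z') followed by the code of
 * the component type (102, 100 or 103, i.e. 'Zf', 'Zd', 'Zg'), advancing f by
 * one extra position.  NPY_OBJECT maps to 79 ('O'), and any unrecognised type
 * number falls through to the
 * ValueError("unknown dtype code in numpy.pxd (%d)") branch. */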
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 103; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L13; } /* "numpy.pxd":842 * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ __pyx_t_3 = PyInt_FromLong(NPY_OBJECT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 79; goto __pyx_L13; } /*else*/ { /* "numpy.pxd":844 * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * f += 1 * else: */ __pyx_t_5 = PyNumber_Remainder(((PyObject *)__pyx_kp_u_9), __pyx_v_t); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_5)); __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_t_5)); __Pyx_GIVEREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L13:; /* "numpy.pxd":845 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * f += 1 # <<<<<<<<<<<<<< * else: * # Cython ignores struct boundary information ("T{...}"), */ __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L11; } /*else*/ { /* "numpy.pxd":849 * # Cython ignores struct boundary information ("T{...}"), * # so don't output it * f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<< * return f * */ __pyx_t_12 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_12 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 849; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_f = __pyx_t_12; } __pyx_L11:; } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "numpy.pxd":850 * # so don't output it * f = _util_dtypestring(child, f, end, offset) * return f # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_f; goto __pyx_L0; __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_child); __Pyx_XDECREF(__pyx_v_fields); 
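/* End-of-function convention in this function: on failure the code jumps to
 * __pyx_L1_error, which records a traceback via __Pyx_AddTraceback and sets
 * the return value to NULL; __pyx_L0 is the shared cleanup path that releases
 * the temporary Python references before returning. */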
__Pyx_XDECREF(__pyx_v_childname); __Pyx_XDECREF(__pyx_v_new_offset); __Pyx_XDECREF(__pyx_v_t); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":965 * * * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< * cdef PyObject* baseptr * if base is None: */ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) { PyObject *__pyx_v_baseptr; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("set_array_base", 0); /* "numpy.pxd":967 * cdef inline void set_array_base(ndarray arr, object base): * cdef PyObject* baseptr * if base is None: # <<<<<<<<<<<<<< * baseptr = NULL * else: */ __pyx_t_1 = (__pyx_v_base == Py_None); if (__pyx_t_1) { /* "numpy.pxd":968 * cdef PyObject* baseptr * if base is None: * baseptr = NULL # <<<<<<<<<<<<<< * else: * Py_INCREF(base) # important to do this before decref below! */ __pyx_v_baseptr = NULL; goto __pyx_L3; } /*else*/ { /* "numpy.pxd":970 * baseptr = NULL * else: * Py_INCREF(base) # important to do this before decref below! # <<<<<<<<<<<<<< * baseptr = base * Py_XDECREF(arr.base) */ Py_INCREF(__pyx_v_base); /* "numpy.pxd":971 * else: * Py_INCREF(base) # important to do this before decref below! * baseptr = base # <<<<<<<<<<<<<< * Py_XDECREF(arr.base) * arr.base = baseptr */ __pyx_v_baseptr = ((PyObject *)__pyx_v_base); } __pyx_L3:; /* "numpy.pxd":972 * Py_INCREF(base) # important to do this before decref below! * baseptr = base * Py_XDECREF(arr.base) # <<<<<<<<<<<<<< * arr.base = baseptr * */ Py_XDECREF(__pyx_v_arr->base); /* "numpy.pxd":973 * baseptr = base * Py_XDECREF(arr.base) * arr.base = baseptr # <<<<<<<<<<<<<< * * cdef inline object get_array_base(ndarray arr): */ __pyx_v_arr->base = __pyx_v_baseptr; __Pyx_RefNannyFinishContext(); } /* "numpy.pxd":975 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("get_array_base", 0); /* "numpy.pxd":976 * * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: # <<<<<<<<<<<<<< * return None * else: */ __pyx_t_1 = (__pyx_v_arr->base == NULL); if (__pyx_t_1) { /* "numpy.pxd":977 * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: * return None # <<<<<<<<<<<<<< * else: * return arr.base */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_None); __pyx_r = Py_None; goto __pyx_L0; goto __pyx_L3; } /*else*/ { /* "numpy.pxd":979 * return None * else: * return arr.base # <<<<<<<<<<<<<< */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_arr->base)); __pyx_r = ((PyObject *)__pyx_v_arr->base); goto __pyx_L0; } __pyx_L3:; __pyx_r = Py_None; __Pyx_INCREF(Py_None); __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyMethodDef __pyx_methods[] = { {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 static struct PyModuleDef __pyx_moduledef = { PyModuleDef_HEAD_INIT, __Pyx_NAMESTR("_quantile"), __Pyx_DOCSTR(__pyx_k_15), /* m_doc */ -1, /* m_size */ __pyx_methods /* m_methods */, NULL, /* m_reload */ NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_kp_s_1, __pyx_k_1, sizeof(__pyx_k_1), 0, 0, 1, 0}, {&__pyx_kp_u_10, __pyx_k_10, sizeof(__pyx_k_10), 0, 1, 0, 0}, {&__pyx_kp_u_13, __pyx_k_13, sizeof(__pyx_k_13), 0, 1, 0, 0}, 
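/* Interned-constant table: each entry caches one Python string object used by
 * the module at runtime -- attribute and argument names such as "_quantile",
 * "_median", "axis", "interp" and "ratio", builtin names ("ValueError",
 * "RuntimeError", "range"), and the literal error messages that are wrapped
 * into cached argument tuples in __Pyx_InitCachedConstants below. */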
{&__pyx_kp_s_16, __pyx_k_16, sizeof(__pyx_k_16), 0, 0, 1, 0}, {&__pyx_kp_s_19, __pyx_k_19, sizeof(__pyx_k_19), 0, 0, 1, 0}, {&__pyx_n_s_20, __pyx_k_20, sizeof(__pyx_k_20), 0, 0, 1, 1}, {&__pyx_kp_u_3, __pyx_k_3, sizeof(__pyx_k_3), 0, 1, 0, 0}, {&__pyx_kp_u_5, __pyx_k_5, sizeof(__pyx_k_5), 0, 1, 0, 0}, {&__pyx_kp_u_7, __pyx_k_7, sizeof(__pyx_k_7), 0, 1, 0, 0}, {&__pyx_kp_u_9, __pyx_k_9, sizeof(__pyx_k_9), 0, 1, 0, 0}, {&__pyx_n_s__RuntimeError, __pyx_k__RuntimeError, sizeof(__pyx_k__RuntimeError), 0, 0, 1, 1}, {&__pyx_n_s__ValueError, __pyx_k__ValueError, sizeof(__pyx_k__ValueError), 0, 0, 1, 1}, {&__pyx_n_s__X, __pyx_k__X, sizeof(__pyx_k__X), 0, 0, 1, 1}, {&__pyx_n_s__Y, __pyx_k__Y, sizeof(__pyx_k__Y), 0, 0, 1, 1}, {&__pyx_n_s____main__, __pyx_k____main__, sizeof(__pyx_k____main__), 0, 0, 1, 1}, {&__pyx_n_s____test__, __pyx_k____test__, sizeof(__pyx_k____test__), 0, 0, 1, 1}, {&__pyx_n_s____version__, __pyx_k____version__, sizeof(__pyx_k____version__), 0, 0, 1, 1}, {&__pyx_n_s___median, __pyx_k___median, sizeof(__pyx_k___median), 0, 0, 1, 1}, {&__pyx_n_s___quantile, __pyx_k___quantile, sizeof(__pyx_k___quantile), 0, 0, 1, 1}, {&__pyx_n_s__asarray, __pyx_k__asarray, sizeof(__pyx_k__asarray), 0, 0, 1, 1}, {&__pyx_n_s__axis, __pyx_k__axis, sizeof(__pyx_k__axis), 0, 0, 1, 1}, {&__pyx_n_s__dims, __pyx_k__dims, sizeof(__pyx_k__dims), 0, 0, 1, 1}, {&__pyx_n_s__double, __pyx_k__double, sizeof(__pyx_k__double), 0, 0, 1, 1}, {&__pyx_n_s__dtype, __pyx_k__dtype, sizeof(__pyx_k__dtype), 0, 0, 1, 1}, {&__pyx_n_s__interp, __pyx_k__interp, sizeof(__pyx_k__interp), 0, 0, 1, 1}, {&__pyx_n_s__itX, __pyx_k__itX, sizeof(__pyx_k__itX), 0, 0, 1, 1}, {&__pyx_n_s__itY, __pyx_k__itY, sizeof(__pyx_k__itY), 0, 0, 1, 1}, {&__pyx_n_s__np, __pyx_k__np, sizeof(__pyx_k__np), 0, 0, 1, 1}, {&__pyx_n_s__numpy, __pyx_k__numpy, sizeof(__pyx_k__numpy), 0, 0, 1, 1}, {&__pyx_n_s__range, __pyx_k__range, sizeof(__pyx_k__range), 0, 0, 1, 1}, {&__pyx_n_s__ratio, __pyx_k__ratio, sizeof(__pyx_k__ratio), 0, 0, 1, 1}, {&__pyx_n_s__shape, __pyx_k__shape, sizeof(__pyx_k__shape), 0, 0, 1, 1}, {&__pyx_n_s__size, __pyx_k__size, sizeof(__pyx_k__size), 0, 0, 1, 1}, {&__pyx_n_s__stride, __pyx_k__stride, sizeof(__pyx_k__stride), 0, 0, 1, 1}, {&__pyx_n_s__strides, __pyx_k__strides, sizeof(__pyx_k__strides), 0, 0, 1, 1}, {&__pyx_n_s__x, __pyx_k__x, sizeof(__pyx_k__x), 0, 0, 1, 1}, {&__pyx_n_s__y, __pyx_k__y, sizeof(__pyx_k__y), 0, 0, 1, 1}, {&__pyx_n_s__zeros, __pyx_k__zeros, sizeof(__pyx_k__zeros), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_ValueError = __Pyx_GetName(__pyx_b, __pyx_n_s__ValueError); if (!__pyx_builtin_ValueError) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 60; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_range = __Pyx_GetName(__pyx_b, __pyx_n_s__range); if (!__pyx_builtin_range) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 228; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_RuntimeError = __Pyx_GetName(__pyx_b, __pyx_n_s__RuntimeError); if (!__pyx_builtin_RuntimeError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} return 0; __pyx_L1_error:; return -1; } static int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "nipy/algorithms/statistics/_quantile.pyx":60 * # Check the input ratio is in range (0,1) * if ratio < 0 or ratio > 1: * raise ValueError('ratio must be in range 0..1') # <<<<<<<<<<<<<< * * # Allocate output array 
Y */ __pyx_k_tuple_2 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 60; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_2); __Pyx_INCREF(((PyObject *)__pyx_kp_s_1)); PyTuple_SET_ITEM(__pyx_k_tuple_2, 0, ((PyObject *)__pyx_kp_s_1)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_1)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_2)); /* "numpy.pxd":215 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_k_tuple_4 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_4); __Pyx_INCREF(((PyObject *)__pyx_kp_u_3)); PyTuple_SET_ITEM(__pyx_k_tuple_4, 0, ((PyObject *)__pyx_kp_u_3)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_3)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_4)); /* "numpy.pxd":219 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_k_tuple_6 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_6); __Pyx_INCREF(((PyObject *)__pyx_kp_u_5)); PyTuple_SET_ITEM(__pyx_k_tuple_6, 0, ((PyObject *)__pyx_kp_u_5)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_5)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_6)); /* "numpy.pxd":257 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_k_tuple_8 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_8); __Pyx_INCREF(((PyObject *)__pyx_kp_u_7)); PyTuple_SET_ITEM(__pyx_k_tuple_8, 0, ((PyObject *)__pyx_kp_u_7)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_7)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_8)); /* "numpy.pxd":799 * * if (end - f) - (new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_k_tuple_11 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_11)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_11); __Pyx_INCREF(((PyObject *)__pyx_kp_u_10)); PyTuple_SET_ITEM(__pyx_k_tuple_11, 0, ((PyObject *)__pyx_kp_u_10)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_10)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_11)); /* "numpy.pxd":803 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_k_tuple_12 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_12)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_12); __Pyx_INCREF(((PyObject 
*)__pyx_kp_u_7)); PyTuple_SET_ITEM(__pyx_k_tuple_12, 0, ((PyObject *)__pyx_kp_u_7)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_7)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_12)); /* "numpy.pxd":823 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_k_tuple_14 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_14)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_14); __Pyx_INCREF(((PyObject *)__pyx_kp_u_13)); PyTuple_SET_ITEM(__pyx_k_tuple_14, 0, ((PyObject *)__pyx_kp_u_13)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_13)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_14)); /* "nipy/algorithms/statistics/_quantile.pyx":24 * # This is faster than scipy.stats.scoreatpercentile owing to partial * # sorting * def _quantile(X, double ratio, int interp=False, int axis=0): # <<<<<<<<<<<<<< * """ * Fast quantile computation using partial sorting. This function has */ __pyx_k_tuple_17 = PyTuple_New(12); if (unlikely(!__pyx_k_tuple_17)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_17); __Pyx_INCREF(((PyObject *)__pyx_n_s__X)); PyTuple_SET_ITEM(__pyx_k_tuple_17, 0, ((PyObject *)__pyx_n_s__X)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__X)); __Pyx_INCREF(((PyObject *)__pyx_n_s__ratio)); PyTuple_SET_ITEM(__pyx_k_tuple_17, 1, ((PyObject *)__pyx_n_s__ratio)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__ratio)); __Pyx_INCREF(((PyObject *)__pyx_n_s__interp)); PyTuple_SET_ITEM(__pyx_k_tuple_17, 2, ((PyObject *)__pyx_n_s__interp)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__interp)); __Pyx_INCREF(((PyObject *)__pyx_n_s__axis)); PyTuple_SET_ITEM(__pyx_k_tuple_17, 3, ((PyObject *)__pyx_n_s__axis)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__axis)); __Pyx_INCREF(((PyObject *)__pyx_n_s__x)); PyTuple_SET_ITEM(__pyx_k_tuple_17, 4, ((PyObject *)__pyx_n_s__x)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__x)); __Pyx_INCREF(((PyObject *)__pyx_n_s__y)); PyTuple_SET_ITEM(__pyx_k_tuple_17, 5, ((PyObject *)__pyx_n_s__y)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__y)); __Pyx_INCREF(((PyObject *)__pyx_n_s__size)); PyTuple_SET_ITEM(__pyx_k_tuple_17, 6, ((PyObject *)__pyx_n_s__size)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__size)); __Pyx_INCREF(((PyObject *)__pyx_n_s__stride)); PyTuple_SET_ITEM(__pyx_k_tuple_17, 7, ((PyObject *)__pyx_n_s__stride)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__stride)); __Pyx_INCREF(((PyObject *)__pyx_n_s__itX)); PyTuple_SET_ITEM(__pyx_k_tuple_17, 8, ((PyObject *)__pyx_n_s__itX)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__itX)); __Pyx_INCREF(((PyObject *)__pyx_n_s__itY)); PyTuple_SET_ITEM(__pyx_k_tuple_17, 9, ((PyObject *)__pyx_n_s__itY)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__itY)); __Pyx_INCREF(((PyObject *)__pyx_n_s__dims)); PyTuple_SET_ITEM(__pyx_k_tuple_17, 10, ((PyObject *)__pyx_n_s__dims)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__dims)); __Pyx_INCREF(((PyObject *)__pyx_n_s__Y)); PyTuple_SET_ITEM(__pyx_k_tuple_17, 11, ((PyObject *)__pyx_n_s__Y)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Y)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_17)); __pyx_k_codeobj_18 = (PyObject*)__Pyx_PyCode_New(4, 0, 12, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_17, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_19, __pyx_n_s___quantile, 24, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_18)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; 
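/* The tuple/code-object pairs built in this block (__pyx_k_tuple_17 with
 * __pyx_k_codeobj_18, and __pyx_k_tuple_21 with __pyx_k_codeobj_22 below)
 * record the local variable names and Python-level signatures of the two
 * functions defined in _quantile.pyx, as quoted in the excerpts:
 * _quantile(X, double ratio, int interp=False, int axis=0) and
 * _median(X, axis=0). */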
__pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/algorithms/statistics/_quantile.pyx":88 * # due to the underlying algorithm that relies on * # partial sorting as opposed to full sorting. * def _median(X, axis=0): # <<<<<<<<<<<<<< * """ * Fast median computation using partial sorting. This function is */ __pyx_k_tuple_21 = PyTuple_New(2); if (unlikely(!__pyx_k_tuple_21)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 88; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_21); __Pyx_INCREF(((PyObject *)__pyx_n_s__X)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 0, ((PyObject *)__pyx_n_s__X)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__X)); __Pyx_INCREF(((PyObject *)__pyx_n_s__axis)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 1, ((PyObject *)__pyx_n_s__axis)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__axis)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_21)); __pyx_k_codeobj_22 = (PyObject*)__Pyx_PyCode_New(2, 0, 2, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_21, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_19, __pyx_n_s___median, 88, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_22)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 88; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_InitGlobals(void) { if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __pyx_int_15 = PyInt_FromLong(15); if (unlikely(!__pyx_int_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; return 0; __pyx_L1_error:; return -1; } #if PY_MAJOR_VERSION < 3 PyMODINIT_FUNC init_quantile(void); /*proto*/ PyMODINIT_FUNC init_quantile(void) #else PyMODINIT_FUNC PyInit__quantile(void); /*proto*/ PyMODINIT_FUNC PyInit__quantile(void) #endif { PyObject *__pyx_t_1 = NULL; __Pyx_RefNannyDeclarations #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit__quantile(void)", 0); if ( __Pyx_check_binary_version() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #ifdef __Pyx_CyFunction_USED if (__Pyx_CyFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) {__pyx_filename = __pyx_f[0]; 
__pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? */ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4(__Pyx_NAMESTR("_quantile"), __pyx_methods, __Pyx_DOCSTR(__pyx_k_15), 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (!PyDict_GetItemString(modules, "nipy.algorithms.statistics._quantile")) { if (unlikely(PyDict_SetItemString(modules, "nipy.algorithms.statistics._quantile", __pyx_m) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } } #endif __pyx_b = PyImport_AddModule(__Pyx_NAMESTR(__Pyx_BUILTIN_MODULE_NAME)); if (unlikely(!__pyx_b)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #if CYTHON_COMPILING_IN_PYPY Py_INCREF(__pyx_b); #endif if (__Pyx_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; /*--- Initialize various global constants etc. ---*/ if (unlikely(__Pyx_InitGlobals() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (__pyx_module_is_main_nipy__algorithms__statistics___quantile) { if (__Pyx_SetAttrString(__pyx_m, "__name__", __pyx_n_s____main__) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; } /*--- Builtin init code ---*/ if (unlikely(__Pyx_InitCachedBuiltins() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Constants init code ---*/ if (unlikely(__Pyx_InitCachedConstants() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Global init code ---*/ /*--- Variable export code ---*/ /*--- Function export code ---*/ /*--- Type init code ---*/ /*--- Type import code ---*/ __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "type", #if CYTHON_COMPILING_IN_PYPY sizeof(PyTypeObject), #else sizeof(PyHeapTypeObject), #endif 0); if (unlikely(!__pyx_ptype_7cpython_4type_type)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_5numpy_dtype)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 155; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 169; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
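/* Module initialisation, in order: the C-level NumPy types (dtype, flatiter,
 * broadcast, ndarray, ufunc) are imported with size checks, then the module
 * body from _quantile.pyx is executed -- __version__ is set, numpy is
 * imported as np, np.import_array() is called, and the _quantile and _median
 * wrappers are installed as module attributes.  Assuming the build succeeds,
 * Python code would then use the module along the lines of:
 *
 *     from nipy.algorithms.statistics._quantile import _median, _quantile
 *     m = _median(X, axis=0)
 *     q = _quantile(X, ratio=0.75, interp=True, axis=0)
 *
 * (signatures taken from the .pyx excerpts quoted in this file; the numerical
 * behaviour itself lives in _quantile.pyx and the C routines it wraps, which
 * are not part of this section). */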
__pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 178; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 861; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Variable import code ---*/ /*--- Function import code ---*/ /*--- Execution code ---*/ /* "nipy/algorithms/statistics/_quantile.pyx":8 * """ * * __version__ = '0.1' # <<<<<<<<<<<<<< * * import numpy as np */ if (PyObject_SetAttr(__pyx_m, __pyx_n_s____version__, ((PyObject *)__pyx_kp_s_16)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 8; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/algorithms/statistics/_quantile.pyx":10 * __version__ = '0.1' * * import numpy as np # <<<<<<<<<<<<<< * cimport numpy as np * */ __pyx_t_1 = __Pyx_Import(((PyObject *)__pyx_n_s__numpy), 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 10; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__np, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 10; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/statistics/_quantile.pyx":20 * int interp) * * np.import_array() # <<<<<<<<<<<<<< * * # This is faster than scipy.stats.scoreatpercentile owing to partial */ import_array(); /* "nipy/algorithms/statistics/_quantile.pyx":24 * # This is faster than scipy.stats.scoreatpercentile owing to partial * # sorting * def _quantile(X, double ratio, int interp=False, int axis=0): # <<<<<<<<<<<<<< * """ * Fast quantile computation using partial sorting. This function has */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_10algorithms_10statistics_9_quantile_1_quantile, NULL, __pyx_n_s_20); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s___quantile, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/statistics/_quantile.pyx":88 * # due to the underlying algorithm that relies on * # partial sorting as opposed to full sorting. * def _median(X, axis=0): # <<<<<<<<<<<<<< * """ * Fast median computation using partial sorting. 
This function is */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_10algorithms_10statistics_9_quantile_3_median, NULL, __pyx_n_s_20); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 88; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s___median, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 88; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/statistics/_quantile.pyx":1 * # -*- Mode: Python -*- Not really, but the syntax is close enough # <<<<<<<<<<<<<< * * """ */ __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_1)); if (PyObject_SetAttr(__pyx_m, __pyx_n_s____test__, ((PyObject *)__pyx_t_1)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; /* "numpy.pxd":975 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); if (__pyx_m) { __Pyx_AddTraceback("init nipy.algorithms.statistics._quantile", __pyx_clineno, __pyx_lineno, __pyx_filename); Py_DECREF(__pyx_m); __pyx_m = 0; } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init nipy.algorithms.statistics._quantile"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if PY_MAJOR_VERSION < 3 return; #else return __pyx_m; #endif } /* Runtime support code */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule((char *)modname); if (!m) goto end; p = PyObject_GetAttrString(m, (char *)"RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* CYTHON_REFNANNY */ static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name) { PyObject *result; result = PyObject_GetAttr(dict, name); if (!result) { if (dict != __pyx_b) { PyErr_Clear(); result = PyObject_GetAttr(__pyx_b, name); } if (!result) { PyErr_SetObject(PyExc_NameError, name); } } return result; } static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%s() takes %s %" CYTHON_FORMAT_SSIZE_T "d positional argument%s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? 
"" : "s", num_found); } static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 
1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%s() got an unexpected keyword argument '%s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb) { #if CYTHON_COMPILING_IN_CPYTHON PyObject *tmp_type, *tmp_value, *tmp_tb; PyThreadState *tstate = PyThreadState_GET(); tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_Restore(type, value, tb); #endif } static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb) { #if CYTHON_COMPILING_IN_CPYTHON PyThreadState *tstate = PyThreadState_GET(); *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(type, value, tb); #endif } #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } #if PY_VERSION_HEX < 0x02050000 if (PyClass_Check(type)) { #else if (PyType_Check(type)) { #endif #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; #if PY_VERSION_HEX < 0x02050000 if (PyInstance_Check(type)) { type = (PyObject*) ((PyInstanceObject*)type)->in_class; Py_INCREF(type); } else { type = 0; PyErr_SetString(PyExc_TypeError, "raise: exception must be an old-style class or instance"); goto raise_error; } #else type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } #endif } __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else /* Python 3+ */ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate 
value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyEval_CallObject(type, args); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } if (cause && cause != Py_None) { PyObject *fixed_cause; if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { PyThreadState *tstate = PyThreadState_GET(); PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } } bad: Py_XDECREF(owned_instance); return; } #endif static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { if (unlikely(!type)) { PyErr_Format(PyExc_SystemError, "Missing type object"); return 0; } if (likely(PyObject_TypeCheck(obj, type))) return 1; PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", Py_TYPE(obj)->tp_name, type->tp_name); return 0; } static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); } static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more than %" CYTHON_FORMAT_SSIZE_T "d value%s to unpack", index, (index == 1) ? 
"" : "s"); } static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); } static CYTHON_INLINE int __Pyx_IterFinish(void) { #if CYTHON_COMPILING_IN_CPYTHON PyThreadState *tstate = PyThreadState_GET(); PyObject* exc_type = tstate->curexc_type; if (unlikely(exc_type)) { if (likely(exc_type == PyExc_StopIteration) || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration)) { PyObject *exc_value, *exc_tb; exc_value = tstate->curexc_value; exc_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; Py_DECREF(exc_type); Py_XDECREF(exc_value); Py_XDECREF(exc_tb); return 0; } else { return -1; } } return 0; #else if (unlikely(PyErr_Occurred())) { if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) { PyErr_Clear(); return 0; } else { return -1; } } return 0; #endif } static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected) { if (unlikely(retval)) { Py_DECREF(retval); __Pyx_RaiseTooManyValuesError(expected); return -1; } else { return __Pyx_IterFinish(); } return 0; } static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level) { PyObject *py_import = 0; PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; py_import = __Pyx_GetAttrString(__pyx_b, "__import__"); if (!py_import) goto bad; if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; #if PY_VERSION_HEX >= 0x02050000 { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if (strchr(__Pyx_MODULE_NAME, '.')) { /* try package relative import first */ PyObject *py_level = PyInt_FromLong(1); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; /* try absolute import on failure */ } #endif if (!module) { PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); } } #else if (level>0) { PyErr_SetString(PyExc_RuntimeError, "Relative import is not supported for Python <=2.4."); goto bad; } module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, NULL); #endif bad: Py_XDECREF(empty_list); Py_XDECREF(py_import); Py_XDECREF(empty_dict); return module; } #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return ::std::complex< float >(x, y); } #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return x + y*(__pyx_t_float_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { __pyx_t_float_complex z; z.real = x; z.imag = y; return z; } #endif #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex a, __pyx_t_float_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real + b.real; z.imag = a.imag + 
b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float denom = b.real * b.real + b.imag * b.imag; z.real = (a.real * b.real + a.imag * b.imag) / denom; z.imag = (a.imag * b.real - a.real * b.imag) / denom; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrtf(z.real*z.real + z.imag*z.imag); #else return hypotf(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { float denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(a, a); case 3: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(z, a); case 4: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } r = a.real; theta = 0; } else { r = __Pyx_c_absf(a); theta = atan2f(a.imag, a.real); } lnr = logf(r); z_r = expf(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cosf(z_theta); z.imag = z_r * sinf(z_theta); return z; } #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return ::std::complex< double >(x, y); } #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return x + y*(__pyx_t_double_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { __pyx_t_double_complex z; z.real = x; z.imag = y; return z; } #endif #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex a, __pyx_t_double_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; 
z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double denom = b.real * b.real + b.imag * b.imag; z.real = (a.real * b.real + a.imag * b.imag) / denom; z.imag = (a.imag * b.real - a.real * b.imag) / denom; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrt(z.real*z.real + z.imag*z.imag); #else return hypot(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { double denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(a, a); case 3: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(z, a); case 4: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } r = a.real; theta = 0; } else { r = __Pyx_c_abs(a); theta = atan2(a.imag, a.real); } lnr = log(r); z_r = exp(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cos(z_theta); z.imag = z_r * sin(z_theta); return z; } #endif #endif static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject* x) { const unsigned char neg_one = (unsigned char)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(unsigned char) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(unsigned char)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to unsigned char" : "value too large to convert to unsigned char"); } return (unsigned char)-1; } return (unsigned char)val; } return (unsigned char)__Pyx_PyInt_AsUnsignedLong(x); } static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject* x) { const unsigned short neg_one = (unsigned short)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(unsigned short) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(unsigned short)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? 
"can't convert negative value to unsigned short" : "value too large to convert to unsigned short"); } return (unsigned short)-1; } return (unsigned short)val; } return (unsigned short)__Pyx_PyInt_AsUnsignedLong(x); } static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject* x) { const unsigned int neg_one = (unsigned int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(unsigned int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(unsigned int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to unsigned int" : "value too large to convert to unsigned int"); } return (unsigned int)-1; } return (unsigned int)val; } return (unsigned int)__Pyx_PyInt_AsUnsignedLong(x); } static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject* x) { const char neg_one = (char)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(char) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(char)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to char" : "value too large to convert to char"); } return (char)-1; } return (char)val; } return (char)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject* x) { const short neg_one = (short)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(short) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(short)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to short" : "value too large to convert to short"); } return (short)-1; } return (short)val; } return (short)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject* x) { const int neg_one = (int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to int" : "value too large to convert to int"); } return (int)-1; } return (int)val; } return (int)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject* x) { const signed char neg_one = (signed char)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(signed char) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(signed char)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to signed char" : "value too large to convert to signed char"); } return (signed char)-1; } return (signed char)val; } return (signed char)__Pyx_PyInt_AsSignedLong(x); } static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject* x) { const signed short neg_one = (signed short)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(signed short) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(signed short)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? 
"can't convert negative value to signed short" : "value too large to convert to signed short"); } return (signed short)-1; } return (signed short)val; } return (signed short)__Pyx_PyInt_AsSignedLong(x); } static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject* x) { const signed int neg_one = (signed int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(signed int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(signed int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to signed int" : "value too large to convert to signed int"); } return (signed int)-1; } return (signed int)val; } return (signed int)__Pyx_PyInt_AsSignedLong(x); } static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject* x) { const int neg_one = (int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to int" : "value too large to convert to int"); } return (int)-1; } return (int)val; } return (int)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject* x) { const unsigned long neg_one = (unsigned long)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned long"); return (unsigned long)-1; } return (unsigned long)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned long"); return (unsigned long)-1; } return (unsigned long)PyLong_AsUnsignedLong(x); } else { return (unsigned long)PyLong_AsLong(x); } } else { unsigned long val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (unsigned long)-1; val = __Pyx_PyInt_AsUnsignedLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject* x) { const unsigned PY_LONG_LONG neg_one = (unsigned PY_LONG_LONG)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned PY_LONG_LONG"); return (unsigned PY_LONG_LONG)-1; } return (unsigned PY_LONG_LONG)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned PY_LONG_LONG"); return (unsigned PY_LONG_LONG)-1; } return (unsigned PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); } else { return (unsigned PY_LONG_LONG)PyLong_AsLongLong(x); } } else { unsigned PY_LONG_LONG val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (unsigned PY_LONG_LONG)-1; val = __Pyx_PyInt_AsUnsignedLongLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject* x) { const long neg_one = (long)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if 
(likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long)-1; } return (long)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long)-1; } return (long)PyLong_AsUnsignedLong(x); } else { return (long)PyLong_AsLong(x); } } else { long val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (long)-1; val = __Pyx_PyInt_AsLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject* x) { const PY_LONG_LONG neg_one = (PY_LONG_LONG)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to PY_LONG_LONG"); return (PY_LONG_LONG)-1; } return (PY_LONG_LONG)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to PY_LONG_LONG"); return (PY_LONG_LONG)-1; } return (PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); } else { return (PY_LONG_LONG)PyLong_AsLongLong(x); } } else { PY_LONG_LONG val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (PY_LONG_LONG)-1; val = __Pyx_PyInt_AsLongLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject* x) { const signed long neg_one = (signed long)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed long"); return (signed long)-1; } return (signed long)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed long"); return (signed long)-1; } return (signed long)PyLong_AsUnsignedLong(x); } else { return (signed long)PyLong_AsLong(x); } } else { signed long val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (signed long)-1; val = __Pyx_PyInt_AsSignedLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject* x) { const signed PY_LONG_LONG neg_one = (signed PY_LONG_LONG)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed PY_LONG_LONG"); return (signed PY_LONG_LONG)-1; } return (signed PY_LONG_LONG)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed PY_LONG_LONG"); return (signed PY_LONG_LONG)-1; } return (signed PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); } else { return (signed PY_LONG_LONG)PyLong_AsLongLong(x); } } else { signed PY_LONG_LONG val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (signed PY_LONG_LONG)-1; val = __Pyx_PyInt_AsSignedLongLong(tmp); Py_DECREF(tmp); return val; } } static int 
__Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); #if PY_VERSION_HEX < 0x02050000 return PyErr_Warn(NULL, message); #else return PyErr_WarnEx(NULL, message, 1); #endif } return 0; } #ifndef __PYX_HAVE_RT_ImportModule #define __PYX_HAVE_RT_ImportModule static PyObject *__Pyx_ImportModule(const char *name) { PyObject *py_name = 0; PyObject *py_module = 0; py_name = __Pyx_PyIdentifier_FromString(name); if (!py_name) goto bad; py_module = PyImport_Import(py_name); Py_DECREF(py_name); return py_module; bad: Py_XDECREF(py_name); return 0; } #endif #ifndef __PYX_HAVE_RT_ImportType #define __PYX_HAVE_RT_ImportType static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict) { PyObject *py_module = 0; PyObject *result = 0; PyObject *py_name = 0; char warning[200]; py_module = __Pyx_ImportModule(module_name); if (!py_module) goto bad; py_name = __Pyx_PyIdentifier_FromString(class_name); if (!py_name) goto bad; result = PyObject_GetAttr(py_module, py_name); Py_DECREF(py_name); py_name = 0; Py_DECREF(py_module); py_module = 0; if (!result) goto bad; if (!PyType_Check(result)) { PyErr_Format(PyExc_TypeError, "%s.%s is not a type object", module_name, class_name); goto bad; } if (!strict && (size_t)((PyTypeObject *)result)->tp_basicsize > size) { PyOS_snprintf(warning, sizeof(warning), "%s.%s size changed, may indicate binary incompatibility", module_name, class_name); #if PY_VERSION_HEX < 0x02050000 if (PyErr_Warn(NULL, warning) < 0) goto bad; #else if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; #endif } else if ((size_t)((PyTypeObject *)result)->tp_basicsize != size) { PyErr_Format(PyExc_ValueError, "%s.%s has the wrong size, try recompiling", module_name, class_name); goto bad; } return (PyTypeObject *)result; bad: Py_XDECREF(py_module); Py_XDECREF(result); return NULL; } #endif static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = (start + end) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); 
if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, new_max*sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, /*int argcount,*/ 0, /*int kwonlyargcount,*/ 0, /*int nlocals,*/ 0, /*int stacksize,*/ 0, /*int flags,*/ __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, /*int firstlineno,*/ __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_globals = 0; PyFrameObject *py_frame = 0; py_code = __pyx_find_code_object(c_line ? c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? 
c_line : py_line, py_code); } py_globals = PyModule_GetDict(__pyx_m); if (!py_globals) goto bad; py_frame = PyFrame_New( PyThreadState_GET(), /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ py_globals, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; py_frame->f_lineno = py_line; PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else /* Python 3+ has unicode identifiers */ if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; ++t; } return 0; } /* Type Conversion Functions */ static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x) { PyNumberMethods *m; const char *name = NULL; PyObject *res = NULL; #if PY_VERSION_HEX < 0x03000000 if (PyInt_Check(x) || PyLong_Check(x)) #else if (PyLong_Check(x)) #endif return Py_INCREF(x), x; m = Py_TYPE(x)->tp_as_number; #if PY_VERSION_HEX < 0x03000000 if (m && m->nb_int) { name = "int"; res = PyNumber_Int(x); } else if (m && m->nb_long) { name = "long"; res = PyNumber_Long(x); } #else if (m && m->nb_int) { name = "int"; res = PyNumber_Long(x); } #endif if (res) { #if PY_VERSION_HEX < 0x03000000 if (!PyInt_Check(res) && !PyLong_Check(res)) { #else if (!PyLong_Check(res)) { #endif PyErr_Format(PyExc_TypeError, "__%s__ returned non-%s (type %.200s)", name, name, Py_TYPE(res)->tp_name); Py_DECREF(res); return NULL; } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject* x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { #if PY_VERSION_HEX < 0x02050000 if (ival <= LONG_MAX) return PyInt_FromLong((long)ival); else { unsigned char *bytes = (unsigned char *) &ival; int one = 1; int little = (int)*(unsigned char*)&one; return _PyLong_FromByteArray(bytes, sizeof(size_t), little, 0); } #else return PyInt_FromSize_t(ival); #endif } static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject* x) { unsigned PY_LONG_LONG val = __Pyx_PyInt_AsUnsignedLongLong(x); if (unlikely(val == (unsigned PY_LONG_LONG)-1 && PyErr_Occurred())) { return (size_t)-1; } else if (unlikely(val != (unsigned PY_LONG_LONG)(size_t)val)) { PyErr_SetString(PyExc_OverflowError, "value too large to convert to size_t"); return (size_t)-1; } return (size_t)val; } #endif /* Py_PYTHON_H */ nipy-0.3.0/nipy/algorithms/statistics/_quantile.pyx000066400000000000000000000053601210344137400225410ustar00rootroot00000000000000# -*- Mode: Python -*- Not really, but the syntax is close enough """ Very fast quantile computation using partial sorting. Author: Alexis Roche. 
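Illustrative usage (a minimal sketch, assuming the compiled extension is
importable; the output shapes follow from the implementation below):

>>> import numpy as np
>>> from nipy.algorithms.statistics._quantile import _quantile, _median
>>> X = np.random.standard_normal((10, 30))
>>> q75 = _quantile(X, ratio=0.75, interp=True, axis=0)  # shape (1, 30)
>>> med = _median(X, axis=1)                             # shape (10, 1)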
""" __version__ = '0.1' import numpy as np cimport numpy as np cdef extern from "quantile.h": double quantile(double* data, np.npy_intp size, np.npy_intp stride, double r, int interp) np.import_array() # This is faster than scipy.stats.scoreatpercentile owing to partial # sorting def _quantile(X, double ratio, int interp=False, int axis=0): """ Fast quantile computation using partial sorting. This function has similar behavior to `scipy.percentile` but runs significantly faster for large arrays. Parameters ---------- X : array Input array. Will be internally converted into an array of doubles if needed. ratio : float A value in range [0, 1] defining the desired quantiles (the higher the ratio, the higher the quantiles). interp : boolean Determine whether quantiles are interpolated. axis : int Axis along which quantiles are computed. Output ------ Y : array Array of quantiles """ cdef double *x, *y cdef long int size, stride cdef np.flatiter itX, itY # Convert the input array to double if needed X = np.asarray(X, dtype='double') # Check the input ratio is in range (0,1) if ratio < 0 or ratio > 1: raise ValueError('ratio must be in range 0..1') # Allocate output array Y dims = list(X.shape) dims[axis] = 1 Y = np.zeros(dims) # Set size and stride along specified axis size = X.shape[axis] stride = X.strides[axis] / sizeof(double) # Create array iterators itX = np.PyArray_IterAllButAxis(X, &axis) itY = np.PyArray_IterAllButAxis(Y, &axis) # Loop while np.PyArray_ITER_NOTDONE(itX): x = np.PyArray_ITER_DATA(itX) y = np.PyArray_ITER_DATA(itY) y[0] = quantile(x, size, stride, ratio, interp) np.PyArray_ITER_NEXT(itX) np.PyArray_ITER_NEXT(itY) return Y # This is faster than numpy.stats # due to the underlying algorithm that relies on # partial sorting as opposed to full sorting. def _median(X, axis=0): """ Fast median computation using partial sorting. This function is similar to `numpy.median` but runs significantly faster for large arrays. Parameters ---------- X : array Input array. Will be internally converted into an array of doubles if needed. axis : int Axis along which medians are computed. Output ------ Y : array Array of medians """ return _quantile(X, axis=axis, ratio=0.5, interp=True) nipy-0.3.0/nipy/algorithms/statistics/api.py000066400000000000000000000010161210344137400211330ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Pseudo-package for some important statistics symbols For example: >>> from nipy.algorithms.statistics.api import Formula """ from .formula import formulae from .formula.formulae import (Formula, Factor, Term, terms, make_recarray, natural_spline) from .models import (model, regression, glm, family) from .models.regression import (OLSModel, ARModel, WLSModel, isestimable) nipy-0.3.0/nipy/algorithms/statistics/bench/000077500000000000000000000000001210344137400210715ustar00rootroot00000000000000nipy-0.3.0/nipy/algorithms/statistics/bench/__init__.py000066400000000000000000000000451210344137400232010ustar00rootroot00000000000000# Init for benchmarks for algorithms nipy-0.3.0/nipy/algorithms/statistics/bench/bench_intvol.py000066400000000000000000000057031210344137400241220ustar00rootroot00000000000000import sys import numpy as np from .. 
import intvol from ..tests.test_intrinsic_volumes import nonintersecting_boxes, randorth import numpy.testing as npt def bench_lips3d(): np.random.seed(20111001) phi = intvol.Lips3d EC3d = intvol.EC3d repeat = 4 bx_sz = 60 box1, box2, edge1, edge2 = nonintersecting_boxes((bx_sz,)*3) c = np.indices(box1.shape).astype(np.float) sys.stdout.flush() print "\nIntrinsic volumes 3D" print "--------------------" print 'Box1 %6.2f\n' % npt.measure('phi(c,box1)', repeat), print 'Box2 %6.2f\n' % npt.measure('phi(c, box2)', repeat), print 'Box1+2 %6.2f\n' % npt.measure('phi(c, box1 + box2)', repeat), d = np.random.standard_normal((10,) + (bx_sz,) * 3) print 'Box1+2 d %6.2f\n' % npt.measure('phi(d, box1 + box2)', repeat), U = randorth(p=6)[0:3] e = np.dot(U.T, c.reshape((c.shape[0], -1))) e.shape = (e.shape[0],) + c.shape[1:] print 'Box1+2 e %6.2f\n' % npt.measure('phi(e, box1 + box2)', repeat), print 'Box1+2 EC %6.2f\n' % npt.measure('EC3d(box1 + box2)', repeat), sys.stdout.flush() def bench_lips2d(): np.random.seed(20111001) phi = intvol.Lips2d EC2d = intvol.EC2d repeat = 4 bx_sz = 500 box1, box2, edge1, edge2 = nonintersecting_boxes((bx_sz,)*2) c = np.indices(box1.shape).astype(np.float) sys.stdout.flush() print "\nIntrinsic volumes 2D" print "--------------------" print 'Box1 %6.2f\n' % npt.measure('phi(c,box1)', repeat), print 'Box2 %6.2f\n' % npt.measure('phi(c, box2)', repeat), print 'Box1+2 %6.2f\n' % npt.measure('phi(c, box1 + box2)', repeat), d = np.random.standard_normal((10,) + (bx_sz,) * 2) print 'Box1+2 d %6.2f\n' % npt.measure('phi(d, box1 + box2)', repeat), U = randorth(p=6)[0:2] e = np.dot(U.T, c.reshape((c.shape[0], -1))) e.shape = (e.shape[0],) + c.shape[1:] print 'Box1+2 e %6.2f\n' % npt.measure('phi(e, box1 + box2)', repeat), print 'Box1+2 EC %6.2f\n' % npt.measure('EC2d(box1 + box2)', repeat), sys.stdout.flush() def bench_lips1d(): np.random.seed(20111001) phi = intvol.Lips1d EC1d = intvol.EC1d repeat = 4 bx_sz = 100000 box1, box2, edge1, edge2 = nonintersecting_boxes((bx_sz,)) c = np.indices(box1.shape).astype(np.float) sys.stdout.flush() print "\nIntrinsic volumes 1D" print "--------------------" print 'Box1 %6.2f\n' % npt.measure('phi(c,box1)', repeat), print 'Box2 %6.2f\n' % npt.measure('phi(c, box2)', repeat), print 'Box1+2 %6.2f\n' % npt.measure('phi(c, box1 + box2)', repeat), d = np.random.standard_normal((10, bx_sz)) print 'Box1+2 d %6.2f\n' % npt.measure('phi(d, box1 + box2)', repeat), U = randorth(p=6)[0:1] e = np.dot(U.T, c.reshape((c.shape[0], -1))) e.shape = (e.shape[0],) + c.shape[1:] print 'Box1+2 e %6.2f\n' % npt.measure('phi(e, box1 + box2)', repeat), print 'Box1+2 EC %6.2f\n' % npt.measure('EC1d(box1 + box2)', repeat), sys.stdout.flush() nipy-0.3.0/nipy/algorithms/statistics/empirical_pvalue.py000066400000000000000000000451611210344137400237140ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Routines to get corrected p-values estimates, based on the observations. It implements 3 approaches: - Benjamini-Hochberg FDR: http://en.wikipedia.org/wiki/False_discovery_rate - a class that fits a Gaussian model to the central part of an histogram, following [1] [1] Schwartzman A, Dougherty RF, Lee J, Ghahremani D, Taylor JE. Empirical null and false discovery rate analysis in neuroimaging. Neuroimage. 2009 Jan 1;44(1):71-82. PubMed PMID: 18547821. 
DOI: 10.1016/j.neuroimage.2008.04.182 This is typically necessary to estimate a FDR when one is not certain that the data behaves as a standard normal under H_0. - a model based on Gaussian mixture modelling 'a la Oxford' Author : Bertrand Thirion, Yaroslav Halchenko, 2008-2012 """ import numpy as np from numpy.linalg import pinv import scipy.stats as st def check_p_values(p_values): """Basic checks on the p_values array: values should be within [0,1] Assures also that p_values are at least in 1d array. None of the checks is performed if p_values is None. Parameters ---------- p_values : array of shape (n) The sample p-values Returns ------- p_values : array of shape (n) The sample p-values """ if p_values is None: return None # Take all elements unfolded and assure having at least 1d p_values = np.atleast_1d(np.ravel(p_values)) if np.any(np.isnan(p_values)): raise ValueError("%d values are NaN" % (sum(np.isnan(p_values)))) if p_values.min() < 0: raise ValueError("Negative p-values. Min=%g" % (p_values.min(),)) if p_values.max() > 1: raise ValueError("P-values greater than 1! Max=%g" % ( p_values.max(),)) return p_values def gaussian_fdr(x): """Return the FDR associated with each value assuming a Gaussian distribution """ return fdr(st.norm.sf(np.squeeze(x))) def gaussian_fdr_threshold(x, alpha=0.05): """Return FDR threshold given normal variates Given an array x of normal variates, this function returns the critical p-value associated with alpha. x is explicitly assumed to be normal distributed under H_0 Parameters ----------- x: ndarray input data alpha: float, optional desired significance Returns ------- threshold : float threshold, given as a Gaussian critical value """ pvals = st.norm.sf(x) pth = fdr_threshold(pvals, alpha) return st.norm.isf(pth) def fdr_threshold(p_values, alpha=0.05): """Return FDR threshold given p values Parameters ----------- p_values : array of shape (n), optional The samples p-value alpha : float, optional The desired FDR significance Returns ------- critical_p_value: float The p value corresponding to the FDR alpha """ p_values = check_p_values(p_values) n_samples = np.size(p_values) p_corr = alpha / n_samples sp_values = np.sort(p_values) critical_set = sp_values[ sp_values < p_corr * np.arange(1, n_samples + 1)] if len(critical_set) > 0: critical_p_value = critical_set.max() else: critical_p_value = p_corr return critical_p_value def fdr(p_values=None, verbose=0): """Returns the FDR associated with each p value Parameters ----------- p_values : ndarray of shape (n) The samples p-value Returns ------- q : array of shape(n) The corresponding fdr values """ p_values = check_p_values(p_values) n_samples = p_values.size order = p_values.argsort() sp_values = p_values[order] # compute q while in ascending order q = np.minimum(1, n_samples * sp_values / np.arange(1, n_samples + 1)) for i in range(n_samples - 1, 0, - 1): q[i - 1] = min(q[i], q[i - 1]) # reorder the results inverse_order = np.arange(n_samples) inverse_order[order] = np.arange(n_samples) q = q[inverse_order] if verbose: import matplotlib.pylab as mp mp.figure() mp.xlabel('Input p-value') mp.plot(p_values, q, '.') mp.ylabel('Associated fdr') return q class NormalEmpiricalNull(object): """Class to compute the empirical null normal fit to the data. The data which is used to estimate the FDR, assuming a Gaussian null from Schwartzmann et al., NeuroImage 44 (2009) 71--82 """ def __init__(self, x): """Initialize an empirical null normal object. 
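For illustration, a minimal sketch (assuming ``z`` holds z-scores that are
mostly drawn from the null):

>>> import numpy as np
>>> z = np.random.standard_normal(10000)
>>> enn = NormalEmpiricalNull(z)
>>> enn.learn()                      # fit mu, sigma and p0 on the central 20-80% of the data
>>> th = enn.threshold(alpha=0.05)   # critical value controlling the FDR at 5%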
Parameters ----------- x : 1D ndarray The data used to estimate the empirical null. """ x = np.reshape(x, (- 1)) self.x = np.sort(x) self.n = np.size(x) self.learned = 0 def learn(self, left=0.2, right=0.8): """ Estimate the proportion, mean and variance of a Gaussian distribution for a fraction of the data Parameters ---------- left: float, optional Left cut parameter to prevent fitting non-gaussian data right: float, optional Right cut parameter to prevent fitting non-gaussian data Notes ----- This method stores the following attributes: * mu = mu * p0 = min(1, np.exp(lp0)) * sqsigma: standard deviation of the estimated normal distribution * sigma: np.sqrt(sqsigma) : variance of the estimated normal distribution """ # take a central subsample of x x = self.x[int(self.n * left): int(self.n * right)] # generate the histogram step = 3.5 * np.std(self.x) / np.exp(np.log(self.n) / 3) bins = max(10, (self.x.max() - self.x.min()) / step) hist, ledge = np.histogram(x, bins=bins) step = ledge[1] - ledge[0] medge = ledge + 0.5 * step # remove null bins whist = hist > 0 hist = hist[whist] medge = medge[whist] hist = hist.astype('f') # fit the histogram dmtx = np.ones((3, np.sum(whist))) dmtx[1] = medge dmtx[2] = medge ** 2 coef = np.dot(np.log(hist), pinv(dmtx)) sqsigma = -1.0 / (2 * coef[2]) mu = coef[1] * sqsigma lp0 = (coef[0] - np.log(step * self.n) + 0.5 * np.log(2 * np.pi * sqsigma) + mu ** 2 / (2 * sqsigma)) self.mu = mu self.p0 = min(1, np.exp(lp0)) self.sigma = np.sqrt(sqsigma) self.sqsigma = sqsigma def fdrcurve(self): """ Returns the FDR associated with any point of self.x """ import scipy.stats as st if self.learned == 0: self.learn() efp = (self.p0 * st.norm.sf(self.x, self.mu, self.sigma) * self.n / np.arange(self.n, 0, - 1)) efp = np.minimum(efp, 1) ix = np.argsort(self.x) for i in range(np.size(efp) - 1, 0, - 1): efp[ix[i - 1]] = np.maximum(efp[ix[i]], efp[ix[i - 1]]) self.sorted_x = self.x[ix] self.sorted_fdr = efp[ix] return efp def threshold(self, alpha=0.05, verbose=0): """ Compute the threshold corresponding to an alpha-level FDR for x Parameters ----------- alpha : float, optional the chosen false discovery rate threshold. verbose : boolean, optional the verbosity level, if True a plot is generated. Returns ------- theta: float the critical value associated with the provided FDR """ efp = self.fdrcurve() if verbose: self.plot(efp, alpha) if efp[-1] > alpha: print "the maximal value is %f , the corresponding FDR is %f " \ % (self.x[ - 1], efp[ - 1]) return np.inf j = np.argmin(efp[:: - 1] < alpha) + 1 return 0.5 * (self.x[ - j] + self.x[ - j + 1]) def uncorrected_threshold(self, alpha=0.001, verbose=0): """Compute the threshold corresponding to a specificity alpha for x Parameters ----------- alpha : float, optional the chosen false discovery rate (FDR) threshold. verbose : boolean, optional the verbosity level, if True a plot is generated. 
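For comparison, when sample p-values are available the module-level
Benjamini-Hochberg helpers can be used directly (a minimal sketch; the
uniform draw below only stands in for real sample p-values):

>>> import numpy as np
>>> p_values = np.random.uniform(size=1000)
>>> pth = fdr_threshold(p_values, alpha=0.05)   # Benjamini-Hochberg critical p-value
>>> q = fdr(p_values)                           # FDR associated with each p-value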
Returns ------- theta: float the critical value associated with the provided p-value """ if self.learned == 0: self.learn() threshold = st.norm.isf(alpha, self.mu, self.sigma) if not np.isfinite(threshold): threshold = np.inf if verbose: self.plot() return threshold def fdr(self, theta): """Given a threshold theta, find the estimated FDR Parameters ---------- theta : float or array of shape (n_samples) values to test Returns ------- afp : value of array of shape(n) """ from scipy.stats import norm self.fdrcurve() if np.isscalar(theta): if theta > self.sorted_x[ - 1]: return 0 maj = np.where(self.sorted_x >= theta)[0][0] efp = (self.p0 * norm.sf(theta, self.mu, self.sigma) * self.n\ / np.sum(self.x >= theta)) efp = np.maximum(self.sorted_fdr[maj], efp) else: efp = [] for th in theta: if th > self.sorted_x[ - 1]: efp.append(0) continue maj = self.sorted_fdr[np.where(self.sorted_x >= th)[0][0]] efp.append(np.maximum(maj, self.p0 * st.norm.sf(th, self.mu, self.sigma) * self.n / np.sum(self.x >= th))) efp = np.array(efp) # efp = np.minimum(efp, 1) return efp def plot(self, efp=None, alpha=0.05, bar=1, mpaxes=None): """Plot the histogram of x Parameters ------------ efp : float, optional The empirical FDR (corresponding to x) if efp==None, the false positive rate threshold plot is not drawn. alpha : float, optional The chosen FDR threshold bar=1 : bool, optional mpaxes=None: if not None, handle to an axes where the fig will be drawn. Avoids creating unnecessarily new figures """ if not self.learned: self.learn() n = np.size(self.x) bins = max(10, int(2 * np.exp(np.log(n) / 3.))) hist, ledge = np.histogram(self.x, bins=bins) hist = hist.astype('f') / hist.sum() step = ledge[1] - ledge[0] medge = ledge + 0.5 * step import scipy.stats as st g = self.p0 * st.norm.pdf(medge, self.mu, self.sigma) hist /= step import matplotlib.pylab as mp if mpaxes == None: mp.figure() ax = mp.subplot(1, 1, 1) else: ax = mpaxes if bar: # We need to cut ledge to len(hist) to accommodate for pre and # post numpy 1.3 hist semantic change. ax.bar(ledge[:len(hist)], hist, step) else: ax.plot(medge[:len(hist)], hist, linewidth=2) ax.plot(medge, g, 'r', linewidth=2) ax.set_title('Robust fit of the histogram', fontsize=12) l = ax.legend(('empirical null', 'data'), loc=0) for t in l.get_texts(): t.set_fontsize(12) ax.set_xticklabels(ax.get_xticks(), fontsize=12) ax.set_yticklabels(ax.get_yticks(), fontsize=12) if efp != None: ax.plot(self.x, np.minimum(alpha, efp), 'k') def three_classes_GMM_fit(x, test=None, alpha=0.01, prior_strength=100, verbose=0, fixed_scale=False, mpaxes=None, bias=0, theta=0, return_estimator=False): """Fit the data with a 3-classes Gaussian Mixture Model, i.e. compute some probability that the voxels of a certain map are in class disactivated, null or active Parameters ---------- x: array of shape (nvox,1) The map to be analysed test: array of shape(nbitems,1), optional the test values for which the p-value needs to be computed by default (if None), test=x alpha: float, optional the prior weights of the positive and negative classes prior_strength: float, optional the confidence on the prior (should be compared to size(x)) verbose: int verbosity mode fixed_scale: bool, optional boolean, variance parameterization. if True, the variance is locked to 1 otherwise, it is estimated from the data mpaxes: axes handle used to plot the figure in verbose mode if None, new axes are created bias: bool allows a rescaling of the posterior probability that takes into account the threshold theta. Not rigorous. 
theta: float the threshold used to correct the posterior p-values when bias=1; normally, it is such that test>theta note that if theta = -np.inf, the method has a standard behaviour return_estimator: boolean, optional If return_estimator is true, the estimator object is returned. Returns ------- bfp : array of shape (nbitems,3): the posterior probability of each test item belonging to each component in the GMM (sum to 1 across the 3 classes) if np.size(test)==0, i.e. nbitem==0, None is returned estimator : nipy.labs.clustering.GMM object The estimator object, returned only if return_estimator is true. Notes ----- Our convention is that: * class 1 represents the negative class * class 2 represents the null class * class 3 represents the positive class """ from ..clustering.bgmm import VBGMM from ..clustering.gmm import GridDescriptor nvox = np.size(x) x = np.reshape(x, (nvox, 1)) if test == None: test = x if np.size(test) == 0: return None sx = np.sort(x, 0) nclasses = 3 # set the priors from a reasonable model of the data (!) # prior means mb0 = np.mean(sx[ : alpha * nvox]) mb2 = np.mean(sx[(1 - alpha) * nvox:]) prior_means = np.reshape(np.array([mb0, 0, mb2]), (nclasses, 1)) if fixed_scale: prior_scale = np.ones((nclasses, 1, 1)) * 1. / (prior_strength) else: prior_scale = np.ones((nclasses, 1, 1)) * 1. / \ (prior_strength * np.var(x)) prior_dof = np.ones(nclasses) * prior_strength prior_weights = np.array([alpha, 1 - 2 * alpha, alpha]) * prior_strength prior_shrinkage = np.ones(nclasses) * prior_strength # instantiate the class and set the priors BayesianGMM = VBGMM(nclasses, 1, prior_means, prior_scale, prior_weights, prior_shrinkage, prior_dof) BayesianGMM.set_priors(prior_means, prior_weights, prior_scale, prior_dof, prior_shrinkage) # estimate the model BayesianGMM.estimate(x, delta=1.e-8, verbose=max(0, verbose-1)) # create a sampling grid if (verbose or bias): gd = GridDescriptor(1) gd.set([x.min(), x.max()], 100) gdm = gd.make_grid().squeeze() lj = BayesianGMM.likelihood(gd.make_grid()) # estimate the prior weights bfp = BayesianGMM.likelihood(test) if bias: lw = np.sum(lj[gdm > theta], 0) weights = BayesianGMM.weights / (BayesianGMM.weights.sum()) bfp = (lw / weights) * BayesianGMM.slikelihood(test) if verbose and (mpaxes is not False): BayesianGMM.show_components(x, gd, lj, mpaxes) bfp = (bfp.T / bfp.sum(1)).T if not return_estimator: return bfp else: return bfp, BayesianGMM def gamma_gaussian_fit(x, test=None, verbose=0, mpaxes=None, bias=1, gaussian_mix=0, return_estimator=False): """ Computing some prior probabilities that the voxels of a certain map are in class disactivated, null or active using a gamma-Gaussian mixture Parameters ------------ x: array of shape (nvox,) the map to be analysed test: array of shape (nbitems,), optional the test values for which the p-value needs to be computed by default, test = x verbose: 0, 1 or 2, optional verbosity mode, 0 is quiet, and 2 calls matplotlib to display graphs. mpaxes: matplotlib axes, option. axes handle used to plot the figure in verbose mode if None, new axes are created bias: float, optional lower bound on the Gaussian variance (to avoid shrinkage) gaussian_mix: float, optional if nonzero, lower bound on the Gaussian mixing weight (to avoid shrinkage) return_estimator: boolean, optional if return_estimator is true, the estimator object is returned. 
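A minimal usage sketch (the random draw only stands in for a real map):

>>> import numpy as np
>>> x = np.random.standard_normal(5000)
>>> bfp = gamma_gaussian_fit(x, mpaxes=False)   # mpaxes=False skips the plot
>>> # bfp has shape (x.size, 3): posterior probability of each mixture class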
Returns ------- bfp: array of shape (nbitems,3) The probability of each component in the mixture model for each test value estimator: nipy.labs.clustering.ggmixture.GGGM object The estimator object, returned only if return_estimator is true. """ from ..clustering import ggmixture Ggg = ggmixture.GGGM() Ggg.init_fdr(x) Ggg.estimate(x, niter=100, delta=1.e-8, bias=bias, verbose=0, gaussian_mix=gaussian_mix) if mpaxes is not False: # hyper-verbose mode Ggg.show(x, mpaxes=mpaxes) Ggg.parameters() if test is None: test = x test = np.reshape(test, np.size(test)) bfp = np.array(Ggg.posterior(test)).T if return_estimator: return bfp, Ggg return bfp def smoothed_histogram_from_samples(x, bins=None, nbins=256, normalized=False): """Returns the smooth histogram corresponding to the density underlying the samples in x Parameters ---------- x: array of shape(n_samples), input data bins: array of shape(nbins+1), optional, the bins location nbins: int, optional, the number of bins of the resulting histogram normalized: bool, optional if True, the result is returned as a density value Returns ------- h: array of shape (nbins) the histogram bins: array of shape(nbins+1), the bins location """ from scipy.ndimage import gaussian_filter1d # first define the bins if bins is None: h, bins = np.histogram(x, nbins) bins = bins.mean() + 1.2 * (bins - bins.mean()) h, bins = np.histogram(x, bins) # possibly normalize to density h = 1.0 * h dc = bins[1] - bins[0] if normalized: h /= (dc * h.sum()) # define the optimal width sigma = x.std() / (dc * np.exp(.2 * np.log(x.size))) # smooth the histogram h = gaussian_filter1d(h, sigma, mode='constant') return h, bins nipy-0.3.0/nipy/algorithms/statistics/formula/000077500000000000000000000000001210344137400214575ustar00rootroot00000000000000nipy-0.3.0/nipy/algorithms/statistics/formula/__init__.py000066400000000000000000000002171210344137400235700ustar00rootroot00000000000000""" Formula and related objects """ from .formulae import (Formula, Term, terms, Factor, make_recarray, natural_spline) nipy-0.3.0/nipy/algorithms/statistics/formula/formulae.py000066400000000000000000001226421210344137400236520ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: ''' Formula objects =============== A formula is basically a sympy expression for the mean of something of the form:: mean = sum([Beta(e)*e for e in expr]) Or, a linear combination of sympy expressions, with each one multiplied by its own "Beta". The elements of expr can be instances of Term (for a linear regression formula, they would all be instances of Term). But, in general, there might be some other parameters (i.e. sympy.Symbol instances) that are not Terms. The design matrix is made up of columns that are the derivatives of mean with respect to everything that is not a Term, evaluted at a recarray that has field names given by [str(t) for t in self.terms]. 
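As a small illustration (the printed form follows the doctests further down
in this module):

>>> x, y = Term('x'), Term('y')
>>> f = Formula([x, y])
>>> f.mean
_b0*x + _b1*y

Here the design has two columns, d(mean)/d(_b0) = x and d(mean)/d(_b1) = y,
each evaluated at the rows of the input recarray.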
For those familiar with R's formula syntax, if we wanted a design matrix like the following:: > s.table = read.table("http://www-stat.stanford.edu/~jtaylo/courses/stats191/data/supervisor.table", header=T) > d = model.matrix(lm(Y ~ X1*X3, s.table) ) > d (Intercept) X1 X3 X1:X3 1 1 51 39 1989 2 1 64 54 3456 3 1 70 69 4830 4 1 63 47 2961 5 1 78 66 5148 6 1 55 44 2420 7 1 67 56 3752 8 1 75 55 4125 9 1 82 67 5494 10 1 61 47 2867 11 1 53 58 3074 12 1 60 39 2340 13 1 62 42 2604 14 1 83 45 3735 15 1 77 72 5544 16 1 90 72 6480 17 1 85 69 5865 18 1 60 75 4500 19 1 70 57 3990 20 1 58 54 3132 21 1 40 34 1360 22 1 61 62 3782 23 1 66 50 3300 24 1 37 58 2146 25 1 54 48 2592 26 1 77 63 4851 27 1 75 74 5550 28 1 57 45 2565 29 1 85 71 6035 30 1 82 59 4838 attr(,"assign") [1] 0 1 2 3 > With the Formula, it looks like this: >>> r = np.rec.array([ ... (43, 51, 30, 39, 61, 92, 45), (63, 64, 51, 54, 63, 73, 47), ... (71, 70, 68, 69, 76, 86, 48), (61, 63, 45, 47, 54, 84, 35), ... (81, 78, 56, 66, 71, 83, 47), (43, 55, 49, 44, 54, 49, 34), ... (58, 67, 42, 56, 66, 68, 35), (71, 75, 50, 55, 70, 66, 41), ... (72, 82, 72, 67, 71, 83, 31), (67, 61, 45, 47, 62, 80, 41), ... (64, 53, 53, 58, 58, 67, 34), (67, 60, 47, 39, 59, 74, 41), ... (69, 62, 57, 42, 55, 63, 25), (68, 83, 83, 45, 59, 77, 35), ... (77, 77, 54, 72, 79, 77, 46), (81, 90, 50, 72, 60, 54, 36), ... (74, 85, 64, 69, 79, 79, 63), (65, 60, 65, 75, 55, 80, 60), ... (65, 70, 46, 57, 75, 85, 46), (50, 58, 68, 54, 64, 78, 52), ... (50, 40, 33, 34, 43, 64, 33), (64, 61, 52, 62, 66, 80, 41), ... (53, 66, 52, 50, 63, 80, 37), (40, 37, 42, 58, 50, 57, 49), ... (63, 54, 42, 48, 66, 75, 33), (66, 77, 66, 63, 88, 76, 72), ... (78, 75, 58, 74, 80, 78, 49), (48, 57, 44, 45, 51, 83, 38), ... (85, 85, 71, 71, 77, 74, 55), (82, 82, 39, 59, 64, 78, 39)], ... dtype=[('y', '>> x1 = Term('x1'); x3 = Term('x3') >>> f = Formula([x1, x3, x1*x3]) + I >>> f.mean _b0*x1 + _b1*x3 + _b2*x1*x3 + _b3 The I is the "intercept" term, I have explicity not used R's default of adding it to everything. >>> f.design(r) array([(51.0, 39.0, 1989.0, 1.0), (64.0, 54.0, 3456.0, 1.0), (70.0, 69.0, 4830.0, 1.0), (63.0, 47.0, 2961.0, 1.0), (78.0, 66.0, 5148.0, 1.0), (55.0, 44.0, 2420.0, 1.0), (67.0, 56.0, 3752.0, 1.0), (75.0, 55.0, 4125.0, 1.0), (82.0, 67.0, 5494.0, 1.0), (61.0, 47.0, 2867.0, 1.0), (53.0, 58.0, 3074.0, 1.0), (60.0, 39.0, 2340.0, 1.0), (62.0, 42.0, 2604.0, 1.0), (83.0, 45.0, 3735.0, 1.0), (77.0, 72.0, 5544.0, 1.0), (90.0, 72.0, 6480.0, 1.0), (85.0, 69.0, 5865.0, 1.0), (60.0, 75.0, 4500.0, 1.0), (70.0, 57.0, 3990.0, 1.0), (58.0, 54.0, 3132.0, 1.0), (40.0, 34.0, 1360.0, 1.0), (61.0, 62.0, 3782.0, 1.0), (66.0, 50.0, 3300.0, 1.0), (37.0, 58.0, 2146.0, 1.0), (54.0, 48.0, 2592.0, 1.0), (77.0, 63.0, 4851.0, 1.0), (75.0, 74.0, 5550.0, 1.0), (57.0, 45.0, 2565.0, 1.0), (85.0, 71.0, 6035.0, 1.0), (82.0, 59.0, 4838.0, 1.0)], dtype=[('x1', '>> t = Term('x') >>> xval = np.array([(3,),(4,),(5,)], np.dtype([('x', np.float)])) >>> f = t.formula >>> d = f.design(xval) >>> print d.dtype.descr [('x', '>> f.design(xval, return_float=True) array([ 3., 4., 5.]) """ # This flag is defined to avoid using isinstance in getterms # and getparams. 
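    # (The module-level helpers is_term, is_factor_term, is_formula and
    # is_factor at the bottom of this file simply test for the presence of
    # these flag attributes.)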
_term_flag = True def _getformula(self): return Formula([self]) formula = property(_getformula, doc="Return a Formula with only terms=[self].") def __add__(self, other): if self == other: return self else: return sympy.Symbol.__add__(self, other) # time symbol T = Term('t') def terms(names, **kwargs): ''' Return list of terms with names given by `names` This is just a convenience in defining a set of terms, and is the equivalent of ``sympy.symbols`` for defining symbols in sympy. We enforce the sympy 0.7.0 behavior of returning symbol "abc" from input "abc", rthan than 3 symbols "a", "b", "c". Parameters ---------- names : str or sequence of str If a single str, can specify multiple ``Term``s with string containing space or ',' as separator. \\**kwargs : keyword arguments keyword arguments as for ``sympy.symbols`` Returns ------- ts : ``Term`` or tuple ``Term`` instance or list of ``Term`` instance objects named from `names` Examples -------- >>> terms(('a', 'b', 'c')) (a, b, c) >>> terms('a, b, c') (a, b, c) >>> terms('abc') abc ''' if (SYMPY_0p6 and isinstance(names, basestring) and not set(', ').intersection(names)): if not kwargs.get('each_char', False): # remove each_char (or no-op if absent) kwargs.pop('each_char', None) names = (names,) syms = sympy.symbols(names, **kwargs) try: len(syms) except TypeError: return Term(syms.name) return tuple(Term(s.name) for s in syms) class FactorTerm(Term): """ Boolean Term derived from a Factor. Its properties are the same as a Term except that its product with itself is itself. """ # This flag is defined to avoid using isinstance in getterms _factor_term_flag = True def __new__(cls, name, level): # Names or levels can be byte strings new = Term.__new__(cls, "%s_%s" % (to_str(name), to_str(level))) new.level = level new.factor_name = name return new def __mul__(self, other): if self == other: return self else: return sympy.Symbol.__mul__(self, other) class Beta(sympy.symbol.Dummy): ''' A symbol tied to a Term `term` ''' def __new__(cls, name, term): new = sympy.symbol.Dummy.__new__(cls, name) new._term = term return new def getparams(expression): """ Return the parameters of an expression that are not Term instances but are instances of sympy.Symbol. Examples -------- >>> x, y, z = [Term(l) for l in 'xyz'] >>> f = Formula([x,y,z]) >>> getparams(f) [] >>> f.mean _b0*x + _b1*y + _b2*z >>> getparams(f.mean) [_b0, _b1, _b2] >>> th = sympy.Symbol('theta') >>> f.mean*sympy.exp(th) (_b0*x + _b1*y + _b2*z)*exp(theta) >>> getparams(f.mean*sympy.exp(th)) [theta, _b0, _b1, _b2] """ atoms = set([]) expression = np.array(expression) if expression.shape == (): expression = expression.reshape((1,)) if expression.ndim > 1: expression = expression.reshape((np.product(expression.shape),)) for term in expression: atoms = atoms.union(sympy.sympify(term).atoms()) params = [] for atom in atoms: if isinstance(atom, sympy.Symbol) and not is_term(atom): params.append(atom) params.sort() return params def getterms(expression): """ Return the all instances of Term in an expression. 
Examples -------- >>> x, y, z = [Term(l) for l in 'xyz'] >>> f = Formula([x,y,z]) >>> getterms(f) [x, y, z] >>> getterms(f.mean) [x, y, z] """ atoms = set([]) expression = np.array(expression) if expression.shape == (): expression = expression.reshape((1,)) if expression.ndim > 1: expression = expression.reshape((np.product(expression.shape),)) for e in expression: atoms = atoms.union(e.atoms()) terms = [] for atom in atoms: if is_term(atom): terms.append(atom) terms.sort() return terms def make_recarray(rows, names, dtypes=None): """ Create recarray from `rows` with field names `names` Create a recarray with named columns from a list of rows and names for the columns. If dtype is None, the dtype is based on rows if it is an np.ndarray, else the data is cast as np.float. If dtypes are supplied, it uses the dtypes to create a np.dtype unless rows is an np.ndarray, in which case dtypes are ignored Parameters ---------- rows: array-like Rows that will be turned into an recarray. names: sequence Sequence of strings - names for the columns. dtypes: [str or np.dtype] Used to create a np.dtype, can be np.dtypes or string. Returns ------- v : np.ndarray Examples -------- The following tests depend on machine byte order to pass >>> arr = np.array([[3,4 ], [4, 6], [6, 8]]) >>> make_recarray(arr, ['x', 'y']) #doctest: +ELLIPSIS array([[(3, 4)], [(4, 6)], [(6, 8)]], dtype=[('x', '...'), ('y', '...')]) >>> r = make_recarray(arr, ['w', 'u']) >>> make_recarray(r, ['x', 'y']) #doctest: +ELLIPSIS array([[(3, 4)], [(4, 6)], [(6, 8)]], dtype=[('x', '...'), ('y', '...')]) >>> make_recarray([[3, 4], [4, 6], [7, 9]], 'wv', [np.float, np.int]) #doctest: +ELLIPSIS array([(3.0, 4), (4.0, 6), (7.0, 9)], dtype=[('w', '...'), ('v', '...')]) """ # XXX This function is sort of one of convenience # Would be nice to use DataArray or something like that # to add axis names. if isinstance(rows, np.ndarray): if rows.dtype.isbuiltin: dtype = np.dtype([(n, rows.dtype) for n in names]) else: dtype = np.dtype([(n, d[1]) for n, d in zip(names, rows.dtype.descr)]) if dtypes is not None: raise ValueError('dtypes not used if rows is an ndarray') return rows.view(dtype) if dtypes is None: dtype = np.dtype([(n, np.float) for n in names]) else: dtype = np.dtype([(n, d) for n, d in zip(names, dtypes)]) nrows = [] vector = -1 for r in rows: if vector < 0: a = np.array(r) if a.shape == (): vector = True else: vector = False if not vector: nrows.append(tuple(r)) else: nrows.append(r) if vector: if len(names) != 1: # a 'row vector' nrows = tuple(nrows) return np.array(nrows, dtype) else: nrows = np.array([(r,) for r in nrows], dtype) return np.array(nrows, dtype) class Formula(object): """ A Formula is a model for a mean in a regression model. It is often given by a sequence of sympy expressions, with the mean model being the sum of each term multiplied by a linear regression coefficient. The expressions may depend on additional Symbol instances, giving a non-linear regression model. 
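    A small sketch of typical use (see the module docstring and the
    ``design`` method for fuller, more realistic examples):

    >>> x, y = Term('x'), Term('y')
    >>> f = Formula([x, y])
    >>> data = make_recarray([(1, 2), (3, 4)], 'xy')
    >>> f.design(data, return_float=True)
    array([[ 1.,  2.],
           [ 3.,  4.]])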
""" # This flag is defined to avoid using isinstance _formula_flag = True def __init__(self, seq, char = 'b'): """ Parameters ---------- seq : sequence of ``sympy.Basic`` char : str, optional character for regression coefficient """ self._terms = np.asarray(seq) self._counter = 0 self.char = char # Properties def _getcoefs(self): if not hasattr(self, '_coefs'): self._coefs = {} for term in self.terms: self._coefs.setdefault(term, Beta("%s%d" % (self.char, self._counter), term)) self._counter += 1 return self._coefs coefs = property(_getcoefs, doc='Coefficients in the linear regression formula.') def _getterms(self): t = self._terms # The Rmode flag is meant to emulate R's implicit addition of an # intercept to every formula. It currently cannot be changed. Rmode = False if Rmode: if sympy.Number(1) not in self._terms: t = np.array(list(t) + [sympy.Number(1)]) return t terms = property(_getterms, doc='Terms in the linear regression formula.') def _getmean(self): """ Expression for mean Expression for the mean, expressed as a linear combination of terms, each with dummy variables in front. """ b = [self.coefs[term] for term in self.terms] return np.sum(np.array(b)*self.terms) mean = property(_getmean, doc="Expression for the mean, expressed " "as a linear combination of terms, each with dummy " "variables in front.") def _getdiff(self): params = sorted(list(set(getparams(self.mean)))) return [sympy.diff(self.mean, p).doit() for p in params] design_expr = property(_getdiff) def _getdtype(self): vnames = [str(s) for s in self.design_expr] return np.dtype([(n, np.float) for n in vnames]) dtype = property(_getdtype, doc='The dtype of the design matrix of the Formula.') def __repr__(self): return """Formula(%s)""" % `list(self.terms)` def __getitem__(self, key): """ Return the term such that str(term) == key. Parameters ---------- key : str name of term to retrieve Returns ------- term : sympy.Expression """ names = [str(t) for t in self.terms] try: idx = names.index(key) except ValueError: raise ValueError('term %s not found' % key) return self.terms[idx] @staticmethod def fromrec(rec, keep=[], drop=[]): """ Construct Formula from recarray For fields with a string-dtype, it is assumed that these are qualtiatitve regressors, i.e. Factors. Parameters ---------- rec: recarray Recarray whose field names will be used to create a formula. keep: [] Field names to explicitly keep, dropping all others. drop: [] Field names to drop. """ f = {} for n in rec.dtype.names: if rec[n].dtype.kind in 'SOU': f[n] = Factor.fromcol(rec[n], n) else: f[n] = Term(n).formula for d in drop: del(f[d]) if keep: return np.sum([t for n, t in f.items() if n in keep]) else: return np.sum(f.values()) def subs(self, old, new): """ Perform a sympy substitution on all terms in the Formula Returns a new instance of the same class Parameters ---------- old : sympy.Basic The expression to be changed new : sympy.Basic The value to change it to. Returns ------- newf : Formula Examples -------- >>> s, t = [Term(l) for l in 'st'] >>> f, g = [sympy.Function(l) for l in 'fg'] >>> form = Formula([f(t),g(s)]) >>> newform = form.subs(g, sympy.Function('h')) >>> newform.terms array([f(t), h(s)], dtype=object) >>> form.terms array([f(t), g(s)], dtype=object) """ return self.__class__([term.subs(old, new) for term in self.terms]) def __add__(self, other): """ Create a new Formula by combining terms of other with those of self. 
>>> x, y, z = [Term(l) for l in 'xyz'] >>> f1 = Formula([x,y,z]) >>> f2 = Formula([y])+I >>> f3=f1+f2 >>> sorted(f1.terms) [x, y, z] >>> sorted(f2.terms) [1, y] >>> sorted(f3.terms) [1, x, y, y, z] """ if not is_formula(other): raise ValueError('only Formula objects can be added to a Formula') f = self.__class__(np.hstack([self.terms, other.terms])) return f def __sub__(self, other): """ Create a new Formula by deleting terms in other from self. No exceptions are raised for terms in other that do not appear in self. >>> x, y, z = [Term(l) for l in 'xyz'] >>> f1 = Formula([x,y,z]) >>> f2 = Formula([y])+I >>> f1.mean _b0*x + _b1*y + _b2*z >>> f2.mean _b0*y + _b1 >>> f3=f2-f1 >>> f3.mean _b0 >>> f4=f1-f2 >>> f4.mean _b0*x + _b1*z """ if not is_formula(other): raise ValueError('only Formula objects can be subtracted from a Formula') d = list(set(self.terms).difference(other.terms)) return self.__class__(d) def __array__(self): return self.terms def _getparams(self): return getparams(self.mean) params = property(_getparams, doc='The parameters in the Formula.') def __mul__(self, other): if not is_formula(other): raise ValueError('only two Formulas can be multiplied together') if is_factor(self): if self == other: return self v = [] # Compute the pairwise product of each term # If either one is a Term, use Term's multiplication for sterm in self.terms: for oterm in other.terms: if is_term(sterm): v.append(Term.__mul__(sterm, oterm)) elif is_term(oterm): v.append(Term.__mul__(oterm, sterm)) else: v.append(sterm*oterm) return Formula(tuple(np.unique(v))) def __eq__(self, other): s = np.array(self) o = np.array(other) if s.shape != o.shape: return False return np.alltrue(np.equal(np.array(self), np.array(other))) def _setup_design(self): """ Create a callable object to evaluate the design matrix at a given set of parameter values to be specified by a recarray and observed Term values, also specified by a recarray. """ # the design expression is the differentiation of the expression # for the mean. It is a list d = self.design_expr # Before evaluating, we recreate the formula # with numbered terms, and numbered parameters. # This renaming has no impact on the # final design matrix as the # callable, self._f below, is a lambda # that does not care about the names of the terms. # First, find all terms in the mean expression, # and rename them in the form "__t%d__" with a # random offset. # This may cause a possible problem # when there are parameters named something like "__t%d__". # Using the random offset will minimize the possibility # of this happening. # This renaming is here principally because of the intercept. random_offset = np.random.random_integers(low=0, high=2**30) terms = getterms(self.mean) newterms = [] for i, t in enumerate(terms): newt = sympy.Symbol("__t%d__" % (i + random_offset)) for j, _ in enumerate(d): d[j] = d[j].subs(t, newt) newterms.append(newt) # Next, find all parameters that remain in the design expression. # In a standard regression model, there will be no parameters # because they will all be differentiated away in computing # self.design_expr. In nonlinear models, parameters will remain. 
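        # (For example, a formula built from exp(theta*t) keeps the symbol
        # ``theta`` as a parameter, whose value must then be supplied via the
        # ``param`` recarray argument of ``design``.)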
params = getparams(self.design_expr) newparams = [] for i, p in enumerate(params): newp = make_dummy("__p%d__" % (i + random_offset)) for j, _ in enumerate(d): d[j] = d[j].subs(p, newp) newparams.append(newp) # If there are any aliased functions, these need to be added # to the name space before sympy lambdifies the expression # These "aliased" functions are used for things like # the natural splines, etc. You can represent natural splines # with sympy but the expression is pretty awful. Note that # ``d`` here is list giving the differentiation of the # expression for the mean. self._f(...) therefore also returns # a list self._f = lambdify(newparams + newterms, d, ("numpy")) # The input to self.design will be a recarray of that must # have field names that the Formula will expect to see. # However, if any of self.terms are FactorTerms, then the field # in the recarray will not actually be in the Term. # # For example, if there is a Factor 'f' with levels ['a','b'], # there will be terms 'f_a' and 'f_b', though the input to # design will have a field named 'f'. In this sense, # the recarray used in the call to self.design # is not really made up of terms, but "preterms". # In this case, the callable preterm = [] for t in terms: if not is_factor_term(t): preterm.append(str(t)) else: preterm.append(t.factor_name) preterm = list(set(preterm)) # There is also an argument for parameters that are not # Terms. self._dtypes = {'param':np.dtype([(str(p), np.float) for p in params]), 'term':np.dtype([(str(t), np.float) for t in terms]), 'preterm':np.dtype([(n, np.float) for n in preterm])} self.__terms = terms def design(self, input, param=None, return_float=False, contrasts=None): """ Construct the design matrix, and optional contrast matrices. Parameters ---------- input : np.recarray Recarray including fields needed to compute the Terms in getparams(self.design_expr). param : None or np.recarray Recarray including fields that are not Terms in getparams(self.design_expr) return_float : bool, optional If True, return a np.float array rather than a np.recarray contrasts : None or dict, optional Contrasts. The items in this dictionary should be (str, Formula) pairs where a contrast matrix is constructed for each Formula by evaluating its design at the same parameters as self.design. If not None, then the return_float is set to True. Returns ------- des : 2D array design matrix cmatrices : dict, optional Dictionary with keys from `contrasts` input, and contrast matrices corresponding to `des` design matrix. Returned only if `contrasts` input is not None """ self._setup_design() preterm_recarray = input param_recarray = param # The input to design should have field names for all fields in self._dtypes['preterm'] if not set(preterm_recarray.dtype.names).issuperset(self._dtypes['preterm'].names): raise ValueError("for term, expecting a recarray with " "dtype having the following names: %s" % `self._dtypes['preterm'].names`) # The parameters should have field names for all fields in self._dtypes['param'] if param_recarray is not None: if not set(param_recarray.dtype.names).issuperset(self._dtypes['param'].names): raise ValueError("for param, expecting a recarray with " "dtype having the following names: %s" % `self._dtypes['param'].names`) # If the only term is an intercept, # the return value is a matrix of 1's. 
if list(self.terms) == [sympy.Number(1)]: a = np.ones(preterm_recarray.shape[0], np.float) if not return_float: a = a.view(np.dtype([('intercept', np.float)])) return a elif not self._dtypes['term']: raise ValueError("none of the expresssions are self.terms " "are Term instances; shape of resulting " "undefined") # The term_recarray is essentially the same as preterm_recarray, # except that all factors in self are expanded # into their respective binary columns. term_recarray = np.zeros(preterm_recarray.shape[0], dtype=self._dtypes['term']) for t in self.__terms: if not is_factor_term(t): term_recarray[t.name] = preterm_recarray[t.name] else: factor_col = preterm_recarray[t.factor_name] # Python 3: If column type is bytes, convert to string, to allow # level comparison if factor_col.dtype.kind == 'S': factor_col = factor_col.astype('U') fl_ind = np.array([x == t.level for x in factor_col]).reshape(-1) term_recarray['%s_%s' % (t.factor_name, t.level)] = fl_ind # The lambda created in self._setup_design needs to take a tuple of # columns as argument, not an ndarray, so each column # is extracted and put into float_tuple. float_array = term_recarray.view(np.float) float_array.shape = (term_recarray.shape[0], -1) float_array = float_array.T float_tuple = tuple(float_array) # If there are any parameters, they also must be extracted # and put into a tuple with the order specified # by self._dtypes['param'] if param_recarray is not None: param = tuple(float(param_recarray[n]) for n in self._dtypes['param'].names) else: param = () # Evaluate the design at the parameters and tuple of arrays D = self._f(*(param+float_tuple)) # TODO: check if this next stepis necessary # I think it is because the lambda evaluates sympy.Number(1) to 1 # and not an array. D_tuple = [np.asarray(w) for w in D] need_to_modify_shape = [] OK_row_shapes = [] for i, row in enumerate(D_tuple): if row.shape in [(),(1,)]: need_to_modify_shape.append(i) else: OK_row_shapes.append(row.shape[0]) # Make sure that each array has the correct shape. # The columns in need_to_modify should just be # the intercept column, which evaluates to have shape == (). # This makes sure that it has the correct number of rows. for i in need_to_modify_shape: D_tuple[i].shape = () D_tuple[i] = np.multiply.outer(D_tuple[i], np.ones(preterm_recarray.shape[0])) # At this point, all the columns have the correct shape and the # design matrix is almost ready to output. D = np.array(D_tuple).T # If we will return a float matrix or any contrasts, # we may have some reshaping to do. if contrasts is None: contrasts = {} if return_float or contrasts: # If the design matrix is just a column of 1s # return a 1-dimensional array. D = np.squeeze(D.astype(np.float)) # If there are contrasts, the pseudo-inverse of D # must be computed. if contrasts: if D.ndim == 1: _D = D.reshape((D.shape[0], 1)) else: _D = D pinvD = np.linalg.pinv(_D) else: # Correct the dtype. # XXX There seems to be a lot of messing around with the dtype. # This would be a convenient place to just add # labels like a DataArray. D = np.array([tuple(r) for r in D], self.dtype) # Compute the contrast matrices, if any. 
if contrasts: cmatrices = {} for key, cf in contrasts.items(): if not is_formula(cf): cf = Formula([cf]) L = cf.design(input, param=param_recarray, return_float=True) cmatrices[key] = contrast_from_cols_or_rows(L, _D, pseudo=pinvD) return D, cmatrices else: return D def natural_spline(t, knots=None, order=3, intercept=False): """ Return a Formula containing a natural spline Spline for a Term with specified `knots` and `order`. Parameters ---------- t : ``Term`` knots : None or sequence, optional Sequence of float. Default None (same as empty list) order : int, optional Order of the spline. Defaults to a cubic (==3) intercept : bool, optional If True, include a constant function in the natural spline. Default is False Returns ------- formula : Formula A Formula with (len(knots) + order) Terms (if intercept=False, otherwise includes one more Term), made up of the natural spline functions. Examples -------- >>> x = Term('x') >>> n = natural_spline(x, knots=[1,3,4], order=3) >>> xval = np.array([3,5,7.]).view(np.dtype([('x', np.float)])) >>> n.design(xval, return_float=True) array([[ 3., 9., 27., 8., 0., -0.], [ 5., 25., 125., 64., 8., 1.], [ 7., 49., 343., 216., 64., 27.]]) >>> d = n.design(xval) >>> print d.dtype.descr [('ns_1(x)', '>> f = Factor('a', ['x','y']) >>> sf = f.stratify('theta') >>> sf.mean _theta0*a_x + _theta1*a_y """ if not set(str(variable)).issubset(ascii_letters + digits): raise ValueError('variable should be interpretable as a ' 'name and not have anything but digits ' 'and numbers') variable = sympy.sympify(variable) f = Formula(self._terms, char=variable) f.name = self.name return f @staticmethod def fromcol(col, name): """ Create a Factor from a column array. Parameters ---------- col : ndarray an array with ndim==1 name : str name of the Factor Returns ------- factor : Factor Examples -------- >>> data = np.array([(3,'a'),(4,'a'),(5,'b'),(3,'b')], np.dtype([('x', np.float), ('y', 'S1')])) >>> f1 = Factor.fromcol(data['y'], 'y') >>> f2 = Factor.fromcol(data['x'], 'x') >>> d = f1.design(data) >>> print d.dtype.descr [('y_a', '>> d = f2.design(data) >>> print d.dtype.descr [('x_3', ' 1): raise ValueError('expecting an array that can be thought ' 'of as a column or field of a recarray') levels = np.unique(col) if not col.dtype.names and not name: name = 'factor' elif col.dtype.names: name = col.dtype.names[0] return Factor(name, levels) def contrast_from_cols_or_rows(L, D, pseudo=None): """ Construct a contrast matrix from a design matrix D (possibly with its pseudo inverse already computed) and a matrix L that either specifies something in the column space of D or the row space of D. Parameters ---------- L : ndarray Matrix used to try and construct a contrast. D : ndarray Design matrix used to create the contrast. pseudo : None or array-like, optional If not None, gives pseudo-inverse of `D`. Allows you to pass this if it is already calculated. Returns ------- C : ndarray Matrix with C.shape[1] == D.shape[1] representing an estimable contrast. Notes ----- From an n x p design matrix D and a matrix L, tries to determine a p x q contrast matrix C which determines a contrast of full rank, i.e. the n x q matrix dot(transpose(C), pinv(D)) is full rank. L must satisfy either L.shape[0] == n or L.shape[1] == p. If L.shape[0] == n, then L is thought of as representing columns in the column space of D. If L.shape[1] == p, then L is thought of as what is known as a contrast matrix. 
In this case, this function returns an estimable contrast corresponding to the dot(D, L.T) This always produces a meaningful contrast, not always with the intended properties because q is always non-zero unless L is identically 0. That is, it produces a contrast that spans the column space of L (after projection onto the column space of D). """ L = np.asarray(L) D = np.asarray(D) n, p = D.shape if L.shape[0] != n and L.shape[1] != p: raise ValueError('shape of L and D mismatched') if pseudo is None: pseudo = pinv(D) if L.shape[0] == n: C = np.dot(pseudo, L).T else: C = np.dot(pseudo, np.dot(D, L.T)).T Lp = np.dot(D, C.T) if len(Lp.shape) == 1: Lp.shape = (n, 1) Lp_rank = matrix_rank(Lp) if Lp_rank != Lp.shape[1]: Lp = full_rank(Lp, Lp_rank) C = np.dot(pseudo, Lp).T return np.squeeze(C) class RandomEffects(Formula): """ Covariance matrices for common random effects analyses. Examples -------- Two subjects (here named 2 and 3): >>> subj = make_recarray([2,2,2,3,3], 's') >>> subj_factor = Factor('s', [2,3]) By default the covariance matrix is symbolic. The display differs a little between sympy versions (hence we don't check it in the doctests): >>> c = RandomEffects(subj_factor.terms) >>> c.cov(subj) #doctest: +IGNORE_OUTPUT array([[_s2_0, _s2_0, _s2_0, 0, 0], [_s2_0, _s2_0, _s2_0, 0, 0], [_s2_0, _s2_0, _s2_0, 0, 0], [0, 0, 0, _s2_1, _s2_1], [0, 0, 0, _s2_1, _s2_1]], dtype=object) With a numeric `sigma`, you get a numeric array: >>> c = RandomEffects(subj_factor.terms, sigma=np.array([[4,1],[1,6]])) >>> c.cov(subj) array([[ 4., 4., 4., 1., 1.], [ 4., 4., 4., 1., 1.], [ 4., 4., 4., 1., 1.], [ 1., 1., 1., 6., 6.], [ 1., 1., 1., 6., 6.]]) """ def __init__(self, seq, sigma=None, char = 'e'): """ Initialize random effects instance Parameters ---------- seq : [``sympy.Basic``] sigma : ndarray Covariance of the random effects. Defaults to a diagonal with entries for each random effect. char : character for regression coefficient """ self._terms = np.asarray(seq) q = self._terms.shape[0] self._counter = 0 if sigma is None: self.sigma = np.diag([make_dummy('s2_%d' % i) for i in range(q)]) else: self.sigma = sigma if self.sigma.shape != (q,q): raise ValueError('incorrect shape for covariance ' 'of random effects, ' 'should have shape %s' % repr(q,q)) self.char = char def cov(self, term, param=None): """ Compute the covariance matrix for some given data. Parameters ---------- term : np.recarray Recarray including fields corresponding to the Terms in getparams(self.design_expr). param : np.recarray Recarray including fields that are not Terms in getparams(self.design_expr) Returns ------- C : ndarray Covariance matrix implied by design and self.sigma. """ D = self.design(term, param=param, return_float=True) return np.dot(D, np.dot(self.sigma, D.T)) def is_term(obj): """ Is obj a Term? """ return hasattr(obj, "_term_flag") def is_factor_term(obj): """ Is obj a FactorTerm? """ return hasattr(obj, "_factor_term_flag") def is_formula(obj): """ Is obj a Formula? """ return hasattr(obj, "_formula_flag") def is_factor(obj): """ Is obj a Factor? 
""" return hasattr(obj, "_factor_flag") nipy-0.3.0/nipy/algorithms/statistics/formula/setup.py000066400000000000000000000005311210344137400231700ustar00rootroot00000000000000 def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('formula',parent_package,top_path) config.add_subpackage('tests') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) nipy-0.3.0/nipy/algorithms/statistics/formula/tests/000077500000000000000000000000001210344137400226215ustar00rootroot00000000000000nipy-0.3.0/nipy/algorithms/statistics/formula/tests/__init__.py000066400000000000000000000000271210344137400247310ustar00rootroot00000000000000# Make tests a package nipy-0.3.0/nipy/algorithms/statistics/formula/tests/test_formula.py000066400000000000000000000343731210344137400257110ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Test functions for formulae """ import numpy as np import sympy from nipy.fixes.sympy.utilities.lambdify import implemented_function from .. import formulae as F from ..formulae import terms, Term from nibabel.py3k import asbytes from nose.tools import (assert_true, assert_equal, assert_false, assert_raises) from numpy.testing import assert_almost_equal, assert_array_equal def test_terms(): t = terms('a') assert_true(isinstance(t, Term)) a, b, c = Term('a'), Term('b'), Term('c') assert_equal(t, a) ts = terms(('a', 'b', 'c')) assert_equal(ts, (a, b, c)) # a string without separator chars returns one symbol. This is the # sympy 0.7 behavior assert_equal(terms('abc'), Term('abc')) # separators return multiple symbols assert_equal(terms('a b c'), (a, b, c)) assert_equal(terms('a, b, c'), (a, b, c)) # no arg is an error assert_raises(TypeError, terms) # but empty arg returns empty tuple assert_equal(terms(()), ()) # Test behavior of deprecated each_char kwarg try: res = terms('abc', each_char=False) except TypeError: return assert_equal(res, Term('abc')) assert_equal(terms('abc', each_char=True), (a, b, c)) def test_getparams_terms(): t = F.Term('t') x, y, z = [sympy.Symbol(l) for l in 'xyz'] yield assert_equal, set(F.getparams(x*y*t)), set([x,y]) yield assert_equal, set(F.getterms(x*y*t)), set([t]) matrix_expr = np.array([[x,y*t],[y,z]]) yield assert_equal, set(F.getparams(matrix_expr)), set([x,y,z]) yield assert_equal, set(F.getterms(matrix_expr)), set([t]) def test_formula_params(): t = F.Term('t') x, y = [sympy.Symbol(l) for l in 'xy'] f = F.Formula([t*x,y]) yield assert_equal, set(f.params), set([x,y] + list(f.coefs.values())) def test_contrast1(): x = F.Term('x') yield assert_equal, x, x+x y = F.Term('y') z = F.Term('z') f = F.Formula([x,y]) arr = F.make_recarray([[3,5,4],[8,21,-1],[4,6,-2]], 'xyz') D, C = f.design(arr, contrasts={'x':x.formula, 'diff':F.Formula([x-y]), 'sum':F.Formula([x+y]), 'both':F.Formula([x-y,x+y])}) yield assert_almost_equal, C['x'], np.array([1,0]) yield assert_almost_equal, C['diff'], np.array([1,-1]) yield assert_almost_equal, C['sum'], np.array([1,1]) yield assert_almost_equal, C['both'], np.array([[1,-1],[1,1]]) f = F.Formula([x,y,z]) arr = F.make_recarray([[3,5,4],[8,21,-1],[4,6,-2]], 'xyz') D, C = f.design(arr, contrasts={'x':x.formula, 'diff':F.Formula([x-y]), 'sum':F.Formula([x+y]), 'both':F.Formula([x-y,x+y])}) yield assert_almost_equal, C['x'], np.array([1,0,0]) yield assert_almost_equal, C['diff'], np.array([1,-1,0]) 
yield assert_almost_equal, C['sum'], np.array([1,1,0]) yield assert_almost_equal, C['both'], np.array([[1,-1,0],[1,1,0]]) def test_formula_from_recarray(): D = np.rec.array([ (43, 51, 30, 39, 61, 92, 'blue'), (63, 64, 51, 54, 63, 73, 'blue'), (71, 70, 68, 69, 76, 86, 'red'), (61, 63, 45, 47, 54, 84, 'red'), (81, 78, 56, 66, 71, 83, 'blue'), (43, 55, 49, 44, 54, 49, 'blue'), (58, 67, 42, 56, 66, 68, 'green'), (71, 75, 50, 55, 70, 66, 'green'), (72, 82, 72, 67, 71, 83, 'blue'), (67, 61, 45, 47, 62, 80, 'red'), (64, 53, 53, 58, 58, 67, 'blue'), (67, 60, 47, 39, 59, 74, 'green'), (69, 62, 57, 42, 55, 63, 'blue'), (68, 83, 83, 45, 59, 77, 'red'), (77, 77, 54, 72, 79, 77, 'red'), (81, 90, 50, 72, 60, 54, 'blue'), (74, 85, 64, 69, 79, 79, 'green'), (65, 60, 65, 75, 55, 80, 'green'), (65, 70, 46, 57, 75, 85, 'red'), (50, 58, 68, 54, 64, 78, 'red'), (50, 40, 33, 34, 43, 64, 'blue'), (64, 61, 52, 62, 66, 80, 'blue'), (53, 66, 52, 50, 63, 80, 'red'), (40, 37, 42, 58, 50, 57, 'red'), (63, 54, 42, 48, 66, 75, 'blue'), (66, 77, 66, 63, 88, 76, 'blue'), (78, 75, 58, 74, 80, 78, 'red'), (48, 57, 44, 45, 51, 83, 'blue'), (85, 85, 71, 71, 77, 74, 'red'), (82, 82, 39, 59, 64, 78, 'blue')], dtype=[('y', 'i8'), ('x1', 'i8'), ('x2', 'i8'), ('x3', 'i8'), ('x4', 'i8'), ('x5', 'i8'), ('x6', '|S5')]) f = F.Formula.fromrec(D, drop='y') assert_equal(set([str(t) for t in f.terms]), set(['x1', 'x2', 'x3', 'x4', 'x5', 'x6_green', 'x6_blue', 'x6_red'])) assert_equal(set([str(t) for t in f.design_expr]), set(['x1', 'x2', 'x3', 'x4', 'x5', 'x6_green', 'x6_blue', 'x6_red'])) def test_random_effects(): subj = F.make_recarray([2,2,2,3,3], 's') subj_factor = F.Factor('s', [2,3]) c = F.RandomEffects(subj_factor.terms, sigma=np.array([[4,1],[1,6]])) C = c.cov(subj) assert_almost_equal(C, [[4,4,4,1,1], [4,4,4,1,1], [4,4,4,1,1], [1,1,1,6,6], [1,1,1,6,6]]) # Sympy 0.7.0 does not cancel 1.0 * A to A; however, the dot product in the # covariance calculation returns floats, which are them multiplied by the # terms to give term * 1.0, etc. 
We just insert the annoying floating point # here for the test, relying on sympy to do the same thing here as in the # dot product a = sympy.Symbol('a') * 1.0 b = sympy.Symbol('b') * 1.0 c = F.RandomEffects(subj_factor.terms, sigma=np.array([[a,0],[0,b]])) C = c.cov(subj) t = np.equal(C, [[a,a,a,0,0], [a,a,a,0,0], [a,a,a,0,0], [0,0,0,b,b], [0,0,0,b,b]]) assert_true(np.alltrue(t)) def test_design_expression(): t1 = F.Term("x") t2 = F.Term('y') f = t1.formula + t2.formula assert_true(str(f.design_expr) in ['[x, y]', '[y, x]']) def test_formula_property(): # Check that you can create a Formula with one term t1 = F.Term("x") f = t1.formula assert_equal(f.design_expr, [t1]) def test_mul(): f = F.Factor('t', [2,3]) f2 = F.Factor('t', [2,3,4]) t2 = f['t_2'] x = F.Term('x') yield assert_equal, t2, t2*t2 yield assert_equal, f, f*f yield assert_false, f == f2 yield assert_equal, set((t2*x).atoms()), set([t2,x]) def test_make_recarray(): m = F.make_recarray([[3,4],[4,6],[7,9]], 'wv', [np.float, np.int]) assert_equal(m.dtype.names, ('w', 'v')) m2 = F.make_recarray(m, 'xy') assert_equal(m2.dtype.names, ('x', 'y')) def test_str_formula(): t1 = F.Term('x') t2 = F.Term('y') f = F.Formula([t1, t2]) yield assert_equal, str(f), "Formula([x, y])" def test_design(): # Check that you get the design matrix we expect t1 = F.Term("x") t2 = F.Term('y') n = F.make_recarray([2,4,5], 'x') assert_almost_equal(t1.formula.design(n)['x'], n['x']) f = t1.formula + t2.formula n = F.make_recarray([(2,3),(4,5),(5,6)], 'xy') assert_almost_equal(f.design(n)['x'], n['x']) assert_almost_equal(f.design(n)['y'], n['y']) f = t1.formula + t2.formula + F.I + t1.formula * t2.formula assert_almost_equal(f.design(n)['x'], n['x']) assert_almost_equal(f.design(n)['y'], n['y']) assert_almost_equal(f.design(n)['1'], 1) assert_almost_equal(f.design(n)['x*y'], n['x']*n['y']) # drop x field, check that design raises error ny = np.recarray(n.shape, dtype=[('x', n.dtype['x'])]) ny['x'] = n['x'] assert_raises(ValueError, f.design, ny) n = np.array([(2,3,'a'),(4,5,'b'),(5,6,'a')], np.dtype([('x', np.float), ('y', np.float), ('f', 'S1')])) f = F.Factor('f', ['a','b']) ff = t1.formula * f + F.I assert_almost_equal(ff.design(n)['f_a*x'], n['x']*[1,0,1]) assert_almost_equal(ff.design(n)['f_b*x'], n['x']*[0,1,0]) assert_almost_equal(ff.design(n)['1'], 1) def test_design_inputs(): # Check we can send in fields of type 'S', 'U', 'O' for design regf = F.Formula(F.terms('x, y')) f = F.Factor('f', ['a', 'b']) ff = regf + f for field_type in ('S1', 'U1', 'O'): data = np.array([(2, 3, 'a'), (4, 5, 'b'), (5, 6, 'a')], dtype = [('x', np.float), ('y', np.float), ('f', field_type)]) assert_array_equal(ff.design(data, return_float=True), [[2, 3, 1, 0], [4, 5, 0, 1], [5, 6, 1, 0]]) def test_formula_inputs(): # Check we can send in fields of type 'S', 'U', 'O' for factor levels level_names = ['red', 'green', 'blue'] for field_type in ('S', 'U', 'O'): levels = np.array(level_names, dtype=field_type) f = F.Factor('myname', levels) assert_equal(f.levels, level_names) # Sending in byte objects levels = [asbytes(L) for L in level_names] f = F.Factor('myname', levels) assert_equal(f.levels, level_names) def test_alias(): x = F.Term('x') f = implemented_function('f', lambda x: 2*x) g = implemented_function('g', lambda x: np.sqrt(x)) ff = F.Formula([f(x), g(x)**2]) n = F.make_recarray([2,4,5], 'x') assert_almost_equal(ff.design(n)['f(x)'], n['x']*2) assert_almost_equal(ff.design(n)['g(x)**2'], n['x']) def test_factor_getterm(): fac = F.Factor('f', 'ab') yield assert_equal, 
fac['f_a'], fac.get_term('a') fac = F.Factor('f', [1,2]) yield assert_equal, fac['f_1'], fac.get_term(1) fac = F.Factor('f', [1,2]) yield assert_raises, ValueError, fac.get_term, '1' m = fac.main_effect yield assert_equal, set(m.terms), set([fac['f_1']-fac['f_2']]) def test_stratify(): fac = F.Factor('x', [2,3]) y = sympy.Symbol('y') f = sympy.Function('f') yield assert_raises, ValueError, fac.stratify, f(y) def test_nonlin1(): # Fit an exponential curve, with the exponent stratified by a factor # with a common intercept and multiplicative factor in front of the # exponential x = F.Term('x') fac = F.Factor('f', 'ab') f = F.Formula([sympy.exp(fac.stratify(x).mean)]) + F.I params = F.getparams(f.mean) assert_equal(set([str(p) for p in params]), set(['_x0', '_x1', '_b0', '_b1'])) test1 = set(['1', 'exp(_x0*f_a + _x1*f_b)', '_b0*f_a*exp(_x0*f_a + _x1*f_b)', '_b0*f_b*exp(_x0*f_a + _x1*f_b)']) test2 = set(['1', 'exp(_x0*f_a + _x1*f_b)', '_b1*f_a*exp(_x0*f_a + _x1*f_b)', '_b1*f_b*exp(_x0*f_a + _x1*f_b)']) assert_true(test1 or test2) n = F.make_recarray([(2,3,'a'),(4,5,'b'),(5,6,'a')], 'xyf', ['d','d','S1']) p = F.make_recarray([1,2,3,4], ['_x0', '_x1', '_b0', '_b1']) A = f.design(n, p) print A, A.dtype def test_intercept(): dz = F.make_recarray([2,3,4],'z') v = F.I.design(dz, return_float=False) assert_equal(v.dtype.names, ('intercept',)) def test_nonlin2(): dz = F.make_recarray([2,3,4],'z') z = F.Term('z') t = sympy.Symbol('th') p = F.make_recarray([3], ['tt']) f = F.Formula([sympy.exp(t*z)]) yield assert_raises, ValueError, f.design, dz, p def test_Rintercept(): x = F.Term('x') y = F.Term('x') xf = x.formula yf = y.formula newf = (xf+F.I)*(yf+F.I) assert_equal(set(newf.terms), set([x,y,x*y,sympy.Number(1)])) def test_return_float(): x = F.Term('x') f = F.Formula([x,x**2]) xx= F.make_recarray(np.linspace(0,10,11), 'x') dtype = f.design(xx).dtype yield assert_equal, set(dtype.names), set(['x', 'x**2']) dtype = f.design(xx, return_float=True).dtype yield assert_equal, dtype, np.float def test_subtract(): x, y, z = [F.Term(l) for l in 'xyz'] f1 = F.Formula([x,y]) f2 = F.Formula([x,y,z]) f3 = f2 - f1 yield assert_equal, set(f3.terms), set([z]) f4 = F.Formula([y,z]) f5 = f1 - f4 yield assert_equal, set(f5.terms), set([x]) def test_subs(): t1 = F.Term("x") t2 = F.Term('y') z = F.Term('z') f = F.Formula([t1, t2]) g = f.subs(t1, z) yield assert_equal, list(g.terms), [z, t2] def test_natural_spline(): xt=F.Term('x') ns=F.natural_spline(xt, knots=[2,6,9]) xx= F.make_recarray(np.linspace(0,10,101), 'x') dd=ns.design(xx, return_float=True) xx = xx['x'] yield assert_almost_equal, dd[:,0], xx yield assert_almost_equal, dd[:,1], xx**2 yield assert_almost_equal, dd[:,2], xx**3 yield assert_almost_equal, dd[:,3], (xx-2)**3*np.greater_equal(xx,2) yield assert_almost_equal, dd[:,4], (xx-6)**3*np.greater_equal(xx,6) yield assert_almost_equal, dd[:,5], (xx-9)**3*np.greater_equal(xx,9) ns=F.natural_spline(xt, knots=[2,9,6], intercept=True) xx= F.make_recarray(np.linspace(0,10,101), 'x') dd=ns.design(xx, return_float=True) xx = xx['x'] yield assert_almost_equal, dd[:,0], 1 yield assert_almost_equal, dd[:,1], xx yield assert_almost_equal, dd[:,2], xx**2 yield assert_almost_equal, dd[:,3], xx**3 yield assert_almost_equal, dd[:,4], (xx-2)**3*np.greater_equal(xx,2) yield assert_almost_equal, dd[:,5], (xx-9)**3*np.greater_equal(xx,9) yield assert_almost_equal, dd[:,6], (xx-6)**3*np.greater_equal(xx,6) def test_factor_term(): # Test that byte strings, unicode strings and objects convert correctly for nt in 'S3', 'U3', 'O': 
ndt = np.dtype(nt) for lt in 'S3', 'U3', 'O': ldt = np.dtype(lt) name = np.asscalar(np.array('foo', ndt)) level = np.asscalar(np.array('bar', ldt)) ft = F.FactorTerm(name, level) assert_equal(str(ft), 'foo_bar') nipy-0.3.0/nipy/algorithms/statistics/histogram.c000066400000000000000000005714011210344137400221630ustar00rootroot00000000000000/* Generated by Cython 0.17.4 on Sat Jan 12 17:27:30 2013 */ #define PY_SSIZE_T_CLEAN #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02040000 #error Cython requires Python 2.4+. #else #include /* For offsetof */ #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #endif #if PY_VERSION_HEX < 0x02050000 typedef int Py_ssize_t; #define PY_SSIZE_T_MAX INT_MAX #define PY_SSIZE_T_MIN INT_MIN #define PY_FORMAT_SIZE_T "" #define CYTHON_FORMAT_SSIZE_T "" #define PyInt_FromSsize_t(z) PyInt_FromLong(z) #define PyInt_AsSsize_t(o) __Pyx_PyInt_AsInt(o) #define PyNumber_Index(o) ((PyNumber_Check(o) && !PyFloat_Check(o)) ? PyNumber_Int(o) : \ (PyErr_Format(PyExc_TypeError, \ "expected index value, got %.200s", Py_TYPE(o)->tp_name), \ (PyObject*)0)) #define __Pyx_PyIndex_Check(o) (PyNumber_Check(o) && !PyFloat_Check(o) && \ !PyComplex_Check(o)) #define PyIndex_Check __Pyx_PyIndex_Check #define PyErr_WarnEx(category, message, stacklevel) PyErr_Warn(category, message) #define __PYX_BUILD_PY_SSIZE_T "i" #else #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #define __Pyx_PyIndex_Check PyIndex_Check #endif #if PY_VERSION_HEX < 0x02060000 #define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt) #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type) #define Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size) #define PyVarObject_HEAD_INIT(type, size) \ PyObject_HEAD_INIT(type) size, #define PyType_Modified(t) typedef struct { void *buf; PyObject *obj; Py_ssize_t len; Py_ssize_t itemsize; int readonly; int ndim; char *format; Py_ssize_t *shape; Py_ssize_t *strides; Py_ssize_t *suboffsets; void *internal; } Py_buffer; #define PyBUF_SIMPLE 0 #define PyBUF_WRITABLE 0x0001 #define PyBUF_FORMAT 0x0004 #define PyBUF_ND 0x0008 #define PyBUF_STRIDES (0x0010 | PyBUF_ND) #define PyBUF_C_CONTIGUOUS (0x0020 | PyBUF_STRIDES) #define PyBUF_F_CONTIGUOUS (0x0040 | PyBUF_STRIDES) #define PyBUF_ANY_CONTIGUOUS (0x0080 | PyBUF_STRIDES) #define PyBUF_INDIRECT (0x0100 | PyBUF_STRIDES) #define PyBUF_RECORDS (PyBUF_STRIDES | PyBUF_FORMAT | PyBUF_WRITABLE) #define PyBUF_FULL (PyBUF_INDIRECT | PyBUF_FORMAT | PyBUF_WRITABLE) typedef int (*getbufferproc)(PyObject *, Py_buffer *, int); typedef void (*releasebufferproc)(PyObject *, Py_buffer *); #endif #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \ PyCode_New(a, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #else #define 
__Pyx_BUILTIN_MODULE_NAME "builtins" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #endif #if PY_MAJOR_VERSION < 3 && PY_MINOR_VERSION < 6 #define PyUnicode_FromString(s) PyUnicode_Decode(s, strlen(s), "UTF-8", "strict") #endif #if PY_MAJOR_VERSION >= 3 #define Py_TPFLAGS_CHECKTYPES 0 #define Py_TPFLAGS_HAVE_INDEX 0 #endif #if (PY_VERSION_HEX < 0x02060000) || (PY_MAJOR_VERSION >= 3) #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ? \ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #else #define CYTHON_PEP393_ENABLED 0 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_READ(k, d, i) ((k=k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #endif #if PY_VERSION_HEX < 0x02060000 #define PyBytesObject PyStringObject #define PyBytes_Type PyString_Type #define PyBytes_Check PyString_Check #define PyBytes_CheckExact PyString_CheckExact #define PyBytes_FromString PyString_FromString #define PyBytes_FromStringAndSize PyString_FromStringAndSize #define PyBytes_FromFormat PyString_FromFormat #define PyBytes_DecodeEscape PyString_DecodeEscape #define PyBytes_AsString PyString_AsString #define PyBytes_AsStringAndSize PyString_AsStringAndSize #define PyBytes_Size PyString_Size #define PyBytes_AS_STRING PyString_AS_STRING #define PyBytes_GET_SIZE PyString_GET_SIZE #define PyBytes_Repr PyString_Repr #define PyBytes_Concat PyString_Concat #define PyBytes_ConcatAndDel PyString_ConcatAndDel #endif #if PY_VERSION_HEX < 0x02060000 #define PySet_Check(obj) PyObject_TypeCheck(obj, &PySet_Type) #define PyFrozenSet_Check(obj) PyObject_TypeCheck(obj, &PyFrozenSet_Type) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_VERSION_HEX < 0x03020000 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if 
(PY_MAJOR_VERSION < 3) || (PY_VERSION_HEX >= 0x03010300) #define __Pyx_PySequence_GetSlice(obj, a, b) PySequence_GetSlice(obj, a, b) #define __Pyx_PySequence_SetSlice(obj, a, b, value) PySequence_SetSlice(obj, a, b, value) #define __Pyx_PySequence_DelSlice(obj, a, b) PySequence_DelSlice(obj, a, b) #else #define __Pyx_PySequence_GetSlice(obj, a, b) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), (PyObject*)0) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_GetSlice(obj, a, b)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object is unsliceable", (obj)->ob_type->tp_name), (PyObject*)0))) #define __Pyx_PySequence_SetSlice(obj, a, b, value) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_SetSlice(obj, a, b, value)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice assignment", (obj)->ob_type->tp_name), -1))) #define __Pyx_PySequence_DelSlice(obj, a, b) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_DelSlice(obj, a, b)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice deletion", (obj)->ob_type->tp_name), -1))) #endif #if PY_MAJOR_VERSION >= 3 #define PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func)) #endif #if PY_VERSION_HEX < 0x02050000 #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),((char *)(n))) #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),((char *)(n)),(a)) #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),((char *)(n))) #else #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),(n)) #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),(n),(a)) #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),(n)) #endif #if PY_VERSION_HEX < 0x02050000 #define __Pyx_NAMESTR(n) ((char *)(n)) #define __Pyx_DOCSTR(n) ((char *)(n)) #else #define __Pyx_NAMESTR(n) (n) #define __Pyx_DOCSTR(n) (n) #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include #define __PYX_HAVE__nipy__algorithms__statistics__histogram #define __PYX_HAVE_API__nipy__algorithms__statistics__histogram #include "stdio.h" #include "stdlib.h" #include "numpy/arrayobject.h" #include "numpy/ufuncobject.h" #ifdef _OPENMP #include #endif /* _OPENMP */ #ifdef PYREX_WITHOUT_ASSERTIONS #define CYTHON_WITHOUT_ASSERTIONS #endif /* inline attribute */ #ifndef CYTHON_INLINE #if defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif /* unused attribute */ #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif 
defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif typedef struct {PyObject **p; char *s; const long n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; /*proto*/ /* Type Conversion Predeclarations */ #define __Pyx_PyBytes_FromUString(s) PyBytes_FromString((char*)s) #define __Pyx_PyBytes_AsUString(s) ((unsigned char*) PyBytes_AsString(s)) #define __Pyx_Owned_Py_None(b) (Py_INCREF(Py_None), Py_None) #define __Pyx_PyBool_FromLong(b) ((b) ? (Py_INCREF(Py_True), Py_True) : (Py_INCREF(Py_False), Py_False)) static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x); static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject*); #if CYTHON_COMPILING_IN_CPYTHON #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #ifdef __GNUC__ /* Test for GCC > 2.95 */ #if __GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* __GNUC__ > 2 ... */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ > 2 ... */ #else /* __GNUC__ */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static PyObject *__pyx_m; static PyObject *__pyx_b; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= __FILE__; static const char *__pyx_filename; #if !defined(CYTHON_CCOMPLEX) #if defined(__cplusplus) #define CYTHON_CCOMPLEX 1 #elif defined(_Complex_I) #define CYTHON_CCOMPLEX 1 #else #define CYTHON_CCOMPLEX 0 #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus #include #else #include #endif #endif #if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__) #undef _Complex_I #define _Complex_I 1.0fj #endif static const char *__pyx_f[] = { "histogram.pyx", "numpy.pxd", "type.pxd", }; /* "numpy.pxd":723 * # in Cython to enable them only on the right systems. 
* * ctypedef npy_int8 int8_t # <<<<<<<<<<<<<< * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t */ typedef npy_int8 __pyx_t_5numpy_int8_t; /* "numpy.pxd":724 * * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t # <<<<<<<<<<<<<< * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t */ typedef npy_int16 __pyx_t_5numpy_int16_t; /* "numpy.pxd":725 * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t # <<<<<<<<<<<<<< * ctypedef npy_int64 int64_t * #ctypedef npy_int96 int96_t */ typedef npy_int32 __pyx_t_5numpy_int32_t; /* "numpy.pxd":726 * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t # <<<<<<<<<<<<<< * #ctypedef npy_int96 int96_t * #ctypedef npy_int128 int128_t */ typedef npy_int64 __pyx_t_5numpy_int64_t; /* "numpy.pxd":730 * #ctypedef npy_int128 int128_t * * ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<< * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t */ typedef npy_uint8 __pyx_t_5numpy_uint8_t; /* "numpy.pxd":731 * * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<< * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t */ typedef npy_uint16 __pyx_t_5numpy_uint16_t; /* "numpy.pxd":732 * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<< * ctypedef npy_uint64 uint64_t * #ctypedef npy_uint96 uint96_t */ typedef npy_uint32 __pyx_t_5numpy_uint32_t; /* "numpy.pxd":733 * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<< * #ctypedef npy_uint96 uint96_t * #ctypedef npy_uint128 uint128_t */ typedef npy_uint64 __pyx_t_5numpy_uint64_t; /* "numpy.pxd":737 * #ctypedef npy_uint128 uint128_t * * ctypedef npy_float32 float32_t # <<<<<<<<<<<<<< * ctypedef npy_float64 float64_t * #ctypedef npy_float80 float80_t */ typedef npy_float32 __pyx_t_5numpy_float32_t; /* "numpy.pxd":738 * * ctypedef npy_float32 float32_t * ctypedef npy_float64 float64_t # <<<<<<<<<<<<<< * #ctypedef npy_float80 float80_t * #ctypedef npy_float128 float128_t */ typedef npy_float64 __pyx_t_5numpy_float64_t; /* "numpy.pxd":747 * # The int types are mapped a bit surprising -- * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t # <<<<<<<<<<<<<< * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t */ typedef npy_long __pyx_t_5numpy_int_t; /* "numpy.pxd":748 * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t * ctypedef npy_longlong long_t # <<<<<<<<<<<<<< * ctypedef npy_longlong longlong_t * */ typedef npy_longlong __pyx_t_5numpy_long_t; /* "numpy.pxd":749 * ctypedef npy_long int_t * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<< * * ctypedef npy_ulong uint_t */ typedef npy_longlong __pyx_t_5numpy_longlong_t; /* "numpy.pxd":751 * ctypedef npy_longlong longlong_t * * ctypedef npy_ulong uint_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t */ typedef npy_ulong __pyx_t_5numpy_uint_t; /* "numpy.pxd":752 * * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulonglong_t * */ typedef npy_ulonglong __pyx_t_5numpy_ulong_t; /* "numpy.pxd":753 * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<< * * ctypedef npy_intp intp_t */ typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t; /* "numpy.pxd":755 * ctypedef npy_ulonglong ulonglong_t * * ctypedef npy_intp intp_t # 
<<<<<<<<<<<<<< * ctypedef npy_uintp uintp_t * */ typedef npy_intp __pyx_t_5numpy_intp_t; /* "numpy.pxd":756 * * ctypedef npy_intp intp_t * ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<< * * ctypedef npy_double float_t */ typedef npy_uintp __pyx_t_5numpy_uintp_t; /* "numpy.pxd":758 * ctypedef npy_uintp uintp_t * * ctypedef npy_double float_t # <<<<<<<<<<<<<< * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t */ typedef npy_double __pyx_t_5numpy_float_t; /* "numpy.pxd":759 * * ctypedef npy_double float_t * ctypedef npy_double double_t # <<<<<<<<<<<<<< * ctypedef npy_longdouble longdouble_t * */ typedef npy_double __pyx_t_5numpy_double_t; /* "numpy.pxd":760 * ctypedef npy_double float_t * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cfloat cfloat_t */ typedef npy_longdouble __pyx_t_5numpy_longdouble_t; #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< float > __pyx_t_float_complex; #else typedef float _Complex __pyx_t_float_complex; #endif #else typedef struct { float real, imag; } __pyx_t_float_complex; #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< double > __pyx_t_double_complex; #else typedef double _Complex __pyx_t_double_complex; #endif #else typedef struct { double real, imag; } __pyx_t_double_complex; #endif /*--- Type declarations ---*/ /* "numpy.pxd":762 * ctypedef npy_longdouble longdouble_t * * ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<< * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t */ typedef npy_cfloat __pyx_t_5numpy_cfloat_t; /* "numpy.pxd":763 * * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<< * ctypedef npy_clongdouble clongdouble_t * */ typedef npy_cdouble __pyx_t_5numpy_cdouble_t; /* "numpy.pxd":764 * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cdouble complex_t */ typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t; /* "numpy.pxd":766 * ctypedef npy_clongdouble clongdouble_t * * ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew1(a): */ typedef npy_cdouble __pyx_t_5numpy_complex_t; #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); /*proto*/ #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define __Pyx_RefNannySetupContext(name, acquire_gil) \ if (acquire_gil) { \ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \ PyGILState_Release(__pyx_gilstate_save); \ } else { \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil) \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext() \ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) 
__Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif /* CYTHON_REFNANNY */ #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name); /*proto*/ static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb); /*proto*/ static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb); /*proto*/ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /*proto*/ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /*proto*/ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); static CYTHON_INLINE int __Pyx_IterFinish(void); /*proto*/ static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected); /*proto*/ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level); /*proto*/ #if CYTHON_CCOMPLEX #ifdef __cplusplus #define __Pyx_CREAL(z) ((z).real()) #define __Pyx_CIMAG(z) ((z).imag()) #else #define __Pyx_CREAL(z) (__real__(z)) #define __Pyx_CIMAG(z) (__imag__(z)) #endif #else #define __Pyx_CREAL(z) ((z).real) #define __Pyx_CIMAG(z) ((z).imag) #endif #if defined(_WIN32) && defined(__cplusplus) && CYTHON_CCOMPLEX #define __Pyx_SET_CREAL(z,x) ((z).real(x)) #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) #else #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) #endif static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float); #if CYTHON_CCOMPLEX #define __Pyx_c_eqf(a, b) ((a)==(b)) #define __Pyx_c_sumf(a, b) ((a)+(b)) #define __Pyx_c_difff(a, b) ((a)-(b)) #define __Pyx_c_prodf(a, b) ((a)*(b)) #define __Pyx_c_quotf(a, b) ((a)/(b)) #define __Pyx_c_negf(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zerof(z) ((z)==(float)0) #define __Pyx_c_conjf(z) (::std::conj(z)) #if 1 #define __Pyx_c_absf(z) (::std::abs(z)) #define __Pyx_c_powf(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zerof(z) ((z)==0) #define __Pyx_c_conjf(z) (conjf(z)) #if 1 #define __Pyx_c_absf(z) (cabsf(z)) #define __Pyx_c_powf(a, b) (cpowf(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex, __pyx_t_float_complex); 
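/* Note: illustrative sketch only, not emitted by Cython. When CYTHON_CCOMPLEX
 * is 0, the float-complex helpers declared here operate on the plain struct
 * fallback defined above (typedef struct { float real, imag; }
 * __pyx_t_float_complex), and their definitions, supplied later in this
 * generated file, are typically component-wise, e.g. a sum along the lines of:
 *
 *   static CYTHON_INLINE __pyx_t_float_complex
 *   __Pyx_c_sumf(__pyx_t_float_complex a, __pyx_t_float_complex b) {
 *       __pyx_t_float_complex z;
 *       z.real = a.real + b.real;
 *       z.imag = a.imag + b.imag;
 *       return z;
 *   }
 */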
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex); static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex); #if 1 static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex, __pyx_t_float_complex); #endif #endif static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double); #if CYTHON_CCOMPLEX #define __Pyx_c_eq(a, b) ((a)==(b)) #define __Pyx_c_sum(a, b) ((a)+(b)) #define __Pyx_c_diff(a, b) ((a)-(b)) #define __Pyx_c_prod(a, b) ((a)*(b)) #define __Pyx_c_quot(a, b) ((a)/(b)) #define __Pyx_c_neg(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zero(z) ((z)==(double)0) #define __Pyx_c_conj(z) (::std::conj(z)) #if 1 #define __Pyx_c_abs(z) (::std::abs(z)) #define __Pyx_c_pow(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zero(z) ((z)==0) #define __Pyx_c_conj(z) (conj(z)) #if 1 #define __Pyx_c_abs(z) (cabs(z)) #define __Pyx_c_pow(a, b) (cpow(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex); static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex); #if 1 static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex, __pyx_t_double_complex); #endif #endif static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject *); static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject *); static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject *); static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject *); static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject *); static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject *); static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject *); static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject *); static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject *); static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject *); static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject *); static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject *); static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject *); static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject *); static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject *); static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject *); static int __Pyx_check_binary_version(void); #if 
!defined(__Pyx_PyIdentifier_FromString) #if PY_MAJOR_VERSION < 3 #define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s) #else #define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s) #endif #endif static PyObject *__Pyx_ImportModule(const char *name); /*proto*/ static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict); /*proto*/ typedef struct { int code_line; PyCodeObject* code_object; } __Pyx_CodeObjectCacheEntry; struct __Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); /*proto*/ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/ /* Module declarations from 'cpython.buffer' */ /* Module declarations from 'cpython.ref' */ /* Module declarations from 'libc.stdio' */ /* Module declarations from 'cpython.object' */ /* Module declarations from '__builtin__' */ /* Module declarations from 'cpython.type' */ static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0; /* Module declarations from 'libc.stdlib' */ /* Module declarations from 'numpy' */ /* Module declarations from 'numpy' */ static PyTypeObject *__pyx_ptype_5numpy_dtype = 0; static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0; static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0; static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0; static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0; static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/ /* Module declarations from 'nipy.algorithms.statistics.histogram' */ #define __Pyx_MODULE_NAME "nipy.algorithms.statistics.histogram" int __pyx_module_is_main_nipy__algorithms__statistics__histogram = 0; /* Implementation of 'nipy.algorithms.statistics.histogram' */ static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_range; static PyObject *__pyx_builtin_RuntimeError; static PyObject *__pyx_pf_4nipy_10algorithms_10statistics_9histogram_histogram(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_x); /* proto */ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */ static char __pyx_k_1[] = "input array should have uintp data type"; static char __pyx_k_3[] = "ndarray is not C contiguous"; static char __pyx_k_5[] = "ndarray is not Fortran contiguous"; static char __pyx_k_7[] = "Non-native byte order not supported"; static char __pyx_k_9[] = "unknown dtype code in numpy.pxd (%d)"; static char __pyx_k_10[] = "Format string allocated too short, see comment in numpy.pxd"; static char __pyx_k_13[] = "Format string allocated too short."; static char __pyx_k_15[] = "\nAuthor: Alexis Roche, 2012.\n"; static char __pyx_k_18[] = "/Users/mb312/dev_trees/nipy/nipy/algorithms/statistics/histogram.pyx"; static char __pyx_k_19[] = "nipy.algorithms.statistics.histogram"; static char __pyx_k__B[] = "B"; static char __pyx_k__H[] = "H"; static char __pyx_k__I[] = "I"; static char __pyx_k__L[] = "L"; static char 
__pyx_k__O[] = "O"; static char __pyx_k__Q[] = "Q"; static char __pyx_k__b[] = "b"; static char __pyx_k__d[] = "d"; static char __pyx_k__f[] = "f"; static char __pyx_k__g[] = "g"; static char __pyx_k__h[] = "h"; static char __pyx_k__i[] = "i"; static char __pyx_k__l[] = "l"; static char __pyx_k__q[] = "q"; static char __pyx_k__x[] = "x"; static char __pyx_k__Zd[] = "Zd"; static char __pyx_k__Zf[] = "Zf"; static char __pyx_k__Zg[] = "Zg"; static char __pyx_k__hv[] = "hv"; static char __pyx_k__it[] = "it"; static char __pyx_k__np[] = "np"; static char __pyx_k__xv[] = "xv"; static char __pyx_k__max[] = "max"; static char __pyx_k__flat[] = "flat"; static char __pyx_k__dtype[] = "dtype"; static char __pyx_k__nbins[] = "nbins"; static char __pyx_k__numpy[] = "numpy"; static char __pyx_k__range[] = "range"; static char __pyx_k__uintp[] = "uintp"; static char __pyx_k__zeros[] = "zeros"; static char __pyx_k____main__[] = "__main__"; static char __pyx_k____test__[] = "__test__"; static char __pyx_k__histogram[] = "histogram"; static char __pyx_k__ValueError[] = "ValueError"; static char __pyx_k__RuntimeError[] = "RuntimeError"; static PyObject *__pyx_kp_s_1; static PyObject *__pyx_kp_u_10; static PyObject *__pyx_kp_u_13; static PyObject *__pyx_kp_s_18; static PyObject *__pyx_n_s_19; static PyObject *__pyx_kp_u_3; static PyObject *__pyx_kp_u_5; static PyObject *__pyx_kp_u_7; static PyObject *__pyx_kp_u_9; static PyObject *__pyx_n_s__RuntimeError; static PyObject *__pyx_n_s__ValueError; static PyObject *__pyx_n_s____main__; static PyObject *__pyx_n_s____test__; static PyObject *__pyx_n_s__dtype; static PyObject *__pyx_n_s__flat; static PyObject *__pyx_n_s__h; static PyObject *__pyx_n_s__histogram; static PyObject *__pyx_n_s__hv; static PyObject *__pyx_n_s__it; static PyObject *__pyx_n_s__max; static PyObject *__pyx_n_s__nbins; static PyObject *__pyx_n_s__np; static PyObject *__pyx_n_s__numpy; static PyObject *__pyx_n_s__range; static PyObject *__pyx_n_s__uintp; static PyObject *__pyx_n_s__x; static PyObject *__pyx_n_s__xv; static PyObject *__pyx_n_s__zeros; static PyObject *__pyx_int_15; static PyObject *__pyx_k_tuple_2; static PyObject *__pyx_k_tuple_4; static PyObject *__pyx_k_tuple_6; static PyObject *__pyx_k_tuple_8; static PyObject *__pyx_k_tuple_11; static PyObject *__pyx_k_tuple_12; static PyObject *__pyx_k_tuple_14; static PyObject *__pyx_k_tuple_16; static PyObject *__pyx_k_codeobj_17; /* Python wrapper */ static PyObject *__pyx_pw_4nipy_10algorithms_10statistics_9histogram_1histogram(PyObject *__pyx_self, PyObject *__pyx_v_x); /*proto*/ static char __pyx_doc_4nipy_10algorithms_10statistics_9histogram_histogram[] = "\n Fast histogram computation assuming input array is of uintp data\n type.\n\n Parameters\n ----------\n x: array-like\n Assumed with uintp dtype\n\n Output\n ------\n h: 1d array\n Histogram\n "; static PyMethodDef __pyx_mdef_4nipy_10algorithms_10statistics_9histogram_1histogram = {__Pyx_NAMESTR("histogram"), (PyCFunction)__pyx_pw_4nipy_10algorithms_10statistics_9histogram_1histogram, METH_O, __Pyx_DOCSTR(__pyx_doc_4nipy_10algorithms_10statistics_9histogram_histogram)}; static PyObject *__pyx_pw_4nipy_10algorithms_10statistics_9histogram_1histogram(PyObject *__pyx_self, PyObject *__pyx_v_x) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("histogram (wrapper)", 0); __pyx_r = __pyx_pf_4nipy_10algorithms_10statistics_9histogram_histogram(__pyx_self, ((PyObject *)__pyx_v_x)); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* 
"nipy/algorithms/statistics/histogram.pyx":11 * np.import_array() * * def histogram(x): # <<<<<<<<<<<<<< * """ * Fast histogram computation assuming input array is of uintp data */ static PyObject *__pyx_pf_4nipy_10algorithms_10statistics_9histogram_histogram(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_x) { npy_uintp __pyx_v_xv; npy_uintp __pyx_v_nbins; PyArrayIterObject *__pyx_v_it = 0; PyArrayObject *__pyx_v_h = 0; npy_uintp *__pyx_v_hv; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_t_3; int __pyx_t_4; npy_uintp __pyx_t_5; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; long __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("histogram", 0); /* "nipy/algorithms/statistics/histogram.pyx":26 * Histogram * """ * if not x.dtype=='uintp': # <<<<<<<<<<<<<< * raise ValueError('input array should have uintp data type') * */ __pyx_t_1 = PyObject_GetAttr(__pyx_v_x, __pyx_n_s__dtype); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 26; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_RichCompare(__pyx_t_1, ((PyObject *)__pyx_n_s__uintp), Py_EQ); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 26; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_3 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 26; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_4 = (!__pyx_t_3); if (__pyx_t_4) { /* "nipy/algorithms/statistics/histogram.pyx":27 * """ * if not x.dtype=='uintp': * raise ValueError('input array should have uintp data type') # <<<<<<<<<<<<<< * * cdef np.npy_uintp xv */ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_2), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 27; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; {__pyx_filename = __pyx_f[0]; __pyx_lineno = 27; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L3; } __pyx_L3:; /* "nipy/algorithms/statistics/histogram.pyx":30 * * cdef np.npy_uintp xv * cdef np.npy_uintp nbins = x.max() + 1 # <<<<<<<<<<<<<< * cdef np.flatiter it = x.flat * cdef np.ndarray h = np.zeros(nbins, dtype='uintp') */ __pyx_t_2 = PyObject_GetAttr(__pyx_v_x, __pyx_n_s__max); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_5 = __Pyx_PyInt_AsSize_t(__pyx_t_1); if (unlikely((__pyx_t_5 == (npy_uintp)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_nbins = (((npy_uintp)__pyx_t_5) + 1); /* "nipy/algorithms/statistics/histogram.pyx":31 * cdef np.npy_uintp xv * cdef np.npy_uintp nbins = x.max() + 1 * cdef np.flatiter it = x.flat # <<<<<<<<<<<<<< * cdef np.ndarray h = np.zeros(nbins, dtype='uintp') * cdef 
np.npy_uintp* hv */ __pyx_t_1 = PyObject_GetAttr(__pyx_v_x, __pyx_n_s__flat); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 31; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_flatiter))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 31; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_it = ((PyArrayIterObject *)__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/statistics/histogram.pyx":32 * cdef np.npy_uintp nbins = x.max() + 1 * cdef np.flatiter it = x.flat * cdef np.ndarray h = np.zeros(nbins, dtype='uintp') # <<<<<<<<<<<<<< * cdef np.npy_uintp* hv * */ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__zeros); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyInt_FromSize_t(__pyx_v_nbins); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_1)); if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__dtype), ((PyObject *)__pyx_n_s__uintp)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_7 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_6), ((PyObject *)__pyx_t_1)); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_6)); __pyx_t_6 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; if (!(likely(((__pyx_t_7) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_7, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_h = ((PyArrayObject *)__pyx_t_7); __pyx_t_7 = 0; /* "nipy/algorithms/statistics/histogram.pyx":35 * cdef np.npy_uintp* hv * * while np.PyArray_ITER_NOTDONE(it): # <<<<<<<<<<<<<< * xv = (np.PyArray_ITER_DATA(it))[0] * hv = np.PyArray_DATA(h) + xv */ while (1) { __pyx_t_4 = PyArray_ITER_NOTDONE(__pyx_v_it); if (!__pyx_t_4) break; /* "nipy/algorithms/statistics/histogram.pyx":36 * * while np.PyArray_ITER_NOTDONE(it): * xv = (np.PyArray_ITER_DATA(it))[0] # <<<<<<<<<<<<<< * hv = np.PyArray_DATA(h) + xv * hv[0] += 1 */ __pyx_v_xv = (((npy_uintp *)PyArray_ITER_DATA(__pyx_v_it))[0]); /* "nipy/algorithms/statistics/histogram.pyx":37 * while np.PyArray_ITER_NOTDONE(it): * xv = (np.PyArray_ITER_DATA(it))[0] * hv = np.PyArray_DATA(h) + xv # <<<<<<<<<<<<<< * hv[0] += 1 * np.PyArray_ITER_NEXT(it) */ __pyx_v_hv = (((npy_uintp *)PyArray_DATA(__pyx_v_h)) + __pyx_v_xv); /* "nipy/algorithms/statistics/histogram.pyx":38 * xv = (np.PyArray_ITER_DATA(it))[0] * hv = 
np.PyArray_DATA(h) + xv * hv[0] += 1 # <<<<<<<<<<<<<< * np.PyArray_ITER_NEXT(it) * */ __pyx_t_8 = 0; (__pyx_v_hv[__pyx_t_8]) = ((__pyx_v_hv[__pyx_t_8]) + 1); /* "nipy/algorithms/statistics/histogram.pyx":39 * hv = np.PyArray_DATA(h) + xv * hv[0] += 1 * np.PyArray_ITER_NEXT(it) # <<<<<<<<<<<<<< * * return h */ PyArray_ITER_NEXT(__pyx_v_it); } /* "nipy/algorithms/statistics/histogram.pyx":41 * np.PyArray_ITER_NEXT(it) * * return h # <<<<<<<<<<<<<< */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_h)); __pyx_r = ((PyObject *)__pyx_v_h); goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_AddTraceback("nipy.algorithms.statistics.histogram.histogram", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_it); __Pyx_XDECREF((PyObject *)__pyx_v_h); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_pf_5numpy_7ndarray___getbuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":194 * # experimental exception made for __getbuffer__ and __releasebuffer__ * # -- the details of this may change. * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< * # This implementation of getbuffer is geared towards Cython * # requirements, and does not yet fullfill the PEP. 
*/ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_v_copy_shape; int __pyx_v_i; int __pyx_v_ndim; int __pyx_v_endian_detector; int __pyx_v_little_endian; int __pyx_v_t; char *__pyx_v_f; PyArray_Descr *__pyx_v_descr = 0; int __pyx_v_offset; int __pyx_v_hasfields; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; char *__pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getbuffer__", 0); if (__pyx_v_info != NULL) { __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); } /* "numpy.pxd":200 * # of flags * * if info == NULL: return # <<<<<<<<<<<<<< * * cdef int copy_shape, i, ndim */ __pyx_t_1 = (__pyx_v_info == NULL); if (__pyx_t_1) { __pyx_r = 0; goto __pyx_L0; goto __pyx_L3; } __pyx_L3:; /* "numpy.pxd":203 * * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((&endian_detector)[0] != 0) * */ __pyx_v_endian_detector = 1; /* "numpy.pxd":204 * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * * ndim = PyArray_NDIM(self) */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "numpy.pxd":206 * cdef bint little_endian = ((&endian_detector)[0] != 0) * * ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<< * * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_v_ndim = PyArray_NDIM(__pyx_v_self); /* "numpy.pxd":208 * ndim = PyArray_NDIM(self) * * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * copy_shape = 1 * else: */ __pyx_t_1 = ((sizeof(npy_intp)) != (sizeof(Py_ssize_t))); if (__pyx_t_1) { /* "numpy.pxd":209 * * if sizeof(npy_intp) != sizeof(Py_ssize_t): * copy_shape = 1 # <<<<<<<<<<<<<< * else: * copy_shape = 0 */ __pyx_v_copy_shape = 1; goto __pyx_L4; } /*else*/ { /* "numpy.pxd":211 * copy_shape = 1 * else: * copy_shape = 0 # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) */ __pyx_v_copy_shape = 0; } __pyx_L4:; /* "numpy.pxd":213 * copy_shape = 0 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS); if (__pyx_t_1) { /* "numpy.pxd":214 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not C contiguous") * */ __pyx_t_2 = (!PyArray_CHKFLAGS(__pyx_v_self, NPY_C_CONTIGUOUS)); __pyx_t_3 = __pyx_t_2; } else { __pyx_t_3 = __pyx_t_1; } if (__pyx_t_3) { /* "numpy.pxd":215 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_4), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; 
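/* Note: illustrative comment only, not emitted by Cython. The branch above
 * mirrors the contiguity check in numpy.pxd's __getbuffer__: when a buffer
 * consumer requests PyBUF_C_CONTIGUOUS but PyArray_CHKFLAGS(self,
 * NPY_C_CONTIGUOUS) is false, ValueError(u"ndarray is not C contiguous") is
 * raised and the statement that follows jumps to the error label. A consumer
 * reaches this path roughly as in this sketch (assuming an ordinary C-API
 * caller named obj, hypothetical code):
 *
 *   Py_buffer view;
 *   if (PyObject_GetBuffer(obj, &view, PyBUF_C_CONTIGUOUS) < 0)
 *       return NULL;   (the ValueError raised above propagates to the caller)
 *   ... use view.buf, view.shape, view.strides ...
 *   PyBuffer_Release(&view);
 */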
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L5; } __pyx_L5:; /* "numpy.pxd":217 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ __pyx_t_3 = ((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS); if (__pyx_t_3) { /* "numpy.pxd":218 * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not Fortran contiguous") * */ __pyx_t_1 = (!PyArray_CHKFLAGS(__pyx_v_self, NPY_F_CONTIGUOUS)); __pyx_t_2 = __pyx_t_1; } else { __pyx_t_2 = __pyx_t_3; } if (__pyx_t_2) { /* "numpy.pxd":219 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_6), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L6; } __pyx_L6:; /* "numpy.pxd":221 * raise ValueError(u"ndarray is not Fortran contiguous") * * info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<< * info.ndim = ndim * if copy_shape: */ __pyx_v_info->buf = PyArray_DATA(__pyx_v_self); /* "numpy.pxd":222 * * info.buf = PyArray_DATA(self) * info.ndim = ndim # <<<<<<<<<<<<<< * if copy_shape: * # Allocate new buffer for strides and shape info. */ __pyx_v_info->ndim = __pyx_v_ndim; /* "numpy.pxd":223 * info.buf = PyArray_DATA(self) * info.ndim = ndim * if copy_shape: # <<<<<<<<<<<<<< * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. */ if (__pyx_v_copy_shape) { /* "numpy.pxd":226 * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. * info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) # <<<<<<<<<<<<<< * info.shape = info.strides + ndim * for i in range(ndim): */ __pyx_v_info->strides = ((Py_ssize_t *)malloc((((sizeof(Py_ssize_t)) * ((size_t)__pyx_v_ndim)) * 2))); /* "numpy.pxd":227 * # This is allocated as one block, strides first. 
* info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) * info.shape = info.strides + ndim # <<<<<<<<<<<<<< * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] */ __pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim); /* "numpy.pxd":228 * info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) * info.shape = info.strides + ndim * for i in range(ndim): # <<<<<<<<<<<<<< * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] */ __pyx_t_5 = __pyx_v_ndim; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "numpy.pxd":229 * info.shape = info.strides + ndim * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<< * info.shape[i] = PyArray_DIMS(self)[i] * else: */ (__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]); /* "numpy.pxd":230 * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<< * else: * info.strides = PyArray_STRIDES(self) */ (__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]); } goto __pyx_L7; } /*else*/ { /* "numpy.pxd":232 * info.shape[i] = PyArray_DIMS(self)[i] * else: * info.strides = PyArray_STRIDES(self) # <<<<<<<<<<<<<< * info.shape = PyArray_DIMS(self) * info.suboffsets = NULL */ __pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self)); /* "numpy.pxd":233 * else: * info.strides = PyArray_STRIDES(self) * info.shape = PyArray_DIMS(self) # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) */ __pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(__pyx_v_self)); } __pyx_L7:; /* "numpy.pxd":234 * info.strides = PyArray_STRIDES(self) * info.shape = PyArray_DIMS(self) * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) */ __pyx_v_info->suboffsets = NULL; /* "numpy.pxd":235 * info.shape = PyArray_DIMS(self) * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<< * info.readonly = not PyArray_ISWRITEABLE(self) * */ __pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self); /* "numpy.pxd":236 * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<< * * cdef int t */ __pyx_v_info->readonly = (!PyArray_ISWRITEABLE(__pyx_v_self)); /* "numpy.pxd":239 * * cdef int t * cdef char* f = NULL # <<<<<<<<<<<<<< * cdef dtype descr = self.descr * cdef list stack */ __pyx_v_f = NULL; /* "numpy.pxd":240 * cdef int t * cdef char* f = NULL * cdef dtype descr = self.descr # <<<<<<<<<<<<<< * cdef list stack * cdef int offset */ __pyx_t_4 = ((PyObject *)__pyx_v_self->descr); __Pyx_INCREF(__pyx_t_4); __pyx_v_descr = ((PyArray_Descr *)__pyx_t_4); __pyx_t_4 = 0; /* "numpy.pxd":244 * cdef int offset * * cdef bint hasfields = PyDataType_HASFIELDS(descr) # <<<<<<<<<<<<<< * * if not hasfields and not copy_shape: */ __pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr); /* "numpy.pxd":246 * cdef bint hasfields = PyDataType_HASFIELDS(descr) * * if not hasfields and not copy_shape: # <<<<<<<<<<<<<< * # do not call releasebuffer * info.obj = None */ __pyx_t_2 = (!__pyx_v_hasfields); if (__pyx_t_2) { __pyx_t_3 = (!__pyx_v_copy_shape); __pyx_t_1 = __pyx_t_3; } else { __pyx_t_1 = __pyx_t_2; } if (__pyx_t_1) { /* "numpy.pxd":248 * if not hasfields and not copy_shape: * # do not call releasebuffer * info.obj = None # <<<<<<<<<<<<<< * else: * # need to call 
releasebuffer */ __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = Py_None; goto __pyx_L10; } /*else*/ { /* "numpy.pxd":251 * else: * # need to call releasebuffer * info.obj = self # <<<<<<<<<<<<<< * * if not hasfields: */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); } __pyx_L10:; /* "numpy.pxd":253 * info.obj = self * * if not hasfields: # <<<<<<<<<<<<<< * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or */ __pyx_t_1 = (!__pyx_v_hasfields); if (__pyx_t_1) { /* "numpy.pxd":254 * * if not hasfields: * t = descr.type_num # <<<<<<<<<<<<<< * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): */ __pyx_t_5 = __pyx_v_descr->type_num; __pyx_v_t = __pyx_t_5; /* "numpy.pxd":255 * if not hasfields: * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_1 = (__pyx_v_descr->byteorder == '>'); if (__pyx_t_1) { __pyx_t_2 = __pyx_v_little_endian; } else { __pyx_t_2 = __pyx_t_1; } if (!__pyx_t_2) { /* "numpy.pxd":256 * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" */ __pyx_t_1 = (__pyx_v_descr->byteorder == '<'); if (__pyx_t_1) { __pyx_t_3 = (!__pyx_v_little_endian); __pyx_t_7 = __pyx_t_3; } else { __pyx_t_7 = __pyx_t_1; } __pyx_t_1 = __pyx_t_7; } else { __pyx_t_1 = __pyx_t_2; } if (__pyx_t_1) { /* "numpy.pxd":257 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_8), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L12; } __pyx_L12:; /* "numpy.pxd":258 * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" */ __pyx_t_1 = (__pyx_v_t == NPY_BYTE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__b; goto __pyx_L13; } /* "numpy.pxd":259 * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" */ __pyx_t_1 = (__pyx_v_t == NPY_UBYTE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__B; goto __pyx_L13; } /* "numpy.pxd":260 * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" */ __pyx_t_1 = (__pyx_v_t == NPY_SHORT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__h; goto __pyx_L13; } /* "numpy.pxd":261 * elif t == NPY_UBYTE: f = "B" * elif t == 
NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" */ __pyx_t_1 = (__pyx_v_t == NPY_USHORT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__H; goto __pyx_L13; } /* "numpy.pxd":262 * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" */ __pyx_t_1 = (__pyx_v_t == NPY_INT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__i; goto __pyx_L13; } /* "numpy.pxd":263 * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" */ __pyx_t_1 = (__pyx_v_t == NPY_UINT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__I; goto __pyx_L13; } /* "numpy.pxd":264 * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" */ __pyx_t_1 = (__pyx_v_t == NPY_LONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__l; goto __pyx_L13; } /* "numpy.pxd":265 * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" */ __pyx_t_1 = (__pyx_v_t == NPY_ULONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__L; goto __pyx_L13; } /* "numpy.pxd":266 * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" */ __pyx_t_1 = (__pyx_v_t == NPY_LONGLONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__q; goto __pyx_L13; } /* "numpy.pxd":267 * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" */ __pyx_t_1 = (__pyx_v_t == NPY_ULONGLONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Q; goto __pyx_L13; } /* "numpy.pxd":268 * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" */ __pyx_t_1 = (__pyx_v_t == NPY_FLOAT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__f; goto __pyx_L13; } /* "numpy.pxd":269 * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" */ __pyx_t_1 = (__pyx_v_t == NPY_DOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__d; goto __pyx_L13; } /* "numpy.pxd":270 * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" */ __pyx_t_1 = (__pyx_v_t == NPY_LONGDOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__g; goto __pyx_L13; } /* "numpy.pxd":271 * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" */ __pyx_t_1 = (__pyx_v_t == NPY_CFLOAT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Zf; goto __pyx_L13; } /* "numpy.pxd":272 * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" */ __pyx_t_1 = (__pyx_v_t == NPY_CDOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Zd; goto __pyx_L13; } /* "numpy.pxd":273 * elif t 
== NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f = "O" * else: */ __pyx_t_1 = (__pyx_v_t == NPY_CLONGDOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Zg; goto __pyx_L13; } /* "numpy.pxd":274 * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ __pyx_t_1 = (__pyx_v_t == NPY_OBJECT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__O; goto __pyx_L13; } /*else*/ { /* "numpy.pxd":276 * elif t == NPY_OBJECT: f = "O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * info.format = f * return */ __pyx_t_4 = PyInt_FromLong(__pyx_v_t); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_8 = PyNumber_Remainder(((PyObject *)__pyx_kp_u_9), __pyx_t_4); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_8)); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_t_8)); __Pyx_GIVEREF(((PyObject *)__pyx_t_8)); __pyx_t_8 = 0; __pyx_t_8 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_8, 0, 0, 0); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L13:; /* "numpy.pxd":277 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f # <<<<<<<<<<<<<< * return * else: */ __pyx_v_info->format = __pyx_v_f; /* "numpy.pxd":278 * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f * return # <<<<<<<<<<<<<< * else: * info.format = stdlib.malloc(_buffer_format_string_len) */ __pyx_r = 0; goto __pyx_L0; goto __pyx_L11; } /*else*/ { /* "numpy.pxd":280 * return * else: * info.format = stdlib.malloc(_buffer_format_string_len) # <<<<<<<<<<<<<< * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 */ __pyx_v_info->format = ((char *)malloc(255)); /* "numpy.pxd":281 * else: * info.format = stdlib.malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment # <<<<<<<<<<<<<< * offset = 0 * f = _util_dtypestring(descr, info.format + 1, */ (__pyx_v_info->format[0]) = '^'; /* "numpy.pxd":282 * info.format = stdlib.malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 # <<<<<<<<<<<<<< * f = _util_dtypestring(descr, info.format + 1, * info.format + _buffer_format_string_len, */ __pyx_v_offset = 0; /* "numpy.pxd":285 * f = _util_dtypestring(descr, info.format + 1, * info.format + _buffer_format_string_len, * &offset) # <<<<<<<<<<<<<< * f[0] = c'\0' # Terminate format string * */ __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 255), (&__pyx_v_offset)); if (unlikely(__pyx_t_9 == NULL)) 
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 283; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_f = __pyx_t_9; /* "numpy.pxd":286 * info.format + _buffer_format_string_len, * &offset) * f[0] = c'\0' # Terminate format string # <<<<<<<<<<<<<< * * def __releasebuffer__(ndarray self, Py_buffer* info): */ (__pyx_v_f[0]) = '\x00'; } __pyx_L11:; __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { __Pyx_GOTREF(Py_None); __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; } __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_descr); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__releasebuffer__ (wrapper)", 0); __pyx_pf_5numpy_7ndarray_2__releasebuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info)); __Pyx_RefNannyFinishContext(); } /* "numpy.pxd":288 * f[0] = c'\0' # Terminate format string * * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< * if PyArray_HASFIELDS(self): * stdlib.free(info.format) */ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__releasebuffer__", 0); /* "numpy.pxd":289 * * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_t_1 = PyArray_HASFIELDS(__pyx_v_self); if (__pyx_t_1) { /* "numpy.pxd":290 * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): * stdlib.free(info.format) # <<<<<<<<<<<<<< * if sizeof(npy_intp) != sizeof(Py_ssize_t): * stdlib.free(info.strides) */ free(__pyx_v_info->format); goto __pyx_L3; } __pyx_L3:; /* "numpy.pxd":291 * if PyArray_HASFIELDS(self): * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * stdlib.free(info.strides) * # info.shape was stored after info.strides in the same block */ __pyx_t_1 = ((sizeof(npy_intp)) != (sizeof(Py_ssize_t))); if (__pyx_t_1) { /* "numpy.pxd":292 * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): * stdlib.free(info.strides) # <<<<<<<<<<<<<< * # info.shape was stored after info.strides in the same block * */ free(__pyx_v_info->strides); goto __pyx_L4; } __pyx_L4:; __Pyx_RefNannyFinishContext(); } /* "numpy.pxd":768 * ctypedef npy_cdouble complex_t * * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(1, a) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0); /* "numpy.pxd":769 * * cdef inline object PyArray_MultiIterNew1(a): * 
return PyArray_MultiIterNew(1, a) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew2(a, b): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 769; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":771 * return PyArray_MultiIterNew(1, a) * * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(2, a, b) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0); /* "numpy.pxd":772 * * cdef inline object PyArray_MultiIterNew2(a, b): * return PyArray_MultiIterNew(2, a, b) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew3(a, b, c): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 772; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":774 * return PyArray_MultiIterNew(2, a, b) * * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(3, a, b, c) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0); /* "numpy.pxd":775 * * cdef inline object PyArray_MultiIterNew3(a, b, c): * return PyArray_MultiIterNew(3, a, b, c) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 775; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":777 * return PyArray_MultiIterNew(3, a, b, c) * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(4, a, b, c, d) * */ static CYTHON_INLINE PyObject 
*__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0); /* "numpy.pxd":778 * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): * return PyArray_MultiIterNew(4, a, b, c, d) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 778; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":780 * return PyArray_MultiIterNew(4, a, b, c, d) * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(5, a, b, c, d, e) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0); /* "numpy.pxd":781 * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): * return PyArray_MultiIterNew(5, a, b, c, d, e) # <<<<<<<<<<<<<< * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 781; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":783 * return PyArray_MultiIterNew(5, a, b, c, d, e) * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< * # Recursive utility function used in __getbuffer__ to get format * # string. The new location in the format string is returned. 
*/ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) { PyArray_Descr *__pyx_v_child = 0; int __pyx_v_endian_detector; int __pyx_v_little_endian; PyObject *__pyx_v_fields = 0; PyObject *__pyx_v_childname = NULL; PyObject *__pyx_v_new_offset = NULL; PyObject *__pyx_v_t = NULL; char *__pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *(*__pyx_t_6)(PyObject *); int __pyx_t_7; int __pyx_t_8; int __pyx_t_9; int __pyx_t_10; long __pyx_t_11; char *__pyx_t_12; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_util_dtypestring", 0); /* "numpy.pxd":790 * cdef int delta_offset * cdef tuple i * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((&endian_detector)[0] != 0) * cdef tuple fields */ __pyx_v_endian_detector = 1; /* "numpy.pxd":791 * cdef tuple i * cdef int endian_detector = 1 * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * cdef tuple fields * */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "numpy.pxd":794 * cdef tuple fields * * for childname in descr.names: # <<<<<<<<<<<<<< * fields = descr.fields[childname] * child, new_offset = fields */ if (unlikely(((PyObject *)__pyx_v_descr->names) == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_1 = ((PyObject *)__pyx_v_descr->names); __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; for (;;) { if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif __Pyx_XDECREF(__pyx_v_childname); __pyx_v_childname = __pyx_t_3; __pyx_t_3 = 0; /* "numpy.pxd":795 * * for childname in descr.names: * fields = descr.fields[childname] # <<<<<<<<<<<<<< * child, new_offset = fields * */ __pyx_t_3 = PyObject_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (!__pyx_t_3) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected tuple, got %.200s", Py_TYPE(__pyx_t_3)->tp_name), 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_XDECREF(((PyObject *)__pyx_v_fields)); __pyx_v_fields = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "numpy.pxd":796 * for childname in descr.names: * fields = descr.fields[childname] * child, new_offset = fields # <<<<<<<<<<<<<< * * if (end - f) - (new_offset - offset[0]) < 15: */ if (likely(PyTuple_CheckExact(((PyObject *)__pyx_v_fields)))) { PyObject* sequence = ((PyObject *)__pyx_v_fields); #if CYTHON_COMPILING_IN_CPYTHON Py_ssize_t size = Py_SIZE(sequence); #else Py_ssize_t size = PySequence_Size(sequence); #endif if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else 
if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); #else __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else if (1) { __Pyx_RaiseNoneNotIterableError(); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } else { Py_ssize_t index = -1; __pyx_t_5 = PyObject_GetIter(((PyObject *)__pyx_v_fields)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = Py_TYPE(__pyx_t_5)->tp_iternext; index = 0; __pyx_t_3 = __pyx_t_6(__pyx_t_5); if (unlikely(!__pyx_t_3)) goto __pyx_L5_unpacking_failed; __Pyx_GOTREF(__pyx_t_3); index = 1; __pyx_t_4 = __pyx_t_6(__pyx_t_5); if (unlikely(!__pyx_t_4)) goto __pyx_L5_unpacking_failed; __Pyx_GOTREF(__pyx_t_4); if (__Pyx_IternextUnpackEndCheck(__pyx_t_6(__pyx_t_5), 2) < 0) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_6 = NULL; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L6_unpacking_done; __pyx_L5_unpacking_failed:; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_6 = NULL; if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_L6_unpacking_done:; } if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_XDECREF(((PyObject *)__pyx_v_child)); __pyx_v_child = ((PyArray_Descr *)__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_v_new_offset); __pyx_v_new_offset = __pyx_t_4; __pyx_t_4 = 0; /* "numpy.pxd":798 * child, new_offset = fields * * if (end - f) - (new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * */ __pyx_t_4 = PyInt_FromLong((__pyx_v_end - __pyx_v_f)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyInt_FromLong((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_3); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyNumber_Subtract(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyObject_RichCompare(__pyx_t_3, __pyx_int_15, Py_LT); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = 
__pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { /* "numpy.pxd":799 * * if (end - f) - (new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_t_5 = PyObject_Call(__pyx_builtin_RuntimeError, ((PyObject *)__pyx_k_tuple_11), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L7; } __pyx_L7:; /* "numpy.pxd":801 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_7 = (__pyx_v_child->byteorder == '>'); if (__pyx_t_7) { __pyx_t_8 = __pyx_v_little_endian; } else { __pyx_t_8 = __pyx_t_7; } if (!__pyx_t_8) { /* "numpy.pxd":802 * * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * # One could encode it in the format string and have Cython */ __pyx_t_7 = (__pyx_v_child->byteorder == '<'); if (__pyx_t_7) { __pyx_t_9 = (!__pyx_v_little_endian); __pyx_t_10 = __pyx_t_9; } else { __pyx_t_10 = __pyx_t_7; } __pyx_t_7 = __pyx_t_10; } else { __pyx_t_7 = __pyx_t_8; } if (__pyx_t_7) { /* "numpy.pxd":803 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_12), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L8; } __pyx_L8:; /* "numpy.pxd":813 * * # Output padding bytes * while offset[0] < new_offset: # <<<<<<<<<<<<<< * f[0] = 120 # "x"; pad byte * f += 1 */ while (1) { __pyx_t_5 = PyInt_FromLong((__pyx_v_offset[0])); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_t_5, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if 
(!__pyx_t_7) break; /* "numpy.pxd":814 * # Output padding bytes * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<< * f += 1 * offset[0] += 1 */ (__pyx_v_f[0]) = 120; /* "numpy.pxd":815 * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte * f += 1 # <<<<<<<<<<<<<< * offset[0] += 1 * */ __pyx_v_f = (__pyx_v_f + 1); /* "numpy.pxd":816 * f[0] = 120 # "x"; pad byte * f += 1 * offset[0] += 1 # <<<<<<<<<<<<<< * * offset[0] += child.itemsize */ __pyx_t_11 = 0; (__pyx_v_offset[__pyx_t_11]) = ((__pyx_v_offset[__pyx_t_11]) + 1); } /* "numpy.pxd":818 * offset[0] += 1 * * offset[0] += child.itemsize # <<<<<<<<<<<<<< * * if not PyDataType_HASFIELDS(child): */ __pyx_t_11 = 0; (__pyx_v_offset[__pyx_t_11]) = ((__pyx_v_offset[__pyx_t_11]) + __pyx_v_child->elsize); /* "numpy.pxd":820 * offset[0] += child.itemsize * * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< * t = child.type_num * if end - f < 5: */ __pyx_t_7 = (!PyDataType_HASFIELDS(__pyx_v_child)); if (__pyx_t_7) { /* "numpy.pxd":821 * * if not PyDataType_HASFIELDS(child): * t = child.type_num # <<<<<<<<<<<<<< * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") */ __pyx_t_3 = PyInt_FromLong(__pyx_v_child->type_num); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 821; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_XDECREF(__pyx_v_t); __pyx_v_t = __pyx_t_3; __pyx_t_3 = 0; /* "numpy.pxd":822 * if not PyDataType_HASFIELDS(child): * t = child.type_num * if end - f < 5: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short.") * */ __pyx_t_7 = ((__pyx_v_end - __pyx_v_f) < 5); if (__pyx_t_7) { /* "numpy.pxd":823 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_t_3 = PyObject_Call(__pyx_builtin_RuntimeError, ((PyObject *)__pyx_k_tuple_14), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L12; } __pyx_L12:; /* "numpy.pxd":826 * * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" */ __pyx_t_3 = PyInt_FromLong(NPY_BYTE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 98; goto __pyx_L13; } /* "numpy.pxd":827 * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" */ __pyx_t_5 = PyInt_FromLong(NPY_UBYTE); 
if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 66; goto __pyx_L13; } /* "numpy.pxd":828 * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" */ __pyx_t_3 = PyInt_FromLong(NPY_SHORT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 104; goto __pyx_L13; } /* "numpy.pxd":829 * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" */ __pyx_t_5 = PyInt_FromLong(NPY_USHORT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 72; goto __pyx_L13; } /* "numpy.pxd":830 * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" */ __pyx_t_3 = PyInt_FromLong(NPY_INT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 105; goto __pyx_L13; } /* "numpy.pxd":831 * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t 
== NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" */ __pyx_t_5 = PyInt_FromLong(NPY_UINT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 73; goto __pyx_L13; } /* "numpy.pxd":832 * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" */ __pyx_t_3 = PyInt_FromLong(NPY_LONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 108; goto __pyx_L13; } /* "numpy.pxd":833 * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" */ __pyx_t_5 = PyInt_FromLong(NPY_ULONG); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 76; goto __pyx_L13; } /* "numpy.pxd":834 * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" */ __pyx_t_3 = PyInt_FromLong(NPY_LONGLONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; 
__pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 113; goto __pyx_L13; } /* "numpy.pxd":835 * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" */ __pyx_t_5 = PyInt_FromLong(NPY_ULONGLONG); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 81; goto __pyx_L13; } /* "numpy.pxd":836 * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" */ __pyx_t_3 = PyInt_FromLong(NPY_FLOAT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 102; goto __pyx_L13; } /* "numpy.pxd":837 * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf */ __pyx_t_5 = PyInt_FromLong(NPY_DOUBLE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 100; goto __pyx_L13; } /* "numpy.pxd":838 * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd */ __pyx_t_3 = PyInt_FromLong(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = 
PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 103; goto __pyx_L13; } /* "numpy.pxd":839 * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg */ __pyx_t_5 = PyInt_FromLong(NPY_CFLOAT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 102; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L13; } /* "numpy.pxd":840 * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" */ __pyx_t_3 = PyInt_FromLong(NPY_CDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 100; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L13; } /* "numpy.pxd":841 * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: */ __pyx_t_5 = PyInt_FromLong(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 103; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L13; } /* "numpy.pxd":842 * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ __pyx_t_3 = PyInt_FromLong(NPY_OBJECT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 79; goto __pyx_L13; } /*else*/ { /* "numpy.pxd":844 * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * f += 1 * else: */ __pyx_t_5 = PyNumber_Remainder(((PyObject *)__pyx_kp_u_9), __pyx_v_t); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_5)); __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_t_5)); __Pyx_GIVEREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L13:; /* "numpy.pxd":845 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * f += 1 # <<<<<<<<<<<<<< * else: * # Cython ignores struct boundary information ("T{...}"), */ __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L11; } /*else*/ { /* "numpy.pxd":849 * # Cython ignores struct boundary information ("T{...}"), * # so don't output it * f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<< * return f * */ __pyx_t_12 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_12 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 849; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_f = __pyx_t_12; } __pyx_L11:; } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "numpy.pxd":850 * # so don't output it * f = _util_dtypestring(child, f, end, offset) * return f # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_f; goto __pyx_L0; __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_child); __Pyx_XDECREF(__pyx_v_fields); 
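/* Illustration (not generated code): _util_dtypestring above walks the fields of a
 * structured dtype and emits one struct-style format character per field ('b', 'B', 'h',
 * ..., 'Zf', 'Zd', 'Zg', 'O'), inserting 'x' pad bytes for gaps and recursing into nested
 * records.  A rough Python sketch of the same mapping idea, assuming a simple non-nested
 * dtype and ignoring padding (names and the CODE table are illustrative only):
 *
 *     import numpy as np
 *     CODE = {np.dtype(np.int8): 'b', np.dtype(np.uint8): 'B',
 *             np.dtype(np.int16): 'h', np.dtype(np.float32): 'f',
 *             np.dtype(np.float64): 'd'}
 *     dt = np.dtype([('a', np.int16), ('b', np.float64)])
 *     fmt = ''.join(CODE[sub] for sub, _ in (dt.fields[name] for name in dt.names))
 *     # fmt == 'hd'
 *
 * The real helper additionally checks byte order, guards against overrunning the format
 * buffer, and writes directly into the caller-supplied char array. */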
__Pyx_XDECREF(__pyx_v_childname); __Pyx_XDECREF(__pyx_v_new_offset); __Pyx_XDECREF(__pyx_v_t); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":965 * * * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< * cdef PyObject* baseptr * if base is None: */ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) { PyObject *__pyx_v_baseptr; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("set_array_base", 0); /* "numpy.pxd":967 * cdef inline void set_array_base(ndarray arr, object base): * cdef PyObject* baseptr * if base is None: # <<<<<<<<<<<<<< * baseptr = NULL * else: */ __pyx_t_1 = (__pyx_v_base == Py_None); if (__pyx_t_1) { /* "numpy.pxd":968 * cdef PyObject* baseptr * if base is None: * baseptr = NULL # <<<<<<<<<<<<<< * else: * Py_INCREF(base) # important to do this before decref below! */ __pyx_v_baseptr = NULL; goto __pyx_L3; } /*else*/ { /* "numpy.pxd":970 * baseptr = NULL * else: * Py_INCREF(base) # important to do this before decref below! # <<<<<<<<<<<<<< * baseptr = base * Py_XDECREF(arr.base) */ Py_INCREF(__pyx_v_base); /* "numpy.pxd":971 * else: * Py_INCREF(base) # important to do this before decref below! * baseptr = base # <<<<<<<<<<<<<< * Py_XDECREF(arr.base) * arr.base = baseptr */ __pyx_v_baseptr = ((PyObject *)__pyx_v_base); } __pyx_L3:; /* "numpy.pxd":972 * Py_INCREF(base) # important to do this before decref below! * baseptr = base * Py_XDECREF(arr.base) # <<<<<<<<<<<<<< * arr.base = baseptr * */ Py_XDECREF(__pyx_v_arr->base); /* "numpy.pxd":973 * baseptr = base * Py_XDECREF(arr.base) * arr.base = baseptr # <<<<<<<<<<<<<< * * cdef inline object get_array_base(ndarray arr): */ __pyx_v_arr->base = __pyx_v_baseptr; __Pyx_RefNannyFinishContext(); } /* "numpy.pxd":975 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("get_array_base", 0); /* "numpy.pxd":976 * * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: # <<<<<<<<<<<<<< * return None * else: */ __pyx_t_1 = (__pyx_v_arr->base == NULL); if (__pyx_t_1) { /* "numpy.pxd":977 * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: * return None # <<<<<<<<<<<<<< * else: * return arr.base */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_None); __pyx_r = Py_None; goto __pyx_L0; goto __pyx_L3; } /*else*/ { /* "numpy.pxd":979 * return None * else: * return arr.base # <<<<<<<<<<<<<< */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_arr->base)); __pyx_r = ((PyObject *)__pyx_v_arr->base); goto __pyx_L0; } __pyx_L3:; __pyx_r = Py_None; __Pyx_INCREF(Py_None); __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyMethodDef __pyx_methods[] = { {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 static struct PyModuleDef __pyx_moduledef = { PyModuleDef_HEAD_INIT, __Pyx_NAMESTR("histogram"), __Pyx_DOCSTR(__pyx_k_15), /* m_doc */ -1, /* m_size */ __pyx_methods /* m_methods */, NULL, /* m_reload */ NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_kp_s_1, __pyx_k_1, sizeof(__pyx_k_1), 0, 0, 1, 0}, {&__pyx_kp_u_10, __pyx_k_10, sizeof(__pyx_k_10), 0, 1, 0, 0}, {&__pyx_kp_u_13, __pyx_k_13, sizeof(__pyx_k_13), 0, 1, 0, 0}, 
{&__pyx_kp_s_18, __pyx_k_18, sizeof(__pyx_k_18), 0, 0, 1, 0}, {&__pyx_n_s_19, __pyx_k_19, sizeof(__pyx_k_19), 0, 0, 1, 1}, {&__pyx_kp_u_3, __pyx_k_3, sizeof(__pyx_k_3), 0, 1, 0, 0}, {&__pyx_kp_u_5, __pyx_k_5, sizeof(__pyx_k_5), 0, 1, 0, 0}, {&__pyx_kp_u_7, __pyx_k_7, sizeof(__pyx_k_7), 0, 1, 0, 0}, {&__pyx_kp_u_9, __pyx_k_9, sizeof(__pyx_k_9), 0, 1, 0, 0}, {&__pyx_n_s__RuntimeError, __pyx_k__RuntimeError, sizeof(__pyx_k__RuntimeError), 0, 0, 1, 1}, {&__pyx_n_s__ValueError, __pyx_k__ValueError, sizeof(__pyx_k__ValueError), 0, 0, 1, 1}, {&__pyx_n_s____main__, __pyx_k____main__, sizeof(__pyx_k____main__), 0, 0, 1, 1}, {&__pyx_n_s____test__, __pyx_k____test__, sizeof(__pyx_k____test__), 0, 0, 1, 1}, {&__pyx_n_s__dtype, __pyx_k__dtype, sizeof(__pyx_k__dtype), 0, 0, 1, 1}, {&__pyx_n_s__flat, __pyx_k__flat, sizeof(__pyx_k__flat), 0, 0, 1, 1}, {&__pyx_n_s__h, __pyx_k__h, sizeof(__pyx_k__h), 0, 0, 1, 1}, {&__pyx_n_s__histogram, __pyx_k__histogram, sizeof(__pyx_k__histogram), 0, 0, 1, 1}, {&__pyx_n_s__hv, __pyx_k__hv, sizeof(__pyx_k__hv), 0, 0, 1, 1}, {&__pyx_n_s__it, __pyx_k__it, sizeof(__pyx_k__it), 0, 0, 1, 1}, {&__pyx_n_s__max, __pyx_k__max, sizeof(__pyx_k__max), 0, 0, 1, 1}, {&__pyx_n_s__nbins, __pyx_k__nbins, sizeof(__pyx_k__nbins), 0, 0, 1, 1}, {&__pyx_n_s__np, __pyx_k__np, sizeof(__pyx_k__np), 0, 0, 1, 1}, {&__pyx_n_s__numpy, __pyx_k__numpy, sizeof(__pyx_k__numpy), 0, 0, 1, 1}, {&__pyx_n_s__range, __pyx_k__range, sizeof(__pyx_k__range), 0, 0, 1, 1}, {&__pyx_n_s__uintp, __pyx_k__uintp, sizeof(__pyx_k__uintp), 0, 0, 1, 1}, {&__pyx_n_s__x, __pyx_k__x, sizeof(__pyx_k__x), 0, 0, 1, 1}, {&__pyx_n_s__xv, __pyx_k__xv, sizeof(__pyx_k__xv), 0, 0, 1, 1}, {&__pyx_n_s__zeros, __pyx_k__zeros, sizeof(__pyx_k__zeros), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_ValueError = __Pyx_GetName(__pyx_b, __pyx_n_s__ValueError); if (!__pyx_builtin_ValueError) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 27; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_range = __Pyx_GetName(__pyx_b, __pyx_n_s__range); if (!__pyx_builtin_range) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 228; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_RuntimeError = __Pyx_GetName(__pyx_b, __pyx_n_s__RuntimeError); if (!__pyx_builtin_RuntimeError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} return 0; __pyx_L1_error:; return -1; } static int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "nipy/algorithms/statistics/histogram.pyx":27 * """ * if not x.dtype=='uintp': * raise ValueError('input array should have uintp data type') # <<<<<<<<<<<<<< * * cdef np.npy_uintp xv */ __pyx_k_tuple_2 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 27; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_2); __Pyx_INCREF(((PyObject *)__pyx_kp_s_1)); PyTuple_SET_ITEM(__pyx_k_tuple_2, 0, ((PyObject *)__pyx_kp_s_1)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_1)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_2)); /* "numpy.pxd":215 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_k_tuple_4 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno 
= 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_4); __Pyx_INCREF(((PyObject *)__pyx_kp_u_3)); PyTuple_SET_ITEM(__pyx_k_tuple_4, 0, ((PyObject *)__pyx_kp_u_3)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_3)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_4)); /* "numpy.pxd":219 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_k_tuple_6 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_6); __Pyx_INCREF(((PyObject *)__pyx_kp_u_5)); PyTuple_SET_ITEM(__pyx_k_tuple_6, 0, ((PyObject *)__pyx_kp_u_5)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_5)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_6)); /* "numpy.pxd":257 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_k_tuple_8 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_8); __Pyx_INCREF(((PyObject *)__pyx_kp_u_7)); PyTuple_SET_ITEM(__pyx_k_tuple_8, 0, ((PyObject *)__pyx_kp_u_7)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_7)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_8)); /* "numpy.pxd":799 * * if (end - f) - (new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_k_tuple_11 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_11)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_11); __Pyx_INCREF(((PyObject *)__pyx_kp_u_10)); PyTuple_SET_ITEM(__pyx_k_tuple_11, 0, ((PyObject *)__pyx_kp_u_10)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_10)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_11)); /* "numpy.pxd":803 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_k_tuple_12 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_12)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_12); __Pyx_INCREF(((PyObject *)__pyx_kp_u_7)); PyTuple_SET_ITEM(__pyx_k_tuple_12, 0, ((PyObject *)__pyx_kp_u_7)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_7)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_12)); /* "numpy.pxd":823 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_k_tuple_14 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_14)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_14); __Pyx_INCREF(((PyObject *)__pyx_kp_u_13)); PyTuple_SET_ITEM(__pyx_k_tuple_14, 0, ((PyObject *)__pyx_kp_u_13)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_13)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_14)); /* 
"nipy/algorithms/statistics/histogram.pyx":11 * np.import_array() * * def histogram(x): # <<<<<<<<<<<<<< * """ * Fast histogram computation assuming input array is of uintp data */ __pyx_k_tuple_16 = PyTuple_New(6); if (unlikely(!__pyx_k_tuple_16)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 11; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_16); __Pyx_INCREF(((PyObject *)__pyx_n_s__x)); PyTuple_SET_ITEM(__pyx_k_tuple_16, 0, ((PyObject *)__pyx_n_s__x)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__x)); __Pyx_INCREF(((PyObject *)__pyx_n_s__xv)); PyTuple_SET_ITEM(__pyx_k_tuple_16, 1, ((PyObject *)__pyx_n_s__xv)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__xv)); __Pyx_INCREF(((PyObject *)__pyx_n_s__nbins)); PyTuple_SET_ITEM(__pyx_k_tuple_16, 2, ((PyObject *)__pyx_n_s__nbins)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__nbins)); __Pyx_INCREF(((PyObject *)__pyx_n_s__it)); PyTuple_SET_ITEM(__pyx_k_tuple_16, 3, ((PyObject *)__pyx_n_s__it)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__it)); __Pyx_INCREF(((PyObject *)__pyx_n_s__h)); PyTuple_SET_ITEM(__pyx_k_tuple_16, 4, ((PyObject *)__pyx_n_s__h)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__h)); __Pyx_INCREF(((PyObject *)__pyx_n_s__hv)); PyTuple_SET_ITEM(__pyx_k_tuple_16, 5, ((PyObject *)__pyx_n_s__hv)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__hv)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_16)); __pyx_k_codeobj_17 = (PyObject*)__Pyx_PyCode_New(1, 0, 6, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_16, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_18, __pyx_n_s__histogram, 11, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_17)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 11; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_InitGlobals(void) { if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __pyx_int_15 = PyInt_FromLong(15); if (unlikely(!__pyx_int_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; return 0; __pyx_L1_error:; return -1; } #if PY_MAJOR_VERSION < 3 PyMODINIT_FUNC inithistogram(void); /*proto*/ PyMODINIT_FUNC inithistogram(void) #else PyMODINIT_FUNC PyInit_histogram(void); /*proto*/ PyMODINIT_FUNC PyInit_histogram(void) #endif { PyObject *__pyx_t_1 = NULL; __Pyx_RefNannyDeclarations #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit_histogram(void)", 0); if ( __Pyx_check_binary_version() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #ifdef __Pyx_CyFunction_USED if (__Pyx_CyFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? */ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4(__Pyx_NAMESTR("histogram"), __pyx_methods, __Pyx_DOCSTR(__pyx_k_15), 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (!PyDict_GetItemString(modules, "nipy.algorithms.statistics.histogram")) { if (unlikely(PyDict_SetItemString(modules, "nipy.algorithms.statistics.histogram", __pyx_m) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } } #endif __pyx_b = PyImport_AddModule(__Pyx_NAMESTR(__Pyx_BUILTIN_MODULE_NAME)); if (unlikely(!__pyx_b)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #if CYTHON_COMPILING_IN_PYPY Py_INCREF(__pyx_b); #endif if (__Pyx_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; /*--- Initialize various global constants etc. 
---*/ if (unlikely(__Pyx_InitGlobals() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (__pyx_module_is_main_nipy__algorithms__statistics__histogram) { if (__Pyx_SetAttrString(__pyx_m, "__name__", __pyx_n_s____main__) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; } /*--- Builtin init code ---*/ if (unlikely(__Pyx_InitCachedBuiltins() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Constants init code ---*/ if (unlikely(__Pyx_InitCachedConstants() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Global init code ---*/ /*--- Variable export code ---*/ /*--- Function export code ---*/ /*--- Type init code ---*/ /*--- Type import code ---*/ __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "type", #if CYTHON_COMPILING_IN_PYPY sizeof(PyTypeObject), #else sizeof(PyHeapTypeObject), #endif 0); if (unlikely(!__pyx_ptype_7cpython_4type_type)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_5numpy_dtype)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 155; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 169; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 178; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 861; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Variable import code ---*/ /*--- Function import code ---*/ /*--- Execution code ---*/ /* "nipy/algorithms/statistics/histogram.pyx":6 * """ * * import numpy as np # <<<<<<<<<<<<<< * cimport numpy as np * */ __pyx_t_1 = __Pyx_Import(((PyObject *)__pyx_n_s__numpy), 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 6; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__np, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 6; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/statistics/histogram.pyx":9 * cimport numpy as np * * np.import_array() # <<<<<<<<<<<<<< * * def histogram(x): */ import_array(); /* "nipy/algorithms/statistics/histogram.pyx":11 * np.import_array() * * def histogram(x): # <<<<<<<<<<<<<< * """ * Fast histogram computation assuming input array is of uintp data */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_10algorithms_10statistics_9histogram_1histogram, NULL, __pyx_n_s_19); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 11; __pyx_clineno = 
__LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__histogram, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 11; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/statistics/histogram.pyx":1 * # -*- Mode: Python -*- Not really, but the syntax is close enough # <<<<<<<<<<<<<< * """ * Author: Alexis Roche, 2012. */ __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_1)); if (PyObject_SetAttr(__pyx_m, __pyx_n_s____test__, ((PyObject *)__pyx_t_1)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; /* "numpy.pxd":975 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); if (__pyx_m) { __Pyx_AddTraceback("init nipy.algorithms.statistics.histogram", __pyx_clineno, __pyx_lineno, __pyx_filename); Py_DECREF(__pyx_m); __pyx_m = 0; } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init nipy.algorithms.statistics.histogram"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if PY_MAJOR_VERSION < 3 return; #else return __pyx_m; #endif } /* Runtime support code */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule((char *)modname); if (!m) goto end; p = PyObject_GetAttrString(m, (char *)"RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* CYTHON_REFNANNY */ static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name) { PyObject *result; result = PyObject_GetAttr(dict, name); if (!result) { if (dict != __pyx_b) { PyErr_Clear(); result = PyObject_GetAttr(__pyx_b, name); } if (!result) { PyErr_SetObject(PyExc_NameError, name); } } return result; } static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb) { #if CYTHON_COMPILING_IN_CPYTHON PyObject *tmp_type, *tmp_value, *tmp_tb; PyThreadState *tstate = PyThreadState_GET(); tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_Restore(type, value, tb); #endif } static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb) { #if CYTHON_COMPILING_IN_CPYTHON PyThreadState *tstate = PyThreadState_GET(); *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(type, value, tb); #endif } #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } #if PY_VERSION_HEX < 0x02050000 if (PyClass_Check(type)) { #else if 
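/* Illustration (not generated code): the module init code above registers the Python-level
 * histogram() function in nipy.algorithms.statistics.histogram.  Based on the Cython source
 * quoted in the surrounding comments (the explicit uintp dtype check and the docstring),
 * typical usage looks roughly like the sketch below; the exact bin layout of the returned
 * array is an assumption here, not spelled out in this file:
 *
 *     import numpy as np
 *     from nipy.algorithms.statistics.histogram import histogram
 *     x = np.asarray([0, 1, 1, 3], dtype=np.uintp)   # any other dtype raises ValueError
 *     h = histogram(x)                               # fast integer-valued histogram of x
 */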
(PyType_Check(type)) { #endif #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; #if PY_VERSION_HEX < 0x02050000 if (PyInstance_Check(type)) { type = (PyObject*) ((PyInstanceObject*)type)->in_class; Py_INCREF(type); } else { type = 0; PyErr_SetString(PyExc_TypeError, "raise: exception must be an old-style class or instance"); goto raise_error; } #else type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } #endif } __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else /* Python 3+ */ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyEval_CallObject(type, args); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } if (cause && cause != Py_None) { PyObject *fixed_cause; if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { PyThreadState *tstate = PyThreadState_GET(); PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } } bad: Py_XDECREF(owned_instance); return; } #endif static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { if (unlikely(!type)) { PyErr_Format(PyExc_SystemError, "Missing type object"); return 0; } if (likely(PyObject_TypeCheck(obj, type))) return 1; PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", Py_TYPE(obj)->tp_name, type->tp_name); return 0; } static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); } static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more than %" CYTHON_FORMAT_SSIZE_T "d value%s to unpack", index, (index == 1) ? 
"" : "s"); } static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); } static CYTHON_INLINE int __Pyx_IterFinish(void) { #if CYTHON_COMPILING_IN_CPYTHON PyThreadState *tstate = PyThreadState_GET(); PyObject* exc_type = tstate->curexc_type; if (unlikely(exc_type)) { if (likely(exc_type == PyExc_StopIteration) || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration)) { PyObject *exc_value, *exc_tb; exc_value = tstate->curexc_value; exc_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; Py_DECREF(exc_type); Py_XDECREF(exc_value); Py_XDECREF(exc_tb); return 0; } else { return -1; } } return 0; #else if (unlikely(PyErr_Occurred())) { if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) { PyErr_Clear(); return 0; } else { return -1; } } return 0; #endif } static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected) { if (unlikely(retval)) { Py_DECREF(retval); __Pyx_RaiseTooManyValuesError(expected); return -1; } else { return __Pyx_IterFinish(); } return 0; } static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level) { PyObject *py_import = 0; PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; py_import = __Pyx_GetAttrString(__pyx_b, "__import__"); if (!py_import) goto bad; if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; #if PY_VERSION_HEX >= 0x02050000 { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if (strchr(__Pyx_MODULE_NAME, '.')) { /* try package relative import first */ PyObject *py_level = PyInt_FromLong(1); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; /* try absolute import on failure */ } #endif if (!module) { PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); } } #else if (level>0) { PyErr_SetString(PyExc_RuntimeError, "Relative import is not supported for Python <=2.4."); goto bad; } module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, NULL); #endif bad: Py_XDECREF(empty_list); Py_XDECREF(py_import); Py_XDECREF(empty_dict); return module; } #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return ::std::complex< float >(x, y); } #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return x + y*(__pyx_t_float_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { __pyx_t_float_complex z; z.real = x; z.imag = y; return z; } #endif #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex a, __pyx_t_float_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real + b.real; z.imag = a.imag + 
b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float denom = b.real * b.real + b.imag * b.imag; z.real = (a.real * b.real + a.imag * b.imag) / denom; z.imag = (a.imag * b.real - a.real * b.imag) / denom; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrtf(z.real*z.real + z.imag*z.imag); #else return hypotf(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { float denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(a, a); case 3: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(z, a); case 4: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } r = a.real; theta = 0; } else { r = __Pyx_c_absf(a); theta = atan2f(a.imag, a.real); } lnr = logf(r); z_r = expf(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cosf(z_theta); z.imag = z_r * sinf(z_theta); return z; } #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return ::std::complex< double >(x, y); } #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return x + y*(__pyx_t_double_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { __pyx_t_double_complex z; z.real = x; z.imag = y; return z; } #endif #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex a, __pyx_t_double_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; 
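/* Editorial note (added; not part of the generated Cython output): the
 * __Pyx_c_powf / __Pyx_c_pow fallbacks in this section evaluate a**b for
 * complex a, b through the principal logarithm,
 *     a**b = exp(b * Log a),  with  Log a = ln r + i*theta,
 *     r = |a|,  theta = atan2(Im a, Re a),
 * which is what the general branch computes:
 *     z_r     = exp(Re(b)*ln r - Im(b)*theta)    (modulus)
 *     z_theta = Re(b)*theta + Im(b)*ln r         (argument).
 * Integer exponents of magnitude <= 4 (negative ones via 1/a) are
 * special-cased as repeated multiplications so the transcendental calls
 * are avoided there. */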
z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double denom = b.real * b.real + b.imag * b.imag; z.real = (a.real * b.real + a.imag * b.imag) / denom; z.imag = (a.imag * b.real - a.real * b.imag) / denom; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrt(z.real*z.real + z.imag*z.imag); #else return hypot(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { double denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(a, a); case 3: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(z, a); case 4: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } r = a.real; theta = 0; } else { r = __Pyx_c_abs(a); theta = atan2(a.imag, a.real); } lnr = log(r); z_r = exp(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cos(z_theta); z.imag = z_r * sin(z_theta); return z; } #endif #endif static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject* x) { const unsigned char neg_one = (unsigned char)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(unsigned char) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(unsigned char)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to unsigned char" : "value too large to convert to unsigned char"); } return (unsigned char)-1; } return (unsigned char)val; } return (unsigned char)__Pyx_PyInt_AsUnsignedLong(x); } static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject* x) { const unsigned short neg_one = (unsigned short)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(unsigned short) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(unsigned short)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? 
"can't convert negative value to unsigned short" : "value too large to convert to unsigned short"); } return (unsigned short)-1; } return (unsigned short)val; } return (unsigned short)__Pyx_PyInt_AsUnsignedLong(x); } static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject* x) { const unsigned int neg_one = (unsigned int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(unsigned int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(unsigned int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to unsigned int" : "value too large to convert to unsigned int"); } return (unsigned int)-1; } return (unsigned int)val; } return (unsigned int)__Pyx_PyInt_AsUnsignedLong(x); } static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject* x) { const char neg_one = (char)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(char) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(char)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to char" : "value too large to convert to char"); } return (char)-1; } return (char)val; } return (char)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject* x) { const short neg_one = (short)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(short) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(short)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to short" : "value too large to convert to short"); } return (short)-1; } return (short)val; } return (short)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject* x) { const int neg_one = (int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to int" : "value too large to convert to int"); } return (int)-1; } return (int)val; } return (int)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject* x) { const signed char neg_one = (signed char)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(signed char) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(signed char)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to signed char" : "value too large to convert to signed char"); } return (signed char)-1; } return (signed char)val; } return (signed char)__Pyx_PyInt_AsSignedLong(x); } static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject* x) { const signed short neg_one = (signed short)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(signed short) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(signed short)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? 
"can't convert negative value to signed short" : "value too large to convert to signed short"); } return (signed short)-1; } return (signed short)val; } return (signed short)__Pyx_PyInt_AsSignedLong(x); } static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject* x) { const signed int neg_one = (signed int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(signed int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(signed int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to signed int" : "value too large to convert to signed int"); } return (signed int)-1; } return (signed int)val; } return (signed int)__Pyx_PyInt_AsSignedLong(x); } static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject* x) { const int neg_one = (int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to int" : "value too large to convert to int"); } return (int)-1; } return (int)val; } return (int)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject* x) { const unsigned long neg_one = (unsigned long)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned long"); return (unsigned long)-1; } return (unsigned long)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned long"); return (unsigned long)-1; } return (unsigned long)PyLong_AsUnsignedLong(x); } else { return (unsigned long)PyLong_AsLong(x); } } else { unsigned long val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (unsigned long)-1; val = __Pyx_PyInt_AsUnsignedLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject* x) { const unsigned PY_LONG_LONG neg_one = (unsigned PY_LONG_LONG)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned PY_LONG_LONG"); return (unsigned PY_LONG_LONG)-1; } return (unsigned PY_LONG_LONG)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned PY_LONG_LONG"); return (unsigned PY_LONG_LONG)-1; } return (unsigned PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); } else { return (unsigned PY_LONG_LONG)PyLong_AsLongLong(x); } } else { unsigned PY_LONG_LONG val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (unsigned PY_LONG_LONG)-1; val = __Pyx_PyInt_AsUnsignedLongLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject* x) { const long neg_one = (long)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if 
(likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long)-1; } return (long)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long)-1; } return (long)PyLong_AsUnsignedLong(x); } else { return (long)PyLong_AsLong(x); } } else { long val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (long)-1; val = __Pyx_PyInt_AsLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject* x) { const PY_LONG_LONG neg_one = (PY_LONG_LONG)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to PY_LONG_LONG"); return (PY_LONG_LONG)-1; } return (PY_LONG_LONG)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to PY_LONG_LONG"); return (PY_LONG_LONG)-1; } return (PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); } else { return (PY_LONG_LONG)PyLong_AsLongLong(x); } } else { PY_LONG_LONG val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (PY_LONG_LONG)-1; val = __Pyx_PyInt_AsLongLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject* x) { const signed long neg_one = (signed long)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed long"); return (signed long)-1; } return (signed long)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed long"); return (signed long)-1; } return (signed long)PyLong_AsUnsignedLong(x); } else { return (signed long)PyLong_AsLong(x); } } else { signed long val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (signed long)-1; val = __Pyx_PyInt_AsSignedLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject* x) { const signed PY_LONG_LONG neg_one = (signed PY_LONG_LONG)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed PY_LONG_LONG"); return (signed PY_LONG_LONG)-1; } return (signed PY_LONG_LONG)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed PY_LONG_LONG"); return (signed PY_LONG_LONG)-1; } return (signed PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); } else { return (signed PY_LONG_LONG)PyLong_AsLongLong(x); } } else { signed PY_LONG_LONG val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (signed PY_LONG_LONG)-1; val = __Pyx_PyInt_AsSignedLongLong(tmp); Py_DECREF(tmp); return val; } } static int 
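/* Editorial note (added; not part of the generated Cython output): the
 * __Pyx_PyInt_As* helpers above share one pattern -- for C targets
 * narrower than long, the value is first converted with
 * __Pyx_PyInt_AsLong and round-tripped through a cast to detect overflow,
 * raising OverflowError with a sign-aware message; wider targets delegate
 * to the (unsigned) long / long long converters, and objects that are not
 * already ints are coerced first through __Pyx_PyNumber_Int. */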
__Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); #if PY_VERSION_HEX < 0x02050000 return PyErr_Warn(NULL, message); #else return PyErr_WarnEx(NULL, message, 1); #endif } return 0; } #ifndef __PYX_HAVE_RT_ImportModule #define __PYX_HAVE_RT_ImportModule static PyObject *__Pyx_ImportModule(const char *name) { PyObject *py_name = 0; PyObject *py_module = 0; py_name = __Pyx_PyIdentifier_FromString(name); if (!py_name) goto bad; py_module = PyImport_Import(py_name); Py_DECREF(py_name); return py_module; bad: Py_XDECREF(py_name); return 0; } #endif #ifndef __PYX_HAVE_RT_ImportType #define __PYX_HAVE_RT_ImportType static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict) { PyObject *py_module = 0; PyObject *result = 0; PyObject *py_name = 0; char warning[200]; py_module = __Pyx_ImportModule(module_name); if (!py_module) goto bad; py_name = __Pyx_PyIdentifier_FromString(class_name); if (!py_name) goto bad; result = PyObject_GetAttr(py_module, py_name); Py_DECREF(py_name); py_name = 0; Py_DECREF(py_module); py_module = 0; if (!result) goto bad; if (!PyType_Check(result)) { PyErr_Format(PyExc_TypeError, "%s.%s is not a type object", module_name, class_name); goto bad; } if (!strict && (size_t)((PyTypeObject *)result)->tp_basicsize > size) { PyOS_snprintf(warning, sizeof(warning), "%s.%s size changed, may indicate binary incompatibility", module_name, class_name); #if PY_VERSION_HEX < 0x02050000 if (PyErr_Warn(NULL, warning) < 0) goto bad; #else if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; #endif } else if ((size_t)((PyTypeObject *)result)->tp_basicsize != size) { PyErr_Format(PyExc_ValueError, "%s.%s has the wrong size, try recompiling", module_name, class_name); goto bad; } return (PyTypeObject *)result; bad: Py_XDECREF(py_module); Py_XDECREF(result); return NULL; } #endif static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = (start + end) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); 
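/* Editorial note (added; not part of the generated Cython output):
 * __pyx_find_code_object and __pyx_insert_code_object maintain a sorted
 * array of (code_line, PyCodeObject*) pairs so that __Pyx_AddTraceback can
 * reuse the code objects it synthesizes for traceback frames.  Lookup and
 * insertion position come from the binary search in
 * __pyx_bisect_code_objects; the array starts at 64 entries (the
 * PyMem_Malloc just above) and grows by 64 via PyMem_Realloc when full. */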
if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, new_max*sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, /*int argcount,*/ 0, /*int kwonlyargcount,*/ 0, /*int nlocals,*/ 0, /*int stacksize,*/ 0, /*int flags,*/ __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, /*int firstlineno,*/ __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_globals = 0; PyFrameObject *py_frame = 0; py_code = __pyx_find_code_object(c_line ? c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? 
c_line : py_line, py_code); } py_globals = PyModule_GetDict(__pyx_m); if (!py_globals) goto bad; py_frame = PyFrame_New( PyThreadState_GET(), /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ py_globals, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; py_frame->f_lineno = py_line; PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else /* Python 3+ has unicode identifiers */ if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; ++t; } return 0; } /* Type Conversion Functions */ static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x) { PyNumberMethods *m; const char *name = NULL; PyObject *res = NULL; #if PY_VERSION_HEX < 0x03000000 if (PyInt_Check(x) || PyLong_Check(x)) #else if (PyLong_Check(x)) #endif return Py_INCREF(x), x; m = Py_TYPE(x)->tp_as_number; #if PY_VERSION_HEX < 0x03000000 if (m && m->nb_int) { name = "int"; res = PyNumber_Int(x); } else if (m && m->nb_long) { name = "long"; res = PyNumber_Long(x); } #else if (m && m->nb_int) { name = "int"; res = PyNumber_Long(x); } #endif if (res) { #if PY_VERSION_HEX < 0x03000000 if (!PyInt_Check(res) && !PyLong_Check(res)) { #else if (!PyLong_Check(res)) { #endif PyErr_Format(PyExc_TypeError, "__%s__ returned non-%s (type %.200s)", name, name, Py_TYPE(res)->tp_name); Py_DECREF(res); return NULL; } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject* x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { #if PY_VERSION_HEX < 0x02050000 if (ival <= LONG_MAX) return PyInt_FromLong((long)ival); else { unsigned char *bytes = (unsigned char *) &ival; int one = 1; int little = (int)*(unsigned char*)&one; return _PyLong_FromByteArray(bytes, sizeof(size_t), little, 0); } #else return PyInt_FromSize_t(ival); #endif } static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject* x) { unsigned PY_LONG_LONG val = __Pyx_PyInt_AsUnsignedLongLong(x); if (unlikely(val == (unsigned PY_LONG_LONG)-1 && PyErr_Occurred())) { return (size_t)-1; } else if (unlikely(val != (unsigned PY_LONG_LONG)(size_t)val)) { PyErr_SetString(PyExc_OverflowError, "value too large to convert to size_t"); return (size_t)-1; } return (size_t)val; } #endif /* Py_PYTHON_H */ nipy-0.3.0/nipy/algorithms/statistics/histogram.pyx000066400000000000000000000016401210344137400225520ustar00rootroot00000000000000# -*- Mode: Python -*- Not really, but the syntax is close enough """ Author: Alexis Roche, 2012. 
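Editorial note (added; not in the original nipy source): a minimal usage sketch for the `histogram` function defined below. It assumes the compiled extension is importable as nipy.algorithms.statistics.histogram and that the input is a NumPy array of dtype 'uintp'; under those assumptions the result should agree with numpy.bincount.

    import numpy as np
    from nipy.algorithms.statistics.histogram import histogram

    x = np.array([0, 1, 1, 3, 2, 1], dtype='uintp')
    h = histogram(x)                      # counts for the values 0 .. x.max()
    assert (h == np.bincount(x)).all()    # both give [1, 3, 1, 1]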
""" import numpy as np cimport numpy as np np.import_array() def histogram(x): """ Fast histogram computation assuming input array is of uintp data type. Parameters ---------- x: array-like Assumed with uintp dtype Output ------ h: 1d array Histogram """ if not x.dtype=='uintp': raise ValueError('input array should have uintp data type') cdef np.npy_uintp xv cdef np.npy_uintp nbins = x.max() + 1 cdef np.flatiter it = x.flat cdef np.ndarray h = np.zeros(nbins, dtype='uintp') cdef np.npy_uintp* hv while np.PyArray_ITER_NOTDONE(it): xv = (np.PyArray_ITER_DATA(it))[0] hv = np.PyArray_DATA(h) + xv hv[0] += 1 np.PyArray_ITER_NEXT(it) return h nipy-0.3.0/nipy/algorithms/statistics/intvol.c000066400000000000000000041611161210344137400215030ustar00rootroot00000000000000/* Generated by Cython 0.17.4 on Sat Jan 12 17:27:31 2013 */ #define PY_SSIZE_T_CLEAN #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02040000 #error Cython requires Python 2.4+. #else #include /* For offsetof */ #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #endif #if PY_VERSION_HEX < 0x02050000 typedef int Py_ssize_t; #define PY_SSIZE_T_MAX INT_MAX #define PY_SSIZE_T_MIN INT_MIN #define PY_FORMAT_SIZE_T "" #define CYTHON_FORMAT_SSIZE_T "" #define PyInt_FromSsize_t(z) PyInt_FromLong(z) #define PyInt_AsSsize_t(o) __Pyx_PyInt_AsInt(o) #define PyNumber_Index(o) ((PyNumber_Check(o) && !PyFloat_Check(o)) ? 
PyNumber_Int(o) : \ (PyErr_Format(PyExc_TypeError, \ "expected index value, got %.200s", Py_TYPE(o)->tp_name), \ (PyObject*)0)) #define __Pyx_PyIndex_Check(o) (PyNumber_Check(o) && !PyFloat_Check(o) && \ !PyComplex_Check(o)) #define PyIndex_Check __Pyx_PyIndex_Check #define PyErr_WarnEx(category, message, stacklevel) PyErr_Warn(category, message) #define __PYX_BUILD_PY_SSIZE_T "i" #else #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #define __Pyx_PyIndex_Check PyIndex_Check #endif #if PY_VERSION_HEX < 0x02060000 #define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt) #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type) #define Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size) #define PyVarObject_HEAD_INIT(type, size) \ PyObject_HEAD_INIT(type) size, #define PyType_Modified(t) typedef struct { void *buf; PyObject *obj; Py_ssize_t len; Py_ssize_t itemsize; int readonly; int ndim; char *format; Py_ssize_t *shape; Py_ssize_t *strides; Py_ssize_t *suboffsets; void *internal; } Py_buffer; #define PyBUF_SIMPLE 0 #define PyBUF_WRITABLE 0x0001 #define PyBUF_FORMAT 0x0004 #define PyBUF_ND 0x0008 #define PyBUF_STRIDES (0x0010 | PyBUF_ND) #define PyBUF_C_CONTIGUOUS (0x0020 | PyBUF_STRIDES) #define PyBUF_F_CONTIGUOUS (0x0040 | PyBUF_STRIDES) #define PyBUF_ANY_CONTIGUOUS (0x0080 | PyBUF_STRIDES) #define PyBUF_INDIRECT (0x0100 | PyBUF_STRIDES) #define PyBUF_RECORDS (PyBUF_STRIDES | PyBUF_FORMAT | PyBUF_WRITABLE) #define PyBUF_FULL (PyBUF_INDIRECT | PyBUF_FORMAT | PyBUF_WRITABLE) typedef int (*getbufferproc)(PyObject *, Py_buffer *, int); typedef void (*releasebufferproc)(PyObject *, Py_buffer *); #endif #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \ PyCode_New(a, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #endif #if PY_MAJOR_VERSION < 3 && PY_MINOR_VERSION < 6 #define PyUnicode_FromString(s) PyUnicode_Decode(s, strlen(s), "UTF-8", "strict") #endif #if PY_MAJOR_VERSION >= 3 #define Py_TPFLAGS_CHECKTYPES 0 #define Py_TPFLAGS_HAVE_INDEX 0 #endif #if (PY_VERSION_HEX < 0x02060000) || (PY_MAJOR_VERSION >= 3) #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ? 
\ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #else #define CYTHON_PEP393_ENABLED 0 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_READ(k, d, i) ((k=k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #endif #if PY_VERSION_HEX < 0x02060000 #define PyBytesObject PyStringObject #define PyBytes_Type PyString_Type #define PyBytes_Check PyString_Check #define PyBytes_CheckExact PyString_CheckExact #define PyBytes_FromString PyString_FromString #define PyBytes_FromStringAndSize PyString_FromStringAndSize #define PyBytes_FromFormat PyString_FromFormat #define PyBytes_DecodeEscape PyString_DecodeEscape #define PyBytes_AsString PyString_AsString #define PyBytes_AsStringAndSize PyString_AsStringAndSize #define PyBytes_Size PyString_Size #define PyBytes_AS_STRING PyString_AS_STRING #define PyBytes_GET_SIZE PyString_GET_SIZE #define PyBytes_Repr PyString_Repr #define PyBytes_Concat PyString_Concat #define PyBytes_ConcatAndDel PyString_ConcatAndDel #endif #if PY_VERSION_HEX < 0x02060000 #define PySet_Check(obj) PyObject_TypeCheck(obj, &PySet_Type) #define PyFrozenSet_Check(obj) PyObject_TypeCheck(obj, &PyFrozenSet_Type) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_VERSION_HEX < 0x03020000 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if (PY_MAJOR_VERSION < 3) || (PY_VERSION_HEX >= 0x03010300) #define __Pyx_PySequence_GetSlice(obj, a, b) PySequence_GetSlice(obj, a, b) #define __Pyx_PySequence_SetSlice(obj, a, b, value) PySequence_SetSlice(obj, a, b, value) #define __Pyx_PySequence_DelSlice(obj, a, b) PySequence_DelSlice(obj, a, b) #else #define __Pyx_PySequence_GetSlice(obj, a, b) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), (PyObject*)0) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_GetSlice(obj, a, b)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object is unsliceable", (obj)->ob_type->tp_name), (PyObject*)0))) #define __Pyx_PySequence_SetSlice(obj, a, b, value) (unlikely(!(obj)) ? 
\ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_SetSlice(obj, a, b, value)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice assignment", (obj)->ob_type->tp_name), -1))) #define __Pyx_PySequence_DelSlice(obj, a, b) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_DelSlice(obj, a, b)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice deletion", (obj)->ob_type->tp_name), -1))) #endif #if PY_MAJOR_VERSION >= 3 #define PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func)) #endif #if PY_VERSION_HEX < 0x02050000 #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),((char *)(n))) #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),((char *)(n)),(a)) #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),((char *)(n))) #else #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),(n)) #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),(n),(a)) #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),(n)) #endif #if PY_VERSION_HEX < 0x02050000 #define __Pyx_NAMESTR(n) ((char *)(n)) #define __Pyx_DOCSTR(n) ((char *)(n)) #else #define __Pyx_NAMESTR(n) (n) #define __Pyx_DOCSTR(n) (n) #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include #define __PYX_HAVE__nipy__algorithms__statistics__intvol #define __PYX_HAVE_API__nipy__algorithms__statistics__intvol #include "stdio.h" #include "stdlib.h" #include "numpy/arrayobject.h" #include "numpy/ufuncobject.h" #include "math.h" #ifdef _OPENMP #include #endif /* _OPENMP */ #ifdef PYREX_WITHOUT_ASSERTIONS #define CYTHON_WITHOUT_ASSERTIONS #endif /* inline attribute */ #ifndef CYTHON_INLINE #if defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif /* unused attribute */ #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif typedef struct {PyObject **p; char *s; const long n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; /*proto*/ /* Type Conversion Predeclarations */ #define __Pyx_PyBytes_FromUString(s) PyBytes_FromString((char*)s) #define __Pyx_PyBytes_AsUString(s) ((unsigned char*) PyBytes_AsString(s)) #define __Pyx_Owned_Py_None(b) (Py_INCREF(Py_None), Py_None) #define __Pyx_PyBool_FromLong(b) ((b) ? 
(Py_INCREF(Py_True), Py_True) : (Py_INCREF(Py_False), Py_False)) static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x); static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject*); #if CYTHON_COMPILING_IN_CPYTHON #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #ifdef __GNUC__ /* Test for GCC > 2.95 */ #if __GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* __GNUC__ > 2 ... */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ > 2 ... */ #else /* __GNUC__ */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static PyObject *__pyx_m; static PyObject *__pyx_b; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= __FILE__; static const char *__pyx_filename; #if !defined(CYTHON_CCOMPLEX) #if defined(__cplusplus) #define CYTHON_CCOMPLEX 1 #elif defined(_Complex_I) #define CYTHON_CCOMPLEX 1 #else #define CYTHON_CCOMPLEX 0 #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus #include #else #include #endif #endif #if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__) #undef _Complex_I #define _Complex_I 1.0fj #endif static const char *__pyx_f[] = { "intvol.pyx", "numpy.pxd", "type.pxd", }; #define IS_UNSIGNED(type) (((type) -1) > 0) struct __Pyx_StructField_; #define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0) typedef struct { const char* name; /* for error messages only */ struct __Pyx_StructField_* fields; size_t size; /* sizeof(type) */ size_t arraysize[8]; /* length of array in each dimension */ int ndim; char typegroup; /* _R_eal, _C_omplex, Signed _I_nt, _U_nsigned int, _S_truct, _P_ointer, _O_bject, c_H_ar */ char is_unsigned; int flags; } __Pyx_TypeInfo; typedef struct __Pyx_StructField_ { __Pyx_TypeInfo* type; const char* name; size_t offset; } __Pyx_StructField; typedef struct { __Pyx_StructField* field; size_t parent_offset; } __Pyx_BufFmt_StackElem; typedef struct { __Pyx_StructField root; __Pyx_BufFmt_StackElem* head; size_t fmt_offset; size_t new_count, enc_count; size_t struct_alignment; int is_complex; char enc_type; char new_packmode; char enc_packmode; char is_valid_array; } __Pyx_BufFmt_Context; /* "numpy.pxd":723 * # in Cython to enable them only on the right systems. 
* * ctypedef npy_int8 int8_t # <<<<<<<<<<<<<< * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t */ typedef npy_int8 __pyx_t_5numpy_int8_t; /* "numpy.pxd":724 * * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t # <<<<<<<<<<<<<< * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t */ typedef npy_int16 __pyx_t_5numpy_int16_t; /* "numpy.pxd":725 * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t # <<<<<<<<<<<<<< * ctypedef npy_int64 int64_t * #ctypedef npy_int96 int96_t */ typedef npy_int32 __pyx_t_5numpy_int32_t; /* "numpy.pxd":726 * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t # <<<<<<<<<<<<<< * #ctypedef npy_int96 int96_t * #ctypedef npy_int128 int128_t */ typedef npy_int64 __pyx_t_5numpy_int64_t; /* "numpy.pxd":730 * #ctypedef npy_int128 int128_t * * ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<< * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t */ typedef npy_uint8 __pyx_t_5numpy_uint8_t; /* "numpy.pxd":731 * * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<< * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t */ typedef npy_uint16 __pyx_t_5numpy_uint16_t; /* "numpy.pxd":732 * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<< * ctypedef npy_uint64 uint64_t * #ctypedef npy_uint96 uint96_t */ typedef npy_uint32 __pyx_t_5numpy_uint32_t; /* "numpy.pxd":733 * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<< * #ctypedef npy_uint96 uint96_t * #ctypedef npy_uint128 uint128_t */ typedef npy_uint64 __pyx_t_5numpy_uint64_t; /* "numpy.pxd":737 * #ctypedef npy_uint128 uint128_t * * ctypedef npy_float32 float32_t # <<<<<<<<<<<<<< * ctypedef npy_float64 float64_t * #ctypedef npy_float80 float80_t */ typedef npy_float32 __pyx_t_5numpy_float32_t; /* "numpy.pxd":738 * * ctypedef npy_float32 float32_t * ctypedef npy_float64 float64_t # <<<<<<<<<<<<<< * #ctypedef npy_float80 float80_t * #ctypedef npy_float128 float128_t */ typedef npy_float64 __pyx_t_5numpy_float64_t; /* "numpy.pxd":747 * # The int types are mapped a bit surprising -- * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t # <<<<<<<<<<<<<< * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t */ typedef npy_long __pyx_t_5numpy_int_t; /* "numpy.pxd":748 * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t * ctypedef npy_longlong long_t # <<<<<<<<<<<<<< * ctypedef npy_longlong longlong_t * */ typedef npy_longlong __pyx_t_5numpy_long_t; /* "numpy.pxd":749 * ctypedef npy_long int_t * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<< * * ctypedef npy_ulong uint_t */ typedef npy_longlong __pyx_t_5numpy_longlong_t; /* "numpy.pxd":751 * ctypedef npy_longlong longlong_t * * ctypedef npy_ulong uint_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t */ typedef npy_ulong __pyx_t_5numpy_uint_t; /* "numpy.pxd":752 * * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulonglong_t * */ typedef npy_ulonglong __pyx_t_5numpy_ulong_t; /* "numpy.pxd":753 * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<< * * ctypedef npy_intp intp_t */ typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t; /* "numpy.pxd":755 * ctypedef npy_ulonglong ulonglong_t * * ctypedef npy_intp intp_t # 
<<<<<<<<<<<<<< * ctypedef npy_uintp uintp_t * */ typedef npy_intp __pyx_t_5numpy_intp_t; /* "numpy.pxd":756 * * ctypedef npy_intp intp_t * ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<< * * ctypedef npy_double float_t */ typedef npy_uintp __pyx_t_5numpy_uintp_t; /* "numpy.pxd":758 * ctypedef npy_uintp uintp_t * * ctypedef npy_double float_t # <<<<<<<<<<<<<< * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t */ typedef npy_double __pyx_t_5numpy_float_t; /* "numpy.pxd":759 * * ctypedef npy_double float_t * ctypedef npy_double double_t # <<<<<<<<<<<<<< * ctypedef npy_longdouble longdouble_t * */ typedef npy_double __pyx_t_5numpy_double_t; /* "numpy.pxd":760 * ctypedef npy_double float_t * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cfloat cfloat_t */ typedef npy_longdouble __pyx_t_5numpy_longdouble_t; #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< float > __pyx_t_float_complex; #else typedef float _Complex __pyx_t_float_complex; #endif #else typedef struct { float real, imag; } __pyx_t_float_complex; #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< double > __pyx_t_double_complex; #else typedef double _Complex __pyx_t_double_complex; #endif #else typedef struct { double real, imag; } __pyx_t_double_complex; #endif /*--- Type declarations ---*/ /* "numpy.pxd":762 * ctypedef npy_longdouble longdouble_t * * ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<< * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t */ typedef npy_cfloat __pyx_t_5numpy_cfloat_t; /* "numpy.pxd":763 * * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<< * ctypedef npy_clongdouble clongdouble_t * */ typedef npy_cdouble __pyx_t_5numpy_cdouble_t; /* "numpy.pxd":764 * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cdouble complex_t */ typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t; /* "numpy.pxd":766 * ctypedef npy_clongdouble clongdouble_t * * ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew1(a): */ typedef npy_cdouble __pyx_t_5numpy_complex_t; #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); /*proto*/ #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define __Pyx_RefNannySetupContext(name, acquire_gil) \ if (acquire_gil) { \ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \ PyGILState_Release(__pyx_gilstate_save); \ } else { \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil) \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext() \ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) 
__Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif /* CYTHON_REFNANNY */ #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name); /*proto*/ static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /*proto*/ static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /*proto*/ static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[], \ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, \ const char* function_name); /*proto*/ #if PY_VERSION_HEX < 0x02050000 #ifndef PyAnySet_CheckExact #define PyAnySet_CheckExact(ob) \ ((ob)->ob_type == &PySet_Type || \ (ob)->ob_type == &PyFrozenSet_Type) #define PySet_New(iterable) \ PyObject_CallFunctionObjArgs((PyObject *)&PySet_Type, (iterable), NULL) #define Pyx_PyFrozenSet_New(iterable) \ PyObject_CallFunctionObjArgs((PyObject *)&PyFrozenSet_Type, (iterable), NULL) #define PySet_Size(anyset) \ PyObject_Size((anyset)) #define PySet_Contains(anyset, key) \ PySequence_Contains((anyset), (key)) #define PySet_Pop(set) \ PyObject_CallMethod(set, (char *)"pop", NULL) static CYTHON_INLINE int PySet_Clear(PyObject *set) { PyObject *ret = PyObject_CallMethod(set, (char *)"clear", NULL); if (!ret) return -1; Py_DECREF(ret); return 0; } static CYTHON_INLINE int PySet_Discard(PyObject *set, PyObject *key) { PyObject *ret = PyObject_CallMethod(set, (char *)"discard", (char *)"O", key); if (!ret) return -1; Py_DECREF(ret); return 0; } static CYTHON_INLINE int PySet_Add(PyObject *set, PyObject *key) { PyObject *ret = PyObject_CallMethod(set, (char *)"add", (char *)"O", key); if (!ret) return -1; Py_DECREF(ret); return 0; } #endif /* PyAnySet_CheckExact (<= Py2.4) */ #endif /* < Py2.5 */ static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb); /*proto*/ static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb); /*proto*/ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /*proto*/ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /*proto*/ static CYTHON_INLINE int __Pyx_GetBufferAndValidate(Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack); static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info); static void 
__Pyx_RaiseBufferFallbackError(void); /*proto*/ static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { PyObject *r; if (!j) return NULL; r = PyObject_GetItem(o, j); Py_DECREF(j); return r; } #define __Pyx_GetItemInt_List(o, i, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? \ __Pyx_GetItemInt_List_Fast(o, i) : \ __Pyx_GetItemInt_Generic(o, to_py_func(i))) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i) { #if CYTHON_COMPILING_IN_CPYTHON if (likely((0 <= i) & (i < PyList_GET_SIZE(o)))) { PyObject *r = PyList_GET_ITEM(o, i); Py_INCREF(r); return r; } else if ((-PyList_GET_SIZE(o) <= i) & (i < 0)) { PyObject *r = PyList_GET_ITEM(o, PyList_GET_SIZE(o) + i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } #define __Pyx_GetItemInt_Tuple(o, i, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? \ __Pyx_GetItemInt_Tuple_Fast(o, i) : \ __Pyx_GetItemInt_Generic(o, to_py_func(i))) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i) { #if CYTHON_COMPILING_IN_CPYTHON if (likely((0 <= i) & (i < PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, i); Py_INCREF(r); return r; } else if ((-PyTuple_GET_SIZE(o) <= i) & (i < 0)) { PyObject *r = PyTuple_GET_ITEM(o, PyTuple_GET_SIZE(o) + i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } #define __Pyx_GetItemInt(o, i, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? \ __Pyx_GetItemInt_Fast(o, i) : \ __Pyx_GetItemInt_Generic(o, to_py_func(i))) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i) { #if CYTHON_COMPILING_IN_CPYTHON if (PyList_CheckExact(o)) { Py_ssize_t n = (likely(i >= 0)) ? i : i + PyList_GET_SIZE(o); if (likely((n >= 0) & (n < PyList_GET_SIZE(o)))) { PyObject *r = PyList_GET_ITEM(o, n); Py_INCREF(r); return r; } } else if (PyTuple_CheckExact(o)) { Py_ssize_t n = (likely(i >= 0)) ? 
i : i + PyTuple_GET_SIZE(o); if (likely((n >= 0) & (n < PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, n); Py_INCREF(r); return r; } } else { /* inlined PySequence_GetItem() */ PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; if (likely(m && m->sq_item)) { if (unlikely(i < 0) && likely(m->sq_length)) { Py_ssize_t l = m->sq_length(o); if (unlikely(l < 0)) return NULL; i += l; } return m->sq_item(o, i); } } #else if (PySequence_Check(o)) { return PySequence_GetItem(o, i); } #endif return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); } static void __Pyx_RaiseBufferIndexError(int axis); /*proto*/ #define __Pyx_BufPtrStrided1d(type, buf, i0, s0) (type)((char*)buf + i0 * s0) #define __Pyx_BufPtrStrided2d(type, buf, i0, s0, i1, s1) (type)((char*)buf + i0 * s0 + i1 * s1) #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); if (likely(L->allocated > len)) { Py_INCREF(x); PyList_SET_ITEM(list, len, x); Py_SIZE(list) = len+1; return 0; } return PyList_Append(list, x); } #else #define __Pyx_PyList_Append(L,x) PyList_Append(L,x) #endif static CYTHON_INLINE int __Pyx_mod_int(int, int); /* proto */ static int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, const char *name, int exact); /*proto*/ static CYTHON_INLINE long __Pyx_mod_long(long, long); /* proto */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); static CYTHON_INLINE int __Pyx_IterFinish(void); /*proto*/ static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected); /*proto*/ typedef struct { Py_ssize_t shape, strides, suboffsets; } __Pyx_Buf_DimInfo; typedef struct { size_t refcount; Py_buffer pybuffer; } __Pyx_Buffer; typedef struct { __Pyx_Buffer *rcbuffer; char *data; __Pyx_Buf_DimInfo diminfo[8]; } __Pyx_LocalBuf_ND; #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); static void __Pyx_ReleaseBuffer(Py_buffer *view); #else #define __Pyx_GetBuffer PyObject_GetBuffer #define __Pyx_ReleaseBuffer PyBuffer_Release #endif static Py_ssize_t __Pyx_zeros[] = {0, 0, 0, 0, 0, 0, 0, 0}; static Py_ssize_t __Pyx_minusones[] = {-1, -1, -1, -1, -1, -1, -1, -1}; static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level); /*proto*/ static CYTHON_INLINE void __Pyx_RaiseImportError(PyObject *name); static CYTHON_INLINE Py_intptr_t __Pyx_PyInt_from_py_Py_intptr_t(PyObject *); static CYTHON_INLINE PyObject *__Pyx_PyInt_to_py_Py_intptr_t(Py_intptr_t); #if CYTHON_CCOMPLEX #ifdef __cplusplus #define __Pyx_CREAL(z) ((z).real()) #define __Pyx_CIMAG(z) ((z).imag()) #else #define __Pyx_CREAL(z) (__real__(z)) #define __Pyx_CIMAG(z) (__imag__(z)) #endif #else #define __Pyx_CREAL(z) ((z).real) #define __Pyx_CIMAG(z) ((z).imag) #endif #if defined(_WIN32) && defined(__cplusplus) && CYTHON_CCOMPLEX #define __Pyx_SET_CREAL(z,x) ((z).real(x)) #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) #else #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) #endif static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float); #if CYTHON_CCOMPLEX #define __Pyx_c_eqf(a, b) ((a)==(b)) #define __Pyx_c_sumf(a, b) ((a)+(b)) #define __Pyx_c_difff(a, b) ((a)-(b)) #define __Pyx_c_prodf(a, b) ((a)*(b)) #define 
__Pyx_c_quotf(a, b) ((a)/(b)) #define __Pyx_c_negf(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zerof(z) ((z)==(float)0) #define __Pyx_c_conjf(z) (::std::conj(z)) #if 1 #define __Pyx_c_absf(z) (::std::abs(z)) #define __Pyx_c_powf(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zerof(z) ((z)==0) #define __Pyx_c_conjf(z) (conjf(z)) #if 1 #define __Pyx_c_absf(z) (cabsf(z)) #define __Pyx_c_powf(a, b) (cpowf(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex); static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex); #if 1 static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex, __pyx_t_float_complex); #endif #endif static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double); #if CYTHON_CCOMPLEX #define __Pyx_c_eq(a, b) ((a)==(b)) #define __Pyx_c_sum(a, b) ((a)+(b)) #define __Pyx_c_diff(a, b) ((a)-(b)) #define __Pyx_c_prod(a, b) ((a)*(b)) #define __Pyx_c_quot(a, b) ((a)/(b)) #define __Pyx_c_neg(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zero(z) ((z)==(double)0) #define __Pyx_c_conj(z) (::std::conj(z)) #if 1 #define __Pyx_c_abs(z) (::std::abs(z)) #define __Pyx_c_pow(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zero(z) ((z)==0) #define __Pyx_c_conj(z) (conj(z)) #if 1 #define __Pyx_c_abs(z) (cabs(z)) #define __Pyx_c_pow(a, b) (cpow(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex); static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex); #if 1 static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex, __pyx_t_double_complex); #endif #endif static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject *); static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject *); static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject *); static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject *); static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject *); static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject *); static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject *); static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject *); static CYTHON_INLINE 
signed int __Pyx_PyInt_AsSignedInt(PyObject *); static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject *); static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject *); static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject *); static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject *); static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject *); static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject *); static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject *); static int __Pyx_check_binary_version(void); #if !defined(__Pyx_PyIdentifier_FromString) #if PY_MAJOR_VERSION < 3 #define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s) #else #define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s) #endif #endif static PyObject *__Pyx_ImportModule(const char *name); /*proto*/ static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict); /*proto*/ typedef struct { int code_line; PyCodeObject* code_object; } __Pyx_CodeObjectCacheEntry; struct __Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); /*proto*/ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/ /* Module declarations from 'cython' */ /* Module declarations from 'cpython.buffer' */ /* Module declarations from 'cpython.ref' */ /* Module declarations from 'libc.stdio' */ /* Module declarations from 'cpython.object' */ /* Module declarations from '__builtin__' */ /* Module declarations from 'cpython.type' */ static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0; /* Module declarations from 'libc.stdlib' */ /* Module declarations from 'numpy' */ /* Module declarations from 'numpy' */ static PyTypeObject *__pyx_ptype_5numpy_dtype = 0; static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0; static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0; static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0; static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0; static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/ /* Module declarations from 'nipy.algorithms.statistics.intvol' */ static double __pyx_v_4nipy_10algorithms_10statistics_6intvol_PI; static double __pyx_f_4nipy_10algorithms_10statistics_6intvol_mu3_tet(double, double, double, double, double, double, double, double, double, double, int __pyx_skip_dispatch); /*proto*/ static double __pyx_f_4nipy_10algorithms_10statistics_6intvol_mu2_tet(double, double, double, double, double, double, double, double, double, double, int __pyx_skip_dispatch); /*proto*/ static double __pyx_f_4nipy_10algorithms_10statistics_6intvol_mu1_tet(double, double, double, double, double, double, double, double, double, double, int __pyx_skip_dispatch); /*proto*/ static CYTHON_INLINE double __pyx_f_4nipy_10algorithms_10statistics_6intvol_limited_acos(double); /*proto*/ static double __pyx_f_4nipy_10algorithms_10statistics_6intvol__mu1_tetface(double, double, double, double, double, double, double, double, double, double, int __pyx_skip_dispatch); /*proto*/ static double 
__pyx_f_4nipy_10algorithms_10statistics_6intvol_mu2_tri(double, double, double, double, double, double, int __pyx_skip_dispatch); /*proto*/ static double __pyx_f_4nipy_10algorithms_10statistics_6intvol_mu1_tri(double, double, double, double, double, double, int __pyx_skip_dispatch); /*proto*/ static double __pyx_f_4nipy_10algorithms_10statistics_6intvol_mu1_edge(double, double, double, int __pyx_skip_dispatch); /*proto*/ static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t = { "intp_t", NULL, sizeof(__pyx_t_5numpy_intp_t), { 0 }, 0, IS_UNSIGNED(__pyx_t_5numpy_intp_t) ? 'U' : 'I', IS_UNSIGNED(__pyx_t_5numpy_intp_t), 0 }; static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_5numpy_float_t = { "float_t", NULL, sizeof(__pyx_t_5numpy_float_t), { 0 }, 0, 'R', 0, 0 }; #define __Pyx_MODULE_NAME "nipy.algorithms.statistics.intvol" int __pyx_module_is_main_nipy__algorithms__statistics__intvol = 0; /* Implementation of 'nipy.algorithms.statistics.intvol' */ static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_range; static PyObject *__pyx_builtin_sorted; static PyObject *__pyx_builtin_RuntimeError; static PyObject *__pyx_pf_4nipy_10algorithms_10statistics_6intvol_mu3_tet(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_D00, double __pyx_v_D01, double __pyx_v_D02, double __pyx_v_D03, double __pyx_v_D11, double __pyx_v_D12, double __pyx_v_D13, double __pyx_v_D22, double __pyx_v_D23, double __pyx_v_D33); /* proto */ static PyObject *__pyx_pf_4nipy_10algorithms_10statistics_6intvol_2mu2_tet(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_D00, double __pyx_v_D01, double __pyx_v_D02, double __pyx_v_D03, double __pyx_v_D11, double __pyx_v_D12, double __pyx_v_D13, double __pyx_v_D22, double __pyx_v_D23, double __pyx_v_D33); /* proto */ static PyObject *__pyx_pf_4nipy_10algorithms_10statistics_6intvol_4mu1_tet(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_D00, double __pyx_v_D01, double __pyx_v_D02, double __pyx_v_D03, double __pyx_v_D11, double __pyx_v_D12, double __pyx_v_D13, double __pyx_v_D22, double __pyx_v_D23, double __pyx_v_D33); /* proto */ static PyObject *__pyx_pf_4nipy_10algorithms_10statistics_6intvol_6_mu1_tetface(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_Ds0s0, double __pyx_v_Ds0s1, double __pyx_v_Ds1s1, double __pyx_v_Ds0t0, double __pyx_v_Ds0t1, double __pyx_v_Ds1t0, double __pyx_v_Ds1t1, double __pyx_v_Dt0t0, double __pyx_v_Dt0t1, double __pyx_v_Dt1t1); /* proto */ static PyObject *__pyx_pf_4nipy_10algorithms_10statistics_6intvol_8mu2_tri(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_D00, double __pyx_v_D01, double __pyx_v_D02, double __pyx_v_D11, double __pyx_v_D12, double __pyx_v_D22); /* proto */ static PyObject *__pyx_pf_4nipy_10algorithms_10statistics_6intvol_10mu1_tri(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_D00, double __pyx_v_D01, double __pyx_v_D02, double __pyx_v_D11, double __pyx_v_D12, double __pyx_v_D22); /* proto */ static PyObject *__pyx_pf_4nipy_10algorithms_10statistics_6intvol_12mu1_edge(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_D00, double __pyx_v_D01, double __pyx_v_D11); /* proto */ static PyObject *__pyx_pf_4nipy_10algorithms_10statistics_6intvol_14EC3d(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_mask); /* proto */ static PyObject *__pyx_pf_4nipy_10algorithms_10statistics_6intvol_16Lips3d(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_coords, PyObject *__pyx_v_mask); /* proto */ static PyObject *__pyx_pf_4nipy_10algorithms_10statistics_6intvol_18_convert_stride3(CYTHON_UNUSED PyObject 
*__pyx_self, PyObject *__pyx_v_v, PyObject *__pyx_v_stride1, PyObject *__pyx_v_stride2); /* proto */ static PyObject *__pyx_pf_4nipy_10algorithms_10statistics_6intvol_20_convert_stride2(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_v, PyObject *__pyx_v_stride1, PyObject *__pyx_v_stride2); /* proto */ static PyObject *__pyx_pf_4nipy_10algorithms_10statistics_6intvol_22_convert_stride1(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_v, PyObject *__pyx_v_stride1, PyObject *__pyx_v_stride2); /* proto */ static PyObject *__pyx_pf_4nipy_10algorithms_10statistics_6intvol_24Lips2d(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_coords, PyObject *__pyx_v_mask); /* proto */ static PyObject *__pyx_pf_4nipy_10algorithms_10statistics_6intvol_26EC2d(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_mask); /* proto */ static PyObject *__pyx_pf_4nipy_10algorithms_10statistics_6intvol_28Lips1d(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_coords, PyArrayObject *__pyx_v_mask); /* proto */ static PyObject *__pyx_pf_4nipy_10algorithms_10statistics_6intvol_30EC1d(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_mask); /* proto */ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */ static char __pyx_k_1[] = "mask should be filled with 0/1 values, but be of type np.int"; static char __pyx_k_8[] = "cube_with_strides_center"; static char __pyx_k_17[] = "shape of mask does not match coordinates"; static char __pyx_k_73[] = "ndarray is not C contiguous"; static char __pyx_k_75[] = "ndarray is not Fortran contiguous"; static char __pyx_k_77[] = "Non-native byte order not supported"; static char __pyx_k_79[] = "unknown dtype code in numpy.pxd (%d)"; static char __pyx_k_80[] = "Format string allocated too short, see comment in numpy.pxd"; static char __pyx_k_83[] = "Format string allocated too short."; static char __pyx_k_85[] = "\nThe estimators for the intrinsic volumes appearing in this module\nwere partially supported by NSF grant DMS-0405970.\n\nTaylor, J.E. & Worsley, K.J. (2007). 
\"Detecting sparse signal in random fields,\n with an application to brain mapping.\"\n Journal of the American Statistical Association, 102(479):913-928.\n\n"; static char __pyx_k_86[] = "scipy.sparse"; static char __pyx_k_87[] = "nipy.utils.arrays"; static char __pyx_k_90[] = "/Users/mb312/dev_trees/nipy/nipy/algorithms/statistics/intvol.pyx"; static char __pyx_k_91[] = "nipy.algorithms.statistics.intvol"; static char __pyx_k__B[] = "B"; static char __pyx_k__D[] = "D"; static char __pyx_k__H[] = "H"; static char __pyx_k__I[] = "I"; static char __pyx_k__L[] = "L"; static char __pyx_k__O[] = "O"; static char __pyx_k__Q[] = "Q"; static char __pyx_k__b[] = "b"; static char __pyx_k__c[] = "c"; static char __pyx_k__d[] = "d"; static char __pyx_k__f[] = "f"; static char __pyx_k__g[] = "g"; static char __pyx_k__h[] = "h"; static char __pyx_k__i[] = "i"; static char __pyx_k__j[] = "j"; static char __pyx_k__k[] = "k"; static char __pyx_k__l[] = "l"; static char __pyx_k__m[] = "m"; static char __pyx_k__q[] = "q"; static char __pyx_k__r[] = "r"; static char __pyx_k__s[] = "s"; static char __pyx_k__v[] = "v"; static char __pyx_k__Zd[] = "Zd"; static char __pyx_k__Zf[] = "Zf"; static char __pyx_k__Zg[] = "Zg"; static char __pyx_k__d2[] = "d2"; static char __pyx_k__d3[] = "d3"; static char __pyx_k__d4[] = "d4"; static char __pyx_k__l0[] = "l0"; static char __pyx_k__l1[] = "l1"; static char __pyx_k__l2[] = "l2"; static char __pyx_k__l3[] = "l3"; static char __pyx_k__m2[] = "m2"; static char __pyx_k__m3[] = "m3"; static char __pyx_k__m4[] = "m4"; static char __pyx_k__mr[] = "mr"; static char __pyx_k__ms[] = "ms"; static char __pyx_k__np[] = "np"; static char __pyx_k__pi[] = "pi"; static char __pyx_k__rr[] = "rr"; static char __pyx_k__s0[] = "s0"; static char __pyx_k__s1[] = "s1"; static char __pyx_k__s2[] = "s2"; static char __pyx_k__ss[] = "ss"; static char __pyx_k__v0[] = "v0"; static char __pyx_k__v1[] = "v1"; static char __pyx_k__v2[] = "v2"; static char __pyx_k__v3[] = "v3"; static char __pyx_k__w0[] = "w0"; static char __pyx_k__w1[] = "w1"; static char __pyx_k__w2[] = "w2"; static char __pyx_k__w3[] = "w3"; static char __pyx_k__D00[] = "D00"; static char __pyx_k__D01[] = "D01"; static char __pyx_k__D02[] = "D02"; static char __pyx_k__D03[] = "D03"; static char __pyx_k__D11[] = "D11"; static char __pyx_k__D12[] = "D12"; static char __pyx_k__D13[] = "D13"; static char __pyx_k__D22[] = "D22"; static char __pyx_k__D23[] = "D23"; static char __pyx_k__D33[] = "D33"; static char __pyx_k__ds2[] = "ds2"; static char __pyx_k__ds3[] = "ds3"; static char __pyx_k__ds4[] = "ds4"; static char __pyx_k__int[] = "int"; static char __pyx_k__res[] = "res"; static char __pyx_k__ss0[] = "ss0"; static char __pyx_k__ss1[] = "ss1"; static char __pyx_k__ss2[] = "ss2"; static char __pyx_k__sum[] = "sum"; static char __pyx_k__EC1d[] = "EC1d"; static char __pyx_k__EC2d[] = "EC2d"; static char __pyx_k__EC3d[] = "EC3d"; static char __pyx_k__bool[] = "bool"; static char __pyx_k__intp[] = "intp"; static char __pyx_k__mask[] = "mask"; static char __pyx_k__ndim[] = "ndim"; static char __pyx_k__npix[] = "npix"; static char __pyx_k__nvox[] = "nvox"; static char __pyx_k__size[] = "size"; static char __pyx_k__ss0d[] = "ss0d"; static char __pyx_k__ss1d[] = "ss1d"; static char __pyx_k__ss2d[] = "ss2d"; static char __pyx_k__Ds0s0[] = "Ds0s0"; static char __pyx_k__Ds0s1[] = "Ds0s1"; static char __pyx_k__Ds0t0[] = "Ds0t0"; static char __pyx_k__Ds0t1[] = "Ds0t1"; static char __pyx_k__Ds1s1[] = "Ds1s1"; static char __pyx_k__Ds1t0[] = "Ds1t0"; 
static char __pyx_k__Ds1t1[] = "Ds1t1"; static char __pyx_k__Dt0t0[] = "Dt0t0"; static char __pyx_k__Dt0t1[] = "Dt0t1"; static char __pyx_k__Dt1t1[] = "Dt1t1"; static char __pyx_k__array[] = "array"; static char __pyx_k__dtype[] = "dtype"; static char __pyx_k__fmask[] = "fmask"; static char __pyx_k__index[] = "index"; static char __pyx_k__numpy[] = "numpy"; static char __pyx_k__pmask[] = "pmask"; static char __pyx_k__range[] = "range"; static char __pyx_k__shape[] = "shape"; static char __pyx_k__union[] = "union"; static char __pyx_k__utils[] = "utils"; static char __pyx_k__value[] = "value"; static char __pyx_k__verts[] = "verts"; static char __pyx_k__zeros[] = "zeros"; static char __pyx_k__Lips1d[] = "Lips1d"; static char __pyx_k__Lips2d[] = "Lips2d"; static char __pyx_k__Lips3d[] = "Lips3d"; static char __pyx_k__coords[] = "coords"; static char __pyx_k__fpmask[] = "fpmask"; static char __pyx_k__hstack[] = "hstack"; static char __pyx_k__mask_c[] = "mask_c"; static char __pyx_k__pindex[] = "pindex"; static char __pyx_k__sorted[] = "sorted"; static char __pyx_k__unique[] = "unique"; static char __pyx_k__fcoords[] = "fcoords"; static char __pyx_k__reshape[] = "reshape"; static char __pyx_k__squeeze[] = "squeeze"; static char __pyx_k__stride1[] = "stride1"; static char __pyx_k__stride2[] = "stride2"; static char __pyx_k__strides[] = "strides"; static char __pyx_k____main__[] = "__main__"; static char __pyx_k____test__[] = "__test__"; static char __pyx_k__coords_c[] = "coords_c"; static char __pyx_k__dstrides[] = "dstrides"; static char __pyx_k__issubset[] = "issubset"; static char __pyx_k__cvertices[] = "cvertices"; static char __pyx_k__ValueError[] = "ValueError"; static char __pyx_k__difference[] = "difference"; static char __pyx_k__dok_matrix[] = "dok_matrix"; static char __pyx_k__pmask_shape[] = "pmask_shape"; static char __pyx_k__RuntimeError[] = "RuntimeError"; static char __pyx_k__strides_from[] = "strides_from"; static char __pyx_k__join_complexes[] = "join_complexes"; static char __pyx_k___convert_stride1[] = "_convert_stride1"; static char __pyx_k___convert_stride2[] = "_convert_stride2"; static char __pyx_k___convert_stride3[] = "_convert_stride3"; static PyObject *__pyx_kp_s_1; static PyObject *__pyx_kp_s_17; static PyObject *__pyx_kp_u_73; static PyObject *__pyx_kp_u_75; static PyObject *__pyx_kp_u_77; static PyObject *__pyx_kp_u_79; static PyObject *__pyx_n_s_8; static PyObject *__pyx_kp_u_80; static PyObject *__pyx_kp_u_83; static PyObject *__pyx_n_s_86; static PyObject *__pyx_n_s_87; static PyObject *__pyx_kp_s_90; static PyObject *__pyx_n_s_91; static PyObject *__pyx_n_s__D; static PyObject *__pyx_n_s__D00; static PyObject *__pyx_n_s__D01; static PyObject *__pyx_n_s__D02; static PyObject *__pyx_n_s__D03; static PyObject *__pyx_n_s__D11; static PyObject *__pyx_n_s__D12; static PyObject *__pyx_n_s__D13; static PyObject *__pyx_n_s__D22; static PyObject *__pyx_n_s__D23; static PyObject *__pyx_n_s__D33; static PyObject *__pyx_n_s__Ds0s0; static PyObject *__pyx_n_s__Ds0s1; static PyObject *__pyx_n_s__Ds0t0; static PyObject *__pyx_n_s__Ds0t1; static PyObject *__pyx_n_s__Ds1s1; static PyObject *__pyx_n_s__Ds1t0; static PyObject *__pyx_n_s__Ds1t1; static PyObject *__pyx_n_s__Dt0t0; static PyObject *__pyx_n_s__Dt0t1; static PyObject *__pyx_n_s__Dt1t1; static PyObject *__pyx_n_s__EC1d; static PyObject *__pyx_n_s__EC2d; static PyObject *__pyx_n_s__EC3d; static PyObject *__pyx_n_s__Lips1d; static PyObject *__pyx_n_s__Lips2d; static PyObject *__pyx_n_s__Lips3d; static PyObject 
*__pyx_n_s__RuntimeError; static PyObject *__pyx_n_s__ValueError; static PyObject *__pyx_n_s____main__; static PyObject *__pyx_n_s____test__; static PyObject *__pyx_n_s___convert_stride1; static PyObject *__pyx_n_s___convert_stride2; static PyObject *__pyx_n_s___convert_stride3; static PyObject *__pyx_n_s__array; static PyObject *__pyx_n_s__bool; static PyObject *__pyx_n_s__c; static PyObject *__pyx_n_s__coords; static PyObject *__pyx_n_s__coords_c; static PyObject *__pyx_n_s__cvertices; static PyObject *__pyx_n_s__d2; static PyObject *__pyx_n_s__d3; static PyObject *__pyx_n_s__d4; static PyObject *__pyx_n_s__difference; static PyObject *__pyx_n_s__dok_matrix; static PyObject *__pyx_n_s__ds2; static PyObject *__pyx_n_s__ds3; static PyObject *__pyx_n_s__ds4; static PyObject *__pyx_n_s__dstrides; static PyObject *__pyx_n_s__dtype; static PyObject *__pyx_n_s__fcoords; static PyObject *__pyx_n_s__fmask; static PyObject *__pyx_n_s__fpmask; static PyObject *__pyx_n_s__hstack; static PyObject *__pyx_n_s__i; static PyObject *__pyx_n_s__index; static PyObject *__pyx_n_s__int; static PyObject *__pyx_n_s__intp; static PyObject *__pyx_n_s__issubset; static PyObject *__pyx_n_s__j; static PyObject *__pyx_n_s__join_complexes; static PyObject *__pyx_n_s__k; static PyObject *__pyx_n_s__l; static PyObject *__pyx_n_s__l0; static PyObject *__pyx_n_s__l1; static PyObject *__pyx_n_s__l2; static PyObject *__pyx_n_s__l3; static PyObject *__pyx_n_s__m; static PyObject *__pyx_n_s__m2; static PyObject *__pyx_n_s__m3; static PyObject *__pyx_n_s__m4; static PyObject *__pyx_n_s__mask; static PyObject *__pyx_n_s__mask_c; static PyObject *__pyx_n_s__mr; static PyObject *__pyx_n_s__ms; static PyObject *__pyx_n_s__ndim; static PyObject *__pyx_n_s__np; static PyObject *__pyx_n_s__npix; static PyObject *__pyx_n_s__numpy; static PyObject *__pyx_n_s__nvox; static PyObject *__pyx_n_s__pi; static PyObject *__pyx_n_s__pindex; static PyObject *__pyx_n_s__pmask; static PyObject *__pyx_n_s__pmask_shape; static PyObject *__pyx_n_s__r; static PyObject *__pyx_n_s__range; static PyObject *__pyx_n_s__res; static PyObject *__pyx_n_s__reshape; static PyObject *__pyx_n_s__rr; static PyObject *__pyx_n_s__s; static PyObject *__pyx_n_s__s0; static PyObject *__pyx_n_s__s1; static PyObject *__pyx_n_s__s2; static PyObject *__pyx_n_s__shape; static PyObject *__pyx_n_s__size; static PyObject *__pyx_n_s__sorted; static PyObject *__pyx_n_s__squeeze; static PyObject *__pyx_n_s__ss; static PyObject *__pyx_n_s__ss0; static PyObject *__pyx_n_s__ss0d; static PyObject *__pyx_n_s__ss1; static PyObject *__pyx_n_s__ss1d; static PyObject *__pyx_n_s__ss2; static PyObject *__pyx_n_s__ss2d; static PyObject *__pyx_n_s__stride1; static PyObject *__pyx_n_s__stride2; static PyObject *__pyx_n_s__strides; static PyObject *__pyx_n_s__strides_from; static PyObject *__pyx_n_s__sum; static PyObject *__pyx_n_s__union; static PyObject *__pyx_n_s__unique; static PyObject *__pyx_n_s__utils; static PyObject *__pyx_n_s__v; static PyObject *__pyx_n_s__v0; static PyObject *__pyx_n_s__v1; static PyObject *__pyx_n_s__v2; static PyObject *__pyx_n_s__v3; static PyObject *__pyx_n_s__value; static PyObject *__pyx_n_s__verts; static PyObject *__pyx_n_s__w0; static PyObject *__pyx_n_s__w1; static PyObject *__pyx_n_s__w2; static PyObject *__pyx_n_s__w3; static PyObject *__pyx_n_s__zeros; static PyObject *__pyx_int_0; static PyObject *__pyx_int_1; static PyObject *__pyx_int_2; static PyObject *__pyx_int_3; static PyObject *__pyx_int_4; static PyObject *__pyx_int_8; static PyObject 
*__pyx_int_neg_1; static PyObject *__pyx_int_15; static PyObject *__pyx_k_slice_3; static PyObject *__pyx_k_slice_4; static PyObject *__pyx_k_slice_5; static PyObject *__pyx_k_tuple_2; static PyObject *__pyx_k_tuple_6; static PyObject *__pyx_k_tuple_7; static PyObject *__pyx_k_tuple_9; static PyObject *__pyx_k_slice_21; static PyObject *__pyx_k_slice_22; static PyObject *__pyx_k_slice_23; static PyObject *__pyx_k_slice_43; static PyObject *__pyx_k_slice_44; static PyObject *__pyx_k_slice_57; static PyObject *__pyx_k_slice_58; static PyObject *__pyx_k_tuple_10; static PyObject *__pyx_k_tuple_11; static PyObject *__pyx_k_tuple_12; static PyObject *__pyx_k_tuple_13; static PyObject *__pyx_k_tuple_14; static PyObject *__pyx_k_tuple_15; static PyObject *__pyx_k_tuple_16; static PyObject *__pyx_k_tuple_18; static PyObject *__pyx_k_tuple_19; static PyObject *__pyx_k_tuple_20; static PyObject *__pyx_k_tuple_24; static PyObject *__pyx_k_tuple_25; static PyObject *__pyx_k_tuple_26; static PyObject *__pyx_k_tuple_27; static PyObject *__pyx_k_tuple_28; static PyObject *__pyx_k_tuple_29; static PyObject *__pyx_k_tuple_30; static PyObject *__pyx_k_tuple_31; static PyObject *__pyx_k_tuple_32; static PyObject *__pyx_k_tuple_33; static PyObject *__pyx_k_tuple_34; static PyObject *__pyx_k_tuple_35; static PyObject *__pyx_k_tuple_36; static PyObject *__pyx_k_tuple_37; static PyObject *__pyx_k_tuple_38; static PyObject *__pyx_k_tuple_39; static PyObject *__pyx_k_tuple_40; static PyObject *__pyx_k_tuple_41; static PyObject *__pyx_k_tuple_42; static PyObject *__pyx_k_tuple_45; static PyObject *__pyx_k_tuple_46; static PyObject *__pyx_k_tuple_47; static PyObject *__pyx_k_tuple_48; static PyObject *__pyx_k_tuple_49; static PyObject *__pyx_k_tuple_50; static PyObject *__pyx_k_tuple_51; static PyObject *__pyx_k_tuple_52; static PyObject *__pyx_k_tuple_53; static PyObject *__pyx_k_tuple_54; static PyObject *__pyx_k_tuple_55; static PyObject *__pyx_k_tuple_56; static PyObject *__pyx_k_tuple_59; static PyObject *__pyx_k_tuple_60; static PyObject *__pyx_k_tuple_61; static PyObject *__pyx_k_tuple_62; static PyObject *__pyx_k_tuple_63; static PyObject *__pyx_k_tuple_64; static PyObject *__pyx_k_tuple_65; static PyObject *__pyx_k_tuple_66; static PyObject *__pyx_k_tuple_67; static PyObject *__pyx_k_tuple_68; static PyObject *__pyx_k_tuple_69; static PyObject *__pyx_k_tuple_70; static PyObject *__pyx_k_tuple_71; static PyObject *__pyx_k_tuple_72; static PyObject *__pyx_k_tuple_74; static PyObject *__pyx_k_tuple_76; static PyObject *__pyx_k_tuple_78; static PyObject *__pyx_k_tuple_81; static PyObject *__pyx_k_tuple_82; static PyObject *__pyx_k_tuple_84; static PyObject *__pyx_k_tuple_88; static PyObject *__pyx_k_tuple_92; static PyObject *__pyx_k_tuple_94; static PyObject *__pyx_k_tuple_96; static PyObject *__pyx_k_tuple_98; static PyObject *__pyx_k_tuple_100; static PyObject *__pyx_k_tuple_102; static PyObject *__pyx_k_tuple_104; static PyObject *__pyx_k_tuple_106; static PyObject *__pyx_k_codeobj_89; static PyObject *__pyx_k_codeobj_93; static PyObject *__pyx_k_codeobj_95; static PyObject *__pyx_k_codeobj_97; static PyObject *__pyx_k_codeobj_99; static PyObject *__pyx_k_codeobj_101; static PyObject *__pyx_k_codeobj_103; static PyObject *__pyx_k_codeobj_105; static PyObject *__pyx_k_codeobj_107; /* "nipy/algorithms/statistics/intvol.pyx":36 * * * cpdef double mu3_tet(double D00, double D01, double D02, double D03, # <<<<<<<<<<<<<< * double D11, double D12, double D13, * double D22, double D23, */ static PyObject 
*__pyx_pw_4nipy_10algorithms_10statistics_6intvol_1mu3_tet(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static double __pyx_f_4nipy_10algorithms_10statistics_6intvol_mu3_tet(double __pyx_v_D00, double __pyx_v_D01, double __pyx_v_D02, double __pyx_v_D03, double __pyx_v_D11, double __pyx_v_D12, double __pyx_v_D13, double __pyx_v_D22, double __pyx_v_D23, double __pyx_v_D33, CYTHON_UNUSED int __pyx_skip_dispatch) { double __pyx_v_C00; double __pyx_v_C01; double __pyx_v_C02; double __pyx_v_C11; double __pyx_v_C12; double __pyx_v_C22; double __pyx_v_v2; double __pyx_r; int __pyx_t_1; /* "nipy/algorithms/statistics/intvol.pyx":76 * """ * cdef double C00, C01, C02, C11, C12, C22, v2 * C00 = D00 - 2*D03 + D33 # <<<<<<<<<<<<<< * C01 = D01 - D13 - D03 + D33 * C02 = D02 - D23 - D03 + D33 */ __pyx_v_C00 = ((__pyx_v_D00 - (2.0 * __pyx_v_D03)) + __pyx_v_D33); /* "nipy/algorithms/statistics/intvol.pyx":77 * cdef double C00, C01, C02, C11, C12, C22, v2 * C00 = D00 - 2*D03 + D33 * C01 = D01 - D13 - D03 + D33 # <<<<<<<<<<<<<< * C02 = D02 - D23 - D03 + D33 * C11 = D11 - 2*D13 + D33 */ __pyx_v_C01 = (((__pyx_v_D01 - __pyx_v_D13) - __pyx_v_D03) + __pyx_v_D33); /* "nipy/algorithms/statistics/intvol.pyx":78 * C00 = D00 - 2*D03 + D33 * C01 = D01 - D13 - D03 + D33 * C02 = D02 - D23 - D03 + D33 # <<<<<<<<<<<<<< * C11 = D11 - 2*D13 + D33 * C12 = D12 - D13 - D23 + D33 */ __pyx_v_C02 = (((__pyx_v_D02 - __pyx_v_D23) - __pyx_v_D03) + __pyx_v_D33); /* "nipy/algorithms/statistics/intvol.pyx":79 * C01 = D01 - D13 - D03 + D33 * C02 = D02 - D23 - D03 + D33 * C11 = D11 - 2*D13 + D33 # <<<<<<<<<<<<<< * C12 = D12 - D13 - D23 + D33 * C22 = D22 - 2*D23 + D33 */ __pyx_v_C11 = ((__pyx_v_D11 - (2.0 * __pyx_v_D13)) + __pyx_v_D33); /* "nipy/algorithms/statistics/intvol.pyx":80 * C02 = D02 - D23 - D03 + D33 * C11 = D11 - 2*D13 + D33 * C12 = D12 - D13 - D23 + D33 # <<<<<<<<<<<<<< * C22 = D22 - 2*D23 + D33 * v2 = (C00 * (C11 * C22 - C12 * C12) - */ __pyx_v_C12 = (((__pyx_v_D12 - __pyx_v_D13) - __pyx_v_D23) + __pyx_v_D33); /* "nipy/algorithms/statistics/intvol.pyx":81 * C11 = D11 - 2*D13 + D33 * C12 = D12 - D13 - D23 + D33 * C22 = D22 - 2*D23 + D33 # <<<<<<<<<<<<<< * v2 = (C00 * (C11 * C22 - C12 * C12) - * C01 * (C01 * C22 - C02 * C12) + */ __pyx_v_C22 = ((__pyx_v_D22 - (2.0 * __pyx_v_D23)) + __pyx_v_D33); /* "nipy/algorithms/statistics/intvol.pyx":84 * v2 = (C00 * (C11 * C22 - C12 * C12) - * C01 * (C01 * C22 - C02 * C12) + * C02 * (C01 * C12 - C11 * C02)) # <<<<<<<<<<<<<< * # Rounding errors near 0 cause NaNs * if v2 <= 0: */ __pyx_v_v2 = (((__pyx_v_C00 * ((__pyx_v_C11 * __pyx_v_C22) - (__pyx_v_C12 * __pyx_v_C12))) - (__pyx_v_C01 * ((__pyx_v_C01 * __pyx_v_C22) - (__pyx_v_C02 * __pyx_v_C12)))) + (__pyx_v_C02 * ((__pyx_v_C01 * __pyx_v_C12) - (__pyx_v_C11 * __pyx_v_C02)))); /* "nipy/algorithms/statistics/intvol.pyx":86 * C02 * (C01 * C12 - C11 * C02)) * # Rounding errors near 0 cause NaNs * if v2 <= 0: # <<<<<<<<<<<<<< * return 0 * return sqrt(v2) / 6. */ __pyx_t_1 = (__pyx_v_v2 <= 0.0); if (__pyx_t_1) { /* "nipy/algorithms/statistics/intvol.pyx":87 * # Rounding errors near 0 cause NaNs * if v2 <= 0: * return 0 # <<<<<<<<<<<<<< * return sqrt(v2) / 6. * */ __pyx_r = 0.0; goto __pyx_L0; goto __pyx_L3; } __pyx_L3:; /* "nipy/algorithms/statistics/intvol.pyx":88 * if v2 <= 0: * return 0 * return sqrt(v2) / 6. 
# <<<<<<<<<<<<<< * * */ __pyx_r = (sqrt(__pyx_v_v2) / 6.); goto __pyx_L0; __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_10algorithms_10statistics_6intvol_1mu3_tet(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_10algorithms_10statistics_6intvol_mu3_tet[] = " Compute the 3rd intrinsic volume of a tetrahedron.\n\n 3rd intrinsic volume (just volume in this case) of a tetrahedron with\n coordinates implied by dot products below.\n\n Parameters\n ----------\n D00 : float\n If ``cv0`` is a 3-vector of coordinates for the first vertex, `D00` is\n ``cv0.dot(cv0)``\n D01 : float\n ``cv0.dot(cv1)`` where ``cv1`` is the coordinates for the second\n vertex.\n D02 : float\n ``cv0.dot(cv2)``\n D03 : float\n ``cv0.dot(cv3)``\n D11 : float\n ``cv1.dot(cv1)``\n D12 : float\n ``cv1.dot(cv2)``\n D13 : float\n ``cv1.dot(cv3)``\n D22 : float\n ``cv2.dot(cv2)``\n D23 : float\n ``cv2.dot(cv2)``\n D33 : float\n ``cv3.dot(cv3)``\n\n Returns\n -------\n mu3 : float\n volume of tetrahedron\n "; static PyObject *__pyx_pw_4nipy_10algorithms_10statistics_6intvol_1mu3_tet(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { double __pyx_v_D00; double __pyx_v_D01; double __pyx_v_D02; double __pyx_v_D03; double __pyx_v_D11; double __pyx_v_D12; double __pyx_v_D13; double __pyx_v_D22; double __pyx_v_D23; double __pyx_v_D33; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("mu3_tet (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__D00,&__pyx_n_s__D01,&__pyx_n_s__D02,&__pyx_n_s__D03,&__pyx_n_s__D11,&__pyx_n_s__D12,&__pyx_n_s__D13,&__pyx_n_s__D22,&__pyx_n_s__D23,&__pyx_n_s__D33,0}; PyObject* values[10] = {0,0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9); case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__D00)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__D01)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("mu3_tet", 1, 10, 10, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__D02)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("mu3_tet", 1, 10, 10, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__D03)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("mu3_tet", 1, 10, 10, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__D11)) != 0)) kw_args--; else { 
__Pyx_RaiseArgtupleInvalid("mu3_tet", 1, 10, 10, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__D12)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("mu3_tet", 1, 10, 10, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__D13)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("mu3_tet", 1, 10, 10, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__D22)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("mu3_tet", 1, 10, 10, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__D23)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("mu3_tet", 1, 10, 10, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 9: if (likely((values[9] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__D33)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("mu3_tet", 1, 10, 10, 9); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "mu3_tet") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 10) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); values[9] = PyTuple_GET_ITEM(__pyx_args, 9); } __pyx_v_D00 = __pyx_PyFloat_AsDouble(values[0]); if (unlikely((__pyx_v_D00 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_D01 = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_D01 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_D02 = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_D02 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_D03 = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_D03 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_D11 = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_D11 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 37; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_D12 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_D12 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 37; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_D13 = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_D13 == (double)-1) && 
PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 37; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_D22 = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_D22 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 38; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_D23 = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_D23 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 38; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_D33 = __pyx_PyFloat_AsDouble(values[9]); if (unlikely((__pyx_v_D33 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 39; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("mu3_tet", 1, 10, 10, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.algorithms.statistics.intvol.mu3_tet", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4nipy_10algorithms_10statistics_6intvol_mu3_tet(__pyx_self, __pyx_v_D00, __pyx_v_D01, __pyx_v_D02, __pyx_v_D03, __pyx_v_D11, __pyx_v_D12, __pyx_v_D13, __pyx_v_D22, __pyx_v_D23, __pyx_v_D33); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/algorithms/statistics/intvol.pyx":36 * * * cpdef double mu3_tet(double D00, double D01, double D02, double D03, # <<<<<<<<<<<<<< * double D11, double D12, double D13, * double D22, double D23, */ static PyObject *__pyx_pf_4nipy_10algorithms_10statistics_6intvol_mu3_tet(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_D00, double __pyx_v_D01, double __pyx_v_D02, double __pyx_v_D03, double __pyx_v_D11, double __pyx_v_D12, double __pyx_v_D13, double __pyx_v_D22, double __pyx_v_D23, double __pyx_v_D33) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("mu3_tet", 0); __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyFloat_FromDouble(__pyx_f_4nipy_10algorithms_10statistics_6intvol_mu3_tet(__pyx_v_D00, __pyx_v_D01, __pyx_v_D02, __pyx_v_D03, __pyx_v_D11, __pyx_v_D12, __pyx_v_D13, __pyx_v_D22, __pyx_v_D23, __pyx_v_D33, 0)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("nipy.algorithms.statistics.intvol.mu3_tet", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/algorithms/statistics/intvol.pyx":91 * * * cpdef double mu2_tet(double D00, double D01, double D02, double D03, # <<<<<<<<<<<<<< * double D11, double D12, double D13, * double D22, double D23, */ static PyObject *__pyx_pw_4nipy_10algorithms_10statistics_6intvol_3mu2_tet(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static double __pyx_f_4nipy_10algorithms_10statistics_6intvol_mu2_tet(double __pyx_v_D00, double __pyx_v_D01, double __pyx_v_D02, double __pyx_v_D03, double __pyx_v_D11, double __pyx_v_D12, double __pyx_v_D13, double __pyx_v_D22, double __pyx_v_D23, double __pyx_v_D33, 
CYTHON_UNUSED int __pyx_skip_dispatch) { double __pyx_v_mu; double __pyx_r; /* "nipy/algorithms/statistics/intvol.pyx":130 * Half tetrahedron surface area * """ * cdef double mu = 0 # <<<<<<<<<<<<<< * mu += mu2_tri(D00, D01, D02, D11, D12, D22) * mu += mu2_tri(D00, D02, D03, D22, D23, D33) */ __pyx_v_mu = 0.0; /* "nipy/algorithms/statistics/intvol.pyx":131 * """ * cdef double mu = 0 * mu += mu2_tri(D00, D01, D02, D11, D12, D22) # <<<<<<<<<<<<<< * mu += mu2_tri(D00, D02, D03, D22, D23, D33) * mu += mu2_tri(D11, D12, D13, D22, D23, D33) */ __pyx_v_mu = (__pyx_v_mu + __pyx_f_4nipy_10algorithms_10statistics_6intvol_mu2_tri(__pyx_v_D00, __pyx_v_D01, __pyx_v_D02, __pyx_v_D11, __pyx_v_D12, __pyx_v_D22, 0)); /* "nipy/algorithms/statistics/intvol.pyx":132 * cdef double mu = 0 * mu += mu2_tri(D00, D01, D02, D11, D12, D22) * mu += mu2_tri(D00, D02, D03, D22, D23, D33) # <<<<<<<<<<<<<< * mu += mu2_tri(D11, D12, D13, D22, D23, D33) * mu += mu2_tri(D00, D01, D03, D11, D13, D33) */ __pyx_v_mu = (__pyx_v_mu + __pyx_f_4nipy_10algorithms_10statistics_6intvol_mu2_tri(__pyx_v_D00, __pyx_v_D02, __pyx_v_D03, __pyx_v_D22, __pyx_v_D23, __pyx_v_D33, 0)); /* "nipy/algorithms/statistics/intvol.pyx":133 * mu += mu2_tri(D00, D01, D02, D11, D12, D22) * mu += mu2_tri(D00, D02, D03, D22, D23, D33) * mu += mu2_tri(D11, D12, D13, D22, D23, D33) # <<<<<<<<<<<<<< * mu += mu2_tri(D00, D01, D03, D11, D13, D33) * return mu * 0.5 */ __pyx_v_mu = (__pyx_v_mu + __pyx_f_4nipy_10algorithms_10statistics_6intvol_mu2_tri(__pyx_v_D11, __pyx_v_D12, __pyx_v_D13, __pyx_v_D22, __pyx_v_D23, __pyx_v_D33, 0)); /* "nipy/algorithms/statistics/intvol.pyx":134 * mu += mu2_tri(D00, D02, D03, D22, D23, D33) * mu += mu2_tri(D11, D12, D13, D22, D23, D33) * mu += mu2_tri(D00, D01, D03, D11, D13, D33) # <<<<<<<<<<<<<< * return mu * 0.5 * */ __pyx_v_mu = (__pyx_v_mu + __pyx_f_4nipy_10algorithms_10statistics_6intvol_mu2_tri(__pyx_v_D00, __pyx_v_D01, __pyx_v_D03, __pyx_v_D11, __pyx_v_D13, __pyx_v_D33, 0)); /* "nipy/algorithms/statistics/intvol.pyx":135 * mu += mu2_tri(D11, D12, D13, D22, D23, D33) * mu += mu2_tri(D00, D01, D03, D11, D13, D33) * return mu * 0.5 # <<<<<<<<<<<<<< * * */ __pyx_r = (__pyx_v_mu * 0.5); goto __pyx_L0; __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_10algorithms_10statistics_6intvol_3mu2_tet(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_10algorithms_10statistics_6intvol_2mu2_tet[] = " Compute the 2nd intrinsic volume of tetrahedron\n\n 2nd intrinsic volume (half the surface area) of a tetrahedron with coordinates\n implied by dot products below.\n\n Parameters\n ----------\n D00 : float\n If ``cv0`` is a 3-vector of coordinates for the first vertex, `D00` is\n ``cv0.dot(cv0)``\n D01 : float\n ``cv0.dot(cv1)`` where ``cv1`` is the coordinates for the second\n vertex.\n D02 : float\n ``cv0.dot(cv2)``\n D03 : float\n ``cv0.dot(cv3)``\n D11 : float\n ``cv1.dot(cv1)``\n D12 : float\n ``cv1.dot(cv2)``\n D13 : float\n ``cv1.dot(cv3)``\n D22 : float\n ``cv2.dot(cv2)``\n D23 : float\n ``cv2.dot(cv2)``\n D33 : float\n ``cv3.dot(cv3)``\n\n Returns\n -------\n mu2 : float\n Half tetrahedron surface area\n "; static PyObject *__pyx_pw_4nipy_10algorithms_10statistics_6intvol_3mu2_tet(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { double __pyx_v_D00; double __pyx_v_D01; double __pyx_v_D02; double __pyx_v_D03; double __pyx_v_D11; double __pyx_v_D12; double __pyx_v_D13; double __pyx_v_D22; double __pyx_v_D23; double 
__pyx_v_D33; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("mu2_tet (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__D00,&__pyx_n_s__D01,&__pyx_n_s__D02,&__pyx_n_s__D03,&__pyx_n_s__D11,&__pyx_n_s__D12,&__pyx_n_s__D13,&__pyx_n_s__D22,&__pyx_n_s__D23,&__pyx_n_s__D33,0}; PyObject* values[10] = {0,0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9); case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__D00)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__D01)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("mu2_tet", 1, 10, 10, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__D02)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("mu2_tet", 1, 10, 10, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__D03)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("mu2_tet", 1, 10, 10, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__D11)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("mu2_tet", 1, 10, 10, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__D12)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("mu2_tet", 1, 10, 10, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__D13)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("mu2_tet", 1, 10, 10, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__D22)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("mu2_tet", 1, 10, 10, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__D23)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("mu2_tet", 1, 10, 10, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 9: if (likely((values[9] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__D33)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("mu2_tet", 1, 10, 10, 9); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if 
(unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "mu2_tet") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 10) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); values[9] = PyTuple_GET_ITEM(__pyx_args, 9); } __pyx_v_D00 = __pyx_PyFloat_AsDouble(values[0]); if (unlikely((__pyx_v_D00 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_D01 = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_D01 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_D02 = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_D02 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_D03 = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_D03 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_D11 = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_D11 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 92; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_D12 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_D12 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 92; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_D13 = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_D13 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 92; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_D22 = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_D22 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 93; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_D23 = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_D23 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 93; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_D33 = __pyx_PyFloat_AsDouble(values[9]); if (unlikely((__pyx_v_D33 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 94; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("mu2_tet", 1, 10, 10, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.algorithms.statistics.intvol.mu2_tet", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4nipy_10algorithms_10statistics_6intvol_2mu2_tet(__pyx_self, __pyx_v_D00, __pyx_v_D01, __pyx_v_D02, __pyx_v_D03, __pyx_v_D11, __pyx_v_D12, __pyx_v_D13, __pyx_v_D22, __pyx_v_D23, __pyx_v_D33); 
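/* Editor's note (descriptive comment, not emitted by Cython): the wrapper above has
 * unpacked the ten positional/keyword arguments D00..D33 into C doubles and delegated to
 * the C-level mu2_tet.  As the intvol.pyx source quoted in the comments above (lines
 * 130-135) shows, that routine returns half the tetrahedron surface area: it sums mu2_tri
 * (the area of a triangle given its vertex dot products) over the four faces {0,1,2},
 * {0,2,3}, {1,2,3}, {0,1,3} and multiplies by 0.5.  For the unit right tetrahedron this
 * gives 0.5 * (3 * 0.5 + sqrt(3)/2), roughly 1.183. */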
__Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/algorithms/statistics/intvol.pyx":91 * * * cpdef double mu2_tet(double D00, double D01, double D02, double D03, # <<<<<<<<<<<<<< * double D11, double D12, double D13, * double D22, double D23, */ static PyObject *__pyx_pf_4nipy_10algorithms_10statistics_6intvol_2mu2_tet(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_D00, double __pyx_v_D01, double __pyx_v_D02, double __pyx_v_D03, double __pyx_v_D11, double __pyx_v_D12, double __pyx_v_D13, double __pyx_v_D22, double __pyx_v_D23, double __pyx_v_D33) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("mu2_tet", 0); __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyFloat_FromDouble(__pyx_f_4nipy_10algorithms_10statistics_6intvol_mu2_tet(__pyx_v_D00, __pyx_v_D01, __pyx_v_D02, __pyx_v_D03, __pyx_v_D11, __pyx_v_D12, __pyx_v_D13, __pyx_v_D22, __pyx_v_D23, __pyx_v_D33, 0)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("nipy.algorithms.statistics.intvol.mu2_tet", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/algorithms/statistics/intvol.pyx":138 * * * cpdef double mu1_tet(double D00, double D01, double D02, double D03, # <<<<<<<<<<<<<< * double D11, double D12, double D13, * double D22, double D23, */ static PyObject *__pyx_pw_4nipy_10algorithms_10statistics_6intvol_5mu1_tet(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static double __pyx_f_4nipy_10algorithms_10statistics_6intvol_mu1_tet(double __pyx_v_D00, double __pyx_v_D01, double __pyx_v_D02, double __pyx_v_D03, double __pyx_v_D11, double __pyx_v_D12, double __pyx_v_D13, double __pyx_v_D22, double __pyx_v_D23, double __pyx_v_D33, CYTHON_UNUSED int __pyx_skip_dispatch) { double __pyx_v_mu; double __pyx_r; /* "nipy/algorithms/statistics/intvol.pyx":179 * """ * cdef double mu * mu = 0 # <<<<<<<<<<<<<< * mu += _mu1_tetface(D00, D01, D11, D02, D03, D12, D13, D22, D23, D33) * mu += _mu1_tetface(D00, D02, D22, D01, D03, D12, D23, D11, D13, D33) */ __pyx_v_mu = 0.0; /* "nipy/algorithms/statistics/intvol.pyx":180 * cdef double mu * mu = 0 * mu += _mu1_tetface(D00, D01, D11, D02, D03, D12, D13, D22, D23, D33) # <<<<<<<<<<<<<< * mu += _mu1_tetface(D00, D02, D22, D01, D03, D12, D23, D11, D13, D33) * mu += _mu1_tetface(D00, D03, D33, D01, D02, D13, D23, D11, D12, D22) */ __pyx_v_mu = (__pyx_v_mu + __pyx_f_4nipy_10algorithms_10statistics_6intvol__mu1_tetface(__pyx_v_D00, __pyx_v_D01, __pyx_v_D11, __pyx_v_D02, __pyx_v_D03, __pyx_v_D12, __pyx_v_D13, __pyx_v_D22, __pyx_v_D23, __pyx_v_D33, 0)); /* "nipy/algorithms/statistics/intvol.pyx":181 * mu = 0 * mu += _mu1_tetface(D00, D01, D11, D02, D03, D12, D13, D22, D23, D33) * mu += _mu1_tetface(D00, D02, D22, D01, D03, D12, D23, D11, D13, D33) # <<<<<<<<<<<<<< * mu += _mu1_tetface(D00, D03, D33, D01, D02, D13, D23, D11, D12, D22) * mu += _mu1_tetface(D11, D12, D22, D01, D13, D02, D23, D00, D03, D33) */ __pyx_v_mu = (__pyx_v_mu + __pyx_f_4nipy_10algorithms_10statistics_6intvol__mu1_tetface(__pyx_v_D00, __pyx_v_D02, __pyx_v_D22, __pyx_v_D01, __pyx_v_D03, __pyx_v_D12, 
__pyx_v_D23, __pyx_v_D11, __pyx_v_D13, __pyx_v_D33, 0)); /* "nipy/algorithms/statistics/intvol.pyx":182 * mu += _mu1_tetface(D00, D01, D11, D02, D03, D12, D13, D22, D23, D33) * mu += _mu1_tetface(D00, D02, D22, D01, D03, D12, D23, D11, D13, D33) * mu += _mu1_tetface(D00, D03, D33, D01, D02, D13, D23, D11, D12, D22) # <<<<<<<<<<<<<< * mu += _mu1_tetface(D11, D12, D22, D01, D13, D02, D23, D00, D03, D33) * mu += _mu1_tetface(D11, D13, D33, D01, D12, D03, D23, D00, D02, D22) */ __pyx_v_mu = (__pyx_v_mu + __pyx_f_4nipy_10algorithms_10statistics_6intvol__mu1_tetface(__pyx_v_D00, __pyx_v_D03, __pyx_v_D33, __pyx_v_D01, __pyx_v_D02, __pyx_v_D13, __pyx_v_D23, __pyx_v_D11, __pyx_v_D12, __pyx_v_D22, 0)); /* "nipy/algorithms/statistics/intvol.pyx":183 * mu += _mu1_tetface(D00, D02, D22, D01, D03, D12, D23, D11, D13, D33) * mu += _mu1_tetface(D00, D03, D33, D01, D02, D13, D23, D11, D12, D22) * mu += _mu1_tetface(D11, D12, D22, D01, D13, D02, D23, D00, D03, D33) # <<<<<<<<<<<<<< * mu += _mu1_tetface(D11, D13, D33, D01, D12, D03, D23, D00, D02, D22) * mu += _mu1_tetface(D22, D23, D33, D02, D12, D03, D13, D00, D01, D11) */ __pyx_v_mu = (__pyx_v_mu + __pyx_f_4nipy_10algorithms_10statistics_6intvol__mu1_tetface(__pyx_v_D11, __pyx_v_D12, __pyx_v_D22, __pyx_v_D01, __pyx_v_D13, __pyx_v_D02, __pyx_v_D23, __pyx_v_D00, __pyx_v_D03, __pyx_v_D33, 0)); /* "nipy/algorithms/statistics/intvol.pyx":184 * mu += _mu1_tetface(D00, D03, D33, D01, D02, D13, D23, D11, D12, D22) * mu += _mu1_tetface(D11, D12, D22, D01, D13, D02, D23, D00, D03, D33) * mu += _mu1_tetface(D11, D13, D33, D01, D12, D03, D23, D00, D02, D22) # <<<<<<<<<<<<<< * mu += _mu1_tetface(D22, D23, D33, D02, D12, D03, D13, D00, D01, D11) * return mu */ __pyx_v_mu = (__pyx_v_mu + __pyx_f_4nipy_10algorithms_10statistics_6intvol__mu1_tetface(__pyx_v_D11, __pyx_v_D13, __pyx_v_D33, __pyx_v_D01, __pyx_v_D12, __pyx_v_D03, __pyx_v_D23, __pyx_v_D00, __pyx_v_D02, __pyx_v_D22, 0)); /* "nipy/algorithms/statistics/intvol.pyx":185 * mu += _mu1_tetface(D11, D12, D22, D01, D13, D02, D23, D00, D03, D33) * mu += _mu1_tetface(D11, D13, D33, D01, D12, D03, D23, D00, D02, D22) * mu += _mu1_tetface(D22, D23, D33, D02, D12, D03, D13, D00, D01, D11) # <<<<<<<<<<<<<< * return mu * */ __pyx_v_mu = (__pyx_v_mu + __pyx_f_4nipy_10algorithms_10statistics_6intvol__mu1_tetface(__pyx_v_D22, __pyx_v_D23, __pyx_v_D33, __pyx_v_D02, __pyx_v_D12, __pyx_v_D03, __pyx_v_D13, __pyx_v_D00, __pyx_v_D01, __pyx_v_D11, 0)); /* "nipy/algorithms/statistics/intvol.pyx":186 * mu += _mu1_tetface(D11, D13, D33, D01, D12, D03, D23, D00, D02, D22) * mu += _mu1_tetface(D22, D23, D33, D02, D12, D03, D13, D00, D01, D11) * return mu # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_mu; goto __pyx_L0; __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_10algorithms_10statistics_6intvol_5mu1_tet(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_10algorithms_10statistics_6intvol_4mu1_tet[] = " Return 3rd intrinsic volume of tetrahedron\n\n Compute the 3rd intrinsic volume (sum of external angles * edge\n lengths) of a tetrahedron for which the input arguments represent the\n coordinate dot products of the vertices.\n\n Parameters\n ----------\n D00 : float\n If ``cv0`` is a 3-vector of coordinates for the first vertex, `D00` is\n ``cv0.dot(cv0)``\n D01 : float\n ``cv0.dot(cv1)`` where ``cv1`` is the coordinates for the second\n vertex.\n D02 : float\n ``cv0.dot(cv2)``\n D03 : float\n ``cv0.dot(cv3)``\n D11 : float\n 
``cv1.dot(cv1)``\n D12 : float\n ``cv1.dot(cv2)``\n D13 : float\n ``cv1.dot(cv3)``\n D22 : float\n ``cv2.dot(cv2)``\n D23 : float\n ``cv2.dot(cv2)``\n D33 : float\n ``cv3.dot(cv3)``\n\n Returns\n -------\n mu1 : float\n 3rd intrinsic volume of tetrahedron\n "; static PyObject *__pyx_pw_4nipy_10algorithms_10statistics_6intvol_5mu1_tet(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { double __pyx_v_D00; double __pyx_v_D01; double __pyx_v_D02; double __pyx_v_D03; double __pyx_v_D11; double __pyx_v_D12; double __pyx_v_D13; double __pyx_v_D22; double __pyx_v_D23; double __pyx_v_D33; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("mu1_tet (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__D00,&__pyx_n_s__D01,&__pyx_n_s__D02,&__pyx_n_s__D03,&__pyx_n_s__D11,&__pyx_n_s__D12,&__pyx_n_s__D13,&__pyx_n_s__D22,&__pyx_n_s__D23,&__pyx_n_s__D33,0}; PyObject* values[10] = {0,0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9); case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__D00)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__D01)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("mu1_tet", 1, 10, 10, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 138; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__D02)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("mu1_tet", 1, 10, 10, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 138; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__D03)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("mu1_tet", 1, 10, 10, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 138; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__D11)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("mu1_tet", 1, 10, 10, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 138; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__D12)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("mu1_tet", 1, 10, 10, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 138; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__D13)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("mu1_tet", 1, 10, 10, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 138; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__D22)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("mu1_tet", 1, 10, 10, 7); {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 138; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__D23)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("mu1_tet", 1, 10, 10, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 138; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 9: if (likely((values[9] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__D33)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("mu1_tet", 1, 10, 10, 9); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 138; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "mu1_tet") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 138; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 10) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); values[9] = PyTuple_GET_ITEM(__pyx_args, 9); } __pyx_v_D00 = __pyx_PyFloat_AsDouble(values[0]); if (unlikely((__pyx_v_D00 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 138; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_D01 = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_D01 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 138; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_D02 = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_D02 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 138; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_D03 = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_D03 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 138; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_D11 = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_D11 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 139; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_D12 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_D12 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 139; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_D13 = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_D13 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 139; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_D22 = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_D22 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 140; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_D23 = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_D23 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 140; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_D33 = __pyx_PyFloat_AsDouble(values[9]); if (unlikely((__pyx_v_D33 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 141; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; 
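/* Illustrative note (hand-written, not Cython output). As the C body above
 * shows, mu1_tet is the sum of six _mu1_tetface terms, one per edge of the
 * tetrahedron; each term is that edge's length weighted by its exterior
 * dihedral angle divided by 2*pi (see _mu1_tetface below). Reusing the
 * hypothetical D matrix of dot products from the mu2_tet sketch above,
 * the Python-level call is:
 *
 *     intvol.mu1_tet(D[0, 0], D[0, 1], D[0, 2], D[0, 3],
 *                    D[1, 1], D[1, 2], D[1, 3],
 *                    D[2, 2], D[2, 3], D[3, 3])
 */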
__Pyx_RaiseArgtupleInvalid("mu1_tet", 1, 10, 10, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 138; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.algorithms.statistics.intvol.mu1_tet", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4nipy_10algorithms_10statistics_6intvol_4mu1_tet(__pyx_self, __pyx_v_D00, __pyx_v_D01, __pyx_v_D02, __pyx_v_D03, __pyx_v_D11, __pyx_v_D12, __pyx_v_D13, __pyx_v_D22, __pyx_v_D23, __pyx_v_D33); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/algorithms/statistics/intvol.pyx":138 * * * cpdef double mu1_tet(double D00, double D01, double D02, double D03, # <<<<<<<<<<<<<< * double D11, double D12, double D13, * double D22, double D23, */ static PyObject *__pyx_pf_4nipy_10algorithms_10statistics_6intvol_4mu1_tet(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_D00, double __pyx_v_D01, double __pyx_v_D02, double __pyx_v_D03, double __pyx_v_D11, double __pyx_v_D12, double __pyx_v_D13, double __pyx_v_D22, double __pyx_v_D23, double __pyx_v_D33) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("mu1_tet", 0); __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyFloat_FromDouble(__pyx_f_4nipy_10algorithms_10statistics_6intvol_mu1_tet(__pyx_v_D00, __pyx_v_D01, __pyx_v_D02, __pyx_v_D03, __pyx_v_D11, __pyx_v_D12, __pyx_v_D13, __pyx_v_D22, __pyx_v_D23, __pyx_v_D33, 0)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 138; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("nipy.algorithms.statistics.intvol.mu1_tet", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/algorithms/statistics/intvol.pyx":189 * * * cdef inline double limited_acos(double val) nogil: # <<<<<<<<<<<<<< * """ Check for -1 <= val <= 1 before returning acos(val) * */ static CYTHON_INLINE double __pyx_f_4nipy_10algorithms_10statistics_6intvol_limited_acos(double __pyx_v_val) { double __pyx_r; int __pyx_t_1; /* "nipy/algorithms/statistics/intvol.pyx":194 * Avoids nan values from small rounding errors * """ * if val >= 1: # <<<<<<<<<<<<<< * return 0 * elif val <= -1: */ __pyx_t_1 = (__pyx_v_val >= 1.0); if (__pyx_t_1) { /* "nipy/algorithms/statistics/intvol.pyx":195 * """ * if val >= 1: * return 0 # <<<<<<<<<<<<<< * elif val <= -1: * return PI */ __pyx_r = 0.0; goto __pyx_L0; goto __pyx_L3; } /* "nipy/algorithms/statistics/intvol.pyx":196 * if val >= 1: * return 0 * elif val <= -1: # <<<<<<<<<<<<<< * return PI * return acos(val) */ __pyx_t_1 = (__pyx_v_val <= -1.0); if (__pyx_t_1) { /* "nipy/algorithms/statistics/intvol.pyx":197 * return 0 * elif val <= -1: * return PI # <<<<<<<<<<<<<< * return acos(val) * */ __pyx_r = __pyx_v_4nipy_10algorithms_10statistics_6intvol_PI; goto __pyx_L0; goto __pyx_L3; } __pyx_L3:; /* "nipy/algorithms/statistics/intvol.pyx":198 * elif val <= -1: * return PI * return acos(val) # <<<<<<<<<<<<<< * * */ __pyx_r = acos(__pyx_v_val); goto __pyx_L0; __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* "nipy/algorithms/statistics/intvol.pyx":202 * * @cython.cdivision(True) * 
cpdef double _mu1_tetface(double Ds0s0, # <<<<<<<<<<<<<< * double Ds0s1, * double Ds1s1, */ static PyObject *__pyx_pw_4nipy_10algorithms_10statistics_6intvol_7_mu1_tetface(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static double __pyx_f_4nipy_10algorithms_10statistics_6intvol__mu1_tetface(double __pyx_v_Ds0s0, double __pyx_v_Ds0s1, double __pyx_v_Ds1s1, double __pyx_v_Ds0t0, double __pyx_v_Ds0t1, double __pyx_v_Ds1t0, double __pyx_v_Ds1t1, double __pyx_v_Dt0t0, double __pyx_v_Dt0t1, double __pyx_v_Dt1t1, CYTHON_UNUSED int __pyx_skip_dispatch) { double __pyx_v_A00; double __pyx_v_A01; double __pyx_v_A02; double __pyx_v_A11; double __pyx_v_A12; double __pyx_v_A22; double __pyx_v_np_len; double __pyx_v_a; double __pyx_v_acosval; double __pyx_v_length; double __pyx_v_norm_proj0; double __pyx_v_norm_proj1; double __pyx_v_inner_prod_proj; double __pyx_r; int __pyx_t_1; /* "nipy/algorithms/statistics/intvol.pyx":215 * cdef double length, norm_proj0, norm_proj1, inner_prod_proj * * A00 = Ds1s1 - 2 * Ds0s1 + Ds0s0 # <<<<<<<<<<<<<< * # all norms divided by this value, leading to NaN value for output, for * # values <= 0 */ __pyx_v_A00 = ((__pyx_v_Ds1s1 - (2.0 * __pyx_v_Ds0s1)) + __pyx_v_Ds0s0); /* "nipy/algorithms/statistics/intvol.pyx":218 * # all norms divided by this value, leading to NaN value for output, for * # values <= 0 * if A00 <= 0: # <<<<<<<<<<<<<< * return 0 * A11 = Dt0t0 - 2 * Ds0t0 + Ds0s0 */ __pyx_t_1 = (__pyx_v_A00 <= 0.0); if (__pyx_t_1) { /* "nipy/algorithms/statistics/intvol.pyx":219 * # values <= 0 * if A00 <= 0: * return 0 # <<<<<<<<<<<<<< * A11 = Dt0t0 - 2 * Ds0t0 + Ds0s0 * A22 = Dt1t1 - 2 * Ds0t1 + Ds0s0 */ __pyx_r = 0.0; goto __pyx_L0; goto __pyx_L3; } __pyx_L3:; /* "nipy/algorithms/statistics/intvol.pyx":220 * if A00 <= 0: * return 0 * A11 = Dt0t0 - 2 * Ds0t0 + Ds0s0 # <<<<<<<<<<<<<< * A22 = Dt1t1 - 2 * Ds0t1 + Ds0s0 * A01 = Ds1t0 - Ds0t0 - Ds0s1 + Ds0s0 */ __pyx_v_A11 = ((__pyx_v_Dt0t0 - (2.0 * __pyx_v_Ds0t0)) + __pyx_v_Ds0s0); /* "nipy/algorithms/statistics/intvol.pyx":221 * return 0 * A11 = Dt0t0 - 2 * Ds0t0 + Ds0s0 * A22 = Dt1t1 - 2 * Ds0t1 + Ds0s0 # <<<<<<<<<<<<<< * A01 = Ds1t0 - Ds0t0 - Ds0s1 + Ds0s0 * A02 = Ds1t1 - Ds0t1 - Ds0s1 + Ds0s0 */ __pyx_v_A22 = ((__pyx_v_Dt1t1 - (2.0 * __pyx_v_Ds0t1)) + __pyx_v_Ds0s0); /* "nipy/algorithms/statistics/intvol.pyx":222 * A11 = Dt0t0 - 2 * Ds0t0 + Ds0s0 * A22 = Dt1t1 - 2 * Ds0t1 + Ds0s0 * A01 = Ds1t0 - Ds0t0 - Ds0s1 + Ds0s0 # <<<<<<<<<<<<<< * A02 = Ds1t1 - Ds0t1 - Ds0s1 + Ds0s0 * A12 = Dt0t1 - Ds0t0 - Ds0t1 + Ds0s0 */ __pyx_v_A01 = (((__pyx_v_Ds1t0 - __pyx_v_Ds0t0) - __pyx_v_Ds0s1) + __pyx_v_Ds0s0); /* "nipy/algorithms/statistics/intvol.pyx":223 * A22 = Dt1t1 - 2 * Ds0t1 + Ds0s0 * A01 = Ds1t0 - Ds0t0 - Ds0s1 + Ds0s0 * A02 = Ds1t1 - Ds0t1 - Ds0s1 + Ds0s0 # <<<<<<<<<<<<<< * A12 = Dt0t1 - Ds0t0 - Ds0t1 + Ds0s0 * length = sqrt(A00) */ __pyx_v_A02 = (((__pyx_v_Ds1t1 - __pyx_v_Ds0t1) - __pyx_v_Ds0s1) + __pyx_v_Ds0s0); /* "nipy/algorithms/statistics/intvol.pyx":224 * A01 = Ds1t0 - Ds0t0 - Ds0s1 + Ds0s0 * A02 = Ds1t1 - Ds0t1 - Ds0s1 + Ds0s0 * A12 = Dt0t1 - Ds0t0 - Ds0t1 + Ds0s0 # <<<<<<<<<<<<<< * length = sqrt(A00) * norm_proj0 = A11 - A01 * A01 / A00 */ __pyx_v_A12 = (((__pyx_v_Dt0t1 - __pyx_v_Ds0t0) - __pyx_v_Ds0t1) + __pyx_v_Ds0s0); /* "nipy/algorithms/statistics/intvol.pyx":225 * A02 = Ds1t1 - Ds0t1 - Ds0s1 + Ds0s0 * A12 = Dt0t1 - Ds0t0 - Ds0t1 + Ds0s0 * length = sqrt(A00) # <<<<<<<<<<<<<< * norm_proj0 = A11 - A01 * A01 / A00 * norm_proj1 = A22 - A02 * A02 / A00 */ __pyx_v_length = sqrt(__pyx_v_A00); 
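/* Worked reading of this routine (explanatory note, not Cython output).
 * With s0, s1 the edge vertices and t0, t1 the two opposite vertices,
 * A00 = |s1 - s0|^2, so `length` above is the edge length. norm_proj0,
 * norm_proj1 and inner_prod_proj below are |pu|^2, |pv|^2 and pu.dot(pv)
 * for pu = (t0 - s0) and pv = (t1 - s0) with the edge direction projected
 * out, so acos(inner_prod_proj / sqrt(np_len)) is the dihedral angle between
 * the two faces meeting at the edge, and the return value is
 * (PI - angle) * length / (2 * PI). An equivalent NumPy check, assuming
 * coordinate vectors s0, s1, t0, t1 as float arrays:
 *
 *     import numpy as np
 *     e = s1 - s0; u = t0 - s0; v = t1 - s0
 *     pu = u - e * u.dot(e) / e.dot(e)       # remove the edge component
 *     pv = v - e * v.dot(e) / e.dot(e)
 *     angle = np.arccos(pu.dot(pv) / np.sqrt(pu.dot(pu) * pv.dot(pv)))
 *     (np.pi - angle) * np.linalg.norm(e) / (2 * np.pi)
 */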
/* "nipy/algorithms/statistics/intvol.pyx":226 * A12 = Dt0t1 - Ds0t0 - Ds0t1 + Ds0s0 * length = sqrt(A00) * norm_proj0 = A11 - A01 * A01 / A00 # <<<<<<<<<<<<<< * norm_proj1 = A22 - A02 * A02 / A00 * inner_prod_proj = A12 - A01 * A02 / A00 */ __pyx_v_norm_proj0 = (__pyx_v_A11 - ((__pyx_v_A01 * __pyx_v_A01) / __pyx_v_A00)); /* "nipy/algorithms/statistics/intvol.pyx":227 * length = sqrt(A00) * norm_proj0 = A11 - A01 * A01 / A00 * norm_proj1 = A22 - A02 * A02 / A00 # <<<<<<<<<<<<<< * inner_prod_proj = A12 - A01 * A02 / A00 * np_len = norm_proj0 * norm_proj1 */ __pyx_v_norm_proj1 = (__pyx_v_A22 - ((__pyx_v_A02 * __pyx_v_A02) / __pyx_v_A00)); /* "nipy/algorithms/statistics/intvol.pyx":228 * norm_proj0 = A11 - A01 * A01 / A00 * norm_proj1 = A22 - A02 * A02 / A00 * inner_prod_proj = A12 - A01 * A02 / A00 # <<<<<<<<<<<<<< * np_len = norm_proj0 * norm_proj1 * if np_len <= 0: # would otherwise lead to NaN return value */ __pyx_v_inner_prod_proj = (__pyx_v_A12 - ((__pyx_v_A01 * __pyx_v_A02) / __pyx_v_A00)); /* "nipy/algorithms/statistics/intvol.pyx":229 * norm_proj1 = A22 - A02 * A02 / A00 * inner_prod_proj = A12 - A01 * A02 / A00 * np_len = norm_proj0 * norm_proj1 # <<<<<<<<<<<<<< * if np_len <= 0: # would otherwise lead to NaN return value * return 0 */ __pyx_v_np_len = (__pyx_v_norm_proj0 * __pyx_v_norm_proj1); /* "nipy/algorithms/statistics/intvol.pyx":230 * inner_prod_proj = A12 - A01 * A02 / A00 * np_len = norm_proj0 * norm_proj1 * if np_len <= 0: # would otherwise lead to NaN return value # <<<<<<<<<<<<<< * return 0 * # hedge for small rounding errors above 1 and below -1 */ __pyx_t_1 = (__pyx_v_np_len <= 0.0); if (__pyx_t_1) { /* "nipy/algorithms/statistics/intvol.pyx":231 * np_len = norm_proj0 * norm_proj1 * if np_len <= 0: # would otherwise lead to NaN return value * return 0 # <<<<<<<<<<<<<< * # hedge for small rounding errors above 1 and below -1 * acosval = limited_acos(inner_prod_proj / sqrt(np_len)) */ __pyx_r = 0.0; goto __pyx_L0; goto __pyx_L4; } __pyx_L4:; /* "nipy/algorithms/statistics/intvol.pyx":233 * return 0 * # hedge for small rounding errors above 1 and below -1 * acosval = limited_acos(inner_prod_proj / sqrt(np_len)) # <<<<<<<<<<<<<< * a = (PI - acosval) * length / (2 * PI) * return a */ __pyx_v_acosval = __pyx_f_4nipy_10algorithms_10statistics_6intvol_limited_acos((__pyx_v_inner_prod_proj / sqrt(__pyx_v_np_len))); /* "nipy/algorithms/statistics/intvol.pyx":234 * # hedge for small rounding errors above 1 and below -1 * acosval = limited_acos(inner_prod_proj / sqrt(np_len)) * a = (PI - acosval) * length / (2 * PI) # <<<<<<<<<<<<<< * return a * */ __pyx_v_a = (((__pyx_v_4nipy_10algorithms_10statistics_6intvol_PI - __pyx_v_acosval) * __pyx_v_length) / (2.0 * __pyx_v_4nipy_10algorithms_10statistics_6intvol_PI)); /* "nipy/algorithms/statistics/intvol.pyx":235 * acosval = limited_acos(inner_prod_proj / sqrt(np_len)) * a = (PI - acosval) * length / (2 * PI) * return a # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_a; goto __pyx_L0; __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_10algorithms_10statistics_6intvol_7_mu1_tetface(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyObject *__pyx_pw_4nipy_10algorithms_10statistics_6intvol_7_mu1_tetface(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { double __pyx_v_Ds0s0; double __pyx_v_Ds0s1; double __pyx_v_Ds1s1; double __pyx_v_Ds0t0; double __pyx_v_Ds0t1; double __pyx_v_Ds1t0; double __pyx_v_Ds1t1; double __pyx_v_Dt0t0; double __pyx_v_Dt0t1; 
double __pyx_v_Dt1t1; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("_mu1_tetface (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__Ds0s0,&__pyx_n_s__Ds0s1,&__pyx_n_s__Ds1s1,&__pyx_n_s__Ds0t0,&__pyx_n_s__Ds0t1,&__pyx_n_s__Ds1t0,&__pyx_n_s__Ds1t1,&__pyx_n_s__Dt0t0,&__pyx_n_s__Dt0t1,&__pyx_n_s__Dt1t1,0}; PyObject* values[10] = {0,0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9); case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__Ds0s0)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__Ds0s1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_mu1_tetface", 1, 10, 10, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 202; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__Ds1s1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_mu1_tetface", 1, 10, 10, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 202; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__Ds0t0)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_mu1_tetface", 1, 10, 10, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 202; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__Ds0t1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_mu1_tetface", 1, 10, 10, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 202; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__Ds1t0)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_mu1_tetface", 1, 10, 10, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 202; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__Ds1t1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_mu1_tetface", 1, 10, 10, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 202; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__Dt0t0)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_mu1_tetface", 1, 10, 10, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 202; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__Dt0t1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_mu1_tetface", 1, 10, 10, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 202; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 9: if (likely((values[9] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__Dt1t1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_mu1_tetface", 1, 10, 10, 9); 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 202; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "_mu1_tetface") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 202; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 10) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); values[9] = PyTuple_GET_ITEM(__pyx_args, 9); } __pyx_v_Ds0s0 = __pyx_PyFloat_AsDouble(values[0]); if (unlikely((__pyx_v_Ds0s0 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 202; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_Ds0s1 = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_Ds0s1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 203; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_Ds1s1 = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_Ds1s1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 204; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_Ds0t0 = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_Ds0t0 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 205; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_Ds0t1 = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_Ds0t1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 206; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_Ds1t0 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_Ds1t0 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_Ds1t1 = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_Ds1t1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 208; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_Dt0t0 = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_Dt0t0 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 209; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_Dt0t1 = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_Dt0t1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 210; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_Dt1t1 = __pyx_PyFloat_AsDouble(values[9]); if (unlikely((__pyx_v_Dt1t1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("_mu1_tetface", 1, 10, 10, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 202; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.algorithms.statistics.intvol._mu1_tetface", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = 
__pyx_pf_4nipy_10algorithms_10statistics_6intvol_6_mu1_tetface(__pyx_self, __pyx_v_Ds0s0, __pyx_v_Ds0s1, __pyx_v_Ds1s1, __pyx_v_Ds0t0, __pyx_v_Ds0t1, __pyx_v_Ds1t0, __pyx_v_Ds1t1, __pyx_v_Dt0t0, __pyx_v_Dt0t1, __pyx_v_Dt1t1); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/algorithms/statistics/intvol.pyx":202 * * @cython.cdivision(True) * cpdef double _mu1_tetface(double Ds0s0, # <<<<<<<<<<<<<< * double Ds0s1, * double Ds1s1, */ static PyObject *__pyx_pf_4nipy_10algorithms_10statistics_6intvol_6_mu1_tetface(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_Ds0s0, double __pyx_v_Ds0s1, double __pyx_v_Ds1s1, double __pyx_v_Ds0t0, double __pyx_v_Ds0t1, double __pyx_v_Ds1t0, double __pyx_v_Ds1t1, double __pyx_v_Dt0t0, double __pyx_v_Dt0t1, double __pyx_v_Dt1t1) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_mu1_tetface", 0); __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyFloat_FromDouble(__pyx_f_4nipy_10algorithms_10statistics_6intvol__mu1_tetface(__pyx_v_Ds0s0, __pyx_v_Ds0s1, __pyx_v_Ds1s1, __pyx_v_Ds0t0, __pyx_v_Ds0t1, __pyx_v_Ds1t0, __pyx_v_Ds1t1, __pyx_v_Dt0t0, __pyx_v_Dt0t1, __pyx_v_Dt1t1, 0)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 202; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("nipy.algorithms.statistics.intvol._mu1_tetface", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/algorithms/statistics/intvol.pyx":238 * * * cpdef double mu2_tri(double D00, double D01, double D02, # <<<<<<<<<<<<<< * double D11, double D12, * double D22) nogil: */ static PyObject *__pyx_pw_4nipy_10algorithms_10statistics_6intvol_9mu2_tri(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static double __pyx_f_4nipy_10algorithms_10statistics_6intvol_mu2_tri(double __pyx_v_D00, double __pyx_v_D01, double __pyx_v_D02, double __pyx_v_D11, double __pyx_v_D12, double __pyx_v_D22, CYTHON_UNUSED int __pyx_skip_dispatch) { double __pyx_v_C00; double __pyx_v_C01; double __pyx_v_C11; double __pyx_v_L; double __pyx_r; int __pyx_t_1; /* "nipy/algorithms/statistics/intvol.pyx":269 * """ * cdef double C00, C01, C11, L * C00 = D11 - 2*D01 + D00 # <<<<<<<<<<<<<< * C01 = D12 - D01 - D02 + D00 * C11 = D22 - 2*D02 + D00 */ __pyx_v_C00 = ((__pyx_v_D11 - (2.0 * __pyx_v_D01)) + __pyx_v_D00); /* "nipy/algorithms/statistics/intvol.pyx":270 * cdef double C00, C01, C11, L * C00 = D11 - 2*D01 + D00 * C01 = D12 - D01 - D02 + D00 # <<<<<<<<<<<<<< * C11 = D22 - 2*D02 + D00 * L = C00 * C11 - C01 * C01 */ __pyx_v_C01 = (((__pyx_v_D12 - __pyx_v_D01) - __pyx_v_D02) + __pyx_v_D00); /* "nipy/algorithms/statistics/intvol.pyx":271 * C00 = D11 - 2*D01 + D00 * C01 = D12 - D01 - D02 + D00 * C11 = D22 - 2*D02 + D00 # <<<<<<<<<<<<<< * L = C00 * C11 - C01 * C01 * # Negative area appeared to result from floating point errors on PPC */ __pyx_v_C11 = ((__pyx_v_D22 - (2.0 * __pyx_v_D02)) + __pyx_v_D00); /* "nipy/algorithms/statistics/intvol.pyx":272 * C01 = D12 - D01 - D02 + D00 * C11 = D22 - 2*D02 + D00 * L = C00 * C11 - C01 * C01 # <<<<<<<<<<<<<< * # Negative area appeared to result from floating point errors on PPC * if L < 0: */ __pyx_v_L = 
((__pyx_v_C00 * __pyx_v_C11) - (__pyx_v_C01 * __pyx_v_C01)); /* "nipy/algorithms/statistics/intvol.pyx":274 * L = C00 * C11 - C01 * C01 * # Negative area appeared to result from floating point errors on PPC * if L < 0: # <<<<<<<<<<<<<< * return 0.0 * return sqrt(L) * 0.5 */ __pyx_t_1 = (__pyx_v_L < 0.0); if (__pyx_t_1) { /* "nipy/algorithms/statistics/intvol.pyx":275 * # Negative area appeared to result from floating point errors on PPC * if L < 0: * return 0.0 # <<<<<<<<<<<<<< * return sqrt(L) * 0.5 * */ __pyx_r = 0.0; goto __pyx_L0; goto __pyx_L3; } __pyx_L3:; /* "nipy/algorithms/statistics/intvol.pyx":276 * if L < 0: * return 0.0 * return sqrt(L) * 0.5 # <<<<<<<<<<<<<< * * */ __pyx_r = (sqrt(__pyx_v_L) * 0.5); goto __pyx_L0; __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_10algorithms_10statistics_6intvol_9mu2_tri(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_10algorithms_10statistics_6intvol_8mu2_tri[] = " Compute the 2nd intrinsic volume of triangle\n\n 2nd intrinsic volume (just area in this case) of a triangle with coordinates\n implied by the dot products below.\n\n Parameters\n ----------\n D00 : float\n If ``cv0`` is a 3-vector of coordinates for the first vertex, `D00` is\n ``cv0.dot(cv0)``\n D01 : float\n ``cv0.dot(cv1)`` where ``cv1`` is the coordinates for the second\n vertex.\n D02 : float\n ``cv0.dot(cv2)``\n D11 : float\n ``cv1.dot(cv1)``\n D12 : float\n ``cv1.dot(cv2)``\n D22 : float\n ``cv2.dot(cv2)``\n\n Returns\n -------\n mu2 : float\n area of triangle\n "; static PyObject *__pyx_pw_4nipy_10algorithms_10statistics_6intvol_9mu2_tri(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { double __pyx_v_D00; double __pyx_v_D01; double __pyx_v_D02; double __pyx_v_D11; double __pyx_v_D12; double __pyx_v_D22; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("mu2_tri (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__D00,&__pyx_n_s__D01,&__pyx_n_s__D02,&__pyx_n_s__D11,&__pyx_n_s__D12,&__pyx_n_s__D22,0}; PyObject* values[6] = {0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__D00)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__D01)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("mu2_tri", 1, 6, 6, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 238; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__D02)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("mu2_tri", 1, 6, 6, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 238; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__D11)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("mu2_tri", 1, 6, 6, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 238; 
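/* Worked example (hand-written sketch, not Cython output). The C body above
 * computes the Gram determinant L = C00*C11 - C01*C01 of the edge vectors
 * cv1 - cv0 and cv2 - cv0 and returns sqrt(L)/2, the triangle area. For a
 * 3-4-5 right triangle, assuming NumPy and an installed nipy build:
 *
 *     import numpy as np
 *     from nipy.algorithms.statistics import intvol
 *     cv0 = np.zeros(3); cv1 = np.array([3., 0., 0.]); cv2 = np.array([0., 4., 0.])
 *     D00, D01, D02 = cv0.dot(cv0), cv0.dot(cv1), cv0.dot(cv2)
 *     D11, D12, D22 = cv1.dot(cv1), cv1.dot(cv2), cv2.dot(cv2)
 *     intvol.mu2_tri(D00, D01, D02, D11, D12, D22)   # 6.0 -- area
 *     intvol.mu1_tri(D00, D01, D02, D11, D12, D22)   # 6.0 -- half perimeter (defined below)
 *     intvol.mu1_edge(D00, D01, D11)                 # 3.0 -- |cv1 - cv0| (defined below)
 */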
__pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__D12)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("mu2_tri", 1, 6, 6, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 238; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__D22)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("mu2_tri", 1, 6, 6, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 238; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "mu2_tri") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 238; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 6) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); } __pyx_v_D00 = __pyx_PyFloat_AsDouble(values[0]); if (unlikely((__pyx_v_D00 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 238; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_D01 = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_D01 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 238; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_D02 = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_D02 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 238; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_D11 = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_D11 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 239; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_D12 = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_D12 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 239; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_D22 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_D22 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 240; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("mu2_tri", 1, 6, 6, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 238; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.algorithms.statistics.intvol.mu2_tri", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4nipy_10algorithms_10statistics_6intvol_8mu2_tri(__pyx_self, __pyx_v_D00, __pyx_v_D01, __pyx_v_D02, __pyx_v_D11, __pyx_v_D12, __pyx_v_D22); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/algorithms/statistics/intvol.pyx":238 * * * cpdef double mu2_tri(double D00, double D01, double D02, # <<<<<<<<<<<<<< * double D11, double D12, * double D22) nogil: */ static PyObject *__pyx_pf_4nipy_10algorithms_10statistics_6intvol_8mu2_tri(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_D00, double __pyx_v_D01, double __pyx_v_D02, double __pyx_v_D11, double __pyx_v_D12, double __pyx_v_D22) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = 
NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("mu2_tri", 0); __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyFloat_FromDouble(__pyx_f_4nipy_10algorithms_10statistics_6intvol_mu2_tri(__pyx_v_D00, __pyx_v_D01, __pyx_v_D02, __pyx_v_D11, __pyx_v_D12, __pyx_v_D22, 0)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 238; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("nipy.algorithms.statistics.intvol.mu2_tri", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/algorithms/statistics/intvol.pyx":279 * * * cpdef double mu1_tri(double D00, double D01, double D02, # <<<<<<<<<<<<<< * double D11, double D12, * double D22) nogil: */ static PyObject *__pyx_pw_4nipy_10algorithms_10statistics_6intvol_11mu1_tri(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static double __pyx_f_4nipy_10algorithms_10statistics_6intvol_mu1_tri(double __pyx_v_D00, double __pyx_v_D01, double __pyx_v_D02, double __pyx_v_D11, double __pyx_v_D12, double __pyx_v_D22, CYTHON_UNUSED int __pyx_skip_dispatch) { double __pyx_v_mu; double __pyx_r; /* "nipy/algorithms/statistics/intvol.pyx":309 * 1/2 perimeter of triangle * """ * cdef double mu = 0 # <<<<<<<<<<<<<< * mu += mu1_edge(D00, D01, D11) * mu += mu1_edge(D00, D02, D22) */ __pyx_v_mu = 0.0; /* "nipy/algorithms/statistics/intvol.pyx":310 * """ * cdef double mu = 0 * mu += mu1_edge(D00, D01, D11) # <<<<<<<<<<<<<< * mu += mu1_edge(D00, D02, D22) * mu += mu1_edge(D11, D12, D22) */ __pyx_v_mu = (__pyx_v_mu + __pyx_f_4nipy_10algorithms_10statistics_6intvol_mu1_edge(__pyx_v_D00, __pyx_v_D01, __pyx_v_D11, 0)); /* "nipy/algorithms/statistics/intvol.pyx":311 * cdef double mu = 0 * mu += mu1_edge(D00, D01, D11) * mu += mu1_edge(D00, D02, D22) # <<<<<<<<<<<<<< * mu += mu1_edge(D11, D12, D22) * return mu * 0.5 */ __pyx_v_mu = (__pyx_v_mu + __pyx_f_4nipy_10algorithms_10statistics_6intvol_mu1_edge(__pyx_v_D00, __pyx_v_D02, __pyx_v_D22, 0)); /* "nipy/algorithms/statistics/intvol.pyx":312 * mu += mu1_edge(D00, D01, D11) * mu += mu1_edge(D00, D02, D22) * mu += mu1_edge(D11, D12, D22) # <<<<<<<<<<<<<< * return mu * 0.5 * */ __pyx_v_mu = (__pyx_v_mu + __pyx_f_4nipy_10algorithms_10statistics_6intvol_mu1_edge(__pyx_v_D11, __pyx_v_D12, __pyx_v_D22, 0)); /* "nipy/algorithms/statistics/intvol.pyx":313 * mu += mu1_edge(D00, D02, D22) * mu += mu1_edge(D11, D12, D22) * return mu * 0.5 # <<<<<<<<<<<<<< * * */ __pyx_r = (__pyx_v_mu * 0.5); goto __pyx_L0; __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_10algorithms_10statistics_6intvol_11mu1_tri(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_10algorithms_10statistics_6intvol_10mu1_tri[] = " Compute the 1st intrinsic volume of triangle\n\n 1st intrinsic volume (1/2 the perimeter) of a triangle with coordinates\n implied by the dot products below.\n\n Parameters\n ----------\n D00 : float\n If ``cv0`` is a 3-vector of coordinates for the first vertex, `D00` is\n ``cv0.dot(cv0)``\n D01 : float\n ``cv0.dot(cv1)`` where ``cv1`` is the coordinates for the second\n vertex.\n D02 : float\n ``cv0.dot(cv2)``\n D11 : float\n ``cv1.dot(cv1)``\n D12 : float\n 
``cv1.dot(cv2)``\n D22 : float\n ``cv2.dot(cv2)``\n\n Returns\n -------\n mu1 : float\n 1/2 perimeter of triangle\n "; static PyObject *__pyx_pw_4nipy_10algorithms_10statistics_6intvol_11mu1_tri(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { double __pyx_v_D00; double __pyx_v_D01; double __pyx_v_D02; double __pyx_v_D11; double __pyx_v_D12; double __pyx_v_D22; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("mu1_tri (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__D00,&__pyx_n_s__D01,&__pyx_n_s__D02,&__pyx_n_s__D11,&__pyx_n_s__D12,&__pyx_n_s__D22,0}; PyObject* values[6] = {0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__D00)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__D01)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("mu1_tri", 1, 6, 6, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__D02)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("mu1_tri", 1, 6, 6, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__D11)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("mu1_tri", 1, 6, 6, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__D12)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("mu1_tri", 1, 6, 6, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__D22)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("mu1_tri", 1, 6, 6, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "mu1_tri") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 6) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); } __pyx_v_D00 = __pyx_PyFloat_AsDouble(values[0]); if (unlikely((__pyx_v_D00 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_D01 = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_D01 == (double)-1) && PyErr_Occurred())) {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_D02 = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_D02 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_D11 = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_D11 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 280; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_D12 = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_D12 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 280; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_D22 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_D22 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 281; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("mu1_tri", 1, 6, 6, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.algorithms.statistics.intvol.mu1_tri", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4nipy_10algorithms_10statistics_6intvol_10mu1_tri(__pyx_self, __pyx_v_D00, __pyx_v_D01, __pyx_v_D02, __pyx_v_D11, __pyx_v_D12, __pyx_v_D22); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/algorithms/statistics/intvol.pyx":279 * * * cpdef double mu1_tri(double D00, double D01, double D02, # <<<<<<<<<<<<<< * double D11, double D12, * double D22) nogil: */ static PyObject *__pyx_pf_4nipy_10algorithms_10statistics_6intvol_10mu1_tri(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_D00, double __pyx_v_D01, double __pyx_v_D02, double __pyx_v_D11, double __pyx_v_D12, double __pyx_v_D22) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("mu1_tri", 0); __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyFloat_FromDouble(__pyx_f_4nipy_10algorithms_10statistics_6intvol_mu1_tri(__pyx_v_D00, __pyx_v_D01, __pyx_v_D02, __pyx_v_D11, __pyx_v_D12, __pyx_v_D22, 0)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("nipy.algorithms.statistics.intvol.mu1_tri", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/algorithms/statistics/intvol.pyx":316 * * * cpdef double mu1_edge(double D00, double D01, double D11) nogil: # <<<<<<<<<<<<<< * """ Compute the 1st intrinsic volume (length) of line segment * */ static PyObject *__pyx_pw_4nipy_10algorithms_10statistics_6intvol_13mu1_edge(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static double __pyx_f_4nipy_10algorithms_10statistics_6intvol_mu1_edge(double __pyx_v_D00, double __pyx_v_D01, double __pyx_v_D11, CYTHON_UNUSED int __pyx_skip_dispatch) { double __pyx_r; /* "nipy/algorithms/statistics/intvol.pyx":337 * length of line segment * """ * return sqrt(D00 - 2*D01 + D11) 
# <<<<<<<<<<<<<< * * */ __pyx_r = sqrt(((__pyx_v_D00 - (2.0 * __pyx_v_D01)) + __pyx_v_D11)); goto __pyx_L0; __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_10algorithms_10statistics_6intvol_13mu1_edge(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_10algorithms_10statistics_6intvol_12mu1_edge[] = " Compute the 1st intrinsic volume (length) of line segment\n\n Length of a line segment with vertex coordinates implied by dot products\n below.\n\n Parameters\n ----------\n D00 : float\n If ``cv0`` is a 3-vector of coordinates for the line start, `D00` is\n ``cv0.dot(cv0)``\n D01 : float\n ``cv0.dot(cv1)`` where ``cv1`` is the coordinates for the line end.\n D11 : float\n ``cv1.dot(cv1)``\n\n Returns\n -------\n mu0 : float\n length of line segment\n "; static PyObject *__pyx_pw_4nipy_10algorithms_10statistics_6intvol_13mu1_edge(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { double __pyx_v_D00; double __pyx_v_D01; double __pyx_v_D11; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("mu1_edge (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__D00,&__pyx_n_s__D01,&__pyx_n_s__D11,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__D00)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__D01)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("mu1_edge", 1, 3, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 316; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__D11)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("mu1_edge", 1, 3, 3, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 316; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "mu1_edge") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 316; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v_D00 = __pyx_PyFloat_AsDouble(values[0]); if (unlikely((__pyx_v_D00 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 316; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_D01 = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_D01 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 316; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_D11 = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_D11 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 316; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("mu1_edge", 1, 3, 3, 
PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 316; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.algorithms.statistics.intvol.mu1_edge", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4nipy_10algorithms_10statistics_6intvol_12mu1_edge(__pyx_self, __pyx_v_D00, __pyx_v_D01, __pyx_v_D11); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/algorithms/statistics/intvol.pyx":316 * * * cpdef double mu1_edge(double D00, double D01, double D11) nogil: # <<<<<<<<<<<<<< * """ Compute the 1st intrinsic volume (length) of line segment * */ static PyObject *__pyx_pf_4nipy_10algorithms_10statistics_6intvol_12mu1_edge(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_D00, double __pyx_v_D01, double __pyx_v_D11) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("mu1_edge", 0); __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyFloat_FromDouble(__pyx_f_4nipy_10algorithms_10statistics_6intvol_mu1_edge(__pyx_v_D00, __pyx_v_D01, __pyx_v_D11, 0)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 316; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("nipy.algorithms.statistics.intvol.mu1_edge", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_10algorithms_10statistics_6intvol_15EC3d(PyObject *__pyx_self, PyObject *__pyx_v_mask); /*proto*/ static char __pyx_doc_4nipy_10algorithms_10statistics_6intvol_14EC3d[] = " Compute Euler characteristic of region within `mask`\n\n Given a 3d `mask`, compute the 0th intrinsic volume (Euler characteristic)\n of the masked region. The region is broken up into tetrahedra / triangles /\n edges / vertices, which are included based on whether all voxels in the\n tetrahedron / triangle / edge / vertex are in the mask or not.\n\n Parameters\n ----------\n mask : ndarray((i,j,k), np.int)\n Binary mask determining whether or not a voxel is in the mask.\n\n Returns\n -------\n mu0 : int\n Euler characteristic\n\n Notes\n -----\n The array mask is assumed to be binary. At the time of writing, it is not\n clear how to get cython to use np.bool arrays.\n\n The 3d cubes are triangulated into 6 tetrahedra of equal volume, as\n described in the reference below.\n\n References\n ----------\n Taylor, J.E. & Worsley, K.J. (2007). 
\"Detecting sparse signal in random fields,\n with an application to brain mapping.\"\n Journal of the American Statistical Association, 102(479):913-928.\n "; static PyMethodDef __pyx_mdef_4nipy_10algorithms_10statistics_6intvol_15EC3d = {__Pyx_NAMESTR("EC3d"), (PyCFunction)__pyx_pw_4nipy_10algorithms_10statistics_6intvol_15EC3d, METH_O, __Pyx_DOCSTR(__pyx_doc_4nipy_10algorithms_10statistics_6intvol_14EC3d)}; static PyObject *__pyx_pw_4nipy_10algorithms_10statistics_6intvol_15EC3d(PyObject *__pyx_self, PyObject *__pyx_v_mask) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("EC3d (wrapper)", 0); __pyx_r = __pyx_pf_4nipy_10algorithms_10statistics_6intvol_14EC3d(__pyx_self, ((PyObject *)__pyx_v_mask)); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/algorithms/statistics/intvol.pyx":340 * * * def EC3d(mask): # <<<<<<<<<<<<<< * """ Compute Euler characteristic of region within `mask` * */ static PyObject *__pyx_pf_4nipy_10algorithms_10statistics_6intvol_14EC3d(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_mask) { PyArrayObject *__pyx_v_mask_c = 0; PyArrayObject *__pyx_v_fpmask = 0; PyArrayObject *__pyx_v_d2 = 0; PyArrayObject *__pyx_v_d3 = 0; PyArrayObject *__pyx_v_d4 = 0; npy_intp __pyx_v_i; npy_intp __pyx_v_j; npy_intp __pyx_v_k; npy_intp __pyx_v_l; npy_intp __pyx_v_s0; npy_intp __pyx_v_s1; npy_intp __pyx_v_s2; npy_intp __pyx_v_ds2; npy_intp __pyx_v_ds3; npy_intp __pyx_v_ds4; npy_intp __pyx_v_index; npy_intp __pyx_v_m; CYTHON_UNUSED npy_intp __pyx_v_nvox; npy_intp __pyx_v_ss0; npy_intp __pyx_v_ss1; npy_intp __pyx_v_ss2; npy_intp __pyx_v_v0; npy_intp __pyx_v_v1; npy_intp __pyx_v_v2; npy_intp __pyx_v_v3; npy_intp __pyx_v_l0; PyObject *__pyx_v_pmask_shape = NULL; PyObject *__pyx_v_pmask = NULL; PyArrayObject *__pyx_v_strides = 0; PyObject *__pyx_v_union = NULL; PyObject *__pyx_v_c = NULL; __Pyx_LocalBuf_ND __pyx_pybuffernd_d2; __Pyx_Buffer __pyx_pybuffer_d2; __Pyx_LocalBuf_ND __pyx_pybuffernd_d3; __Pyx_Buffer __pyx_pybuffer_d3; __Pyx_LocalBuf_ND __pyx_pybuffernd_d4; __Pyx_Buffer __pyx_pybuffer_d4; __Pyx_LocalBuf_ND __pyx_pybuffernd_fpmask; __Pyx_Buffer __pyx_pybuffer_fpmask; __Pyx_LocalBuf_ND __pyx_pybuffernd_mask_c; __Pyx_Buffer __pyx_pybuffer_mask_c; __Pyx_LocalBuf_ND __pyx_pybuffernd_strides; __Pyx_Buffer __pyx_pybuffer_strides; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; int __pyx_t_5; PyArrayObject *__pyx_t_6 = NULL; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; npy_intp __pyx_t_11; npy_intp __pyx_t_12; npy_intp __pyx_t_13; PyArrayObject *__pyx_t_14 = NULL; PyObject *__pyx_t_15 = NULL; PyObject *__pyx_t_16 = NULL; PyArrayObject *__pyx_t_17 = NULL; PyObject *__pyx_t_18 = NULL; PyObject *__pyx_t_19 = NULL; PyObject *__pyx_t_20 = NULL; PyObject *__pyx_t_21 = NULL; PyObject *__pyx_t_22 = NULL; PyArrayObject *__pyx_t_23 = NULL; PyArrayObject *__pyx_t_24 = NULL; PyArrayObject *__pyx_t_25 = NULL; long __pyx_t_26; long __pyx_t_27; long __pyx_t_28; long __pyx_t_29; long __pyx_t_30; long __pyx_t_31; npy_intp __pyx_t_32; npy_intp __pyx_t_33; npy_intp __pyx_t_34; long __pyx_t_35; npy_intp __pyx_t_36; npy_intp __pyx_t_37; long __pyx_t_38; npy_intp __pyx_t_39; long __pyx_t_40; npy_intp __pyx_t_41; long __pyx_t_42; npy_intp __pyx_t_43; npy_intp __pyx_t_44; npy_intp __pyx_t_45; npy_intp __pyx_t_46; long __pyx_t_47; npy_intp __pyx_t_48; npy_intp __pyx_t_49; long __pyx_t_50; npy_intp __pyx_t_51; 
long __pyx_t_52; npy_intp __pyx_t_53; npy_intp __pyx_t_54; npy_intp __pyx_t_55; long __pyx_t_56; npy_intp __pyx_t_57; npy_intp __pyx_t_58; long __pyx_t_59; npy_intp __pyx_t_60; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("EC3d", 0); __pyx_pybuffer_mask_c.pybuffer.buf = NULL; __pyx_pybuffer_mask_c.refcount = 0; __pyx_pybuffernd_mask_c.data = NULL; __pyx_pybuffernd_mask_c.rcbuffer = &__pyx_pybuffer_mask_c; __pyx_pybuffer_fpmask.pybuffer.buf = NULL; __pyx_pybuffer_fpmask.refcount = 0; __pyx_pybuffernd_fpmask.data = NULL; __pyx_pybuffernd_fpmask.rcbuffer = &__pyx_pybuffer_fpmask; __pyx_pybuffer_d2.pybuffer.buf = NULL; __pyx_pybuffer_d2.refcount = 0; __pyx_pybuffernd_d2.data = NULL; __pyx_pybuffernd_d2.rcbuffer = &__pyx_pybuffer_d2; __pyx_pybuffer_d3.pybuffer.buf = NULL; __pyx_pybuffer_d3.refcount = 0; __pyx_pybuffernd_d3.data = NULL; __pyx_pybuffernd_d3.rcbuffer = &__pyx_pybuffer_d3; __pyx_pybuffer_d4.pybuffer.buf = NULL; __pyx_pybuffer_d4.refcount = 0; __pyx_pybuffernd_d4.data = NULL; __pyx_pybuffernd_d4.rcbuffer = &__pyx_pybuffer_d4; __pyx_pybuffer_strides.pybuffer.buf = NULL; __pyx_pybuffer_strides.refcount = 0; __pyx_pybuffernd_strides.data = NULL; __pyx_pybuffernd_strides.rcbuffer = &__pyx_pybuffer_strides; /* "nipy/algorithms/statistics/intvol.pyx":372 * Journal of the American Statistical Association, 102(479):913-928. * """ * if not set(np.unique(mask)).issubset([0,1]): # <<<<<<<<<<<<<< * raise ValueError('mask should be filled with 0/1 ' * 'values, but be of type np.int') */ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 372; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__unique); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 372; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 372; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_mask); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_mask); __Pyx_GIVEREF(__pyx_v_mask); __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 372; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 372; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyObject_Call(((PyObject *)((PyObject*)(&PySet_Type))), ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 372; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __pyx_t_1 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__issubset); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 372; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyList_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 372; __pyx_clineno = __LINE__; 
goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_int_0); PyList_SET_ITEM(__pyx_t_3, 0, __pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); __Pyx_INCREF(__pyx_int_1); PyList_SET_ITEM(__pyx_t_3, 1, __pyx_int_1); __Pyx_GIVEREF(__pyx_int_1); __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 372; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_t_3)); __Pyx_GIVEREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __pyx_t_3 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 372; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 372; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_5 = (!__pyx_t_4); if (__pyx_t_5) { /* "nipy/algorithms/statistics/intvol.pyx":373 * """ * if not set(np.unique(mask)).issubset([0,1]): * raise ValueError('mask should be filled with 0/1 ' # <<<<<<<<<<<<<< * 'values, but be of type np.int') * cdef: */ __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_2), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 373; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[0]; __pyx_lineno = 373; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L3; } __pyx_L3:; /* "nipy/algorithms/statistics/intvol.pyx":389 * np.npy_intp ss0, ss1, ss2 # strides * np.npy_intp v0, v1, v2, v3 # vertices * np.npy_intp l0 = 0 # <<<<<<<<<<<<<< * * mask_c = mask */ __pyx_v_l0 = 0; /* "nipy/algorithms/statistics/intvol.pyx":391 * np.npy_intp l0 = 0 * * mask_c = mask # <<<<<<<<<<<<<< * * pmask_shape = np.array(mask.shape) + 1 */ if (!(likely(((__pyx_v_mask) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_mask, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 391; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_6 = ((PyArrayObject *)__pyx_v_mask); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_mask_c.rcbuffer->pybuffer); __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_mask_c.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 3, 0, __pyx_stack); if (unlikely(__pyx_t_7 < 0)) { PyErr_Fetch(&__pyx_t_8, &__pyx_t_9, &__pyx_t_10); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_mask_c.rcbuffer->pybuffer, (PyObject*)__pyx_v_mask_c, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 3, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_8); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_8, __pyx_t_9, __pyx_t_10); } } __pyx_pybuffernd_mask_c.diminfo[0].strides = __pyx_pybuffernd_mask_c.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_mask_c.diminfo[0].shape = __pyx_pybuffernd_mask_c.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_mask_c.diminfo[1].strides = __pyx_pybuffernd_mask_c.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_mask_c.diminfo[1].shape = __pyx_pybuffernd_mask_c.rcbuffer->pybuffer.shape[1]; 
__pyx_pybuffernd_mask_c.diminfo[2].strides = __pyx_pybuffernd_mask_c.rcbuffer->pybuffer.strides[2]; __pyx_pybuffernd_mask_c.diminfo[2].shape = __pyx_pybuffernd_mask_c.rcbuffer->pybuffer.shape[2]; if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 391; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_6 = 0; __Pyx_INCREF(__pyx_v_mask); __pyx_v_mask_c = ((PyArrayObject *)__pyx_v_mask); /* "nipy/algorithms/statistics/intvol.pyx":393 * mask_c = mask * * pmask_shape = np.array(mask.shape) + 1 # <<<<<<<<<<<<<< * pmask = np.zeros(pmask_shape, np.int) * pmask[:-1,:-1,:-1] = mask_c */ __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 393; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__array); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 393; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyObject_GetAttr(__pyx_v_mask, __pyx_n_s__shape); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 393; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 393; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 393; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __pyx_t_1 = PyNumber_Add(__pyx_t_3, __pyx_int_1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 393; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pmask_shape = __pyx_t_1; __pyx_t_1 = 0; /* "nipy/algorithms/statistics/intvol.pyx":394 * * pmask_shape = np.array(mask.shape) + 1 * pmask = np.zeros(pmask_shape, np.int) # <<<<<<<<<<<<<< * pmask[:-1,:-1,:-1] = mask_c * */ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__zeros); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__int); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_pmask_shape); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_pmask_shape); __Pyx_GIVEREF(__pyx_v_pmask_shape); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_2); 
__Pyx_GIVEREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __pyx_v_pmask = __pyx_t_2; __pyx_t_2 = 0; /* "nipy/algorithms/statistics/intvol.pyx":395 * pmask_shape = np.array(mask.shape) + 1 * pmask = np.zeros(pmask_shape, np.int) * pmask[:-1,:-1,:-1] = mask_c # <<<<<<<<<<<<<< * * s0, s1, s2 = (pmask.shape[0], pmask.shape[1], pmask.shape[2]) */ if (PyObject_SetItem(__pyx_v_pmask, ((PyObject *)__pyx_k_tuple_6), ((PyObject *)__pyx_v_mask_c)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 395; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/algorithms/statistics/intvol.pyx":397 * pmask[:-1,:-1,:-1] = mask_c * * s0, s1, s2 = (pmask.shape[0], pmask.shape[1], pmask.shape[2]) # <<<<<<<<<<<<<< * * fpmask = pmask.reshape(-1) */ __pyx_t_2 = PyObject_GetAttr(__pyx_v_pmask, __pyx_n_s__shape); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 397; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __Pyx_GetItemInt(__pyx_t_2, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 397; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_11 = __Pyx_PyInt_from_py_Py_intptr_t(__pyx_t_1); if (unlikely((__pyx_t_11 == (npy_intp)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 397; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyObject_GetAttr(__pyx_v_pmask, __pyx_n_s__shape); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 397; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_GetItemInt(__pyx_t_1, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 397; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_12 = __Pyx_PyInt_from_py_Py_intptr_t(__pyx_t_2); if (unlikely((__pyx_t_12 == (npy_intp)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 397; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyObject_GetAttr(__pyx_v_pmask, __pyx_n_s__shape); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 397; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __Pyx_GetItemInt(__pyx_t_2, 2, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 397; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_13 = __Pyx_PyInt_from_py_Py_intptr_t(__pyx_t_1); if (unlikely((__pyx_t_13 == (npy_intp)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 397; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_s0 = __pyx_t_11; __pyx_v_s1 = __pyx_t_12; __pyx_v_s2 = __pyx_t_13; /* "nipy/algorithms/statistics/intvol.pyx":399 * s0, s1, s2 = (pmask.shape[0], pmask.shape[1], pmask.shape[2]) * * fpmask = pmask.reshape(-1) # <<<<<<<<<<<<<< * cdef: * np.ndarray[np.intp_t, ndim=1] strides */ __pyx_t_1 = PyObject_GetAttr(__pyx_v_pmask, __pyx_n_s__reshape); 
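/* The statements translated just above (pmask_shape = np.array(mask.shape) + 1;
 * pmask = np.zeros(pmask_shape, np.int); pmask[:-1,:-1,:-1] = mask_c) zero-pad the
 * binary mask by one plane along each axis, so the loops further down can always
 * look one voxel ahead without special-casing the boundary: padding voxels are zero
 * and never contribute.  The padded array is then flattened into fpmask, which lets
 * simplex vertices be addressed by flat offsets computed from the padded strides.
 * A minimal NumPy sketch of the same padding/flattening idea (illustrative only;
 * `mask` here is a hypothetical 0/1 array, not an object of this module):
 *
 *     import numpy as np
 *     mask = np.zeros((3, 3, 3), dtype=np.intp)    # hypothetical binary mask
 *     mask[1, 1, 1] = 1
 *     pmask = np.zeros(np.array(mask.shape) + 1, dtype=np.intp)
 *     pmask[:-1, :-1, :-1] = mask                  # zero border on the far side
 *     fpmask = pmask.reshape(-1)                   # flat view used by the loops
 */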
if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 399; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_k_tuple_7), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 399; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 399; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_14 = ((PyArrayObject *)__pyx_t_2); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_fpmask.rcbuffer->pybuffer); __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_fpmask.rcbuffer->pybuffer, (PyObject*)__pyx_t_14, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_7 < 0)) { PyErr_Fetch(&__pyx_t_10, &__pyx_t_9, &__pyx_t_8); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_fpmask.rcbuffer->pybuffer, (PyObject*)__pyx_v_fpmask, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_8); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_10, __pyx_t_9, __pyx_t_8); } } __pyx_pybuffernd_fpmask.diminfo[0].strides = __pyx_pybuffernd_fpmask.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_fpmask.diminfo[0].shape = __pyx_pybuffernd_fpmask.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 399; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_14 = 0; __pyx_v_fpmask = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "nipy/algorithms/statistics/intvol.pyx":402 * cdef: * np.ndarray[np.intp_t, ndim=1] strides * strides = np.array(strides_from(pmask_shape, np.bool), dtype=np.intp) # <<<<<<<<<<<<<< * * # First do the interior contributions. 
*/ __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 402; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__array); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 402; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__strides_from); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 402; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 402; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_15 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__bool); if (unlikely(!__pyx_t_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 402; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_15); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 402; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_pmask_shape); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_pmask_shape); __Pyx_GIVEREF(__pyx_v_pmask_shape); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_15); __Pyx_GIVEREF(__pyx_t_15); __pyx_t_15 = 0; __pyx_t_15 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 402; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_15); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 402; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_15); __Pyx_GIVEREF(__pyx_t_15); __pyx_t_15 = 0; __pyx_t_15 = PyDict_New(); if (unlikely(!__pyx_t_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 402; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_15)); __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 402; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_16 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__intp); if (unlikely(!__pyx_t_16)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 402; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_16); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (PyDict_SetItem(__pyx_t_15, ((PyObject *)__pyx_n_s__dtype), __pyx_t_16) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 402; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0; __pyx_t_16 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_3), ((PyObject *)__pyx_t_15)); if (unlikely(!__pyx_t_16)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 402; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_16); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_15)); __pyx_t_15 = 0; if (!(likely(((__pyx_t_16) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_16, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 402; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_17 = 
((PyArrayObject *)__pyx_t_16); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_strides.rcbuffer->pybuffer); __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_strides.rcbuffer->pybuffer, (PyObject*)__pyx_t_17, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_7 < 0)) { PyErr_Fetch(&__pyx_t_8, &__pyx_t_9, &__pyx_t_10); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_strides.rcbuffer->pybuffer, (PyObject*)__pyx_v_strides, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_8); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_8, __pyx_t_9, __pyx_t_10); } } __pyx_pybuffernd_strides.diminfo[0].strides = __pyx_pybuffernd_strides.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_strides.diminfo[0].shape = __pyx_pybuffernd_strides.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 402; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_17 = 0; __pyx_v_strides = ((PyArrayObject *)__pyx_t_16); __pyx_t_16 = 0; /* "nipy/algorithms/statistics/intvol.pyx":407 * # We first figure out which vertices, edges, triangles, tetrahedra * # are uniquely associated with an interior voxel * union = join_complexes(*[cube_with_strides_center((0,0,1), strides), # <<<<<<<<<<<<<< * cube_with_strides_center((0,1,0), strides), * cube_with_strides_center((0,1,1), strides), */ __pyx_t_16 = __Pyx_GetName(__pyx_m, __pyx_n_s__join_complexes); if (unlikely(!__pyx_t_16)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 407; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_16); __pyx_t_15 = __Pyx_GetName(__pyx_m, __pyx_n_s_8); if (unlikely(!__pyx_t_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 407; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_15); __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 407; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(((PyObject *)__pyx_k_tuple_9)); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_k_tuple_9)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_9)); __Pyx_INCREF(((PyObject *)__pyx_v_strides)); PyTuple_SET_ITEM(__pyx_t_3, 1, ((PyObject *)__pyx_v_strides)); __Pyx_GIVEREF(((PyObject *)__pyx_v_strides)); __pyx_t_1 = PyObject_Call(__pyx_t_15, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 407; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_15); __pyx_t_15 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; /* "nipy/algorithms/statistics/intvol.pyx":408 * # are uniquely associated with an interior voxel * union = join_complexes(*[cube_with_strides_center((0,0,1), strides), * cube_with_strides_center((0,1,0), strides), # <<<<<<<<<<<<<< * cube_with_strides_center((0,1,1), strides), * cube_with_strides_center((1,0,0), strides), */ __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s_8); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 408; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_15 = PyTuple_New(2); if (unlikely(!__pyx_t_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 408; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_15); __Pyx_INCREF(((PyObject *)__pyx_k_tuple_10)); 
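/* Per the .pyx source quoted around this point: `union` joins the complexes of the
 * unit cubes centred at the seven non-zero offsets of a 2x2x2 neighbourhood,
 * (0,0,1) through (1,1,1); `c` is the complex of the cube centred at (0,0,0); and
 * d4, d3, d2 below are the tetrahedra, triangles and edges of `c` that do not
 * appear in `union`, which the quoted source describes as the simplices uniquely
 * associated with an interior voxel.  ds4, ds3, ds2 are simply their counts.
 */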
PyTuple_SET_ITEM(__pyx_t_15, 0, ((PyObject *)__pyx_k_tuple_10)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_10)); __Pyx_INCREF(((PyObject *)__pyx_v_strides)); PyTuple_SET_ITEM(__pyx_t_15, 1, ((PyObject *)__pyx_v_strides)); __Pyx_GIVEREF(((PyObject *)__pyx_v_strides)); __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_15), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 408; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_15)); __pyx_t_15 = 0; /* "nipy/algorithms/statistics/intvol.pyx":409 * union = join_complexes(*[cube_with_strides_center((0,0,1), strides), * cube_with_strides_center((0,1,0), strides), * cube_with_strides_center((0,1,1), strides), # <<<<<<<<<<<<<< * cube_with_strides_center((1,0,0), strides), * cube_with_strides_center((1,0,1), strides), */ __pyx_t_15 = __Pyx_GetName(__pyx_m, __pyx_n_s_8); if (unlikely(!__pyx_t_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 409; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_15); __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 409; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(((PyObject *)__pyx_k_tuple_11)); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_k_tuple_11)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_11)); __Pyx_INCREF(((PyObject *)__pyx_v_strides)); PyTuple_SET_ITEM(__pyx_t_3, 1, ((PyObject *)__pyx_v_strides)); __Pyx_GIVEREF(((PyObject *)__pyx_v_strides)); __pyx_t_18 = PyObject_Call(__pyx_t_15, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_18)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 409; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_18); __Pyx_DECREF(__pyx_t_15); __pyx_t_15 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; /* "nipy/algorithms/statistics/intvol.pyx":410 * cube_with_strides_center((0,1,0), strides), * cube_with_strides_center((0,1,1), strides), * cube_with_strides_center((1,0,0), strides), # <<<<<<<<<<<<<< * cube_with_strides_center((1,0,1), strides), * cube_with_strides_center((1,1,0), strides), */ __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s_8); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 410; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_15 = PyTuple_New(2); if (unlikely(!__pyx_t_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 410; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_15); __Pyx_INCREF(((PyObject *)__pyx_k_tuple_12)); PyTuple_SET_ITEM(__pyx_t_15, 0, ((PyObject *)__pyx_k_tuple_12)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_12)); __Pyx_INCREF(((PyObject *)__pyx_v_strides)); PyTuple_SET_ITEM(__pyx_t_15, 1, ((PyObject *)__pyx_v_strides)); __Pyx_GIVEREF(((PyObject *)__pyx_v_strides)); __pyx_t_19 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_15), NULL); if (unlikely(!__pyx_t_19)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 410; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_19); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_15)); __pyx_t_15 = 0; /* "nipy/algorithms/statistics/intvol.pyx":411 * cube_with_strides_center((0,1,1), strides), * cube_with_strides_center((1,0,0), strides), * cube_with_strides_center((1,0,1), strides), # <<<<<<<<<<<<<< * cube_with_strides_center((1,1,0), strides), * cube_with_strides_center((1,1,1), strides)]) */ __pyx_t_15 = __Pyx_GetName(__pyx_m, 
__pyx_n_s_8); if (unlikely(!__pyx_t_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 411; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_15); __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 411; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(((PyObject *)__pyx_k_tuple_13)); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_k_tuple_13)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_13)); __Pyx_INCREF(((PyObject *)__pyx_v_strides)); PyTuple_SET_ITEM(__pyx_t_3, 1, ((PyObject *)__pyx_v_strides)); __Pyx_GIVEREF(((PyObject *)__pyx_v_strides)); __pyx_t_20 = PyObject_Call(__pyx_t_15, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_20)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 411; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_20); __Pyx_DECREF(__pyx_t_15); __pyx_t_15 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; /* "nipy/algorithms/statistics/intvol.pyx":412 * cube_with_strides_center((1,0,0), strides), * cube_with_strides_center((1,0,1), strides), * cube_with_strides_center((1,1,0), strides), # <<<<<<<<<<<<<< * cube_with_strides_center((1,1,1), strides)]) * c = cube_with_strides_center((0,0,0), strides) */ __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s_8); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 412; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_15 = PyTuple_New(2); if (unlikely(!__pyx_t_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 412; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_15); __Pyx_INCREF(((PyObject *)__pyx_k_tuple_14)); PyTuple_SET_ITEM(__pyx_t_15, 0, ((PyObject *)__pyx_k_tuple_14)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_14)); __Pyx_INCREF(((PyObject *)__pyx_v_strides)); PyTuple_SET_ITEM(__pyx_t_15, 1, ((PyObject *)__pyx_v_strides)); __Pyx_GIVEREF(((PyObject *)__pyx_v_strides)); __pyx_t_21 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_15), NULL); if (unlikely(!__pyx_t_21)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 412; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_21); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_15)); __pyx_t_15 = 0; /* "nipy/algorithms/statistics/intvol.pyx":413 * cube_with_strides_center((1,0,1), strides), * cube_with_strides_center((1,1,0), strides), * cube_with_strides_center((1,1,1), strides)]) # <<<<<<<<<<<<<< * c = cube_with_strides_center((0,0,0), strides) * */ __pyx_t_15 = __Pyx_GetName(__pyx_m, __pyx_n_s_8); if (unlikely(!__pyx_t_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 413; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_15); __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 413; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(((PyObject *)__pyx_k_tuple_15)); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_k_tuple_15)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_15)); __Pyx_INCREF(((PyObject *)__pyx_v_strides)); PyTuple_SET_ITEM(__pyx_t_3, 1, ((PyObject *)__pyx_v_strides)); __Pyx_GIVEREF(((PyObject *)__pyx_v_strides)); __pyx_t_22 = PyObject_Call(__pyx_t_15, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_22)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 413; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_22); __Pyx_DECREF(__pyx_t_15); __pyx_t_15 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __pyx_t_3 
= PyList_New(7); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 407; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyList_SET_ITEM(__pyx_t_3, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); PyList_SET_ITEM(__pyx_t_3, 1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); PyList_SET_ITEM(__pyx_t_3, 2, __pyx_t_18); __Pyx_GIVEREF(__pyx_t_18); PyList_SET_ITEM(__pyx_t_3, 3, __pyx_t_19); __Pyx_GIVEREF(__pyx_t_19); PyList_SET_ITEM(__pyx_t_3, 4, __pyx_t_20); __Pyx_GIVEREF(__pyx_t_20); PyList_SET_ITEM(__pyx_t_3, 5, __pyx_t_21); __Pyx_GIVEREF(__pyx_t_21); PyList_SET_ITEM(__pyx_t_3, 6, __pyx_t_22); __Pyx_GIVEREF(__pyx_t_22); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_18 = 0; __pyx_t_19 = 0; __pyx_t_20 = 0; __pyx_t_21 = 0; __pyx_t_22 = 0; __pyx_t_22 = PySequence_Tuple(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_22)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 407; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_22)); __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __pyx_t_3 = PyObject_Call(__pyx_t_16, ((PyObject *)__pyx_t_22), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 407; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_22)); __pyx_t_22 = 0; __pyx_v_union = __pyx_t_3; __pyx_t_3 = 0; /* "nipy/algorithms/statistics/intvol.pyx":414 * cube_with_strides_center((1,1,0), strides), * cube_with_strides_center((1,1,1), strides)]) * c = cube_with_strides_center((0,0,0), strides) # <<<<<<<<<<<<<< * * d4 = np.array(list(c[4].difference(union[4]))) */ __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s_8); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 414; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_22 = PyTuple_New(2); if (unlikely(!__pyx_t_22)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 414; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_22); __Pyx_INCREF(((PyObject *)__pyx_k_tuple_16)); PyTuple_SET_ITEM(__pyx_t_22, 0, ((PyObject *)__pyx_k_tuple_16)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_16)); __Pyx_INCREF(((PyObject *)__pyx_v_strides)); PyTuple_SET_ITEM(__pyx_t_22, 1, ((PyObject *)__pyx_v_strides)); __Pyx_GIVEREF(((PyObject *)__pyx_v_strides)); __pyx_t_16 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_22), NULL); if (unlikely(!__pyx_t_16)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 414; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_16); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_22)); __pyx_t_22 = 0; __pyx_v_c = __pyx_t_16; __pyx_t_16 = 0; /* "nipy/algorithms/statistics/intvol.pyx":416 * c = cube_with_strides_center((0,0,0), strides) * * d4 = np.array(list(c[4].difference(union[4]))) # <<<<<<<<<<<<<< * d3 = np.array(list(c[3].difference(union[3]))) * d2 = np.array(list(c[2].difference(union[2]))) */ __pyx_t_16 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_16)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 416; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_16); __pyx_t_22 = PyObject_GetAttr(__pyx_t_16, __pyx_n_s__array); if (unlikely(!__pyx_t_22)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 416; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_22); __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0; __pyx_t_16 = __Pyx_GetItemInt(__pyx_v_c, 4, sizeof(long), PyInt_FromLong); if (!__pyx_t_16) {__pyx_filename = __pyx_f[0]; 
__pyx_lineno = 416; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_16); __pyx_t_3 = PyObject_GetAttr(__pyx_t_16, __pyx_n_s__difference); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 416; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0; __pyx_t_16 = __Pyx_GetItemInt(__pyx_v_union, 4, sizeof(long), PyInt_FromLong); if (!__pyx_t_16) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 416; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_16); __pyx_t_21 = PyTuple_New(1); if (unlikely(!__pyx_t_21)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 416; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_21); PyTuple_SET_ITEM(__pyx_t_21, 0, __pyx_t_16); __Pyx_GIVEREF(__pyx_t_16); __pyx_t_16 = 0; __pyx_t_16 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_21), NULL); if (unlikely(!__pyx_t_16)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 416; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_16); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_21)); __pyx_t_21 = 0; __pyx_t_21 = PyTuple_New(1); if (unlikely(!__pyx_t_21)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 416; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_21); PyTuple_SET_ITEM(__pyx_t_21, 0, __pyx_t_16); __Pyx_GIVEREF(__pyx_t_16); __pyx_t_16 = 0; __pyx_t_16 = PyObject_Call(((PyObject *)((PyObject*)(&PyList_Type))), ((PyObject *)__pyx_t_21), NULL); if (unlikely(!__pyx_t_16)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 416; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_16); __Pyx_DECREF(((PyObject *)__pyx_t_21)); __pyx_t_21 = 0; __pyx_t_21 = PyTuple_New(1); if (unlikely(!__pyx_t_21)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 416; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_21); PyTuple_SET_ITEM(__pyx_t_21, 0, __pyx_t_16); __Pyx_GIVEREF(__pyx_t_16); __pyx_t_16 = 0; __pyx_t_16 = PyObject_Call(__pyx_t_22, ((PyObject *)__pyx_t_21), NULL); if (unlikely(!__pyx_t_16)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 416; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_16); __Pyx_DECREF(__pyx_t_22); __pyx_t_22 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_21)); __pyx_t_21 = 0; if (!(likely(((__pyx_t_16) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_16, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 416; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_23 = ((PyArrayObject *)__pyx_t_16); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_d4.rcbuffer->pybuffer); __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_d4.rcbuffer->pybuffer, (PyObject*)__pyx_t_23, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack); if (unlikely(__pyx_t_7 < 0)) { PyErr_Fetch(&__pyx_t_10, &__pyx_t_9, &__pyx_t_8); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_d4.rcbuffer->pybuffer, (PyObject*)__pyx_v_d4, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_8); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_10, __pyx_t_9, __pyx_t_8); } } __pyx_pybuffernd_d4.diminfo[0].strides = __pyx_pybuffernd_d4.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_d4.diminfo[0].shape = __pyx_pybuffernd_d4.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_d4.diminfo[1].strides = 
__pyx_pybuffernd_d4.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_d4.diminfo[1].shape = __pyx_pybuffernd_d4.rcbuffer->pybuffer.shape[1]; if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 416; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_23 = 0; __pyx_v_d4 = ((PyArrayObject *)__pyx_t_16); __pyx_t_16 = 0; /* "nipy/algorithms/statistics/intvol.pyx":417 * * d4 = np.array(list(c[4].difference(union[4]))) * d3 = np.array(list(c[3].difference(union[3]))) # <<<<<<<<<<<<<< * d2 = np.array(list(c[2].difference(union[2]))) * */ __pyx_t_16 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_16)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 417; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_16); __pyx_t_21 = PyObject_GetAttr(__pyx_t_16, __pyx_n_s__array); if (unlikely(!__pyx_t_21)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 417; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_21); __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0; __pyx_t_16 = __Pyx_GetItemInt(__pyx_v_c, 3, sizeof(long), PyInt_FromLong); if (!__pyx_t_16) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 417; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_16); __pyx_t_22 = PyObject_GetAttr(__pyx_t_16, __pyx_n_s__difference); if (unlikely(!__pyx_t_22)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 417; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_22); __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0; __pyx_t_16 = __Pyx_GetItemInt(__pyx_v_union, 3, sizeof(long), PyInt_FromLong); if (!__pyx_t_16) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 417; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_16); __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 417; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_16); __Pyx_GIVEREF(__pyx_t_16); __pyx_t_16 = 0; __pyx_t_16 = PyObject_Call(__pyx_t_22, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_16)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 417; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_16); __Pyx_DECREF(__pyx_t_22); __pyx_t_22 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 417; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_16); __Pyx_GIVEREF(__pyx_t_16); __pyx_t_16 = 0; __pyx_t_16 = PyObject_Call(((PyObject *)((PyObject*)(&PyList_Type))), ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_16)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 417; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_16); __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 417; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_16); __Pyx_GIVEREF(__pyx_t_16); __pyx_t_16 = 0; __pyx_t_16 = PyObject_Call(__pyx_t_21, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_16)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 417; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_16); __Pyx_DECREF(__pyx_t_21); __pyx_t_21 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; if (!(likely(((__pyx_t_16) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_16, 
__pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 417; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_24 = ((PyArrayObject *)__pyx_t_16); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_d3.rcbuffer->pybuffer); __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_d3.rcbuffer->pybuffer, (PyObject*)__pyx_t_24, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack); if (unlikely(__pyx_t_7 < 0)) { PyErr_Fetch(&__pyx_t_8, &__pyx_t_9, &__pyx_t_10); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_d3.rcbuffer->pybuffer, (PyObject*)__pyx_v_d3, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_8); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_8, __pyx_t_9, __pyx_t_10); } } __pyx_pybuffernd_d3.diminfo[0].strides = __pyx_pybuffernd_d3.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_d3.diminfo[0].shape = __pyx_pybuffernd_d3.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_d3.diminfo[1].strides = __pyx_pybuffernd_d3.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_d3.diminfo[1].shape = __pyx_pybuffernd_d3.rcbuffer->pybuffer.shape[1]; if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 417; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_24 = 0; __pyx_v_d3 = ((PyArrayObject *)__pyx_t_16); __pyx_t_16 = 0; /* "nipy/algorithms/statistics/intvol.pyx":418 * d4 = np.array(list(c[4].difference(union[4]))) * d3 = np.array(list(c[3].difference(union[3]))) * d2 = np.array(list(c[2].difference(union[2]))) # <<<<<<<<<<<<<< * * ds2 = d2.shape[0] */ __pyx_t_16 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_16)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 418; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_16); __pyx_t_3 = PyObject_GetAttr(__pyx_t_16, __pyx_n_s__array); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 418; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0; __pyx_t_16 = __Pyx_GetItemInt(__pyx_v_c, 2, sizeof(long), PyInt_FromLong); if (!__pyx_t_16) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 418; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_16); __pyx_t_21 = PyObject_GetAttr(__pyx_t_16, __pyx_n_s__difference); if (unlikely(!__pyx_t_21)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 418; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_21); __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0; __pyx_t_16 = __Pyx_GetItemInt(__pyx_v_union, 2, sizeof(long), PyInt_FromLong); if (!__pyx_t_16) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 418; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_16); __pyx_t_22 = PyTuple_New(1); if (unlikely(!__pyx_t_22)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 418; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_22); PyTuple_SET_ITEM(__pyx_t_22, 0, __pyx_t_16); __Pyx_GIVEREF(__pyx_t_16); __pyx_t_16 = 0; __pyx_t_16 = PyObject_Call(__pyx_t_21, ((PyObject *)__pyx_t_22), NULL); if (unlikely(!__pyx_t_16)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 418; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_16); __Pyx_DECREF(__pyx_t_21); __pyx_t_21 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_22)); __pyx_t_22 = 0; __pyx_t_22 = PyTuple_New(1); if (unlikely(!__pyx_t_22)) {__pyx_filename = __pyx_f[0]; 
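/* The triple loop that follows (pyx lines 430-432 in the quoted source) visits each
 * voxel of the padded mask, computes its flat index as i*ss0 + j*ss1 + k*ss2, and
 * accumulates the Euler characteristic in l0: a tetrahedron from d4 whose four
 * vertices all lie in the mask contributes -1 (l0 = l0 - m, pyx line 442), and a
 * triangle from d3 whose three vertices all lie in the mask contributes +1
 * (l0 = l0 + m, pyx line 451); the edge loop over d2 further below starts the same
 * pattern.  This is consistent with the alternating sum
 * chi = #vertices - #edges + #triangles - #tetrahedra for the simplicial complex
 * described in the EC3d docstring.  A tiny worked instance of that sum
 * (illustrative only, not this module's API): for a single solid tetrahedron,
 *
 *     V, E, F, T = 4, 6, 4, 1
 *     chi = V - E + F - T    # = 1
 */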
__pyx_lineno = 418; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_22); PyTuple_SET_ITEM(__pyx_t_22, 0, __pyx_t_16); __Pyx_GIVEREF(__pyx_t_16); __pyx_t_16 = 0; __pyx_t_16 = PyObject_Call(((PyObject *)((PyObject*)(&PyList_Type))), ((PyObject *)__pyx_t_22), NULL); if (unlikely(!__pyx_t_16)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 418; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_16); __Pyx_DECREF(((PyObject *)__pyx_t_22)); __pyx_t_22 = 0; __pyx_t_22 = PyTuple_New(1); if (unlikely(!__pyx_t_22)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 418; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_22); PyTuple_SET_ITEM(__pyx_t_22, 0, __pyx_t_16); __Pyx_GIVEREF(__pyx_t_16); __pyx_t_16 = 0; __pyx_t_16 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_22), NULL); if (unlikely(!__pyx_t_16)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 418; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_16); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_22)); __pyx_t_22 = 0; if (!(likely(((__pyx_t_16) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_16, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 418; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_25 = ((PyArrayObject *)__pyx_t_16); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_d2.rcbuffer->pybuffer); __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_d2.rcbuffer->pybuffer, (PyObject*)__pyx_t_25, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack); if (unlikely(__pyx_t_7 < 0)) { PyErr_Fetch(&__pyx_t_10, &__pyx_t_9, &__pyx_t_8); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_d2.rcbuffer->pybuffer, (PyObject*)__pyx_v_d2, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_8); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_10, __pyx_t_9, __pyx_t_8); } } __pyx_pybuffernd_d2.diminfo[0].strides = __pyx_pybuffernd_d2.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_d2.diminfo[0].shape = __pyx_pybuffernd_d2.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_d2.diminfo[1].strides = __pyx_pybuffernd_d2.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_d2.diminfo[1].shape = __pyx_pybuffernd_d2.rcbuffer->pybuffer.shape[1]; if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 418; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_25 = 0; __pyx_v_d2 = ((PyArrayObject *)__pyx_t_16); __pyx_t_16 = 0; /* "nipy/algorithms/statistics/intvol.pyx":420 * d2 = np.array(list(c[2].difference(union[2]))) * * ds2 = d2.shape[0] # <<<<<<<<<<<<<< * ds3 = d3.shape[0] * ds4 = d4.shape[0] */ __pyx_v_ds2 = (__pyx_v_d2->dimensions[0]); /* "nipy/algorithms/statistics/intvol.pyx":421 * * ds2 = d2.shape[0] * ds3 = d3.shape[0] # <<<<<<<<<<<<<< * ds4 = d4.shape[0] * */ __pyx_v_ds3 = (__pyx_v_d3->dimensions[0]); /* "nipy/algorithms/statistics/intvol.pyx":422 * ds2 = d2.shape[0] * ds3 = d3.shape[0] * ds4 = d4.shape[0] # <<<<<<<<<<<<<< * * ss0 = strides[0] */ __pyx_v_ds4 = (__pyx_v_d4->dimensions[0]); /* "nipy/algorithms/statistics/intvol.pyx":424 * ds4 = d4.shape[0] * * ss0 = strides[0] # <<<<<<<<<<<<<< * ss1 = strides[1] * ss2 = strides[2] */ __pyx_t_26 = 0; __pyx_t_7 = -1; if (__pyx_t_26 < 0) { __pyx_t_26 += __pyx_pybuffernd_strides.diminfo[0].shape; if (unlikely(__pyx_t_26 < 0)) __pyx_t_7 = 0; } else if 
(unlikely(__pyx_t_26 >= __pyx_pybuffernd_strides.diminfo[0].shape)) __pyx_t_7 = 0; if (unlikely(__pyx_t_7 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_ss0 = (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_strides.rcbuffer->pybuffer.buf, __pyx_t_26, __pyx_pybuffernd_strides.diminfo[0].strides)); /* "nipy/algorithms/statistics/intvol.pyx":425 * * ss0 = strides[0] * ss1 = strides[1] # <<<<<<<<<<<<<< * ss2 = strides[2] * */ __pyx_t_27 = 1; __pyx_t_7 = -1; if (__pyx_t_27 < 0) { __pyx_t_27 += __pyx_pybuffernd_strides.diminfo[0].shape; if (unlikely(__pyx_t_27 < 0)) __pyx_t_7 = 0; } else if (unlikely(__pyx_t_27 >= __pyx_pybuffernd_strides.diminfo[0].shape)) __pyx_t_7 = 0; if (unlikely(__pyx_t_7 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 425; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_ss1 = (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_strides.rcbuffer->pybuffer.buf, __pyx_t_27, __pyx_pybuffernd_strides.diminfo[0].strides)); /* "nipy/algorithms/statistics/intvol.pyx":426 * ss0 = strides[0] * ss1 = strides[1] * ss2 = strides[2] # <<<<<<<<<<<<<< * * nvox = mask.size */ __pyx_t_28 = 2; __pyx_t_7 = -1; if (__pyx_t_28 < 0) { __pyx_t_28 += __pyx_pybuffernd_strides.diminfo[0].shape; if (unlikely(__pyx_t_28 < 0)) __pyx_t_7 = 0; } else if (unlikely(__pyx_t_28 >= __pyx_pybuffernd_strides.diminfo[0].shape)) __pyx_t_7 = 0; if (unlikely(__pyx_t_7 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 426; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_ss2 = (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_strides.rcbuffer->pybuffer.buf, __pyx_t_28, __pyx_pybuffernd_strides.diminfo[0].strides)); /* "nipy/algorithms/statistics/intvol.pyx":428 * ss2 = strides[2] * * nvox = mask.size # <<<<<<<<<<<<<< * * for i in range(s0-1): */ __pyx_t_16 = PyObject_GetAttr(__pyx_v_mask, __pyx_n_s__size); if (unlikely(!__pyx_t_16)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 428; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_16); __pyx_t_13 = __Pyx_PyInt_from_py_Py_intptr_t(__pyx_t_16); if (unlikely((__pyx_t_13 == (npy_intp)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 428; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0; __pyx_v_nvox = __pyx_t_13; /* "nipy/algorithms/statistics/intvol.pyx":430 * nvox = mask.size * * for i in range(s0-1): # <<<<<<<<<<<<<< * for j in range(s1-1): * for k in range(s2-1): */ __pyx_t_29 = (__pyx_v_s0 - 1); for (__pyx_t_13 = 0; __pyx_t_13 < __pyx_t_29; __pyx_t_13+=1) { __pyx_v_i = __pyx_t_13; /* "nipy/algorithms/statistics/intvol.pyx":431 * * for i in range(s0-1): * for j in range(s1-1): # <<<<<<<<<<<<<< * for k in range(s2-1): * index = i*ss0+j*ss1+k*ss2 */ __pyx_t_30 = (__pyx_v_s1 - 1); for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_30; __pyx_t_12+=1) { __pyx_v_j = __pyx_t_12; /* "nipy/algorithms/statistics/intvol.pyx":432 * for i in range(s0-1): * for j in range(s1-1): * for k in range(s2-1): # <<<<<<<<<<<<<< * index = i*ss0+j*ss1+k*ss2 * for l in range(ds4): */ __pyx_t_31 = (__pyx_v_s2 - 1); for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_31; __pyx_t_11+=1) { __pyx_v_k = __pyx_t_11; /* "nipy/algorithms/statistics/intvol.pyx":433 * for j in range(s1-1): * for k in range(s2-1): * index = i*ss0+j*ss1+k*ss2 # <<<<<<<<<<<<<< * for l in 
range(ds4): * v0 = index + d4[l,0] */ __pyx_v_index = (((__pyx_v_i * __pyx_v_ss0) + (__pyx_v_j * __pyx_v_ss1)) + (__pyx_v_k * __pyx_v_ss2)); /* "nipy/algorithms/statistics/intvol.pyx":434 * for k in range(s2-1): * index = i*ss0+j*ss1+k*ss2 * for l in range(ds4): # <<<<<<<<<<<<<< * v0 = index + d4[l,0] * m = fpmask[v0] */ __pyx_t_32 = __pyx_v_ds4; for (__pyx_t_33 = 0; __pyx_t_33 < __pyx_t_32; __pyx_t_33+=1) { __pyx_v_l = __pyx_t_33; /* "nipy/algorithms/statistics/intvol.pyx":435 * index = i*ss0+j*ss1+k*ss2 * for l in range(ds4): * v0 = index + d4[l,0] # <<<<<<<<<<<<<< * m = fpmask[v0] * if m: */ __pyx_t_34 = __pyx_v_l; __pyx_t_35 = 0; __pyx_t_7 = -1; if (__pyx_t_34 < 0) { __pyx_t_34 += __pyx_pybuffernd_d4.diminfo[0].shape; if (unlikely(__pyx_t_34 < 0)) __pyx_t_7 = 0; } else if (unlikely(__pyx_t_34 >= __pyx_pybuffernd_d4.diminfo[0].shape)) __pyx_t_7 = 0; if (__pyx_t_35 < 0) { __pyx_t_35 += __pyx_pybuffernd_d4.diminfo[1].shape; if (unlikely(__pyx_t_35 < 0)) __pyx_t_7 = 1; } else if (unlikely(__pyx_t_35 >= __pyx_pybuffernd_d4.diminfo[1].shape)) __pyx_t_7 = 1; if (unlikely(__pyx_t_7 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 435; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_v0 = (__pyx_v_index + (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_d4.rcbuffer->pybuffer.buf, __pyx_t_34, __pyx_pybuffernd_d4.diminfo[0].strides, __pyx_t_35, __pyx_pybuffernd_d4.diminfo[1].strides))); /* "nipy/algorithms/statistics/intvol.pyx":436 * for l in range(ds4): * v0 = index + d4[l,0] * m = fpmask[v0] # <<<<<<<<<<<<<< * if m: * v1 = index + d4[l,1] */ __pyx_t_36 = __pyx_v_v0; __pyx_t_7 = -1; if (__pyx_t_36 < 0) { __pyx_t_36 += __pyx_pybuffernd_fpmask.diminfo[0].shape; if (unlikely(__pyx_t_36 < 0)) __pyx_t_7 = 0; } else if (unlikely(__pyx_t_36 >= __pyx_pybuffernd_fpmask.diminfo[0].shape)) __pyx_t_7 = 0; if (unlikely(__pyx_t_7 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 436; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_m = (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_fpmask.rcbuffer->pybuffer.buf, __pyx_t_36, __pyx_pybuffernd_fpmask.diminfo[0].strides)); /* "nipy/algorithms/statistics/intvol.pyx":437 * v0 = index + d4[l,0] * m = fpmask[v0] * if m: # <<<<<<<<<<<<<< * v1 = index + d4[l,1] * v2 = index + d4[l,2] */ if (__pyx_v_m) { /* "nipy/algorithms/statistics/intvol.pyx":438 * m = fpmask[v0] * if m: * v1 = index + d4[l,1] # <<<<<<<<<<<<<< * v2 = index + d4[l,2] * v3 = index + d4[l,3] */ __pyx_t_37 = __pyx_v_l; __pyx_t_38 = 1; __pyx_t_7 = -1; if (__pyx_t_37 < 0) { __pyx_t_37 += __pyx_pybuffernd_d4.diminfo[0].shape; if (unlikely(__pyx_t_37 < 0)) __pyx_t_7 = 0; } else if (unlikely(__pyx_t_37 >= __pyx_pybuffernd_d4.diminfo[0].shape)) __pyx_t_7 = 0; if (__pyx_t_38 < 0) { __pyx_t_38 += __pyx_pybuffernd_d4.diminfo[1].shape; if (unlikely(__pyx_t_38 < 0)) __pyx_t_7 = 1; } else if (unlikely(__pyx_t_38 >= __pyx_pybuffernd_d4.diminfo[1].shape)) __pyx_t_7 = 1; if (unlikely(__pyx_t_7 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 438; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_v1 = (__pyx_v_index + (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_d4.rcbuffer->pybuffer.buf, __pyx_t_37, __pyx_pybuffernd_d4.diminfo[0].strides, __pyx_t_38, __pyx_pybuffernd_d4.diminfo[1].strides))); /* "nipy/algorithms/statistics/intvol.pyx":439 * if m: * v1 = index + d4[l,1] * v2 = index + d4[l,2] # 
<<<<<<<<<<<<<< * v3 = index + d4[l,3] * m = m * fpmask[v1] * fpmask[v2] * fpmask[v3] */ __pyx_t_39 = __pyx_v_l; __pyx_t_40 = 2; __pyx_t_7 = -1; if (__pyx_t_39 < 0) { __pyx_t_39 += __pyx_pybuffernd_d4.diminfo[0].shape; if (unlikely(__pyx_t_39 < 0)) __pyx_t_7 = 0; } else if (unlikely(__pyx_t_39 >= __pyx_pybuffernd_d4.diminfo[0].shape)) __pyx_t_7 = 0; if (__pyx_t_40 < 0) { __pyx_t_40 += __pyx_pybuffernd_d4.diminfo[1].shape; if (unlikely(__pyx_t_40 < 0)) __pyx_t_7 = 1; } else if (unlikely(__pyx_t_40 >= __pyx_pybuffernd_d4.diminfo[1].shape)) __pyx_t_7 = 1; if (unlikely(__pyx_t_7 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 439; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_v2 = (__pyx_v_index + (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_d4.rcbuffer->pybuffer.buf, __pyx_t_39, __pyx_pybuffernd_d4.diminfo[0].strides, __pyx_t_40, __pyx_pybuffernd_d4.diminfo[1].strides))); /* "nipy/algorithms/statistics/intvol.pyx":440 * v1 = index + d4[l,1] * v2 = index + d4[l,2] * v3 = index + d4[l,3] # <<<<<<<<<<<<<< * m = m * fpmask[v1] * fpmask[v2] * fpmask[v3] * l0 = l0 - m */ __pyx_t_41 = __pyx_v_l; __pyx_t_42 = 3; __pyx_t_7 = -1; if (__pyx_t_41 < 0) { __pyx_t_41 += __pyx_pybuffernd_d4.diminfo[0].shape; if (unlikely(__pyx_t_41 < 0)) __pyx_t_7 = 0; } else if (unlikely(__pyx_t_41 >= __pyx_pybuffernd_d4.diminfo[0].shape)) __pyx_t_7 = 0; if (__pyx_t_42 < 0) { __pyx_t_42 += __pyx_pybuffernd_d4.diminfo[1].shape; if (unlikely(__pyx_t_42 < 0)) __pyx_t_7 = 1; } else if (unlikely(__pyx_t_42 >= __pyx_pybuffernd_d4.diminfo[1].shape)) __pyx_t_7 = 1; if (unlikely(__pyx_t_7 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 440; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_v3 = (__pyx_v_index + (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_d4.rcbuffer->pybuffer.buf, __pyx_t_41, __pyx_pybuffernd_d4.diminfo[0].strides, __pyx_t_42, __pyx_pybuffernd_d4.diminfo[1].strides))); /* "nipy/algorithms/statistics/intvol.pyx":441 * v2 = index + d4[l,2] * v3 = index + d4[l,3] * m = m * fpmask[v1] * fpmask[v2] * fpmask[v3] # <<<<<<<<<<<<<< * l0 = l0 - m * */ __pyx_t_43 = __pyx_v_v1; __pyx_t_7 = -1; if (__pyx_t_43 < 0) { __pyx_t_43 += __pyx_pybuffernd_fpmask.diminfo[0].shape; if (unlikely(__pyx_t_43 < 0)) __pyx_t_7 = 0; } else if (unlikely(__pyx_t_43 >= __pyx_pybuffernd_fpmask.diminfo[0].shape)) __pyx_t_7 = 0; if (unlikely(__pyx_t_7 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 441; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_44 = __pyx_v_v2; __pyx_t_7 = -1; if (__pyx_t_44 < 0) { __pyx_t_44 += __pyx_pybuffernd_fpmask.diminfo[0].shape; if (unlikely(__pyx_t_44 < 0)) __pyx_t_7 = 0; } else if (unlikely(__pyx_t_44 >= __pyx_pybuffernd_fpmask.diminfo[0].shape)) __pyx_t_7 = 0; if (unlikely(__pyx_t_7 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 441; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_45 = __pyx_v_v3; __pyx_t_7 = -1; if (__pyx_t_45 < 0) { __pyx_t_45 += __pyx_pybuffernd_fpmask.diminfo[0].shape; if (unlikely(__pyx_t_45 < 0)) __pyx_t_7 = 0; } else if (unlikely(__pyx_t_45 >= __pyx_pybuffernd_fpmask.diminfo[0].shape)) __pyx_t_7 = 0; if (unlikely(__pyx_t_7 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 441; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_m = (((__pyx_v_m * 
(*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_fpmask.rcbuffer->pybuffer.buf, __pyx_t_43, __pyx_pybuffernd_fpmask.diminfo[0].strides))) * (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_fpmask.rcbuffer->pybuffer.buf, __pyx_t_44, __pyx_pybuffernd_fpmask.diminfo[0].strides))) * (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_fpmask.rcbuffer->pybuffer.buf, __pyx_t_45, __pyx_pybuffernd_fpmask.diminfo[0].strides))); /* "nipy/algorithms/statistics/intvol.pyx":442 * v3 = index + d4[l,3] * m = m * fpmask[v1] * fpmask[v2] * fpmask[v3] * l0 = l0 - m # <<<<<<<<<<<<<< * * for l in range(ds3): */ __pyx_v_l0 = (__pyx_v_l0 - __pyx_v_m); goto __pyx_L12; } __pyx_L12:; } /* "nipy/algorithms/statistics/intvol.pyx":444 * l0 = l0 - m * * for l in range(ds3): # <<<<<<<<<<<<<< * v0 = index + d3[l,0] * m = fpmask[v0] */ __pyx_t_32 = __pyx_v_ds3; for (__pyx_t_33 = 0; __pyx_t_33 < __pyx_t_32; __pyx_t_33+=1) { __pyx_v_l = __pyx_t_33; /* "nipy/algorithms/statistics/intvol.pyx":445 * * for l in range(ds3): * v0 = index + d3[l,0] # <<<<<<<<<<<<<< * m = fpmask[v0] * if m: */ __pyx_t_46 = __pyx_v_l; __pyx_t_47 = 0; __pyx_t_7 = -1; if (__pyx_t_46 < 0) { __pyx_t_46 += __pyx_pybuffernd_d3.diminfo[0].shape; if (unlikely(__pyx_t_46 < 0)) __pyx_t_7 = 0; } else if (unlikely(__pyx_t_46 >= __pyx_pybuffernd_d3.diminfo[0].shape)) __pyx_t_7 = 0; if (__pyx_t_47 < 0) { __pyx_t_47 += __pyx_pybuffernd_d3.diminfo[1].shape; if (unlikely(__pyx_t_47 < 0)) __pyx_t_7 = 1; } else if (unlikely(__pyx_t_47 >= __pyx_pybuffernd_d3.diminfo[1].shape)) __pyx_t_7 = 1; if (unlikely(__pyx_t_7 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 445; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_v0 = (__pyx_v_index + (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_d3.rcbuffer->pybuffer.buf, __pyx_t_46, __pyx_pybuffernd_d3.diminfo[0].strides, __pyx_t_47, __pyx_pybuffernd_d3.diminfo[1].strides))); /* "nipy/algorithms/statistics/intvol.pyx":446 * for l in range(ds3): * v0 = index + d3[l,0] * m = fpmask[v0] # <<<<<<<<<<<<<< * if m: * v1 = index + d3[l,1] */ __pyx_t_48 = __pyx_v_v0; __pyx_t_7 = -1; if (__pyx_t_48 < 0) { __pyx_t_48 += __pyx_pybuffernd_fpmask.diminfo[0].shape; if (unlikely(__pyx_t_48 < 0)) __pyx_t_7 = 0; } else if (unlikely(__pyx_t_48 >= __pyx_pybuffernd_fpmask.diminfo[0].shape)) __pyx_t_7 = 0; if (unlikely(__pyx_t_7 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 446; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_m = (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_fpmask.rcbuffer->pybuffer.buf, __pyx_t_48, __pyx_pybuffernd_fpmask.diminfo[0].strides)); /* "nipy/algorithms/statistics/intvol.pyx":447 * v0 = index + d3[l,0] * m = fpmask[v0] * if m: # <<<<<<<<<<<<<< * v1 = index + d3[l,1] * v2 = index + d3[l,2] */ if (__pyx_v_m) { /* "nipy/algorithms/statistics/intvol.pyx":448 * m = fpmask[v0] * if m: * v1 = index + d3[l,1] # <<<<<<<<<<<<<< * v2 = index + d3[l,2] * m = m * fpmask[v1] * fpmask[v2] */ __pyx_t_49 = __pyx_v_l; __pyx_t_50 = 1; __pyx_t_7 = -1; if (__pyx_t_49 < 0) { __pyx_t_49 += __pyx_pybuffernd_d3.diminfo[0].shape; if (unlikely(__pyx_t_49 < 0)) __pyx_t_7 = 0; } else if (unlikely(__pyx_t_49 >= __pyx_pybuffernd_d3.diminfo[0].shape)) __pyx_t_7 = 0; if (__pyx_t_50 < 0) { __pyx_t_50 += __pyx_pybuffernd_d3.diminfo[1].shape; if (unlikely(__pyx_t_50 < 0)) __pyx_t_7 = 1; } else if (unlikely(__pyx_t_50 >= __pyx_pybuffernd_d3.diminfo[1].shape)) 
__pyx_t_7 = 1; if (unlikely(__pyx_t_7 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 448; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_v1 = (__pyx_v_index + (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_d3.rcbuffer->pybuffer.buf, __pyx_t_49, __pyx_pybuffernd_d3.diminfo[0].strides, __pyx_t_50, __pyx_pybuffernd_d3.diminfo[1].strides))); /* "nipy/algorithms/statistics/intvol.pyx":449 * if m: * v1 = index + d3[l,1] * v2 = index + d3[l,2] # <<<<<<<<<<<<<< * m = m * fpmask[v1] * fpmask[v2] * l0 = l0 + m */ __pyx_t_51 = __pyx_v_l; __pyx_t_52 = 2; __pyx_t_7 = -1; if (__pyx_t_51 < 0) { __pyx_t_51 += __pyx_pybuffernd_d3.diminfo[0].shape; if (unlikely(__pyx_t_51 < 0)) __pyx_t_7 = 0; } else if (unlikely(__pyx_t_51 >= __pyx_pybuffernd_d3.diminfo[0].shape)) __pyx_t_7 = 0; if (__pyx_t_52 < 0) { __pyx_t_52 += __pyx_pybuffernd_d3.diminfo[1].shape; if (unlikely(__pyx_t_52 < 0)) __pyx_t_7 = 1; } else if (unlikely(__pyx_t_52 >= __pyx_pybuffernd_d3.diminfo[1].shape)) __pyx_t_7 = 1; if (unlikely(__pyx_t_7 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 449; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_v2 = (__pyx_v_index + (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_d3.rcbuffer->pybuffer.buf, __pyx_t_51, __pyx_pybuffernd_d3.diminfo[0].strides, __pyx_t_52, __pyx_pybuffernd_d3.diminfo[1].strides))); /* "nipy/algorithms/statistics/intvol.pyx":450 * v1 = index + d3[l,1] * v2 = index + d3[l,2] * m = m * fpmask[v1] * fpmask[v2] # <<<<<<<<<<<<<< * l0 = l0 + m * */ __pyx_t_53 = __pyx_v_v1; __pyx_t_7 = -1; if (__pyx_t_53 < 0) { __pyx_t_53 += __pyx_pybuffernd_fpmask.diminfo[0].shape; if (unlikely(__pyx_t_53 < 0)) __pyx_t_7 = 0; } else if (unlikely(__pyx_t_53 >= __pyx_pybuffernd_fpmask.diminfo[0].shape)) __pyx_t_7 = 0; if (unlikely(__pyx_t_7 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 450; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_54 = __pyx_v_v2; __pyx_t_7 = -1; if (__pyx_t_54 < 0) { __pyx_t_54 += __pyx_pybuffernd_fpmask.diminfo[0].shape; if (unlikely(__pyx_t_54 < 0)) __pyx_t_7 = 0; } else if (unlikely(__pyx_t_54 >= __pyx_pybuffernd_fpmask.diminfo[0].shape)) __pyx_t_7 = 0; if (unlikely(__pyx_t_7 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 450; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_m = ((__pyx_v_m * (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_fpmask.rcbuffer->pybuffer.buf, __pyx_t_53, __pyx_pybuffernd_fpmask.diminfo[0].strides))) * (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_fpmask.rcbuffer->pybuffer.buf, __pyx_t_54, __pyx_pybuffernd_fpmask.diminfo[0].strides))); /* "nipy/algorithms/statistics/intvol.pyx":451 * v2 = index + d3[l,2] * m = m * fpmask[v1] * fpmask[v2] * l0 = l0 + m # <<<<<<<<<<<<<< * * for l in range(ds2): */ __pyx_v_l0 = (__pyx_v_l0 + __pyx_v_m); goto __pyx_L15; } __pyx_L15:; } /* "nipy/algorithms/statistics/intvol.pyx":453 * l0 = l0 + m * * for l in range(ds2): # <<<<<<<<<<<<<< * v0 = index + d2[l,0] * m = fpmask[v0] */ __pyx_t_32 = __pyx_v_ds2; for (__pyx_t_33 = 0; __pyx_t_33 < __pyx_t_32; __pyx_t_33+=1) { __pyx_v_l = __pyx_t_33; /* "nipy/algorithms/statistics/intvol.pyx":454 * * for l in range(ds2): * v0 = index + d2[l,0] # <<<<<<<<<<<<<< * m = fpmask[v0] * if m: */ __pyx_t_55 = __pyx_v_l; __pyx_t_56 = 0; __pyx_t_7 = -1; if (__pyx_t_55 < 0) { __pyx_t_55 += 
__pyx_pybuffernd_d2.diminfo[0].shape; if (unlikely(__pyx_t_55 < 0)) __pyx_t_7 = 0; } else if (unlikely(__pyx_t_55 >= __pyx_pybuffernd_d2.diminfo[0].shape)) __pyx_t_7 = 0; if (__pyx_t_56 < 0) { __pyx_t_56 += __pyx_pybuffernd_d2.diminfo[1].shape; if (unlikely(__pyx_t_56 < 0)) __pyx_t_7 = 1; } else if (unlikely(__pyx_t_56 >= __pyx_pybuffernd_d2.diminfo[1].shape)) __pyx_t_7 = 1; if (unlikely(__pyx_t_7 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_v0 = (__pyx_v_index + (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_d2.rcbuffer->pybuffer.buf, __pyx_t_55, __pyx_pybuffernd_d2.diminfo[0].strides, __pyx_t_56, __pyx_pybuffernd_d2.diminfo[1].strides))); /* "nipy/algorithms/statistics/intvol.pyx":455 * for l in range(ds2): * v0 = index + d2[l,0] * m = fpmask[v0] # <<<<<<<<<<<<<< * if m: * v1 = index + d2[l,1] */ __pyx_t_57 = __pyx_v_v0; __pyx_t_7 = -1; if (__pyx_t_57 < 0) { __pyx_t_57 += __pyx_pybuffernd_fpmask.diminfo[0].shape; if (unlikely(__pyx_t_57 < 0)) __pyx_t_7 = 0; } else if (unlikely(__pyx_t_57 >= __pyx_pybuffernd_fpmask.diminfo[0].shape)) __pyx_t_7 = 0; if (unlikely(__pyx_t_7 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 455; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_m = (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_fpmask.rcbuffer->pybuffer.buf, __pyx_t_57, __pyx_pybuffernd_fpmask.diminfo[0].strides)); /* "nipy/algorithms/statistics/intvol.pyx":456 * v0 = index + d2[l,0] * m = fpmask[v0] * if m: # <<<<<<<<<<<<<< * v1 = index + d2[l,1] * m = m * fpmask[v1] */ if (__pyx_v_m) { /* "nipy/algorithms/statistics/intvol.pyx":457 * m = fpmask[v0] * if m: * v1 = index + d2[l,1] # <<<<<<<<<<<<<< * m = m * fpmask[v1] * l0 = l0 - m */ __pyx_t_58 = __pyx_v_l; __pyx_t_59 = 1; __pyx_t_7 = -1; if (__pyx_t_58 < 0) { __pyx_t_58 += __pyx_pybuffernd_d2.diminfo[0].shape; if (unlikely(__pyx_t_58 < 0)) __pyx_t_7 = 0; } else if (unlikely(__pyx_t_58 >= __pyx_pybuffernd_d2.diminfo[0].shape)) __pyx_t_7 = 0; if (__pyx_t_59 < 0) { __pyx_t_59 += __pyx_pybuffernd_d2.diminfo[1].shape; if (unlikely(__pyx_t_59 < 0)) __pyx_t_7 = 1; } else if (unlikely(__pyx_t_59 >= __pyx_pybuffernd_d2.diminfo[1].shape)) __pyx_t_7 = 1; if (unlikely(__pyx_t_7 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 457; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_v1 = (__pyx_v_index + (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_d2.rcbuffer->pybuffer.buf, __pyx_t_58, __pyx_pybuffernd_d2.diminfo[0].strides, __pyx_t_59, __pyx_pybuffernd_d2.diminfo[1].strides))); /* "nipy/algorithms/statistics/intvol.pyx":458 * if m: * v1 = index + d2[l,1] * m = m * fpmask[v1] # <<<<<<<<<<<<<< * l0 = l0 - m * l0 += mask.sum() */ __pyx_t_60 = __pyx_v_v1; __pyx_t_7 = -1; if (__pyx_t_60 < 0) { __pyx_t_60 += __pyx_pybuffernd_fpmask.diminfo[0].shape; if (unlikely(__pyx_t_60 < 0)) __pyx_t_7 = 0; } else if (unlikely(__pyx_t_60 >= __pyx_pybuffernd_fpmask.diminfo[0].shape)) __pyx_t_7 = 0; if (unlikely(__pyx_t_7 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 458; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_m = (__pyx_v_m * (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_fpmask.rcbuffer->pybuffer.buf, __pyx_t_60, __pyx_pybuffernd_fpmask.diminfo[0].strides))); /* "nipy/algorithms/statistics/intvol.pyx":459 * v1 = 
index + d2[l,1] * m = m * fpmask[v1] * l0 = l0 - m # <<<<<<<<<<<<<< * l0 += mask.sum() * return l0 */ __pyx_v_l0 = (__pyx_v_l0 - __pyx_v_m); goto __pyx_L18; } __pyx_L18:; } } } } /* "nipy/algorithms/statistics/intvol.pyx":460 * m = m * fpmask[v1] * l0 = l0 - m * l0 += mask.sum() # <<<<<<<<<<<<<< * return l0 * */ __pyx_t_16 = __Pyx_PyInt_to_py_Py_intptr_t(__pyx_v_l0); if (unlikely(!__pyx_t_16)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 460; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_16); __pyx_t_22 = PyObject_GetAttr(__pyx_v_mask, __pyx_n_s__sum); if (unlikely(!__pyx_t_22)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 460; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_22); __pyx_t_3 = PyObject_Call(__pyx_t_22, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 460; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_22); __pyx_t_22 = 0; __pyx_t_22 = PyNumber_InPlaceAdd(__pyx_t_16, __pyx_t_3); if (unlikely(!__pyx_t_22)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 460; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_22); __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_13 = __Pyx_PyInt_from_py_Py_intptr_t(__pyx_t_22); if (unlikely((__pyx_t_13 == (npy_intp)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 460; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_22); __pyx_t_22 = 0; __pyx_v_l0 = __pyx_t_13; /* "nipy/algorithms/statistics/intvol.pyx":461 * l0 = l0 - m * l0 += mask.sum() * return l0 # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_22 = __Pyx_PyInt_to_py_Py_intptr_t(__pyx_v_l0); if (unlikely(!__pyx_t_22)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 461; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_22); __pyx_r = __pyx_t_22; __pyx_t_22 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_15); __Pyx_XDECREF(__pyx_t_16); __Pyx_XDECREF(__pyx_t_18); __Pyx_XDECREF(__pyx_t_19); __Pyx_XDECREF(__pyx_t_20); __Pyx_XDECREF(__pyx_t_21); __Pyx_XDECREF(__pyx_t_22); { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_d2.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_d3.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_d4.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_fpmask.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_mask_c.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_strides.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("nipy.algorithms.statistics.intvol.EC3d", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_d2.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_d3.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_d4.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_fpmask.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_mask_c.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_strides.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_mask_c); __Pyx_XDECREF((PyObject *)__pyx_v_fpmask); 
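/* Reading note on the EC3d loops above: the running total l0 is the Euler
 * characteristic of the masked region, accumulated by inclusion-exclusion
 * over the simplicial decomposition of each voxel cube. Fully-masked
 * tetrahedra (the d4 table) are subtracted, fully-masked triangles (d3)
 * added, fully-masked edges (d2) subtracted, and the masked vertices are
 * added last via l0 += mask.sum(), i.e.
 *
 *     EC = #vertices - #edges + #triangles - #tetrahedra
 *
 * which is the value handed back to Python by the return at intvol.pyx:461.
 */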
__Pyx_XDECREF((PyObject *)__pyx_v_d2); __Pyx_XDECREF((PyObject *)__pyx_v_d3); __Pyx_XDECREF((PyObject *)__pyx_v_d4); __Pyx_XDECREF(__pyx_v_pmask_shape); __Pyx_XDECREF(__pyx_v_pmask); __Pyx_XDECREF((PyObject *)__pyx_v_strides); __Pyx_XDECREF(__pyx_v_union); __Pyx_XDECREF(__pyx_v_c); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_10algorithms_10statistics_6intvol_17Lips3d(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_10algorithms_10statistics_6intvol_16Lips3d[] = " Estimated intrinsic volumes within masked region given coordinates\n\n Given a 3d `mask` and coordinates `coords`, estimate the intrinsic volumes\n of the masked region. The region is broken up into tetrahedra / triangles /\n edges / vertices, which are included based on whether all voxels in the\n tetrahedron / triangle / edge / vertex are in the mask or not.\n\n Parameters\n ----------\n coords : ndarray((N,i,j,k))\n Coordinates for the voxels in the mask. ``N`` will often be 3 (for 3\n dimensional coordinates, but can be any integer > 0\n mask : ndarray((i,j,k), np.int)\n Binary mask determining whether or not\n a voxel is in the mask.\n\n Returns\n -------\n mu : ndarray\n Array of intrinsic volumes [mu0, mu1, mu2, mu3], being, respectively:\n #. Euler characteristic\n #. 2 * mean caliper diameter\n #. 0.5 * surface area\n #. Volume.\n\n Notes\n -----\n The array mask is assumed to be binary. At the time of writing, it\n is not clear how to get cython to use np.bool arrays.\n\n The 3d cubes are triangulated into 6 tetrahedra of equal volume, as\n described in the reference below.\n\n References\n ----------\n Taylor, J.E. & Worsley, K.J. (2007). \"Detecting sparse signal in random fields,\n with an application to brain mapping.\"\n Journal of the American Statistical Association, 102(479):913-928.\n "; static PyMethodDef __pyx_mdef_4nipy_10algorithms_10statistics_6intvol_17Lips3d = {__Pyx_NAMESTR("Lips3d"), (PyCFunction)__pyx_pw_4nipy_10algorithms_10statistics_6intvol_17Lips3d, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4nipy_10algorithms_10statistics_6intvol_16Lips3d)}; static PyObject *__pyx_pw_4nipy_10algorithms_10statistics_6intvol_17Lips3d(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_coords = 0; PyObject *__pyx_v_mask = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("Lips3d (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__coords,&__pyx_n_s__mask,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__coords)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__mask)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("Lips3d", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 464; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "Lips3d") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 464; 
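/* Illustrative call of the Lips3d entry point documented in the docstring
 * above -- a minimal sketch only. It assumes nipy is importable, arrays
 * shaped as the docstring describes (coords of shape (N, i, j, k), a 0/1
 * mask of shape (i, j, k)), and an np.intp mask dtype chosen to match the
 * intp_t buffer declared for mask_c further below:
 *
 *     import numpy as np
 *     from nipy.algorithms.statistics.intvol import Lips3d
 *
 *     mask = np.zeros((10, 10, 10), dtype=np.intp)        # binary 0/1 mask
 *     mask[2:8, 2:8, 2:8] = 1
 *     coords = np.indices(mask.shape).astype(np.float64)  # (3, 10, 10, 10)
 *     mu = Lips3d(coords, mask)   # [mu0, mu1, mu2, mu3]
 */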
__pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_coords = values[0]; __pyx_v_mask = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("Lips3d", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 464; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.algorithms.statistics.intvol.Lips3d", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4nipy_10algorithms_10statistics_6intvol_16Lips3d(__pyx_self, __pyx_v_coords, __pyx_v_mask); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/algorithms/statistics/intvol.pyx":464 * * * def Lips3d(coords, mask): # <<<<<<<<<<<<<< * """ Estimated intrinsic volumes within masked region given coordinates * */ static PyObject *__pyx_pf_4nipy_10algorithms_10statistics_6intvol_16Lips3d(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_coords, PyObject *__pyx_v_mask) { PyObject *__pyx_v_value = NULL; PyArrayObject *__pyx_v_coords_c = 0; PyArrayObject *__pyx_v_mask_c = 0; PyArrayObject *__pyx_v_fcoords = 0; PyArrayObject *__pyx_v_D = 0; PyArrayObject *__pyx_v_fmask = 0; PyArrayObject *__pyx_v_fpmask = 0; PyArrayObject *__pyx_v_pmask = 0; PyArrayObject *__pyx_v_d4 = 0; PyArrayObject *__pyx_v_m4 = 0; PyArrayObject *__pyx_v_d3 = 0; PyArrayObject *__pyx_v_m3 = 0; PyArrayObject *__pyx_v_d2 = 0; PyArrayObject *__pyx_v_m2 = 0; PyArrayObject *__pyx_v_cvertices = 0; npy_intp __pyx_v_i; npy_intp __pyx_v_j; npy_intp __pyx_v_k; npy_intp __pyx_v_l; npy_intp __pyx_v_s0; npy_intp __pyx_v_s1; npy_intp __pyx_v_s2; npy_intp __pyx_v_ds4; npy_intp __pyx_v_ds3; npy_intp __pyx_v_ds2; npy_intp __pyx_v_index; npy_intp __pyx_v_pindex; npy_intp __pyx_v_m; npy_intp __pyx_v_nvox; npy_intp __pyx_v_r; npy_intp __pyx_v_s; npy_intp __pyx_v_rr; npy_intp __pyx_v_ss; npy_intp __pyx_v_mr; npy_intp __pyx_v_ms; npy_intp __pyx_v_ss0; npy_intp __pyx_v_ss1; npy_intp __pyx_v_ss2; npy_intp __pyx_v_v0; npy_intp __pyx_v_v1; npy_intp __pyx_v_v2; npy_intp __pyx_v_v3; npy_intp __pyx_v_w0; npy_intp __pyx_v_w1; npy_intp __pyx_v_w2; npy_intp __pyx_v_w3; double __pyx_v_l0; double __pyx_v_l1; double __pyx_v_l2; double __pyx_v_l3; double __pyx_v_res; PyObject *__pyx_v_pmask_shape = NULL; PyArrayObject *__pyx_v_strides = 0; PyArrayObject *__pyx_v_dstrides = 0; PyObject *__pyx_v_ss0d = NULL; PyObject *__pyx_v_ss1d = NULL; PyObject *__pyx_v_ss2d = NULL; PyObject *__pyx_v_verts = NULL; PyObject *__pyx_v_union = NULL; PyObject *__pyx_v_c = NULL; PyObject *__pyx_v_v = NULL; __Pyx_LocalBuf_ND __pyx_pybuffernd_D; __Pyx_Buffer __pyx_pybuffer_D; __Pyx_LocalBuf_ND __pyx_pybuffernd_coords_c; __Pyx_Buffer __pyx_pybuffer_coords_c; __Pyx_LocalBuf_ND __pyx_pybuffernd_cvertices; __Pyx_Buffer __pyx_pybuffer_cvertices; __Pyx_LocalBuf_ND __pyx_pybuffernd_d2; __Pyx_Buffer __pyx_pybuffer_d2; __Pyx_LocalBuf_ND __pyx_pybuffernd_d3; __Pyx_Buffer __pyx_pybuffer_d3; __Pyx_LocalBuf_ND __pyx_pybuffernd_d4; __Pyx_Buffer __pyx_pybuffer_d4; __Pyx_LocalBuf_ND __pyx_pybuffernd_dstrides; __Pyx_Buffer __pyx_pybuffer_dstrides; __Pyx_LocalBuf_ND __pyx_pybuffernd_fcoords; __Pyx_Buffer __pyx_pybuffer_fcoords; __Pyx_LocalBuf_ND __pyx_pybuffernd_fmask; __Pyx_Buffer __pyx_pybuffer_fmask; __Pyx_LocalBuf_ND __pyx_pybuffernd_fpmask; __Pyx_Buffer 
__pyx_pybuffer_fpmask; __Pyx_LocalBuf_ND __pyx_pybuffernd_m2; __Pyx_Buffer __pyx_pybuffer_m2; __Pyx_LocalBuf_ND __pyx_pybuffernd_m3; __Pyx_Buffer __pyx_pybuffer_m3; __Pyx_LocalBuf_ND __pyx_pybuffernd_m4; __Pyx_Buffer __pyx_pybuffer_m4; __Pyx_LocalBuf_ND __pyx_pybuffernd_mask_c; __Pyx_Buffer __pyx_pybuffer_mask_c; __Pyx_LocalBuf_ND __pyx_pybuffernd_pmask; __Pyx_Buffer __pyx_pybuffer_pmask; __Pyx_LocalBuf_ND __pyx_pybuffernd_strides; __Pyx_Buffer __pyx_pybuffer_strides; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; int __pyx_t_6; PyArrayObject *__pyx_t_7 = NULL; int __pyx_t_8; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; PyArrayObject *__pyx_t_12 = NULL; PyArrayObject *__pyx_t_13 = NULL; npy_intp __pyx_t_14; npy_intp __pyx_t_15; npy_intp __pyx_t_16; PyArrayObject *__pyx_t_17 = NULL; PyArrayObject *__pyx_t_18 = NULL; PyArrayObject *__pyx_t_19 = NULL; PyObject *__pyx_t_20 = NULL; PyArrayObject *__pyx_t_21 = NULL; PyArrayObject *__pyx_t_22 = NULL; long __pyx_t_23; __pyx_t_5numpy_intp_t __pyx_t_24; long __pyx_t_25; __pyx_t_5numpy_intp_t __pyx_t_26; long __pyx_t_27; __pyx_t_5numpy_intp_t __pyx_t_28; long __pyx_t_29; long __pyx_t_30; long __pyx_t_31; int __pyx_t_32; PyArrayObject *__pyx_t_33 = NULL; PyObject *__pyx_t_34 = NULL; PyObject *__pyx_t_35 = NULL; PyObject *__pyx_t_36 = NULL; PyObject *__pyx_t_37 = NULL; PyObject *__pyx_t_38 = NULL; PyArrayObject *__pyx_t_39 = NULL; PyArrayObject *__pyx_t_40 = NULL; PyArrayObject *__pyx_t_41 = NULL; Py_ssize_t __pyx_t_42; PyObject *(*__pyx_t_43)(PyObject *); PyArrayObject *__pyx_t_44 = NULL; PyArrayObject *__pyx_t_45 = NULL; PyArrayObject *__pyx_t_46 = NULL; PyArrayObject *__pyx_t_47 = NULL; long __pyx_t_48; long __pyx_t_49; long __pyx_t_50; npy_intp __pyx_t_51; npy_intp __pyx_t_52; npy_intp __pyx_t_53; long __pyx_t_54; npy_intp __pyx_t_55; npy_intp __pyx_t_56; npy_intp __pyx_t_57; npy_intp __pyx_t_58; npy_intp __pyx_t_59; npy_intp __pyx_t_60; npy_intp __pyx_t_61; npy_intp __pyx_t_62; npy_intp __pyx_t_63; npy_intp __pyx_t_64; npy_intp __pyx_t_65; npy_intp __pyx_t_66; npy_intp __pyx_t_67; npy_intp __pyx_t_68; npy_intp __pyx_t_69; npy_intp __pyx_t_70; npy_intp __pyx_t_71; long __pyx_t_72; npy_intp __pyx_t_73; npy_intp __pyx_t_74; long __pyx_t_75; npy_intp __pyx_t_76; long __pyx_t_77; npy_intp __pyx_t_78; long __pyx_t_79; npy_intp __pyx_t_80; long __pyx_t_81; npy_intp __pyx_t_82; long __pyx_t_83; npy_intp __pyx_t_84; long __pyx_t_85; npy_intp __pyx_t_86; npy_intp __pyx_t_87; npy_intp __pyx_t_88; npy_intp __pyx_t_89; npy_intp __pyx_t_90; npy_intp __pyx_t_91; npy_intp __pyx_t_92; npy_intp __pyx_t_93; npy_intp __pyx_t_94; npy_intp __pyx_t_95; npy_intp __pyx_t_96; npy_intp __pyx_t_97; npy_intp __pyx_t_98; npy_intp __pyx_t_99; npy_intp __pyx_t_100; npy_intp __pyx_t_101; npy_intp __pyx_t_102; npy_intp __pyx_t_103; npy_intp __pyx_t_104; npy_intp __pyx_t_105; npy_intp __pyx_t_106; npy_intp __pyx_t_107; npy_intp __pyx_t_108; npy_intp __pyx_t_109; npy_intp __pyx_t_110; npy_intp __pyx_t_111; npy_intp __pyx_t_112; npy_intp __pyx_t_113; npy_intp __pyx_t_114; npy_intp __pyx_t_115; npy_intp __pyx_t_116; npy_intp __pyx_t_117; npy_intp __pyx_t_118; npy_intp __pyx_t_119; npy_intp __pyx_t_120; npy_intp __pyx_t_121; npy_intp __pyx_t_122; npy_intp __pyx_t_123; npy_intp __pyx_t_124; npy_intp __pyx_t_125; npy_intp __pyx_t_126; npy_intp __pyx_t_127; npy_intp __pyx_t_128; npy_intp __pyx_t_129; npy_intp 
__pyx_t_130; npy_intp __pyx_t_131; npy_intp __pyx_t_132; npy_intp __pyx_t_133; npy_intp __pyx_t_134; npy_intp __pyx_t_135; npy_intp __pyx_t_136; npy_intp __pyx_t_137; npy_intp __pyx_t_138; npy_intp __pyx_t_139; npy_intp __pyx_t_140; npy_intp __pyx_t_141; npy_intp __pyx_t_142; npy_intp __pyx_t_143; npy_intp __pyx_t_144; npy_intp __pyx_t_145; npy_intp __pyx_t_146; npy_intp __pyx_t_147; npy_intp __pyx_t_148; npy_intp __pyx_t_149; long __pyx_t_150; npy_intp __pyx_t_151; long __pyx_t_152; npy_intp __pyx_t_153; npy_intp __pyx_t_154; long __pyx_t_155; npy_intp __pyx_t_156; long __pyx_t_157; npy_intp __pyx_t_158; long __pyx_t_159; npy_intp __pyx_t_160; long __pyx_t_161; npy_intp __pyx_t_162; npy_intp __pyx_t_163; npy_intp __pyx_t_164; npy_intp __pyx_t_165; npy_intp __pyx_t_166; npy_intp __pyx_t_167; npy_intp __pyx_t_168; npy_intp __pyx_t_169; npy_intp __pyx_t_170; npy_intp __pyx_t_171; npy_intp __pyx_t_172; npy_intp __pyx_t_173; npy_intp __pyx_t_174; npy_intp __pyx_t_175; npy_intp __pyx_t_176; npy_intp __pyx_t_177; npy_intp __pyx_t_178; npy_intp __pyx_t_179; npy_intp __pyx_t_180; npy_intp __pyx_t_181; npy_intp __pyx_t_182; npy_intp __pyx_t_183; npy_intp __pyx_t_184; npy_intp __pyx_t_185; npy_intp __pyx_t_186; npy_intp __pyx_t_187; npy_intp __pyx_t_188; long __pyx_t_189; npy_intp __pyx_t_190; long __pyx_t_191; npy_intp __pyx_t_192; npy_intp __pyx_t_193; long __pyx_t_194; npy_intp __pyx_t_195; long __pyx_t_196; npy_intp __pyx_t_197; npy_intp __pyx_t_198; npy_intp __pyx_t_199; npy_intp __pyx_t_200; npy_intp __pyx_t_201; npy_intp __pyx_t_202; npy_intp __pyx_t_203; double __pyx_t_204; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("Lips3d", 0); __Pyx_INCREF(__pyx_v_coords); __Pyx_INCREF(__pyx_v_mask); __pyx_pybuffer_coords_c.pybuffer.buf = NULL; __pyx_pybuffer_coords_c.refcount = 0; __pyx_pybuffernd_coords_c.data = NULL; __pyx_pybuffernd_coords_c.rcbuffer = &__pyx_pybuffer_coords_c; __pyx_pybuffer_mask_c.pybuffer.buf = NULL; __pyx_pybuffer_mask_c.refcount = 0; __pyx_pybuffernd_mask_c.data = NULL; __pyx_pybuffernd_mask_c.rcbuffer = &__pyx_pybuffer_mask_c; __pyx_pybuffer_fcoords.pybuffer.buf = NULL; __pyx_pybuffer_fcoords.refcount = 0; __pyx_pybuffernd_fcoords.data = NULL; __pyx_pybuffernd_fcoords.rcbuffer = &__pyx_pybuffer_fcoords; __pyx_pybuffer_D.pybuffer.buf = NULL; __pyx_pybuffer_D.refcount = 0; __pyx_pybuffernd_D.data = NULL; __pyx_pybuffernd_D.rcbuffer = &__pyx_pybuffer_D; __pyx_pybuffer_fmask.pybuffer.buf = NULL; __pyx_pybuffer_fmask.refcount = 0; __pyx_pybuffernd_fmask.data = NULL; __pyx_pybuffernd_fmask.rcbuffer = &__pyx_pybuffer_fmask; __pyx_pybuffer_fpmask.pybuffer.buf = NULL; __pyx_pybuffer_fpmask.refcount = 0; __pyx_pybuffernd_fpmask.data = NULL; __pyx_pybuffernd_fpmask.rcbuffer = &__pyx_pybuffer_fpmask; __pyx_pybuffer_pmask.pybuffer.buf = NULL; __pyx_pybuffer_pmask.refcount = 0; __pyx_pybuffernd_pmask.data = NULL; __pyx_pybuffernd_pmask.rcbuffer = &__pyx_pybuffer_pmask; __pyx_pybuffer_d4.pybuffer.buf = NULL; __pyx_pybuffer_d4.refcount = 0; __pyx_pybuffernd_d4.data = NULL; __pyx_pybuffernd_d4.rcbuffer = &__pyx_pybuffer_d4; __pyx_pybuffer_m4.pybuffer.buf = NULL; __pyx_pybuffer_m4.refcount = 0; __pyx_pybuffernd_m4.data = NULL; __pyx_pybuffernd_m4.rcbuffer = &__pyx_pybuffer_m4; __pyx_pybuffer_d3.pybuffer.buf = NULL; __pyx_pybuffer_d3.refcount = 0; __pyx_pybuffernd_d3.data = NULL; __pyx_pybuffernd_d3.rcbuffer = &__pyx_pybuffer_d3; __pyx_pybuffer_m3.pybuffer.buf = NULL; __pyx_pybuffer_m3.refcount = 0; __pyx_pybuffernd_m3.data = NULL; 
__pyx_pybuffernd_m3.rcbuffer = &__pyx_pybuffer_m3; __pyx_pybuffer_d2.pybuffer.buf = NULL; __pyx_pybuffer_d2.refcount = 0; __pyx_pybuffernd_d2.data = NULL; __pyx_pybuffernd_d2.rcbuffer = &__pyx_pybuffer_d2; __pyx_pybuffer_m2.pybuffer.buf = NULL; __pyx_pybuffer_m2.refcount = 0; __pyx_pybuffernd_m2.data = NULL; __pyx_pybuffernd_m2.rcbuffer = &__pyx_pybuffer_m2; __pyx_pybuffer_cvertices.pybuffer.buf = NULL; __pyx_pybuffer_cvertices.refcount = 0; __pyx_pybuffernd_cvertices.data = NULL; __pyx_pybuffernd_cvertices.rcbuffer = &__pyx_pybuffer_cvertices; __pyx_pybuffer_strides.pybuffer.buf = NULL; __pyx_pybuffer_strides.refcount = 0; __pyx_pybuffernd_strides.data = NULL; __pyx_pybuffernd_strides.rcbuffer = &__pyx_pybuffer_strides; __pyx_pybuffer_dstrides.pybuffer.buf = NULL; __pyx_pybuffer_dstrides.refcount = 0; __pyx_pybuffernd_dstrides.data = NULL; __pyx_pybuffernd_dstrides.rcbuffer = &__pyx_pybuffer_dstrides; /* "nipy/algorithms/statistics/intvol.pyx":504 * Journal of the American Statistical Association, 102(479):913-928. * """ * if mask.shape != coords.shape[1:]: # <<<<<<<<<<<<<< * raise ValueError('shape of mask does not match coordinates') * # if the data can be squeezed, we must use the lower dimensional function */ __pyx_t_1 = PyObject_GetAttr(__pyx_v_mask, __pyx_n_s__shape); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 504; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_GetAttr(__pyx_v_coords, __pyx_n_s__shape); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 504; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PySequence_GetSlice(__pyx_t_2, 1, PY_SSIZE_T_MAX); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 504; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyObject_RichCompare(__pyx_t_1, __pyx_t_3, Py_NE); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 504; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 504; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (__pyx_t_4) { /* "nipy/algorithms/statistics/intvol.pyx":505 * """ * if mask.shape != coords.shape[1:]: * raise ValueError('shape of mask does not match coordinates') # <<<<<<<<<<<<<< * # if the data can be squeezed, we must use the lower dimensional function * mask = np.squeeze(mask) */ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_18), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 505; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; {__pyx_filename = __pyx_f[0]; __pyx_lineno = 505; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L3; } __pyx_L3:; /* "nipy/algorithms/statistics/intvol.pyx":507 * raise ValueError('shape of mask does not match coordinates') * # if the data can be squeezed, we must use the lower dimensional function * mask = np.squeeze(mask) # <<<<<<<<<<<<<< * if mask.ndim < 3: * value = np.zeros(4) */ __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 507; 
__pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__squeeze); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 507; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 507; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_v_mask); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_mask); __Pyx_GIVEREF(__pyx_v_mask); __pyx_t_1 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 507; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_v_mask); __pyx_v_mask = __pyx_t_1; __pyx_t_1 = 0; /* "nipy/algorithms/statistics/intvol.pyx":508 * # if the data can be squeezed, we must use the lower dimensional function * mask = np.squeeze(mask) * if mask.ndim < 3: # <<<<<<<<<<<<<< * value = np.zeros(4) * coords = coords.reshape((coords.shape[0],) + mask.shape) */ __pyx_t_1 = PyObject_GetAttr(__pyx_v_mask, __pyx_n_s__ndim); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 508; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_RichCompare(__pyx_t_1, __pyx_int_3, Py_LT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 508; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 508; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (__pyx_t_4) { /* "nipy/algorithms/statistics/intvol.pyx":509 * mask = np.squeeze(mask) * if mask.ndim < 3: * value = np.zeros(4) # <<<<<<<<<<<<<< * coords = coords.reshape((coords.shape[0],) + mask.shape) * if mask.ndim == 2: */ __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 509; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__zeros); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 509; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_k_tuple_19), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 509; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_value = __pyx_t_2; __pyx_t_2 = 0; /* "nipy/algorithms/statistics/intvol.pyx":510 * if mask.ndim < 3: * value = np.zeros(4) * coords = coords.reshape((coords.shape[0],) + mask.shape) # <<<<<<<<<<<<<< * if mask.ndim == 2: * value[:3] = Lips2d(coords, mask) */ __pyx_t_2 = PyObject_GetAttr(__pyx_v_coords, __pyx_n_s__reshape); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 510; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = PyObject_GetAttr(__pyx_v_coords, __pyx_n_s__shape); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 510; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
__Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 510; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 510; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyObject_GetAttr(__pyx_v_mask, __pyx_n_s__shape); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 510; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyNumber_Add(((PyObject *)__pyx_t_1), __pyx_t_3); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 510; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 510; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 510; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_v_coords); __pyx_v_coords = __pyx_t_5; __pyx_t_5 = 0; /* "nipy/algorithms/statistics/intvol.pyx":511 * value = np.zeros(4) * coords = coords.reshape((coords.shape[0],) + mask.shape) * if mask.ndim == 2: # <<<<<<<<<<<<<< * value[:3] = Lips2d(coords, mask) * elif mask.ndim == 1: */ __pyx_t_5 = PyObject_GetAttr(__pyx_v_mask, __pyx_n_s__ndim); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 511; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_t_5, __pyx_int_2, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 511; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 511; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_4) { /* "nipy/algorithms/statistics/intvol.pyx":512 * coords = coords.reshape((coords.shape[0],) + mask.shape) * if mask.ndim == 2: * value[:3] = Lips2d(coords, mask) # <<<<<<<<<<<<<< * elif mask.ndim == 1: * value[:2] = Lips1d(coords, mask) */ __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__Lips2d); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 512; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 512; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(__pyx_v_coords); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_coords); __Pyx_GIVEREF(__pyx_v_coords); __Pyx_INCREF(__pyx_v_mask); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_v_mask); __Pyx_GIVEREF(__pyx_v_mask); __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 512; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; if (__Pyx_PySequence_SetSlice(__pyx_v_value, 0, 3, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 512; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; goto __pyx_L5; } /* "nipy/algorithms/statistics/intvol.pyx":513 * if mask.ndim == 2: * value[:3] = Lips2d(coords, mask) * elif mask.ndim == 1: # <<<<<<<<<<<<<< * value[:2] = Lips1d(coords, mask) * return value */ __pyx_t_2 = PyObject_GetAttr(__pyx_v_mask, __pyx_n_s__ndim); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 513; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_5 = PyObject_RichCompare(__pyx_t_2, __pyx_int_1, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 513; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 513; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_4) { /* "nipy/algorithms/statistics/intvol.pyx":514 * value[:3] = Lips2d(coords, mask) * elif mask.ndim == 1: * value[:2] = Lips1d(coords, mask) # <<<<<<<<<<<<<< * return value * */ __pyx_t_5 = __Pyx_GetName(__pyx_m, __pyx_n_s__Lips1d); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_v_coords); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_coords); __Pyx_GIVEREF(__pyx_v_coords); __Pyx_INCREF(__pyx_v_mask); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_v_mask); __Pyx_GIVEREF(__pyx_v_mask); __pyx_t_3 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; if (__Pyx_PySequence_SetSlice(__pyx_v_value, 0, 2, __pyx_t_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L5; } __pyx_L5:; /* "nipy/algorithms/statistics/intvol.pyx":515 * elif mask.ndim == 1: * value[:2] = Lips1d(coords, mask) * return value # <<<<<<<<<<<<<< * * if not set(np.unique(mask)).issubset([0,1]): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_value); __pyx_r = __pyx_v_value; goto __pyx_L0; goto __pyx_L4; } __pyx_L4:; /* "nipy/algorithms/statistics/intvol.pyx":517 * return value * * if not set(np.unique(mask)).issubset([0,1]): # <<<<<<<<<<<<<< * raise ValueError('mask should be filled with 0/1 ' * 'values, but be of type np.int') */ __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 517; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__unique); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 517; __pyx_clineno = __LINE__; goto 
__pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 517; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_mask); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_mask); __Pyx_GIVEREF(__pyx_v_mask); __pyx_t_5 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 517; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 517; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyObject_Call(((PyObject *)((PyObject*)(&PySet_Type))), ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 517; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __pyx_t_3 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__issubset); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 517; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyList_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 517; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(__pyx_int_0); PyList_SET_ITEM(__pyx_t_5, 0, __pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); __Pyx_INCREF(__pyx_int_1); PyList_SET_ITEM(__pyx_t_5, 1, __pyx_int_1); __Pyx_GIVEREF(__pyx_int_1); __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 517; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_t_5)); __Pyx_GIVEREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; __pyx_t_5 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 517; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 517; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_6 = (!__pyx_t_4); if (__pyx_t_6) { /* "nipy/algorithms/statistics/intvol.pyx":518 * * if not set(np.unique(mask)).issubset([0,1]): * raise ValueError('mask should be filled with 0/1 ' # <<<<<<<<<<<<<< * 'values, but be of type np.int') * cdef: */ __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_20), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 518; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; {__pyx_filename = __pyx_f[0]; __pyx_lineno = 518; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L6; } __pyx_L6:; /* "nipy/algorithms/statistics/intvol.pyx":549 * double res * * coords_c = coords # <<<<<<<<<<<<<< * mask_c = mask * l0 = 0; l1 = 0; l2 = 0; l3 = 0 */ if (!(likely(((__pyx_v_coords) 
== Py_None) || likely(__Pyx_TypeTest(__pyx_v_coords, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 549; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_7 = ((PyArrayObject *)__pyx_v_coords); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_coords_c.rcbuffer->pybuffer); __pyx_t_8 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_coords_c.rcbuffer->pybuffer, (PyObject*)__pyx_t_7, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES, 4, 0, __pyx_stack); if (unlikely(__pyx_t_8 < 0)) { PyErr_Fetch(&__pyx_t_9, &__pyx_t_10, &__pyx_t_11); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_coords_c.rcbuffer->pybuffer, (PyObject*)__pyx_v_coords_c, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES, 4, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_11); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_9, __pyx_t_10, __pyx_t_11); } } __pyx_pybuffernd_coords_c.diminfo[0].strides = __pyx_pybuffernd_coords_c.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_coords_c.diminfo[0].shape = __pyx_pybuffernd_coords_c.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_coords_c.diminfo[1].strides = __pyx_pybuffernd_coords_c.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_coords_c.diminfo[1].shape = __pyx_pybuffernd_coords_c.rcbuffer->pybuffer.shape[1]; __pyx_pybuffernd_coords_c.diminfo[2].strides = __pyx_pybuffernd_coords_c.rcbuffer->pybuffer.strides[2]; __pyx_pybuffernd_coords_c.diminfo[2].shape = __pyx_pybuffernd_coords_c.rcbuffer->pybuffer.shape[2]; __pyx_pybuffernd_coords_c.diminfo[3].strides = __pyx_pybuffernd_coords_c.rcbuffer->pybuffer.strides[3]; __pyx_pybuffernd_coords_c.diminfo[3].shape = __pyx_pybuffernd_coords_c.rcbuffer->pybuffer.shape[3]; if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 549; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_7 = 0; __Pyx_INCREF(__pyx_v_coords); __pyx_v_coords_c = ((PyArrayObject *)__pyx_v_coords); /* "nipy/algorithms/statistics/intvol.pyx":550 * * coords_c = coords * mask_c = mask # <<<<<<<<<<<<<< * l0 = 0; l1 = 0; l2 = 0; l3 = 0 * */ if (!(likely(((__pyx_v_mask) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_mask, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 550; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_12 = ((PyArrayObject *)__pyx_v_mask); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_mask_c.rcbuffer->pybuffer); __pyx_t_8 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_mask_c.rcbuffer->pybuffer, (PyObject*)__pyx_t_12, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 3, 0, __pyx_stack); if (unlikely(__pyx_t_8 < 0)) { PyErr_Fetch(&__pyx_t_11, &__pyx_t_10, &__pyx_t_9); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_mask_c.rcbuffer->pybuffer, (PyObject*)__pyx_v_mask_c, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 3, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_9); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_11, __pyx_t_10, __pyx_t_9); } } __pyx_pybuffernd_mask_c.diminfo[0].strides = __pyx_pybuffernd_mask_c.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_mask_c.diminfo[0].shape = __pyx_pybuffernd_mask_c.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_mask_c.diminfo[1].strides = __pyx_pybuffernd_mask_c.rcbuffer->pybuffer.strides[1]; 
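/* From intvol.pyx:549-550 the Python-level coords and mask are re-bound to
 * typed buffers: coords_c as a 4-d np.float_t view and mask_c as a 3-d
 * np.intp_t view, so the inner loops can index the raw memory directly
 * instead of going through the Python object protocol.
 */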
__pyx_pybuffernd_mask_c.diminfo[1].shape = __pyx_pybuffernd_mask_c.rcbuffer->pybuffer.shape[1]; __pyx_pybuffernd_mask_c.diminfo[2].strides = __pyx_pybuffernd_mask_c.rcbuffer->pybuffer.strides[2]; __pyx_pybuffernd_mask_c.diminfo[2].shape = __pyx_pybuffernd_mask_c.rcbuffer->pybuffer.shape[2]; if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 550; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_12 = 0; __Pyx_INCREF(__pyx_v_mask); __pyx_v_mask_c = ((PyArrayObject *)__pyx_v_mask); /* "nipy/algorithms/statistics/intvol.pyx":551 * coords_c = coords * mask_c = mask * l0 = 0; l1 = 0; l2 = 0; l3 = 0 # <<<<<<<<<<<<<< * * pmask_shape = np.array(mask.shape) + 1 */ __pyx_v_l0 = 0.0; __pyx_v_l1 = 0.0; __pyx_v_l2 = 0.0; __pyx_v_l3 = 0.0; /* "nipy/algorithms/statistics/intvol.pyx":553 * l0 = 0; l1 = 0; l2 = 0; l3 = 0 * * pmask_shape = np.array(mask.shape) + 1 # <<<<<<<<<<<<<< * pmask = np.zeros(pmask_shape, np.int) * pmask[:-1,:-1,:-1] = mask_c */ __pyx_t_5 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 553; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_2 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__array); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 553; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyObject_GetAttr(__pyx_v_mask, __pyx_n_s__shape); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 553; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 553; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 553; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __pyx_t_3 = PyNumber_Add(__pyx_t_5, __pyx_int_1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 553; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_pmask_shape = __pyx_t_3; __pyx_t_3 = 0; /* "nipy/algorithms/statistics/intvol.pyx":554 * * pmask_shape = np.array(mask.shape) + 1 * pmask = np.zeros(pmask_shape, np.int) # <<<<<<<<<<<<<< * pmask[:-1,:-1,:-1] = mask_c * */ __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 554; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__zeros); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 554; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 554; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__int); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 554; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); 
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 554; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_pmask_shape); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_pmask_shape); __Pyx_GIVEREF(__pyx_v_pmask_shape); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 554; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 554; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_13 = ((PyArrayObject *)__pyx_t_2); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_pmask.rcbuffer->pybuffer); __pyx_t_8 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_pmask.rcbuffer->pybuffer, (PyObject*)__pyx_t_13, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 3, 0, __pyx_stack); if (unlikely(__pyx_t_8 < 0)) { PyErr_Fetch(&__pyx_t_9, &__pyx_t_10, &__pyx_t_11); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_pmask.rcbuffer->pybuffer, (PyObject*)__pyx_v_pmask, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 3, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_11); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_9, __pyx_t_10, __pyx_t_11); } } __pyx_pybuffernd_pmask.diminfo[0].strides = __pyx_pybuffernd_pmask.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_pmask.diminfo[0].shape = __pyx_pybuffernd_pmask.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_pmask.diminfo[1].strides = __pyx_pybuffernd_pmask.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_pmask.diminfo[1].shape = __pyx_pybuffernd_pmask.rcbuffer->pybuffer.shape[1]; __pyx_pybuffernd_pmask.diminfo[2].strides = __pyx_pybuffernd_pmask.rcbuffer->pybuffer.strides[2]; __pyx_pybuffernd_pmask.diminfo[2].shape = __pyx_pybuffernd_pmask.rcbuffer->pybuffer.shape[2]; if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 554; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_13 = 0; __pyx_v_pmask = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "nipy/algorithms/statistics/intvol.pyx":555 * pmask_shape = np.array(mask.shape) + 1 * pmask = np.zeros(pmask_shape, np.int) * pmask[:-1,:-1,:-1] = mask_c # <<<<<<<<<<<<<< * * s0, s1, s2 = (pmask.shape[0], pmask.shape[1], pmask.shape[2]) */ if (PyObject_SetItem(((PyObject *)__pyx_v_pmask), ((PyObject *)__pyx_k_tuple_24), ((PyObject *)__pyx_v_mask_c)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 555; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/algorithms/statistics/intvol.pyx":557 * pmask[:-1,:-1,:-1] = mask_c * * s0, s1, s2 = (pmask.shape[0], pmask.shape[1], pmask.shape[2]) # <<<<<<<<<<<<<< * * fpmask = pmask.reshape(-1) */ __pyx_t_14 = (__pyx_v_pmask->dimensions[0]); __pyx_t_15 = (__pyx_v_pmask->dimensions[1]); __pyx_t_16 = (__pyx_v_pmask->dimensions[2]); __pyx_v_s0 = __pyx_t_14; __pyx_v_s1 = __pyx_t_15; __pyx_v_s2 = __pyx_t_16; /* "nipy/algorithms/statistics/intvol.pyx":559 * s0, s1, s2 = (pmask.shape[0], pmask.shape[1], pmask.shape[2]) * * fpmask = pmask.reshape(-1) # <<<<<<<<<<<<<< 
* fmask = mask_c.reshape(-1) * fcoords = coords_c.reshape((coords_c.shape[0], -1)) */ __pyx_t_2 = PyObject_GetAttr(((PyObject *)__pyx_v_pmask), __pyx_n_s__reshape); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 559; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_k_tuple_25), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 559; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 559; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_17 = ((PyArrayObject *)__pyx_t_3); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_fpmask.rcbuffer->pybuffer); __pyx_t_8 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_fpmask.rcbuffer->pybuffer, (PyObject*)__pyx_t_17, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_8 < 0)) { PyErr_Fetch(&__pyx_t_11, &__pyx_t_10, &__pyx_t_9); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_fpmask.rcbuffer->pybuffer, (PyObject*)__pyx_v_fpmask, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_9); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_11, __pyx_t_10, __pyx_t_9); } } __pyx_pybuffernd_fpmask.diminfo[0].strides = __pyx_pybuffernd_fpmask.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_fpmask.diminfo[0].shape = __pyx_pybuffernd_fpmask.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 559; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_17 = 0; __pyx_v_fpmask = ((PyArrayObject *)__pyx_t_3); __pyx_t_3 = 0; /* "nipy/algorithms/statistics/intvol.pyx":560 * * fpmask = pmask.reshape(-1) * fmask = mask_c.reshape(-1) # <<<<<<<<<<<<<< * fcoords = coords_c.reshape((coords_c.shape[0], -1)) * */ __pyx_t_3 = PyObject_GetAttr(((PyObject *)__pyx_v_mask_c), __pyx_n_s__reshape); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 560; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_k_tuple_26), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 560; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 560; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_18 = ((PyArrayObject *)__pyx_t_2); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_fmask.rcbuffer->pybuffer); __pyx_t_8 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_fmask.rcbuffer->pybuffer, (PyObject*)__pyx_t_18, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_8 < 0)) { PyErr_Fetch(&__pyx_t_9, &__pyx_t_10, &__pyx_t_11); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_fmask.rcbuffer->pybuffer, (PyObject*)__pyx_v_fmask, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_9); 
Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_11); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_9, __pyx_t_10, __pyx_t_11); } } __pyx_pybuffernd_fmask.diminfo[0].strides = __pyx_pybuffernd_fmask.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_fmask.diminfo[0].shape = __pyx_pybuffernd_fmask.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 560; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_18 = 0; __pyx_v_fmask = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "nipy/algorithms/statistics/intvol.pyx":561 * fpmask = pmask.reshape(-1) * fmask = mask_c.reshape(-1) * fcoords = coords_c.reshape((coords_c.shape[0], -1)) # <<<<<<<<<<<<<< * * # First do the interior contributions. */ __pyx_t_2 = PyObject_GetAttr(((PyObject *)__pyx_v_coords_c), __pyx_n_s__reshape); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 561; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyInt_to_py_Py_intptr_t((__pyx_v_coords_c->dimensions[0])); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 561; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 561; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __Pyx_INCREF(__pyx_int_neg_1); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_int_neg_1); __Pyx_GIVEREF(__pyx_int_neg_1); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 561; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_t_5)); __Pyx_GIVEREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; __pyx_t_5 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 561; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 561; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_19 = ((PyArrayObject *)__pyx_t_5); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_fcoords.rcbuffer->pybuffer); __pyx_t_8 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_fcoords.rcbuffer->pybuffer, (PyObject*)__pyx_t_19, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack); if (unlikely(__pyx_t_8 < 0)) { PyErr_Fetch(&__pyx_t_11, &__pyx_t_10, &__pyx_t_9); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_fcoords.rcbuffer->pybuffer, (PyObject*)__pyx_v_fcoords, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_9); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_11, __pyx_t_10, __pyx_t_9); } } __pyx_pybuffernd_fcoords.diminfo[0].strides = __pyx_pybuffernd_fcoords.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_fcoords.diminfo[0].shape = __pyx_pybuffernd_fcoords.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_fcoords.diminfo[1].strides = __pyx_pybuffernd_fcoords.rcbuffer->pybuffer.strides[1]; 
__pyx_pybuffernd_fcoords.diminfo[1].shape = __pyx_pybuffernd_fcoords.rcbuffer->pybuffer.shape[1]; if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 561; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_19 = 0; __pyx_v_fcoords = ((PyArrayObject *)__pyx_t_5); __pyx_t_5 = 0; /* "nipy/algorithms/statistics/intvol.pyx":572 * np.ndarray[np.intp_t, ndim=1] strides * np.ndarray[np.intp_t, ndim=1] dstrides * strides = np.array(strides_from(pmask_shape, np.bool), dtype=np.intp) # <<<<<<<<<<<<<< * dstrides = np.array(strides_from(mask.shape, np.bool), dtype=np.intp) * ss0, ss1, ss2 = strides[0], strides[1], strides[2] */ __pyx_t_5 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 572; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__array); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 572; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_GetName(__pyx_m, __pyx_n_s__strides_from); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 572; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 572; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__bool); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 572; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 572; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_v_pmask_shape); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_pmask_shape); __Pyx_GIVEREF(__pyx_v_pmask_shape); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 572; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 572; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 572; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_1)); __pyx_t_5 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 572; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_20 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__intp); if (unlikely(!__pyx_t_20)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 572; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_20); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__dtype), __pyx_t_20) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 572; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
__Pyx_DECREF(__pyx_t_20); __pyx_t_20 = 0; __pyx_t_20 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), ((PyObject *)__pyx_t_1)); if (unlikely(!__pyx_t_20)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 572; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_20); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; if (!(likely(((__pyx_t_20) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_20, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 572; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_21 = ((PyArrayObject *)__pyx_t_20); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_strides.rcbuffer->pybuffer); __pyx_t_8 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_strides.rcbuffer->pybuffer, (PyObject*)__pyx_t_21, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_8 < 0)) { PyErr_Fetch(&__pyx_t_9, &__pyx_t_10, &__pyx_t_11); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_strides.rcbuffer->pybuffer, (PyObject*)__pyx_v_strides, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_11); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_9, __pyx_t_10, __pyx_t_11); } } __pyx_pybuffernd_strides.diminfo[0].strides = __pyx_pybuffernd_strides.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_strides.diminfo[0].shape = __pyx_pybuffernd_strides.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 572; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_21 = 0; __pyx_v_strides = ((PyArrayObject *)__pyx_t_20); __pyx_t_20 = 0; /* "nipy/algorithms/statistics/intvol.pyx":573 * np.ndarray[np.intp_t, ndim=1] dstrides * strides = np.array(strides_from(pmask_shape, np.bool), dtype=np.intp) * dstrides = np.array(strides_from(mask.shape, np.bool), dtype=np.intp) # <<<<<<<<<<<<<< * ss0, ss1, ss2 = strides[0], strides[1], strides[2] * ss0d, ss1d, ss2d = dstrides[0], dstrides[1], dstrides[2] */ __pyx_t_20 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_20)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 573; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_20); __pyx_t_1 = PyObject_GetAttr(__pyx_t_20, __pyx_n_s__array); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 573; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_20); __pyx_t_20 = 0; __pyx_t_20 = __Pyx_GetName(__pyx_m, __pyx_n_s__strides_from); if (unlikely(!__pyx_t_20)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 573; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_20); __pyx_t_2 = PyObject_GetAttr(__pyx_v_mask, __pyx_n_s__shape); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 573; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 573; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__bool); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 573; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; 
__pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 573; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __pyx_t_2 = 0; __pyx_t_5 = 0; __pyx_t_5 = PyObject_Call(__pyx_t_20, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 573; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_20); __pyx_t_20 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 573; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyDict_New(); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 573; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_5)); __pyx_t_20 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_20)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 573; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_20); __pyx_t_2 = PyObject_GetAttr(__pyx_t_20, __pyx_n_s__intp); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 573; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_20); __pyx_t_20 = 0; if (PyDict_SetItem(__pyx_t_5, ((PyObject *)__pyx_n_s__dtype), __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 573; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_3), ((PyObject *)__pyx_t_5)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 573; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 573; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_22 = ((PyArrayObject *)__pyx_t_2); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_dstrides.rcbuffer->pybuffer); __pyx_t_8 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_dstrides.rcbuffer->pybuffer, (PyObject*)__pyx_t_22, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_8 < 0)) { PyErr_Fetch(&__pyx_t_11, &__pyx_t_10, &__pyx_t_9); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_dstrides.rcbuffer->pybuffer, (PyObject*)__pyx_v_dstrides, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_9); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_11, __pyx_t_10, __pyx_t_9); } } __pyx_pybuffernd_dstrides.diminfo[0].strides = __pyx_pybuffernd_dstrides.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_dstrides.diminfo[0].shape = __pyx_pybuffernd_dstrides.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 573; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_22 = 0; 
__pyx_v_dstrides = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "nipy/algorithms/statistics/intvol.pyx":574 * strides = np.array(strides_from(pmask_shape, np.bool), dtype=np.intp) * dstrides = np.array(strides_from(mask.shape, np.bool), dtype=np.intp) * ss0, ss1, ss2 = strides[0], strides[1], strides[2] # <<<<<<<<<<<<<< * ss0d, ss1d, ss2d = dstrides[0], dstrides[1], dstrides[2] * verts = [] */ __pyx_t_23 = 0; __pyx_t_8 = -1; if (__pyx_t_23 < 0) { __pyx_t_23 += __pyx_pybuffernd_strides.diminfo[0].shape; if (unlikely(__pyx_t_23 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_23 >= __pyx_pybuffernd_strides.diminfo[0].shape)) __pyx_t_8 = 0; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 574; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_24 = (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_strides.rcbuffer->pybuffer.buf, __pyx_t_23, __pyx_pybuffernd_strides.diminfo[0].strides)); __pyx_t_25 = 1; __pyx_t_8 = -1; if (__pyx_t_25 < 0) { __pyx_t_25 += __pyx_pybuffernd_strides.diminfo[0].shape; if (unlikely(__pyx_t_25 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_25 >= __pyx_pybuffernd_strides.diminfo[0].shape)) __pyx_t_8 = 0; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 574; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_26 = (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_strides.rcbuffer->pybuffer.buf, __pyx_t_25, __pyx_pybuffernd_strides.diminfo[0].strides)); __pyx_t_27 = 2; __pyx_t_8 = -1; if (__pyx_t_27 < 0) { __pyx_t_27 += __pyx_pybuffernd_strides.diminfo[0].shape; if (unlikely(__pyx_t_27 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_27 >= __pyx_pybuffernd_strides.diminfo[0].shape)) __pyx_t_8 = 0; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 574; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_28 = (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_strides.rcbuffer->pybuffer.buf, __pyx_t_27, __pyx_pybuffernd_strides.diminfo[0].strides)); __pyx_v_ss0 = __pyx_t_24; __pyx_v_ss1 = __pyx_t_26; __pyx_v_ss2 = __pyx_t_28; /* "nipy/algorithms/statistics/intvol.pyx":575 * dstrides = np.array(strides_from(mask.shape, np.bool), dtype=np.intp) * ss0, ss1, ss2 = strides[0], strides[1], strides[2] * ss0d, ss1d, ss2d = dstrides[0], dstrides[1], dstrides[2] # <<<<<<<<<<<<<< * verts = [] * for i in range(2): */ __pyx_t_29 = 0; __pyx_t_8 = -1; if (__pyx_t_29 < 0) { __pyx_t_29 += __pyx_pybuffernd_dstrides.diminfo[0].shape; if (unlikely(__pyx_t_29 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_29 >= __pyx_pybuffernd_dstrides.diminfo[0].shape)) __pyx_t_8 = 0; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 575; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_2 = __Pyx_PyInt_to_py_Py_intptr_t((*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_dstrides.rcbuffer->pybuffer.buf, __pyx_t_29, __pyx_pybuffernd_dstrides.diminfo[0].strides))); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 575; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_30 = 1; __pyx_t_8 = -1; if (__pyx_t_30 < 0) { __pyx_t_30 += __pyx_pybuffernd_dstrides.diminfo[0].shape; if (unlikely(__pyx_t_30 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_30 >= __pyx_pybuffernd_dstrides.diminfo[0].shape)) 
__pyx_t_8 = 0; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 575; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_5 = __Pyx_PyInt_to_py_Py_intptr_t((*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_dstrides.rcbuffer->pybuffer.buf, __pyx_t_30, __pyx_pybuffernd_dstrides.diminfo[0].strides))); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 575; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_31 = 2; __pyx_t_8 = -1; if (__pyx_t_31 < 0) { __pyx_t_31 += __pyx_pybuffernd_dstrides.diminfo[0].shape; if (unlikely(__pyx_t_31 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_31 >= __pyx_pybuffernd_dstrides.diminfo[0].shape)) __pyx_t_8 = 0; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 575; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_3 = __Pyx_PyInt_to_py_Py_intptr_t((*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_dstrides.rcbuffer->pybuffer.buf, __pyx_t_31, __pyx_pybuffernd_dstrides.diminfo[0].strides))); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 575; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_v_ss0d = __pyx_t_2; __pyx_t_2 = 0; __pyx_v_ss1d = __pyx_t_5; __pyx_t_5 = 0; __pyx_v_ss2d = __pyx_t_3; __pyx_t_3 = 0; /* "nipy/algorithms/statistics/intvol.pyx":576 * ss0, ss1, ss2 = strides[0], strides[1], strides[2] * ss0d, ss1d, ss2d = dstrides[0], dstrides[1], dstrides[2] * verts = [] # <<<<<<<<<<<<<< * for i in range(2): * for j in range(2): */ __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 576; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_v_verts = __pyx_t_3; __pyx_t_3 = 0; /* "nipy/algorithms/statistics/intvol.pyx":577 * ss0d, ss1d, ss2d = dstrides[0], dstrides[1], dstrides[2] * verts = [] * for i in range(2): # <<<<<<<<<<<<<< * for j in range(2): * for k in range(2): */ for (__pyx_t_16 = 0; __pyx_t_16 < 2; __pyx_t_16+=1) { __pyx_v_i = __pyx_t_16; /* "nipy/algorithms/statistics/intvol.pyx":578 * verts = [] * for i in range(2): * for j in range(2): # <<<<<<<<<<<<<< * for k in range(2): * verts.append(ss0d * i + ss1d * j + ss2d * k) */ for (__pyx_t_15 = 0; __pyx_t_15 < 2; __pyx_t_15+=1) { __pyx_v_j = __pyx_t_15; /* "nipy/algorithms/statistics/intvol.pyx":579 * for i in range(2): * for j in range(2): * for k in range(2): # <<<<<<<<<<<<<< * verts.append(ss0d * i + ss1d * j + ss2d * k) * cvertices = np.array(sorted(verts), np.intp) */ for (__pyx_t_14 = 0; __pyx_t_14 < 2; __pyx_t_14+=1) { __pyx_v_k = __pyx_t_14; /* "nipy/algorithms/statistics/intvol.pyx":580 * for j in range(2): * for k in range(2): * verts.append(ss0d * i + ss1d * j + ss2d * k) # <<<<<<<<<<<<<< * cvertices = np.array(sorted(verts), np.intp) * */ __pyx_t_3 = __Pyx_PyInt_to_py_Py_intptr_t(__pyx_v_i); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 580; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyNumber_Multiply(__pyx_v_ss0d, __pyx_t_3); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 580; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyInt_to_py_Py_intptr_t(__pyx_v_j); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 580; __pyx_clineno = __LINE__; goto 
__pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyNumber_Multiply(__pyx_v_ss1d, __pyx_t_3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 580; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyNumber_Add(__pyx_t_5, __pyx_t_2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 580; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyInt_to_py_Py_intptr_t(__pyx_v_k); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 580; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_5 = PyNumber_Multiply(__pyx_v_ss2d, __pyx_t_2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 580; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyNumber_Add(__pyx_t_3, __pyx_t_5); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 580; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_32 = PyList_Append(__pyx_v_verts, __pyx_t_2); if (unlikely(__pyx_t_32 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 580; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } } } /* "nipy/algorithms/statistics/intvol.pyx":581 * for k in range(2): * verts.append(ss0d * i + ss1d * j + ss2d * k) * cvertices = np.array(sorted(verts), np.intp) # <<<<<<<<<<<<<< * * union = join_complexes(*[cube_with_strides_center((0,0,1), strides), */ __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 581; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_5 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__array); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 581; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 581; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(((PyObject *)__pyx_v_verts)); PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_v_verts)); __Pyx_GIVEREF(((PyObject *)__pyx_v_verts)); __pyx_t_3 = PyObject_Call(__pyx_builtin_sorted, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 581; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 581; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__intp); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 581; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 581; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_2, 1, 
__pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_3 = 0; __pyx_t_1 = 0; __pyx_t_1 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 581; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 581; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_33 = ((PyArrayObject *)__pyx_t_1); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_cvertices.rcbuffer->pybuffer); __pyx_t_8 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_cvertices.rcbuffer->pybuffer, (PyObject*)__pyx_t_33, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_8 < 0)) { PyErr_Fetch(&__pyx_t_9, &__pyx_t_10, &__pyx_t_11); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_cvertices.rcbuffer->pybuffer, (PyObject*)__pyx_v_cvertices, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_11); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_9, __pyx_t_10, __pyx_t_11); } } __pyx_pybuffernd_cvertices.diminfo[0].strides = __pyx_pybuffernd_cvertices.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_cvertices.diminfo[0].shape = __pyx_pybuffernd_cvertices.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 581; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_33 = 0; __pyx_v_cvertices = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/statistics/intvol.pyx":583 * cvertices = np.array(sorted(verts), np.intp) * * union = join_complexes(*[cube_with_strides_center((0,0,1), strides), # <<<<<<<<<<<<<< * cube_with_strides_center((0,1,0), strides), * cube_with_strides_center((0,1,1), strides), */ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__join_complexes); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 583; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s_8); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 583; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 583; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(((PyObject *)__pyx_k_tuple_27)); PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_k_tuple_27)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_27)); __Pyx_INCREF(((PyObject *)__pyx_v_strides)); PyTuple_SET_ITEM(__pyx_t_5, 1, ((PyObject *)__pyx_v_strides)); __Pyx_GIVEREF(((PyObject *)__pyx_v_strides)); __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 583; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; /* "nipy/algorithms/statistics/intvol.pyx":584 * * union = join_complexes(*[cube_with_strides_center((0,0,1), strides), * cube_with_strides_center((0,1,0), strides), # <<<<<<<<<<<<<< * cube_with_strides_center((0,1,1), strides), * 
cube_with_strides_center((1,0,0), strides), */ __pyx_t_5 = __Pyx_GetName(__pyx_m, __pyx_n_s_8); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 584; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 584; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(((PyObject *)__pyx_k_tuple_28)); PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_k_tuple_28)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_28)); __Pyx_INCREF(((PyObject *)__pyx_v_strides)); PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)__pyx_v_strides)); __Pyx_GIVEREF(((PyObject *)__pyx_v_strides)); __pyx_t_20 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_20)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 584; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_20); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; /* "nipy/algorithms/statistics/intvol.pyx":585 * union = join_complexes(*[cube_with_strides_center((0,0,1), strides), * cube_with_strides_center((0,1,0), strides), * cube_with_strides_center((0,1,1), strides), # <<<<<<<<<<<<<< * cube_with_strides_center((1,0,0), strides), * cube_with_strides_center((1,0,1), strides), */ __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s_8); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 585; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 585; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(((PyObject *)__pyx_k_tuple_29)); PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_k_tuple_29)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_29)); __Pyx_INCREF(((PyObject *)__pyx_v_strides)); PyTuple_SET_ITEM(__pyx_t_5, 1, ((PyObject *)__pyx_v_strides)); __Pyx_GIVEREF(((PyObject *)__pyx_v_strides)); __pyx_t_34 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_34)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 585; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_34); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; /* "nipy/algorithms/statistics/intvol.pyx":586 * cube_with_strides_center((0,1,0), strides), * cube_with_strides_center((0,1,1), strides), * cube_with_strides_center((1,0,0), strides), # <<<<<<<<<<<<<< * cube_with_strides_center((1,0,1), strides), * cube_with_strides_center((1,1,0), strides), */ __pyx_t_5 = __Pyx_GetName(__pyx_m, __pyx_n_s_8); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 586; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 586; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(((PyObject *)__pyx_k_tuple_30)); PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_k_tuple_30)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_30)); __Pyx_INCREF(((PyObject *)__pyx_v_strides)); PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)__pyx_v_strides)); __Pyx_GIVEREF(((PyObject *)__pyx_v_strides)); __pyx_t_35 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_35)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 586; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
__Pyx_GOTREF(__pyx_t_35); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; /* "nipy/algorithms/statistics/intvol.pyx":587 * cube_with_strides_center((0,1,1), strides), * cube_with_strides_center((1,0,0), strides), * cube_with_strides_center((1,0,1), strides), # <<<<<<<<<<<<<< * cube_with_strides_center((1,1,0), strides), * cube_with_strides_center((1,1,1), strides)]) */ __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s_8); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 587; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 587; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(((PyObject *)__pyx_k_tuple_31)); PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_k_tuple_31)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_31)); __Pyx_INCREF(((PyObject *)__pyx_v_strides)); PyTuple_SET_ITEM(__pyx_t_5, 1, ((PyObject *)__pyx_v_strides)); __Pyx_GIVEREF(((PyObject *)__pyx_v_strides)); __pyx_t_36 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_36)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 587; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_36); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; /* "nipy/algorithms/statistics/intvol.pyx":588 * cube_with_strides_center((1,0,0), strides), * cube_with_strides_center((1,0,1), strides), * cube_with_strides_center((1,1,0), strides), # <<<<<<<<<<<<<< * cube_with_strides_center((1,1,1), strides)]) * c = cube_with_strides_center((0,0,0), strides) */ __pyx_t_5 = __Pyx_GetName(__pyx_m, __pyx_n_s_8); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(((PyObject *)__pyx_k_tuple_32)); PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_k_tuple_32)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_32)); __Pyx_INCREF(((PyObject *)__pyx_v_strides)); PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)__pyx_v_strides)); __Pyx_GIVEREF(((PyObject *)__pyx_v_strides)); __pyx_t_37 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_37)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_37); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; /* "nipy/algorithms/statistics/intvol.pyx":589 * cube_with_strides_center((1,0,1), strides), * cube_with_strides_center((1,1,0), strides), * cube_with_strides_center((1,1,1), strides)]) # <<<<<<<<<<<<<< * c = cube_with_strides_center((0,0,0), strides) * m4 = np.array(list(c[4].difference(union[4]))) */ __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s_8); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 589; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 589; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(((PyObject *)__pyx_k_tuple_33)); PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_k_tuple_33)); __Pyx_GIVEREF(((PyObject 
*)__pyx_k_tuple_33)); __Pyx_INCREF(((PyObject *)__pyx_v_strides)); PyTuple_SET_ITEM(__pyx_t_5, 1, ((PyObject *)__pyx_v_strides)); __Pyx_GIVEREF(((PyObject *)__pyx_v_strides)); __pyx_t_38 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_38)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 589; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_38); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; __pyx_t_5 = PyList_New(7); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 583; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); PyList_SET_ITEM(__pyx_t_5, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); PyList_SET_ITEM(__pyx_t_5, 1, __pyx_t_20); __Pyx_GIVEREF(__pyx_t_20); PyList_SET_ITEM(__pyx_t_5, 2, __pyx_t_34); __Pyx_GIVEREF(__pyx_t_34); PyList_SET_ITEM(__pyx_t_5, 3, __pyx_t_35); __Pyx_GIVEREF(__pyx_t_35); PyList_SET_ITEM(__pyx_t_5, 4, __pyx_t_36); __Pyx_GIVEREF(__pyx_t_36); PyList_SET_ITEM(__pyx_t_5, 5, __pyx_t_37); __Pyx_GIVEREF(__pyx_t_37); PyList_SET_ITEM(__pyx_t_5, 6, __pyx_t_38); __Pyx_GIVEREF(__pyx_t_38); __pyx_t_3 = 0; __pyx_t_20 = 0; __pyx_t_34 = 0; __pyx_t_35 = 0; __pyx_t_36 = 0; __pyx_t_37 = 0; __pyx_t_38 = 0; __pyx_t_38 = PySequence_Tuple(((PyObject *)__pyx_t_5)); if (unlikely(!__pyx_t_38)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 583; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_38)); __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; __pyx_t_5 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_38), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 583; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_38)); __pyx_t_38 = 0; __pyx_v_union = __pyx_t_5; __pyx_t_5 = 0; /* "nipy/algorithms/statistics/intvol.pyx":590 * cube_with_strides_center((1,1,0), strides), * cube_with_strides_center((1,1,1), strides)]) * c = cube_with_strides_center((0,0,0), strides) # <<<<<<<<<<<<<< * m4 = np.array(list(c[4].difference(union[4]))) * m3 = np.array(list(c[3].difference(union[3]))) */ __pyx_t_5 = __Pyx_GetName(__pyx_m, __pyx_n_s_8); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 590; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_38 = PyTuple_New(2); if (unlikely(!__pyx_t_38)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 590; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_38); __Pyx_INCREF(((PyObject *)__pyx_k_tuple_34)); PyTuple_SET_ITEM(__pyx_t_38, 0, ((PyObject *)__pyx_k_tuple_34)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_34)); __Pyx_INCREF(((PyObject *)__pyx_v_strides)); PyTuple_SET_ITEM(__pyx_t_38, 1, ((PyObject *)__pyx_v_strides)); __Pyx_GIVEREF(((PyObject *)__pyx_v_strides)); __pyx_t_1 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_38), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 590; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_38)); __pyx_t_38 = 0; __pyx_v_c = __pyx_t_1; __pyx_t_1 = 0; /* "nipy/algorithms/statistics/intvol.pyx":591 * cube_with_strides_center((1,1,1), strides)]) * c = cube_with_strides_center((0,0,0), strides) * m4 = np.array(list(c[4].difference(union[4]))) # <<<<<<<<<<<<<< * m3 = np.array(list(c[3].difference(union[3]))) * m2 = 
np.array(list(c[2].difference(union[2]))) */ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 591; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_38 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__array); if (unlikely(!__pyx_t_38)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 591; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_38); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_c, 4, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 591; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__difference); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 591; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_union, 4, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 591; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_37 = PyTuple_New(1); if (unlikely(!__pyx_t_37)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 591; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_37); PyTuple_SET_ITEM(__pyx_t_37, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_37), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 591; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_37)); __pyx_t_37 = 0; __pyx_t_37 = PyTuple_New(1); if (unlikely(!__pyx_t_37)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 591; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_37); PyTuple_SET_ITEM(__pyx_t_37, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyObject_Call(((PyObject *)((PyObject*)(&PyList_Type))), ((PyObject *)__pyx_t_37), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 591; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(((PyObject *)__pyx_t_37)); __pyx_t_37 = 0; __pyx_t_37 = PyTuple_New(1); if (unlikely(!__pyx_t_37)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 591; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_37); PyTuple_SET_ITEM(__pyx_t_37, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyObject_Call(__pyx_t_38, ((PyObject *)__pyx_t_37), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 591; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_38); __pyx_t_38 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_37)); __pyx_t_37 = 0; if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 591; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_39 = ((PyArrayObject *)__pyx_t_1); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_m4.rcbuffer->pybuffer); __pyx_t_8 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_m4.rcbuffer->pybuffer, (PyObject*)__pyx_t_39, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack); if (unlikely(__pyx_t_8 < 0)) { PyErr_Fetch(&__pyx_t_11, &__pyx_t_10, &__pyx_t_9); if 
(unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_m4.rcbuffer->pybuffer, (PyObject*)__pyx_v_m4, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_9); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_11, __pyx_t_10, __pyx_t_9); } } __pyx_pybuffernd_m4.diminfo[0].strides = __pyx_pybuffernd_m4.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_m4.diminfo[0].shape = __pyx_pybuffernd_m4.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_m4.diminfo[1].strides = __pyx_pybuffernd_m4.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_m4.diminfo[1].shape = __pyx_pybuffernd_m4.rcbuffer->pybuffer.shape[1]; if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 591; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_39 = 0; __pyx_v_m4 = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/statistics/intvol.pyx":592 * c = cube_with_strides_center((0,0,0), strides) * m4 = np.array(list(c[4].difference(union[4]))) * m3 = np.array(list(c[3].difference(union[3]))) # <<<<<<<<<<<<<< * m2 = np.array(list(c[2].difference(union[2]))) * */ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 592; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_37 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__array); if (unlikely(!__pyx_t_37)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 592; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_37); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_c, 3, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 592; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_38 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__difference); if (unlikely(!__pyx_t_38)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 592; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_38); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_union, 3, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 592; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 592; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyObject_Call(__pyx_t_38, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 592; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_38); __pyx_t_38 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 592; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyObject_Call(((PyObject *)((PyObject*)(&PyList_Type))), ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 592; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 592; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyObject_Call(__pyx_t_37, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 592; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_37); __pyx_t_37 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 592; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_40 = ((PyArrayObject *)__pyx_t_1); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_m3.rcbuffer->pybuffer); __pyx_t_8 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_m3.rcbuffer->pybuffer, (PyObject*)__pyx_t_40, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack); if (unlikely(__pyx_t_8 < 0)) { PyErr_Fetch(&__pyx_t_9, &__pyx_t_10, &__pyx_t_11); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_m3.rcbuffer->pybuffer, (PyObject*)__pyx_v_m3, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_11); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_9, __pyx_t_10, __pyx_t_11); } } __pyx_pybuffernd_m3.diminfo[0].strides = __pyx_pybuffernd_m3.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_m3.diminfo[0].shape = __pyx_pybuffernd_m3.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_m3.diminfo[1].strides = __pyx_pybuffernd_m3.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_m3.diminfo[1].shape = __pyx_pybuffernd_m3.rcbuffer->pybuffer.shape[1]; if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 592; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_40 = 0; __pyx_v_m3 = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/statistics/intvol.pyx":593 * m4 = np.array(list(c[4].difference(union[4]))) * m3 = np.array(list(c[3].difference(union[3]))) * m2 = np.array(list(c[2].difference(union[2]))) # <<<<<<<<<<<<<< * * d4 = np.array([[_convert_stride3(v, strides, (4,2,1)) for v in m4[i]] for i in range(m4.shape[0])]) */ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 593; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__array); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 593; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_c, 2, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 593; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_37 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__difference); if (unlikely(!__pyx_t_37)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 593; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_37); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_union, 2, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 593; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_38 = PyTuple_New(1); if 
(unlikely(!__pyx_t_38)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 593; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_38); PyTuple_SET_ITEM(__pyx_t_38, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyObject_Call(__pyx_t_37, ((PyObject *)__pyx_t_38), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 593; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_37); __pyx_t_37 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_38)); __pyx_t_38 = 0; __pyx_t_38 = PyTuple_New(1); if (unlikely(!__pyx_t_38)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 593; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_38); PyTuple_SET_ITEM(__pyx_t_38, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyObject_Call(((PyObject *)((PyObject*)(&PyList_Type))), ((PyObject *)__pyx_t_38), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 593; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(((PyObject *)__pyx_t_38)); __pyx_t_38 = 0; __pyx_t_38 = PyTuple_New(1); if (unlikely(!__pyx_t_38)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 593; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_38); PyTuple_SET_ITEM(__pyx_t_38, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_38), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 593; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_38)); __pyx_t_38 = 0; if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 593; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_41 = ((PyArrayObject *)__pyx_t_1); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_m2.rcbuffer->pybuffer); __pyx_t_8 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_m2.rcbuffer->pybuffer, (PyObject*)__pyx_t_41, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack); if (unlikely(__pyx_t_8 < 0)) { PyErr_Fetch(&__pyx_t_11, &__pyx_t_10, &__pyx_t_9); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_m2.rcbuffer->pybuffer, (PyObject*)__pyx_v_m2, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_9); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_11, __pyx_t_10, __pyx_t_9); } } __pyx_pybuffernd_m2.diminfo[0].strides = __pyx_pybuffernd_m2.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_m2.diminfo[0].shape = __pyx_pybuffernd_m2.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_m2.diminfo[1].strides = __pyx_pybuffernd_m2.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_m2.diminfo[1].shape = __pyx_pybuffernd_m2.rcbuffer->pybuffer.shape[1]; if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 593; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_41 = 0; __pyx_v_m2 = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/statistics/intvol.pyx":595 * m2 = np.array(list(c[2].difference(union[2]))) * * d4 = np.array([[_convert_stride3(v, strides, (4,2,1)) for v in m4[i]] for i in range(m4.shape[0])]) # <<<<<<<<<<<<<< * d4 = np.hstack([m4, d4]) * ds4 = d4.shape[0] */ __pyx_t_1 = 
__Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 595; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_38 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__array); if (unlikely(!__pyx_t_38)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 595; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_38); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 595; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_16 = (__pyx_v_m4->dimensions[0]); for (__pyx_t_15 = 0; __pyx_t_15 < __pyx_t_16; __pyx_t_15+=1) { __pyx_v_i = __pyx_t_15; __pyx_t_5 = PyList_New(0); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 595; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_37 = __Pyx_GetItemInt(((PyObject *)__pyx_v_m4), __pyx_v_i, sizeof(npy_intp), __Pyx_PyInt_to_py_Py_intptr_t); if (!__pyx_t_37) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 595; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_37); if (PyList_CheckExact(__pyx_t_37) || PyTuple_CheckExact(__pyx_t_37)) { __pyx_t_36 = __pyx_t_37; __Pyx_INCREF(__pyx_t_36); __pyx_t_42 = 0; __pyx_t_43 = NULL; } else { __pyx_t_42 = -1; __pyx_t_36 = PyObject_GetIter(__pyx_t_37); if (unlikely(!__pyx_t_36)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 595; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_36); __pyx_t_43 = Py_TYPE(__pyx_t_36)->tp_iternext; } __Pyx_DECREF(__pyx_t_37); __pyx_t_37 = 0; for (;;) { if (!__pyx_t_43 && PyList_CheckExact(__pyx_t_36)) { if (__pyx_t_42 >= PyList_GET_SIZE(__pyx_t_36)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_37 = PyList_GET_ITEM(__pyx_t_36, __pyx_t_42); __Pyx_INCREF(__pyx_t_37); __pyx_t_42++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 595; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else __pyx_t_37 = PySequence_ITEM(__pyx_t_36, __pyx_t_42); __pyx_t_42++; if (unlikely(!__pyx_t_37)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 595; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else if (!__pyx_t_43 && PyTuple_CheckExact(__pyx_t_36)) { if (__pyx_t_42 >= PyTuple_GET_SIZE(__pyx_t_36)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_37 = PyTuple_GET_ITEM(__pyx_t_36, __pyx_t_42); __Pyx_INCREF(__pyx_t_37); __pyx_t_42++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 595; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else __pyx_t_37 = PySequence_ITEM(__pyx_t_36, __pyx_t_42); __pyx_t_42++; if (unlikely(!__pyx_t_37)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 595; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else { __pyx_t_37 = __pyx_t_43(__pyx_t_36); if (unlikely(!__pyx_t_37)) { if (PyErr_Occurred()) { if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) PyErr_Clear(); else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 595; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } break; } __Pyx_GOTREF(__pyx_t_37); } __Pyx_XDECREF(__pyx_v_v); __pyx_v_v = __pyx_t_37; __pyx_t_37 = 0; __pyx_t_37 = __Pyx_GetName(__pyx_m, __pyx_n_s___convert_stride3); if (unlikely(!__pyx_t_37)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 595; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_37); __pyx_t_35 = PyTuple_New(3); if (unlikely(!__pyx_t_35)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 595; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
__Pyx_GOTREF(__pyx_t_35); __Pyx_INCREF(__pyx_v_v); PyTuple_SET_ITEM(__pyx_t_35, 0, __pyx_v_v); __Pyx_GIVEREF(__pyx_v_v); __Pyx_INCREF(((PyObject *)__pyx_v_strides)); PyTuple_SET_ITEM(__pyx_t_35, 1, ((PyObject *)__pyx_v_strides)); __Pyx_GIVEREF(((PyObject *)__pyx_v_strides)); __Pyx_INCREF(((PyObject *)__pyx_k_tuple_35)); PyTuple_SET_ITEM(__pyx_t_35, 2, ((PyObject *)__pyx_k_tuple_35)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_35)); __pyx_t_34 = PyObject_Call(__pyx_t_37, ((PyObject *)__pyx_t_35), NULL); if (unlikely(!__pyx_t_34)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 595; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_34); __Pyx_DECREF(__pyx_t_37); __pyx_t_37 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_35)); __pyx_t_35 = 0; if (unlikely(__Pyx_PyList_Append(__pyx_t_5, (PyObject*)__pyx_t_34))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 595; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_34); __pyx_t_34 = 0; } __Pyx_DECREF(__pyx_t_36); __pyx_t_36 = 0; if (unlikely(__Pyx_PyList_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 595; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; } __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 595; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(((PyObject *)__pyx_t_1)); PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_t_1)); __Pyx_GIVEREF(((PyObject *)__pyx_t_1)); __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __pyx_t_1 = PyObject_Call(__pyx_t_38, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 595; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_38); __pyx_t_38 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 595; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_44 = ((PyArrayObject *)__pyx_t_1); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_d4.rcbuffer->pybuffer); __pyx_t_8 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_d4.rcbuffer->pybuffer, (PyObject*)__pyx_t_44, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack); if (unlikely(__pyx_t_8 < 0)) { PyErr_Fetch(&__pyx_t_9, &__pyx_t_10, &__pyx_t_11); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_d4.rcbuffer->pybuffer, (PyObject*)__pyx_v_d4, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_11); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_9, __pyx_t_10, __pyx_t_11); } } __pyx_pybuffernd_d4.diminfo[0].strides = __pyx_pybuffernd_d4.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_d4.diminfo[0].shape = __pyx_pybuffernd_d4.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_d4.diminfo[1].strides = __pyx_pybuffernd_d4.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_d4.diminfo[1].shape = __pyx_pybuffernd_d4.rcbuffer->pybuffer.shape[1]; if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 595; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_44 = 0; __pyx_v_d4 = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/statistics/intvol.pyx":596 * * d4 = 
np.array([[_convert_stride3(v, strides, (4,2,1)) for v in m4[i]] for i in range(m4.shape[0])]) * d4 = np.hstack([m4, d4]) # <<<<<<<<<<<<<< * ds4 = d4.shape[0] * */ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 596; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__hstack); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 596; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyList_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 596; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)__pyx_v_m4)); PyList_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_v_m4)); __Pyx_GIVEREF(((PyObject *)__pyx_v_m4)); __Pyx_INCREF(((PyObject *)__pyx_v_d4)); PyList_SET_ITEM(__pyx_t_1, 1, ((PyObject *)__pyx_v_d4)); __Pyx_GIVEREF(((PyObject *)__pyx_v_d4)); __pyx_t_38 = PyTuple_New(1); if (unlikely(!__pyx_t_38)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 596; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_38); PyTuple_SET_ITEM(__pyx_t_38, 0, ((PyObject *)__pyx_t_1)); __Pyx_GIVEREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __pyx_t_1 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_38), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 596; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_38)); __pyx_t_38 = 0; if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 596; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_44 = ((PyArrayObject *)__pyx_t_1); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_d4.rcbuffer->pybuffer); __pyx_t_8 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_d4.rcbuffer->pybuffer, (PyObject*)__pyx_t_44, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack); if (unlikely(__pyx_t_8 < 0)) { PyErr_Fetch(&__pyx_t_11, &__pyx_t_10, &__pyx_t_9); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_d4.rcbuffer->pybuffer, (PyObject*)__pyx_v_d4, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_9); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_11, __pyx_t_10, __pyx_t_9); } } __pyx_pybuffernd_d4.diminfo[0].strides = __pyx_pybuffernd_d4.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_d4.diminfo[0].shape = __pyx_pybuffernd_d4.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_d4.diminfo[1].strides = __pyx_pybuffernd_d4.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_d4.diminfo[1].shape = __pyx_pybuffernd_d4.rcbuffer->pybuffer.shape[1]; if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 596; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_44 = 0; __Pyx_DECREF(((PyObject *)__pyx_v_d4)); __pyx_v_d4 = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/statistics/intvol.pyx":597 * d4 = np.array([[_convert_stride3(v, strides, (4,2,1)) for v in m4[i]] for i in range(m4.shape[0])]) * d4 = np.hstack([m4, d4]) * ds4 = d4.shape[0] # <<<<<<<<<<<<<< * * d3 = np.array([[_convert_stride3(v, strides, (4,2,1)) for v 
in m3[i]] for i in range(m3.shape[0])]) */ __pyx_v_ds4 = (__pyx_v_d4->dimensions[0]); /* "nipy/algorithms/statistics/intvol.pyx":599 * ds4 = d4.shape[0] * * d3 = np.array([[_convert_stride3(v, strides, (4,2,1)) for v in m3[i]] for i in range(m3.shape[0])]) # <<<<<<<<<<<<<< * d3 = np.hstack([m3, d3]) * ds3 = d3.shape[0] */ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 599; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_38 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__array); if (unlikely(!__pyx_t_38)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 599; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_38); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 599; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_16 = (__pyx_v_m3->dimensions[0]); for (__pyx_t_15 = 0; __pyx_t_15 < __pyx_t_16; __pyx_t_15+=1) { __pyx_v_i = __pyx_t_15; __pyx_t_5 = PyList_New(0); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 599; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_36 = __Pyx_GetItemInt(((PyObject *)__pyx_v_m3), __pyx_v_i, sizeof(npy_intp), __Pyx_PyInt_to_py_Py_intptr_t); if (!__pyx_t_36) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 599; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_36); if (PyList_CheckExact(__pyx_t_36) || PyTuple_CheckExact(__pyx_t_36)) { __pyx_t_34 = __pyx_t_36; __Pyx_INCREF(__pyx_t_34); __pyx_t_42 = 0; __pyx_t_43 = NULL; } else { __pyx_t_42 = -1; __pyx_t_34 = PyObject_GetIter(__pyx_t_36); if (unlikely(!__pyx_t_34)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 599; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_34); __pyx_t_43 = Py_TYPE(__pyx_t_34)->tp_iternext; } __Pyx_DECREF(__pyx_t_36); __pyx_t_36 = 0; for (;;) { if (!__pyx_t_43 && PyList_CheckExact(__pyx_t_34)) { if (__pyx_t_42 >= PyList_GET_SIZE(__pyx_t_34)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_36 = PyList_GET_ITEM(__pyx_t_34, __pyx_t_42); __Pyx_INCREF(__pyx_t_36); __pyx_t_42++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 599; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else __pyx_t_36 = PySequence_ITEM(__pyx_t_34, __pyx_t_42); __pyx_t_42++; if (unlikely(!__pyx_t_36)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 599; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else if (!__pyx_t_43 && PyTuple_CheckExact(__pyx_t_34)) { if (__pyx_t_42 >= PyTuple_GET_SIZE(__pyx_t_34)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_36 = PyTuple_GET_ITEM(__pyx_t_34, __pyx_t_42); __Pyx_INCREF(__pyx_t_36); __pyx_t_42++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 599; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else __pyx_t_36 = PySequence_ITEM(__pyx_t_34, __pyx_t_42); __pyx_t_42++; if (unlikely(!__pyx_t_36)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 599; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else { __pyx_t_36 = __pyx_t_43(__pyx_t_34); if (unlikely(!__pyx_t_36)) { if (PyErr_Occurred()) { if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) PyErr_Clear(); else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 599; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } break; } __Pyx_GOTREF(__pyx_t_36); } __Pyx_XDECREF(__pyx_v_v); __pyx_v_v = __pyx_t_36; __pyx_t_36 = 0; __pyx_t_36 = __Pyx_GetName(__pyx_m, 
__pyx_n_s___convert_stride3); if (unlikely(!__pyx_t_36)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 599; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_36); __pyx_t_35 = PyTuple_New(3); if (unlikely(!__pyx_t_35)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 599; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_35); __Pyx_INCREF(__pyx_v_v); PyTuple_SET_ITEM(__pyx_t_35, 0, __pyx_v_v); __Pyx_GIVEREF(__pyx_v_v); __Pyx_INCREF(((PyObject *)__pyx_v_strides)); PyTuple_SET_ITEM(__pyx_t_35, 1, ((PyObject *)__pyx_v_strides)); __Pyx_GIVEREF(((PyObject *)__pyx_v_strides)); __Pyx_INCREF(((PyObject *)__pyx_k_tuple_36)); PyTuple_SET_ITEM(__pyx_t_35, 2, ((PyObject *)__pyx_k_tuple_36)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_36)); __pyx_t_37 = PyObject_Call(__pyx_t_36, ((PyObject *)__pyx_t_35), NULL); if (unlikely(!__pyx_t_37)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 599; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_37); __Pyx_DECREF(__pyx_t_36); __pyx_t_36 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_35)); __pyx_t_35 = 0; if (unlikely(__Pyx_PyList_Append(__pyx_t_5, (PyObject*)__pyx_t_37))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 599; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_37); __pyx_t_37 = 0; } __Pyx_DECREF(__pyx_t_34); __pyx_t_34 = 0; if (unlikely(__Pyx_PyList_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 599; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; } __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 599; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(((PyObject *)__pyx_t_1)); PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_t_1)); __Pyx_GIVEREF(((PyObject *)__pyx_t_1)); __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __pyx_t_1 = PyObject_Call(__pyx_t_38, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 599; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_38); __pyx_t_38 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 599; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_45 = ((PyArrayObject *)__pyx_t_1); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_d3.rcbuffer->pybuffer); __pyx_t_8 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_d3.rcbuffer->pybuffer, (PyObject*)__pyx_t_45, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack); if (unlikely(__pyx_t_8 < 0)) { PyErr_Fetch(&__pyx_t_9, &__pyx_t_10, &__pyx_t_11); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_d3.rcbuffer->pybuffer, (PyObject*)__pyx_v_d3, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_11); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_9, __pyx_t_10, __pyx_t_11); } } __pyx_pybuffernd_d3.diminfo[0].strides = __pyx_pybuffernd_d3.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_d3.diminfo[0].shape = __pyx_pybuffernd_d3.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_d3.diminfo[1].strides = __pyx_pybuffernd_d3.rcbuffer->pybuffer.strides[1]; 
__pyx_pybuffernd_d3.diminfo[1].shape = __pyx_pybuffernd_d3.rcbuffer->pybuffer.shape[1]; if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 599; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_45 = 0; __pyx_v_d3 = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/statistics/intvol.pyx":600 * * d3 = np.array([[_convert_stride3(v, strides, (4,2,1)) for v in m3[i]] for i in range(m3.shape[0])]) * d3 = np.hstack([m3, d3]) # <<<<<<<<<<<<<< * ds3 = d3.shape[0] * */ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 600; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__hstack); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 600; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyList_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 600; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)__pyx_v_m3)); PyList_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_v_m3)); __Pyx_GIVEREF(((PyObject *)__pyx_v_m3)); __Pyx_INCREF(((PyObject *)__pyx_v_d3)); PyList_SET_ITEM(__pyx_t_1, 1, ((PyObject *)__pyx_v_d3)); __Pyx_GIVEREF(((PyObject *)__pyx_v_d3)); __pyx_t_38 = PyTuple_New(1); if (unlikely(!__pyx_t_38)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 600; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_38); PyTuple_SET_ITEM(__pyx_t_38, 0, ((PyObject *)__pyx_t_1)); __Pyx_GIVEREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __pyx_t_1 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_38), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 600; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_38)); __pyx_t_38 = 0; if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 600; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_45 = ((PyArrayObject *)__pyx_t_1); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_d3.rcbuffer->pybuffer); __pyx_t_8 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_d3.rcbuffer->pybuffer, (PyObject*)__pyx_t_45, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack); if (unlikely(__pyx_t_8 < 0)) { PyErr_Fetch(&__pyx_t_11, &__pyx_t_10, &__pyx_t_9); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_d3.rcbuffer->pybuffer, (PyObject*)__pyx_v_d3, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_9); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_11, __pyx_t_10, __pyx_t_9); } } __pyx_pybuffernd_d3.diminfo[0].strides = __pyx_pybuffernd_d3.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_d3.diminfo[0].shape = __pyx_pybuffernd_d3.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_d3.diminfo[1].strides = __pyx_pybuffernd_d3.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_d3.diminfo[1].shape = __pyx_pybuffernd_d3.rcbuffer->pybuffer.shape[1]; if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 600; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_45 = 0; __Pyx_DECREF(((PyObject 
*)__pyx_v_d3)); __pyx_v_d3 = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/statistics/intvol.pyx":601 * d3 = np.array([[_convert_stride3(v, strides, (4,2,1)) for v in m3[i]] for i in range(m3.shape[0])]) * d3 = np.hstack([m3, d3]) * ds3 = d3.shape[0] # <<<<<<<<<<<<<< * * d2 = np.array([[_convert_stride3(v, strides, (4,2,1)) for v in m2[i]] for i in range(m2.shape[0])]) */ __pyx_v_ds3 = (__pyx_v_d3->dimensions[0]); /* "nipy/algorithms/statistics/intvol.pyx":603 * ds3 = d3.shape[0] * * d2 = np.array([[_convert_stride3(v, strides, (4,2,1)) for v in m2[i]] for i in range(m2.shape[0])]) # <<<<<<<<<<<<<< * d2 = np.hstack([m2, d2]) * ds2 = d2.shape[0] */ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 603; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_38 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__array); if (unlikely(!__pyx_t_38)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 603; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_38); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 603; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_16 = (__pyx_v_m2->dimensions[0]); for (__pyx_t_15 = 0; __pyx_t_15 < __pyx_t_16; __pyx_t_15+=1) { __pyx_v_i = __pyx_t_15; __pyx_t_5 = PyList_New(0); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 603; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_34 = __Pyx_GetItemInt(((PyObject *)__pyx_v_m2), __pyx_v_i, sizeof(npy_intp), __Pyx_PyInt_to_py_Py_intptr_t); if (!__pyx_t_34) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 603; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_34); if (PyList_CheckExact(__pyx_t_34) || PyTuple_CheckExact(__pyx_t_34)) { __pyx_t_37 = __pyx_t_34; __Pyx_INCREF(__pyx_t_37); __pyx_t_42 = 0; __pyx_t_43 = NULL; } else { __pyx_t_42 = -1; __pyx_t_37 = PyObject_GetIter(__pyx_t_34); if (unlikely(!__pyx_t_37)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 603; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_37); __pyx_t_43 = Py_TYPE(__pyx_t_37)->tp_iternext; } __Pyx_DECREF(__pyx_t_34); __pyx_t_34 = 0; for (;;) { if (!__pyx_t_43 && PyList_CheckExact(__pyx_t_37)) { if (__pyx_t_42 >= PyList_GET_SIZE(__pyx_t_37)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_34 = PyList_GET_ITEM(__pyx_t_37, __pyx_t_42); __Pyx_INCREF(__pyx_t_34); __pyx_t_42++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 603; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else __pyx_t_34 = PySequence_ITEM(__pyx_t_37, __pyx_t_42); __pyx_t_42++; if (unlikely(!__pyx_t_34)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 603; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else if (!__pyx_t_43 && PyTuple_CheckExact(__pyx_t_37)) { if (__pyx_t_42 >= PyTuple_GET_SIZE(__pyx_t_37)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_34 = PyTuple_GET_ITEM(__pyx_t_37, __pyx_t_42); __Pyx_INCREF(__pyx_t_34); __pyx_t_42++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 603; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else __pyx_t_34 = PySequence_ITEM(__pyx_t_37, __pyx_t_42); __pyx_t_42++; if (unlikely(!__pyx_t_34)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 603; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else { __pyx_t_34 = __pyx_t_43(__pyx_t_37); if (unlikely(!__pyx_t_34)) { if 
(PyErr_Occurred()) { if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) PyErr_Clear(); else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 603; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } break; } __Pyx_GOTREF(__pyx_t_34); } __Pyx_XDECREF(__pyx_v_v); __pyx_v_v = __pyx_t_34; __pyx_t_34 = 0; __pyx_t_34 = __Pyx_GetName(__pyx_m, __pyx_n_s___convert_stride3); if (unlikely(!__pyx_t_34)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 603; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_34); __pyx_t_35 = PyTuple_New(3); if (unlikely(!__pyx_t_35)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 603; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_35); __Pyx_INCREF(__pyx_v_v); PyTuple_SET_ITEM(__pyx_t_35, 0, __pyx_v_v); __Pyx_GIVEREF(__pyx_v_v); __Pyx_INCREF(((PyObject *)__pyx_v_strides)); PyTuple_SET_ITEM(__pyx_t_35, 1, ((PyObject *)__pyx_v_strides)); __Pyx_GIVEREF(((PyObject *)__pyx_v_strides)); __Pyx_INCREF(((PyObject *)__pyx_k_tuple_37)); PyTuple_SET_ITEM(__pyx_t_35, 2, ((PyObject *)__pyx_k_tuple_37)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_37)); __pyx_t_36 = PyObject_Call(__pyx_t_34, ((PyObject *)__pyx_t_35), NULL); if (unlikely(!__pyx_t_36)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 603; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_36); __Pyx_DECREF(__pyx_t_34); __pyx_t_34 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_35)); __pyx_t_35 = 0; if (unlikely(__Pyx_PyList_Append(__pyx_t_5, (PyObject*)__pyx_t_36))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 603; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_36); __pyx_t_36 = 0; } __Pyx_DECREF(__pyx_t_37); __pyx_t_37 = 0; if (unlikely(__Pyx_PyList_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 603; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; } __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 603; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(((PyObject *)__pyx_t_1)); PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_t_1)); __Pyx_GIVEREF(((PyObject *)__pyx_t_1)); __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __pyx_t_1 = PyObject_Call(__pyx_t_38, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 603; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_38); __pyx_t_38 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 603; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_46 = ((PyArrayObject *)__pyx_t_1); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_d2.rcbuffer->pybuffer); __pyx_t_8 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_d2.rcbuffer->pybuffer, (PyObject*)__pyx_t_46, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack); if (unlikely(__pyx_t_8 < 0)) { PyErr_Fetch(&__pyx_t_9, &__pyx_t_10, &__pyx_t_11); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_d2.rcbuffer->pybuffer, (PyObject*)__pyx_v_d2, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_11); __Pyx_RaiseBufferFallbackError(); } else { 
PyErr_Restore(__pyx_t_9, __pyx_t_10, __pyx_t_11); } } __pyx_pybuffernd_d2.diminfo[0].strides = __pyx_pybuffernd_d2.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_d2.diminfo[0].shape = __pyx_pybuffernd_d2.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_d2.diminfo[1].strides = __pyx_pybuffernd_d2.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_d2.diminfo[1].shape = __pyx_pybuffernd_d2.rcbuffer->pybuffer.shape[1]; if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 603; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_46 = 0; __pyx_v_d2 = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/statistics/intvol.pyx":604 * * d2 = np.array([[_convert_stride3(v, strides, (4,2,1)) for v in m2[i]] for i in range(m2.shape[0])]) * d2 = np.hstack([m2, d2]) # <<<<<<<<<<<<<< * ds2 = d2.shape[0] * */ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 604; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__hstack); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 604; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyList_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 604; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)__pyx_v_m2)); PyList_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_v_m2)); __Pyx_GIVEREF(((PyObject *)__pyx_v_m2)); __Pyx_INCREF(((PyObject *)__pyx_v_d2)); PyList_SET_ITEM(__pyx_t_1, 1, ((PyObject *)__pyx_v_d2)); __Pyx_GIVEREF(((PyObject *)__pyx_v_d2)); __pyx_t_38 = PyTuple_New(1); if (unlikely(!__pyx_t_38)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 604; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_38); PyTuple_SET_ITEM(__pyx_t_38, 0, ((PyObject *)__pyx_t_1)); __Pyx_GIVEREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __pyx_t_1 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_38), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 604; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_38)); __pyx_t_38 = 0; if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 604; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_46 = ((PyArrayObject *)__pyx_t_1); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_d2.rcbuffer->pybuffer); __pyx_t_8 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_d2.rcbuffer->pybuffer, (PyObject*)__pyx_t_46, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack); if (unlikely(__pyx_t_8 < 0)) { PyErr_Fetch(&__pyx_t_11, &__pyx_t_10, &__pyx_t_9); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_d2.rcbuffer->pybuffer, (PyObject*)__pyx_v_d2, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_9); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_11, __pyx_t_10, __pyx_t_9); } } __pyx_pybuffernd_d2.diminfo[0].strides = __pyx_pybuffernd_d2.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_d2.diminfo[0].shape = __pyx_pybuffernd_d2.rcbuffer->pybuffer.shape[0]; 
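/* The remainder of this block finishes d2 (intvol.pyx lines 604-605) and then
 * expands lines 607-616, which in the embedded source read:
 *
 *     nvox = mask.size
 *     D = np.zeros((8,8))
 *     for i in range(s0-1):
 *         for j in range(s1-1):
 *             for k in range(s2-1):
 *                 pindex = i*ss0 + j*ss1 + k*ss2
 *                 index  = i*ss0d + j*ss1d + k*ss2d
 */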
__pyx_pybuffernd_d2.diminfo[1].strides = __pyx_pybuffernd_d2.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_d2.diminfo[1].shape = __pyx_pybuffernd_d2.rcbuffer->pybuffer.shape[1]; if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 604; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_46 = 0; __Pyx_DECREF(((PyObject *)__pyx_v_d2)); __pyx_v_d2 = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/statistics/intvol.pyx":605 * d2 = np.array([[_convert_stride3(v, strides, (4,2,1)) for v in m2[i]] for i in range(m2.shape[0])]) * d2 = np.hstack([m2, d2]) * ds2 = d2.shape[0] # <<<<<<<<<<<<<< * * nvox = mask.size */ __pyx_v_ds2 = (__pyx_v_d2->dimensions[0]); /* "nipy/algorithms/statistics/intvol.pyx":607 * ds2 = d2.shape[0] * * nvox = mask.size # <<<<<<<<<<<<<< * * D = np.zeros((8,8)) */ __pyx_t_1 = PyObject_GetAttr(__pyx_v_mask, __pyx_n_s__size); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 607; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_16 = __Pyx_PyInt_from_py_Py_intptr_t(__pyx_t_1); if (unlikely((__pyx_t_16 == (npy_intp)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 607; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_nvox = __pyx_t_16; /* "nipy/algorithms/statistics/intvol.pyx":609 * nvox = mask.size * * D = np.zeros((8,8)) # <<<<<<<<<<<<<< * * for i in range(s0-1): */ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 609; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_38 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__zeros); if (unlikely(!__pyx_t_38)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 609; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_38); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyObject_Call(__pyx_t_38, ((PyObject *)__pyx_k_tuple_39), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 609; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_38); __pyx_t_38 = 0; if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 609; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_47 = ((PyArrayObject *)__pyx_t_1); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_D.rcbuffer->pybuffer); __pyx_t_8 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_D.rcbuffer->pybuffer, (PyObject*)__pyx_t_47, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack); if (unlikely(__pyx_t_8 < 0)) { PyErr_Fetch(&__pyx_t_9, &__pyx_t_10, &__pyx_t_11); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_D.rcbuffer->pybuffer, (PyObject*)__pyx_v_D, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_11); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_9, __pyx_t_10, __pyx_t_11); } } __pyx_pybuffernd_D.diminfo[0].strides = __pyx_pybuffernd_D.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_D.diminfo[0].shape = __pyx_pybuffernd_D.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_D.diminfo[1].strides = __pyx_pybuffernd_D.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_D.diminfo[1].shape = 
__pyx_pybuffernd_D.rcbuffer->pybuffer.shape[1]; if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 609; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_47 = 0; __pyx_v_D = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/statistics/intvol.pyx":611 * D = np.zeros((8,8)) * * for i in range(s0-1): # <<<<<<<<<<<<<< * for j in range(s1-1): * for k in range(s2-1): */ __pyx_t_48 = (__pyx_v_s0 - 1); for (__pyx_t_16 = 0; __pyx_t_16 < __pyx_t_48; __pyx_t_16+=1) { __pyx_v_i = __pyx_t_16; /* "nipy/algorithms/statistics/intvol.pyx":612 * * for i in range(s0-1): * for j in range(s1-1): # <<<<<<<<<<<<<< * for k in range(s2-1): * */ __pyx_t_49 = (__pyx_v_s1 - 1); for (__pyx_t_15 = 0; __pyx_t_15 < __pyx_t_49; __pyx_t_15+=1) { __pyx_v_j = __pyx_t_15; /* "nipy/algorithms/statistics/intvol.pyx":613 * for i in range(s0-1): * for j in range(s1-1): * for k in range(s2-1): # <<<<<<<<<<<<<< * * pindex = i*ss0+j*ss1+k*ss2 */ __pyx_t_50 = (__pyx_v_s2 - 1); for (__pyx_t_14 = 0; __pyx_t_14 < __pyx_t_50; __pyx_t_14+=1) { __pyx_v_k = __pyx_t_14; /* "nipy/algorithms/statistics/intvol.pyx":615 * for k in range(s2-1): * * pindex = i*ss0+j*ss1+k*ss2 # <<<<<<<<<<<<<< * index = i*ss0d+j*ss1d+k*ss2d * for r in range(8): */ __pyx_v_pindex = (((__pyx_v_i * __pyx_v_ss0) + (__pyx_v_j * __pyx_v_ss1)) + (__pyx_v_k * __pyx_v_ss2)); /* "nipy/algorithms/statistics/intvol.pyx":616 * * pindex = i*ss0+j*ss1+k*ss2 * index = i*ss0d+j*ss1d+k*ss2d # <<<<<<<<<<<<<< * for r in range(8): * rr = (index+cvertices[r]) % nvox */ __pyx_t_1 = __Pyx_PyInt_to_py_Py_intptr_t(__pyx_v_i); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 616; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_38 = PyNumber_Multiply(__pyx_t_1, __pyx_v_ss0d); if (unlikely(!__pyx_t_38)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 616; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_38); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyInt_to_py_Py_intptr_t(__pyx_v_j); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 616; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = PyNumber_Multiply(__pyx_t_1, __pyx_v_ss1d); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 616; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyNumber_Add(__pyx_t_38, __pyx_t_5); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 616; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_38); __pyx_t_38 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyInt_to_py_Py_intptr_t(__pyx_v_k); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 616; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_38 = PyNumber_Multiply(__pyx_t_5, __pyx_v_ss2d); if (unlikely(!__pyx_t_38)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 616; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_38); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyNumber_Add(__pyx_t_1, __pyx_t_38); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 616; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_38); __pyx_t_38 = 0; __pyx_t_51 = __Pyx_PyInt_from_py_Py_intptr_t(__pyx_t_5); if (unlikely((__pyx_t_51 == (npy_intp)-1) && 
PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 616; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_index = __pyx_t_51; /* "nipy/algorithms/statistics/intvol.pyx":617 * pindex = i*ss0+j*ss1+k*ss2 * index = i*ss0d+j*ss1d+k*ss2d * for r in range(8): # <<<<<<<<<<<<<< * rr = (index+cvertices[r]) % nvox * mr = fmask[rr] */ for (__pyx_t_51 = 0; __pyx_t_51 < 8; __pyx_t_51+=1) { __pyx_v_r = __pyx_t_51; /* "nipy/algorithms/statistics/intvol.pyx":618 * index = i*ss0d+j*ss1d+k*ss2d * for r in range(8): * rr = (index+cvertices[r]) % nvox # <<<<<<<<<<<<<< * mr = fmask[rr] * for s in range(r+1): */ __pyx_t_52 = __pyx_v_r; __pyx_t_8 = -1; if (__pyx_t_52 < 0) { __pyx_t_52 += __pyx_pybuffernd_cvertices.diminfo[0].shape; if (unlikely(__pyx_t_52 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_52 >= __pyx_pybuffernd_cvertices.diminfo[0].shape)) __pyx_t_8 = 0; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 618; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_8 = (__pyx_v_index + (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_cvertices.rcbuffer->pybuffer.buf, __pyx_t_52, __pyx_pybuffernd_cvertices.diminfo[0].strides))); if (unlikely(__pyx_v_nvox == 0)) { PyErr_Format(PyExc_ZeroDivisionError, "integer division or modulo by zero"); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 618; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_rr = __Pyx_mod_int(__pyx_t_8, __pyx_v_nvox); /* "nipy/algorithms/statistics/intvol.pyx":619 * for r in range(8): * rr = (index+cvertices[r]) % nvox * mr = fmask[rr] # <<<<<<<<<<<<<< * for s in range(r+1): * res = 0 */ __pyx_t_53 = __pyx_v_rr; __pyx_t_8 = -1; if (__pyx_t_53 < 0) { __pyx_t_53 += __pyx_pybuffernd_fmask.diminfo[0].shape; if (unlikely(__pyx_t_53 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_53 >= __pyx_pybuffernd_fmask.diminfo[0].shape)) __pyx_t_8 = 0; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 619; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_mr = (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_fmask.rcbuffer->pybuffer.buf, __pyx_t_53, __pyx_pybuffernd_fmask.diminfo[0].strides)); /* "nipy/algorithms/statistics/intvol.pyx":620 * rr = (index+cvertices[r]) % nvox * mr = fmask[rr] * for s in range(r+1): # <<<<<<<<<<<<<< * res = 0 * ss = (index+cvertices[s]) % nvox */ __pyx_t_54 = (__pyx_v_r + 1); for (__pyx_t_55 = 0; __pyx_t_55 < __pyx_t_54; __pyx_t_55+=1) { __pyx_v_s = __pyx_t_55; /* "nipy/algorithms/statistics/intvol.pyx":621 * mr = fmask[rr] * for s in range(r+1): * res = 0 # <<<<<<<<<<<<<< * ss = (index+cvertices[s]) % nvox * ms = fmask[ss] */ __pyx_v_res = 0.0; /* "nipy/algorithms/statistics/intvol.pyx":622 * for s in range(r+1): * res = 0 * ss = (index+cvertices[s]) % nvox # <<<<<<<<<<<<<< * ms = fmask[ss] * if mr * ms: */ __pyx_t_56 = __pyx_v_s; __pyx_t_8 = -1; if (__pyx_t_56 < 0) { __pyx_t_56 += __pyx_pybuffernd_cvertices.diminfo[0].shape; if (unlikely(__pyx_t_56 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_56 >= __pyx_pybuffernd_cvertices.diminfo[0].shape)) __pyx_t_8 = 0; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 622; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_8 = (__pyx_v_index + (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_cvertices.rcbuffer->pybuffer.buf, __pyx_t_56, 
__pyx_pybuffernd_cvertices.diminfo[0].strides))); if (unlikely(__pyx_v_nvox == 0)) { PyErr_Format(PyExc_ZeroDivisionError, "integer division or modulo by zero"); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 622; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_ss = __Pyx_mod_int(__pyx_t_8, __pyx_v_nvox); /* "nipy/algorithms/statistics/intvol.pyx":623 * res = 0 * ss = (index+cvertices[s]) % nvox * ms = fmask[ss] # <<<<<<<<<<<<<< * if mr * ms: * for l in range(fcoords.shape[0]): */ __pyx_t_57 = __pyx_v_ss; __pyx_t_8 = -1; if (__pyx_t_57 < 0) { __pyx_t_57 += __pyx_pybuffernd_fmask.diminfo[0].shape; if (unlikely(__pyx_t_57 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_57 >= __pyx_pybuffernd_fmask.diminfo[0].shape)) __pyx_t_8 = 0; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 623; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_ms = (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_fmask.rcbuffer->pybuffer.buf, __pyx_t_57, __pyx_pybuffernd_fmask.diminfo[0].strides)); /* "nipy/algorithms/statistics/intvol.pyx":624 * ss = (index+cvertices[s]) % nvox * ms = fmask[ss] * if mr * ms: # <<<<<<<<<<<<<< * for l in range(fcoords.shape[0]): * res += fcoords[l,ss] * fcoords[l,rr] */ __pyx_t_8 = (__pyx_v_mr * __pyx_v_ms); if (__pyx_t_8) { /* "nipy/algorithms/statistics/intvol.pyx":625 * ms = fmask[ss] * if mr * ms: * for l in range(fcoords.shape[0]): # <<<<<<<<<<<<<< * res += fcoords[l,ss] * fcoords[l,rr] * D[r,s] = res */ __pyx_t_58 = (__pyx_v_fcoords->dimensions[0]); for (__pyx_t_59 = 0; __pyx_t_59 < __pyx_t_58; __pyx_t_59+=1) { __pyx_v_l = __pyx_t_59; /* "nipy/algorithms/statistics/intvol.pyx":626 * if mr * ms: * for l in range(fcoords.shape[0]): * res += fcoords[l,ss] * fcoords[l,rr] # <<<<<<<<<<<<<< * D[r,s] = res * D[s,r] = res */ __pyx_t_60 = __pyx_v_l; __pyx_t_61 = __pyx_v_ss; __pyx_t_8 = -1; if (__pyx_t_60 < 0) { __pyx_t_60 += __pyx_pybuffernd_fcoords.diminfo[0].shape; if (unlikely(__pyx_t_60 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_60 >= __pyx_pybuffernd_fcoords.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_61 < 0) { __pyx_t_61 += __pyx_pybuffernd_fcoords.diminfo[1].shape; if (unlikely(__pyx_t_61 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_61 >= __pyx_pybuffernd_fcoords.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 626; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_62 = __pyx_v_l; __pyx_t_63 = __pyx_v_rr; __pyx_t_8 = -1; if (__pyx_t_62 < 0) { __pyx_t_62 += __pyx_pybuffernd_fcoords.diminfo[0].shape; if (unlikely(__pyx_t_62 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_62 >= __pyx_pybuffernd_fcoords.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_63 < 0) { __pyx_t_63 += __pyx_pybuffernd_fcoords.diminfo[1].shape; if (unlikely(__pyx_t_63 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_63 >= __pyx_pybuffernd_fcoords.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 626; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_res = (__pyx_v_res + ((*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_fcoords.rcbuffer->pybuffer.buf, __pyx_t_60, __pyx_pybuffernd_fcoords.diminfo[0].strides, __pyx_t_61, __pyx_pybuffernd_fcoords.diminfo[1].strides)) * (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_fcoords.rcbuffer->pybuffer.buf, 
__pyx_t_62, __pyx_pybuffernd_fcoords.diminfo[0].strides, __pyx_t_63, __pyx_pybuffernd_fcoords.diminfo[1].strides)))); } /* "nipy/algorithms/statistics/intvol.pyx":627 * for l in range(fcoords.shape[0]): * res += fcoords[l,ss] * fcoords[l,rr] * D[r,s] = res # <<<<<<<<<<<<<< * D[s,r] = res * else: */ __pyx_t_58 = __pyx_v_r; __pyx_t_59 = __pyx_v_s; __pyx_t_8 = -1; if (__pyx_t_58 < 0) { __pyx_t_58 += __pyx_pybuffernd_D.diminfo[0].shape; if (unlikely(__pyx_t_58 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_58 >= __pyx_pybuffernd_D.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_59 < 0) { __pyx_t_59 += __pyx_pybuffernd_D.diminfo[1].shape; if (unlikely(__pyx_t_59 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_59 >= __pyx_pybuffernd_D.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 627; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } *__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_58, __pyx_pybuffernd_D.diminfo[0].strides, __pyx_t_59, __pyx_pybuffernd_D.diminfo[1].strides) = __pyx_v_res; /* "nipy/algorithms/statistics/intvol.pyx":628 * res += fcoords[l,ss] * fcoords[l,rr] * D[r,s] = res * D[s,r] = res # <<<<<<<<<<<<<< * else: * D[r,s] = 0 */ __pyx_t_64 = __pyx_v_s; __pyx_t_65 = __pyx_v_r; __pyx_t_8 = -1; if (__pyx_t_64 < 0) { __pyx_t_64 += __pyx_pybuffernd_D.diminfo[0].shape; if (unlikely(__pyx_t_64 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_64 >= __pyx_pybuffernd_D.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_65 < 0) { __pyx_t_65 += __pyx_pybuffernd_D.diminfo[1].shape; if (unlikely(__pyx_t_65 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_65 >= __pyx_pybuffernd_D.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 628; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } *__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_64, __pyx_pybuffernd_D.diminfo[0].strides, __pyx_t_65, __pyx_pybuffernd_D.diminfo[1].strides) = __pyx_v_res; goto __pyx_L35; } /*else*/ { /* "nipy/algorithms/statistics/intvol.pyx":630 * D[s,r] = res * else: * D[r,s] = 0 # <<<<<<<<<<<<<< * D[s,r] = 0 * */ __pyx_t_66 = __pyx_v_r; __pyx_t_67 = __pyx_v_s; __pyx_t_8 = -1; if (__pyx_t_66 < 0) { __pyx_t_66 += __pyx_pybuffernd_D.diminfo[0].shape; if (unlikely(__pyx_t_66 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_66 >= __pyx_pybuffernd_D.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_67 < 0) { __pyx_t_67 += __pyx_pybuffernd_D.diminfo[1].shape; if (unlikely(__pyx_t_67 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_67 >= __pyx_pybuffernd_D.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 630; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } *__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_66, __pyx_pybuffernd_D.diminfo[0].strides, __pyx_t_67, __pyx_pybuffernd_D.diminfo[1].strides) = 0.0; /* "nipy/algorithms/statistics/intvol.pyx":631 * else: * D[r,s] = 0 * D[s,r] = 0 # <<<<<<<<<<<<<< * * for l in range(ds4): */ __pyx_t_68 = __pyx_v_s; __pyx_t_69 = __pyx_v_r; __pyx_t_8 = -1; if (__pyx_t_68 < 0) { __pyx_t_68 += __pyx_pybuffernd_D.diminfo[0].shape; if (unlikely(__pyx_t_68 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_68 >= __pyx_pybuffernd_D.diminfo[0].shape)) __pyx_t_8 
= 0; if (__pyx_t_69 < 0) { __pyx_t_69 += __pyx_pybuffernd_D.diminfo[1].shape; if (unlikely(__pyx_t_69 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_69 >= __pyx_pybuffernd_D.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 631; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } *__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_68, __pyx_pybuffernd_D.diminfo[0].strides, __pyx_t_69, __pyx_pybuffernd_D.diminfo[1].strides) = 0.0; } __pyx_L35:; } } /* "nipy/algorithms/statistics/intvol.pyx":633 * D[s,r] = 0 * * for l in range(ds4): # <<<<<<<<<<<<<< * v0 = pindex + d4[l,0] * w0 = d4[l,4] */ __pyx_t_51 = __pyx_v_ds4; for (__pyx_t_55 = 0; __pyx_t_55 < __pyx_t_51; __pyx_t_55+=1) { __pyx_v_l = __pyx_t_55; /* "nipy/algorithms/statistics/intvol.pyx":634 * * for l in range(ds4): * v0 = pindex + d4[l,0] # <<<<<<<<<<<<<< * w0 = d4[l,4] * m = fpmask[v0] */ __pyx_t_70 = __pyx_v_l; __pyx_t_54 = 0; __pyx_t_8 = -1; if (__pyx_t_70 < 0) { __pyx_t_70 += __pyx_pybuffernd_d4.diminfo[0].shape; if (unlikely(__pyx_t_70 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_70 >= __pyx_pybuffernd_d4.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_54 < 0) { __pyx_t_54 += __pyx_pybuffernd_d4.diminfo[1].shape; if (unlikely(__pyx_t_54 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_54 >= __pyx_pybuffernd_d4.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 634; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_v0 = (__pyx_v_pindex + (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_d4.rcbuffer->pybuffer.buf, __pyx_t_70, __pyx_pybuffernd_d4.diminfo[0].strides, __pyx_t_54, __pyx_pybuffernd_d4.diminfo[1].strides))); /* "nipy/algorithms/statistics/intvol.pyx":635 * for l in range(ds4): * v0 = pindex + d4[l,0] * w0 = d4[l,4] # <<<<<<<<<<<<<< * m = fpmask[v0] * if m: */ __pyx_t_71 = __pyx_v_l; __pyx_t_72 = 4; __pyx_t_8 = -1; if (__pyx_t_71 < 0) { __pyx_t_71 += __pyx_pybuffernd_d4.diminfo[0].shape; if (unlikely(__pyx_t_71 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_71 >= __pyx_pybuffernd_d4.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_72 < 0) { __pyx_t_72 += __pyx_pybuffernd_d4.diminfo[1].shape; if (unlikely(__pyx_t_72 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_72 >= __pyx_pybuffernd_d4.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 635; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_w0 = (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_d4.rcbuffer->pybuffer.buf, __pyx_t_71, __pyx_pybuffernd_d4.diminfo[0].strides, __pyx_t_72, __pyx_pybuffernd_d4.diminfo[1].strides)); /* "nipy/algorithms/statistics/intvol.pyx":636 * v0 = pindex + d4[l,0] * w0 = d4[l,4] * m = fpmask[v0] # <<<<<<<<<<<<<< * if m: * v1 = pindex + d4[l,1] */ __pyx_t_73 = __pyx_v_v0; __pyx_t_8 = -1; if (__pyx_t_73 < 0) { __pyx_t_73 += __pyx_pybuffernd_fpmask.diminfo[0].shape; if (unlikely(__pyx_t_73 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_73 >= __pyx_pybuffernd_fpmask.diminfo[0].shape)) __pyx_t_8 = 0; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 636; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_m = (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_intp_t *, 
__pyx_pybuffernd_fpmask.rcbuffer->pybuffer.buf, __pyx_t_73, __pyx_pybuffernd_fpmask.diminfo[0].strides)); /* "nipy/algorithms/statistics/intvol.pyx":637 * w0 = d4[l,4] * m = fpmask[v0] * if m: # <<<<<<<<<<<<<< * v1 = pindex + d4[l,1] * v2 = pindex + d4[l,2] */ if (__pyx_v_m) { /* "nipy/algorithms/statistics/intvol.pyx":638 * m = fpmask[v0] * if m: * v1 = pindex + d4[l,1] # <<<<<<<<<<<<<< * v2 = pindex + d4[l,2] * v3 = pindex + d4[l,3] */ __pyx_t_74 = __pyx_v_l; __pyx_t_75 = 1; __pyx_t_8 = -1; if (__pyx_t_74 < 0) { __pyx_t_74 += __pyx_pybuffernd_d4.diminfo[0].shape; if (unlikely(__pyx_t_74 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_74 >= __pyx_pybuffernd_d4.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_75 < 0) { __pyx_t_75 += __pyx_pybuffernd_d4.diminfo[1].shape; if (unlikely(__pyx_t_75 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_75 >= __pyx_pybuffernd_d4.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 638; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_v1 = (__pyx_v_pindex + (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_d4.rcbuffer->pybuffer.buf, __pyx_t_74, __pyx_pybuffernd_d4.diminfo[0].strides, __pyx_t_75, __pyx_pybuffernd_d4.diminfo[1].strides))); /* "nipy/algorithms/statistics/intvol.pyx":639 * if m: * v1 = pindex + d4[l,1] * v2 = pindex + d4[l,2] # <<<<<<<<<<<<<< * v3 = pindex + d4[l,3] * w1 = d4[l,5] */ __pyx_t_76 = __pyx_v_l; __pyx_t_77 = 2; __pyx_t_8 = -1; if (__pyx_t_76 < 0) { __pyx_t_76 += __pyx_pybuffernd_d4.diminfo[0].shape; if (unlikely(__pyx_t_76 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_76 >= __pyx_pybuffernd_d4.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_77 < 0) { __pyx_t_77 += __pyx_pybuffernd_d4.diminfo[1].shape; if (unlikely(__pyx_t_77 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_77 >= __pyx_pybuffernd_d4.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 639; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_v2 = (__pyx_v_pindex + (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_d4.rcbuffer->pybuffer.buf, __pyx_t_76, __pyx_pybuffernd_d4.diminfo[0].strides, __pyx_t_77, __pyx_pybuffernd_d4.diminfo[1].strides))); /* "nipy/algorithms/statistics/intvol.pyx":640 * v1 = pindex + d4[l,1] * v2 = pindex + d4[l,2] * v3 = pindex + d4[l,3] # <<<<<<<<<<<<<< * w1 = d4[l,5] * w2 = d4[l,6] */ __pyx_t_78 = __pyx_v_l; __pyx_t_79 = 3; __pyx_t_8 = -1; if (__pyx_t_78 < 0) { __pyx_t_78 += __pyx_pybuffernd_d4.diminfo[0].shape; if (unlikely(__pyx_t_78 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_78 >= __pyx_pybuffernd_d4.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_79 < 0) { __pyx_t_79 += __pyx_pybuffernd_d4.diminfo[1].shape; if (unlikely(__pyx_t_79 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_79 >= __pyx_pybuffernd_d4.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 640; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_v3 = (__pyx_v_pindex + (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_d4.rcbuffer->pybuffer.buf, __pyx_t_78, __pyx_pybuffernd_d4.diminfo[0].strides, __pyx_t_79, __pyx_pybuffernd_d4.diminfo[1].strides))); /* "nipy/algorithms/statistics/intvol.pyx":641 * v2 = pindex + d4[l,2] * v3 = pindex + d4[l,3] * w1 = d4[l,5] # <<<<<<<<<<<<<< * w2 = d4[l,6] * w3 = d4[l,7] */ 
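/* Inside the (i, j, k) loop the generated code follows the embedded source for
 * intvol.pyx lines 617-653: the 8x8 matrix D of inner products of the coordinate
 * columns at the eight cube vertices is filled, and each tetrahedron listed in
 * d4 then contributes to the accumulators l3 and l2. A condensed restatement,
 * using only names that appear in the embedded source:
 *
 *     for r in range(8):
 *         rr = (index + cvertices[r]) % nvox
 *         mr = fmask[rr]
 *         for s in range(r+1):
 *             ss = (index + cvertices[s]) % nvox
 *             ms = fmask[ss]
 *             if mr * ms:
 *                 D[r,s] = D[s,r] = sum over l of fcoords[l,ss] * fcoords[l,rr]
 *             else:
 *                 D[r,s] = D[s,r] = 0
 *
 *     for l in range(ds4):
 *         v0 = pindex + d4[l,0]; w0 = d4[l,4]
 *         m = fpmask[v0]
 *         if m:
 *             v1, v2, v3 = pindex + d4[l,1], pindex + d4[l,2], pindex + d4[l,3]
 *             w1, w2, w3 = d4[l,5], d4[l,6], d4[l,7]
 *             m = m * fpmask[v1] * fpmask[v2] * fpmask[v3]
 *             l3 = l3 + m * mu3_tet(D[w0,w0], D[w0,w1], ..., D[w3,w3])
 *             l2 = l2 - m * mu2_tet(D[w0,w0], D[w0,w1], ..., D[w3,w3])
 */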
__pyx_t_80 = __pyx_v_l; __pyx_t_81 = 5; __pyx_t_8 = -1; if (__pyx_t_80 < 0) { __pyx_t_80 += __pyx_pybuffernd_d4.diminfo[0].shape; if (unlikely(__pyx_t_80 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_80 >= __pyx_pybuffernd_d4.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_81 < 0) { __pyx_t_81 += __pyx_pybuffernd_d4.diminfo[1].shape; if (unlikely(__pyx_t_81 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_81 >= __pyx_pybuffernd_d4.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 641; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_w1 = (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_d4.rcbuffer->pybuffer.buf, __pyx_t_80, __pyx_pybuffernd_d4.diminfo[0].strides, __pyx_t_81, __pyx_pybuffernd_d4.diminfo[1].strides)); /* "nipy/algorithms/statistics/intvol.pyx":642 * v3 = pindex + d4[l,3] * w1 = d4[l,5] * w2 = d4[l,6] # <<<<<<<<<<<<<< * w3 = d4[l,7] * */ __pyx_t_82 = __pyx_v_l; __pyx_t_83 = 6; __pyx_t_8 = -1; if (__pyx_t_82 < 0) { __pyx_t_82 += __pyx_pybuffernd_d4.diminfo[0].shape; if (unlikely(__pyx_t_82 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_82 >= __pyx_pybuffernd_d4.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_83 < 0) { __pyx_t_83 += __pyx_pybuffernd_d4.diminfo[1].shape; if (unlikely(__pyx_t_83 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_83 >= __pyx_pybuffernd_d4.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 642; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_w2 = (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_d4.rcbuffer->pybuffer.buf, __pyx_t_82, __pyx_pybuffernd_d4.diminfo[0].strides, __pyx_t_83, __pyx_pybuffernd_d4.diminfo[1].strides)); /* "nipy/algorithms/statistics/intvol.pyx":643 * w1 = d4[l,5] * w2 = d4[l,6] * w3 = d4[l,7] # <<<<<<<<<<<<<< * * m = m * fpmask[v1] * fpmask[v2] * fpmask[v3] */ __pyx_t_84 = __pyx_v_l; __pyx_t_85 = 7; __pyx_t_8 = -1; if (__pyx_t_84 < 0) { __pyx_t_84 += __pyx_pybuffernd_d4.diminfo[0].shape; if (unlikely(__pyx_t_84 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_84 >= __pyx_pybuffernd_d4.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_85 < 0) { __pyx_t_85 += __pyx_pybuffernd_d4.diminfo[1].shape; if (unlikely(__pyx_t_85 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_85 >= __pyx_pybuffernd_d4.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 643; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_w3 = (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_d4.rcbuffer->pybuffer.buf, __pyx_t_84, __pyx_pybuffernd_d4.diminfo[0].strides, __pyx_t_85, __pyx_pybuffernd_d4.diminfo[1].strides)); /* "nipy/algorithms/statistics/intvol.pyx":645 * w3 = d4[l,7] * * m = m * fpmask[v1] * fpmask[v2] * fpmask[v3] # <<<<<<<<<<<<<< * * l3 = l3 + m * mu3_tet(D[w0,w0], D[w0,w1], D[w0,w2], */ __pyx_t_86 = __pyx_v_v1; __pyx_t_8 = -1; if (__pyx_t_86 < 0) { __pyx_t_86 += __pyx_pybuffernd_fpmask.diminfo[0].shape; if (unlikely(__pyx_t_86 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_86 >= __pyx_pybuffernd_fpmask.diminfo[0].shape)) __pyx_t_8 = 0; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 645; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_87 = __pyx_v_v2; __pyx_t_8 = -1; if (__pyx_t_87 < 0) { __pyx_t_87 
+= __pyx_pybuffernd_fpmask.diminfo[0].shape; if (unlikely(__pyx_t_87 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_87 >= __pyx_pybuffernd_fpmask.diminfo[0].shape)) __pyx_t_8 = 0; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 645; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_88 = __pyx_v_v3; __pyx_t_8 = -1; if (__pyx_t_88 < 0) { __pyx_t_88 += __pyx_pybuffernd_fpmask.diminfo[0].shape; if (unlikely(__pyx_t_88 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_88 >= __pyx_pybuffernd_fpmask.diminfo[0].shape)) __pyx_t_8 = 0; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 645; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_m = (((__pyx_v_m * (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_fpmask.rcbuffer->pybuffer.buf, __pyx_t_86, __pyx_pybuffernd_fpmask.diminfo[0].strides))) * (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_fpmask.rcbuffer->pybuffer.buf, __pyx_t_87, __pyx_pybuffernd_fpmask.diminfo[0].strides))) * (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_fpmask.rcbuffer->pybuffer.buf, __pyx_t_88, __pyx_pybuffernd_fpmask.diminfo[0].strides))); /* "nipy/algorithms/statistics/intvol.pyx":647 * m = m * fpmask[v1] * fpmask[v2] * fpmask[v3] * * l3 = l3 + m * mu3_tet(D[w0,w0], D[w0,w1], D[w0,w2], # <<<<<<<<<<<<<< * D[w0,w3], D[w1,w1], D[w1,w2], * D[w1,w3], D[w2,w2], D[w2,w3], */ __pyx_t_89 = __pyx_v_w0; __pyx_t_90 = __pyx_v_w0; __pyx_t_8 = -1; if (__pyx_t_89 < 0) { __pyx_t_89 += __pyx_pybuffernd_D.diminfo[0].shape; if (unlikely(__pyx_t_89 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_89 >= __pyx_pybuffernd_D.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_90 < 0) { __pyx_t_90 += __pyx_pybuffernd_D.diminfo[1].shape; if (unlikely(__pyx_t_90 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_90 >= __pyx_pybuffernd_D.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 647; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_91 = __pyx_v_w0; __pyx_t_92 = __pyx_v_w1; __pyx_t_8 = -1; if (__pyx_t_91 < 0) { __pyx_t_91 += __pyx_pybuffernd_D.diminfo[0].shape; if (unlikely(__pyx_t_91 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_91 >= __pyx_pybuffernd_D.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_92 < 0) { __pyx_t_92 += __pyx_pybuffernd_D.diminfo[1].shape; if (unlikely(__pyx_t_92 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_92 >= __pyx_pybuffernd_D.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 647; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_93 = __pyx_v_w0; __pyx_t_94 = __pyx_v_w2; __pyx_t_8 = -1; if (__pyx_t_93 < 0) { __pyx_t_93 += __pyx_pybuffernd_D.diminfo[0].shape; if (unlikely(__pyx_t_93 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_93 >= __pyx_pybuffernd_D.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_94 < 0) { __pyx_t_94 += __pyx_pybuffernd_D.diminfo[1].shape; if (unlikely(__pyx_t_94 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_94 >= __pyx_pybuffernd_D.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 647; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "nipy/algorithms/statistics/intvol.pyx":648 * * l3 = l3 + m * mu3_tet(D[w0,w0], D[w0,w1], 
D[w0,w2], * D[w0,w3], D[w1,w1], D[w1,w2], # <<<<<<<<<<<<<< * D[w1,w3], D[w2,w2], D[w2,w3], * D[w3,w3]) */ __pyx_t_95 = __pyx_v_w0; __pyx_t_96 = __pyx_v_w3; __pyx_t_8 = -1; if (__pyx_t_95 < 0) { __pyx_t_95 += __pyx_pybuffernd_D.diminfo[0].shape; if (unlikely(__pyx_t_95 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_95 >= __pyx_pybuffernd_D.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_96 < 0) { __pyx_t_96 += __pyx_pybuffernd_D.diminfo[1].shape; if (unlikely(__pyx_t_96 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_96 >= __pyx_pybuffernd_D.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 648; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_97 = __pyx_v_w1; __pyx_t_98 = __pyx_v_w1; __pyx_t_8 = -1; if (__pyx_t_97 < 0) { __pyx_t_97 += __pyx_pybuffernd_D.diminfo[0].shape; if (unlikely(__pyx_t_97 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_97 >= __pyx_pybuffernd_D.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_98 < 0) { __pyx_t_98 += __pyx_pybuffernd_D.diminfo[1].shape; if (unlikely(__pyx_t_98 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_98 >= __pyx_pybuffernd_D.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 648; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_99 = __pyx_v_w1; __pyx_t_100 = __pyx_v_w2; __pyx_t_8 = -1; if (__pyx_t_99 < 0) { __pyx_t_99 += __pyx_pybuffernd_D.diminfo[0].shape; if (unlikely(__pyx_t_99 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_99 >= __pyx_pybuffernd_D.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_100 < 0) { __pyx_t_100 += __pyx_pybuffernd_D.diminfo[1].shape; if (unlikely(__pyx_t_100 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_100 >= __pyx_pybuffernd_D.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 648; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "nipy/algorithms/statistics/intvol.pyx":649 * l3 = l3 + m * mu3_tet(D[w0,w0], D[w0,w1], D[w0,w2], * D[w0,w3], D[w1,w1], D[w1,w2], * D[w1,w3], D[w2,w2], D[w2,w3], # <<<<<<<<<<<<<< * D[w3,w3]) * */ __pyx_t_101 = __pyx_v_w1; __pyx_t_102 = __pyx_v_w3; __pyx_t_8 = -1; if (__pyx_t_101 < 0) { __pyx_t_101 += __pyx_pybuffernd_D.diminfo[0].shape; if (unlikely(__pyx_t_101 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_101 >= __pyx_pybuffernd_D.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_102 < 0) { __pyx_t_102 += __pyx_pybuffernd_D.diminfo[1].shape; if (unlikely(__pyx_t_102 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_102 >= __pyx_pybuffernd_D.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 649; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_103 = __pyx_v_w2; __pyx_t_104 = __pyx_v_w2; __pyx_t_8 = -1; if (__pyx_t_103 < 0) { __pyx_t_103 += __pyx_pybuffernd_D.diminfo[0].shape; if (unlikely(__pyx_t_103 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_103 >= __pyx_pybuffernd_D.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_104 < 0) { __pyx_t_104 += __pyx_pybuffernd_D.diminfo[1].shape; if (unlikely(__pyx_t_104 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_104 >= __pyx_pybuffernd_D.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 649; __pyx_clineno = __LINE__; 
goto __pyx_L1_error;} } __pyx_t_105 = __pyx_v_w2; __pyx_t_106 = __pyx_v_w3; __pyx_t_8 = -1; if (__pyx_t_105 < 0) { __pyx_t_105 += __pyx_pybuffernd_D.diminfo[0].shape; if (unlikely(__pyx_t_105 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_105 >= __pyx_pybuffernd_D.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_106 < 0) { __pyx_t_106 += __pyx_pybuffernd_D.diminfo[1].shape; if (unlikely(__pyx_t_106 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_106 >= __pyx_pybuffernd_D.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 649; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "nipy/algorithms/statistics/intvol.pyx":650 * D[w0,w3], D[w1,w1], D[w1,w2], * D[w1,w3], D[w2,w2], D[w2,w3], * D[w3,w3]) # <<<<<<<<<<<<<< * * l2 = l2 - m * mu2_tet(D[w0,w0], D[w0,w1], D[w0,w2], */ __pyx_t_107 = __pyx_v_w3; __pyx_t_108 = __pyx_v_w3; __pyx_t_8 = -1; if (__pyx_t_107 < 0) { __pyx_t_107 += __pyx_pybuffernd_D.diminfo[0].shape; if (unlikely(__pyx_t_107 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_107 >= __pyx_pybuffernd_D.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_108 < 0) { __pyx_t_108 += __pyx_pybuffernd_D.diminfo[1].shape; if (unlikely(__pyx_t_108 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_108 >= __pyx_pybuffernd_D.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 650; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_l3 = (__pyx_v_l3 + (__pyx_v_m * __pyx_f_4nipy_10algorithms_10statistics_6intvol_mu3_tet((*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_89, __pyx_pybuffernd_D.diminfo[0].strides, __pyx_t_90, __pyx_pybuffernd_D.diminfo[1].strides)), (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_91, __pyx_pybuffernd_D.diminfo[0].strides, __pyx_t_92, __pyx_pybuffernd_D.diminfo[1].strides)), (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_93, __pyx_pybuffernd_D.diminfo[0].strides, __pyx_t_94, __pyx_pybuffernd_D.diminfo[1].strides)), (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_95, __pyx_pybuffernd_D.diminfo[0].strides, __pyx_t_96, __pyx_pybuffernd_D.diminfo[1].strides)), (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_97, __pyx_pybuffernd_D.diminfo[0].strides, __pyx_t_98, __pyx_pybuffernd_D.diminfo[1].strides)), (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_99, __pyx_pybuffernd_D.diminfo[0].strides, __pyx_t_100, __pyx_pybuffernd_D.diminfo[1].strides)), (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_101, __pyx_pybuffernd_D.diminfo[0].strides, __pyx_t_102, __pyx_pybuffernd_D.diminfo[1].strides)), (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_103, __pyx_pybuffernd_D.diminfo[0].strides, __pyx_t_104, __pyx_pybuffernd_D.diminfo[1].strides)), (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_105, __pyx_pybuffernd_D.diminfo[0].strides, __pyx_t_106, __pyx_pybuffernd_D.diminfo[1].strides)), (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_107, 
__pyx_pybuffernd_D.diminfo[0].strides, __pyx_t_108, __pyx_pybuffernd_D.diminfo[1].strides)), 0))); /* "nipy/algorithms/statistics/intvol.pyx":652 * D[w3,w3]) * * l2 = l2 - m * mu2_tet(D[w0,w0], D[w0,w1], D[w0,w2], # <<<<<<<<<<<<<< * D[w0,w3], D[w1,w1], D[w1,w2], * D[w1,w3], D[w2,w2], D[w2,w3], */ __pyx_t_109 = __pyx_v_w0; __pyx_t_110 = __pyx_v_w0; __pyx_t_8 = -1; if (__pyx_t_109 < 0) { __pyx_t_109 += __pyx_pybuffernd_D.diminfo[0].shape; if (unlikely(__pyx_t_109 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_109 >= __pyx_pybuffernd_D.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_110 < 0) { __pyx_t_110 += __pyx_pybuffernd_D.diminfo[1].shape; if (unlikely(__pyx_t_110 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_110 >= __pyx_pybuffernd_D.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 652; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_111 = __pyx_v_w0; __pyx_t_112 = __pyx_v_w1; __pyx_t_8 = -1; if (__pyx_t_111 < 0) { __pyx_t_111 += __pyx_pybuffernd_D.diminfo[0].shape; if (unlikely(__pyx_t_111 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_111 >= __pyx_pybuffernd_D.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_112 < 0) { __pyx_t_112 += __pyx_pybuffernd_D.diminfo[1].shape; if (unlikely(__pyx_t_112 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_112 >= __pyx_pybuffernd_D.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 652; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_113 = __pyx_v_w0; __pyx_t_114 = __pyx_v_w2; __pyx_t_8 = -1; if (__pyx_t_113 < 0) { __pyx_t_113 += __pyx_pybuffernd_D.diminfo[0].shape; if (unlikely(__pyx_t_113 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_113 >= __pyx_pybuffernd_D.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_114 < 0) { __pyx_t_114 += __pyx_pybuffernd_D.diminfo[1].shape; if (unlikely(__pyx_t_114 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_114 >= __pyx_pybuffernd_D.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 652; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "nipy/algorithms/statistics/intvol.pyx":653 * * l2 = l2 - m * mu2_tet(D[w0,w0], D[w0,w1], D[w0,w2], * D[w0,w3], D[w1,w1], D[w1,w2], # <<<<<<<<<<<<<< * D[w1,w3], D[w2,w2], D[w2,w3], * D[w3,w3]) */ __pyx_t_115 = __pyx_v_w0; __pyx_t_116 = __pyx_v_w3; __pyx_t_8 = -1; if (__pyx_t_115 < 0) { __pyx_t_115 += __pyx_pybuffernd_D.diminfo[0].shape; if (unlikely(__pyx_t_115 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_115 >= __pyx_pybuffernd_D.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_116 < 0) { __pyx_t_116 += __pyx_pybuffernd_D.diminfo[1].shape; if (unlikely(__pyx_t_116 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_116 >= __pyx_pybuffernd_D.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 653; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_117 = __pyx_v_w1; __pyx_t_118 = __pyx_v_w1; __pyx_t_8 = -1; if (__pyx_t_117 < 0) { __pyx_t_117 += __pyx_pybuffernd_D.diminfo[0].shape; if (unlikely(__pyx_t_117 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_117 >= __pyx_pybuffernd_D.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_118 < 0) { __pyx_t_118 += __pyx_pybuffernd_D.diminfo[1].shape; if (unlikely(__pyx_t_118 < 0)) __pyx_t_8 = 1; } else if 
(unlikely(__pyx_t_118 >= __pyx_pybuffernd_D.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 653; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_119 = __pyx_v_w1; __pyx_t_120 = __pyx_v_w2; __pyx_t_8 = -1; if (__pyx_t_119 < 0) { __pyx_t_119 += __pyx_pybuffernd_D.diminfo[0].shape; if (unlikely(__pyx_t_119 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_119 >= __pyx_pybuffernd_D.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_120 < 0) { __pyx_t_120 += __pyx_pybuffernd_D.diminfo[1].shape; if (unlikely(__pyx_t_120 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_120 >= __pyx_pybuffernd_D.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 653; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "nipy/algorithms/statistics/intvol.pyx":654 * l2 = l2 - m * mu2_tet(D[w0,w0], D[w0,w1], D[w0,w2], * D[w0,w3], D[w1,w1], D[w1,w2], * D[w1,w3], D[w2,w2], D[w2,w3], # <<<<<<<<<<<<<< * D[w3,w3]) * */ __pyx_t_121 = __pyx_v_w1; __pyx_t_122 = __pyx_v_w3; __pyx_t_8 = -1; if (__pyx_t_121 < 0) { __pyx_t_121 += __pyx_pybuffernd_D.diminfo[0].shape; if (unlikely(__pyx_t_121 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_121 >= __pyx_pybuffernd_D.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_122 < 0) { __pyx_t_122 += __pyx_pybuffernd_D.diminfo[1].shape; if (unlikely(__pyx_t_122 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_122 >= __pyx_pybuffernd_D.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 654; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_123 = __pyx_v_w2; __pyx_t_124 = __pyx_v_w2; __pyx_t_8 = -1; if (__pyx_t_123 < 0) { __pyx_t_123 += __pyx_pybuffernd_D.diminfo[0].shape; if (unlikely(__pyx_t_123 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_123 >= __pyx_pybuffernd_D.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_124 < 0) { __pyx_t_124 += __pyx_pybuffernd_D.diminfo[1].shape; if (unlikely(__pyx_t_124 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_124 >= __pyx_pybuffernd_D.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 654; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_125 = __pyx_v_w2; __pyx_t_126 = __pyx_v_w3; __pyx_t_8 = -1; if (__pyx_t_125 < 0) { __pyx_t_125 += __pyx_pybuffernd_D.diminfo[0].shape; if (unlikely(__pyx_t_125 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_125 >= __pyx_pybuffernd_D.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_126 < 0) { __pyx_t_126 += __pyx_pybuffernd_D.diminfo[1].shape; if (unlikely(__pyx_t_126 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_126 >= __pyx_pybuffernd_D.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 654; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "nipy/algorithms/statistics/intvol.pyx":655 * D[w0,w3], D[w1,w1], D[w1,w2], * D[w1,w3], D[w2,w2], D[w2,w3], * D[w3,w3]) # <<<<<<<<<<<<<< * * l1 = l1 + m * mu1_tet(D[w0,w0], D[w0,w1], D[w0,w2], */ __pyx_t_127 = __pyx_v_w3; __pyx_t_128 = __pyx_v_w3; __pyx_t_8 = -1; if (__pyx_t_127 < 0) { __pyx_t_127 += __pyx_pybuffernd_D.diminfo[0].shape; if (unlikely(__pyx_t_127 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_127 >= __pyx_pybuffernd_D.diminfo[0].shape)) 
__pyx_t_8 = 0; if (__pyx_t_128 < 0) { __pyx_t_128 += __pyx_pybuffernd_D.diminfo[1].shape; if (unlikely(__pyx_t_128 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_128 >= __pyx_pybuffernd_D.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 655; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_l2 = (__pyx_v_l2 - (__pyx_v_m * __pyx_f_4nipy_10algorithms_10statistics_6intvol_mu2_tet((*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_109, __pyx_pybuffernd_D.diminfo[0].strides, __pyx_t_110, __pyx_pybuffernd_D.diminfo[1].strides)), (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_111, __pyx_pybuffernd_D.diminfo[0].strides, __pyx_t_112, __pyx_pybuffernd_D.diminfo[1].strides)), (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_113, __pyx_pybuffernd_D.diminfo[0].strides, __pyx_t_114, __pyx_pybuffernd_D.diminfo[1].strides)), (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_115, __pyx_pybuffernd_D.diminfo[0].strides, __pyx_t_116, __pyx_pybuffernd_D.diminfo[1].strides)), (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_117, __pyx_pybuffernd_D.diminfo[0].strides, __pyx_t_118, __pyx_pybuffernd_D.diminfo[1].strides)), (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_119, __pyx_pybuffernd_D.diminfo[0].strides, __pyx_t_120, __pyx_pybuffernd_D.diminfo[1].strides)), (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_121, __pyx_pybuffernd_D.diminfo[0].strides, __pyx_t_122, __pyx_pybuffernd_D.diminfo[1].strides)), (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_123, __pyx_pybuffernd_D.diminfo[0].strides, __pyx_t_124, __pyx_pybuffernd_D.diminfo[1].strides)), (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_125, __pyx_pybuffernd_D.diminfo[0].strides, __pyx_t_126, __pyx_pybuffernd_D.diminfo[1].strides)), (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_127, __pyx_pybuffernd_D.diminfo[0].strides, __pyx_t_128, __pyx_pybuffernd_D.diminfo[1].strides)), 0))); /* "nipy/algorithms/statistics/intvol.pyx":657 * D[w3,w3]) * * l1 = l1 + m * mu1_tet(D[w0,w0], D[w0,w1], D[w0,w2], # <<<<<<<<<<<<<< * D[w0,w3], D[w1,w1], D[w1,w2], * D[w1,w3], D[w2,w2], D[w2,w3], */ __pyx_t_129 = __pyx_v_w0; __pyx_t_130 = __pyx_v_w0; __pyx_t_8 = -1; if (__pyx_t_129 < 0) { __pyx_t_129 += __pyx_pybuffernd_D.diminfo[0].shape; if (unlikely(__pyx_t_129 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_129 >= __pyx_pybuffernd_D.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_130 < 0) { __pyx_t_130 += __pyx_pybuffernd_D.diminfo[1].shape; if (unlikely(__pyx_t_130 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_130 >= __pyx_pybuffernd_D.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 657; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_131 = __pyx_v_w0; __pyx_t_132 = __pyx_v_w1; __pyx_t_8 = -1; if (__pyx_t_131 < 0) { __pyx_t_131 += __pyx_pybuffernd_D.diminfo[0].shape; if (unlikely(__pyx_t_131 < 0)) __pyx_t_8 = 0; } else if 
(unlikely(__pyx_t_131 >= __pyx_pybuffernd_D.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_132 < 0) { __pyx_t_132 += __pyx_pybuffernd_D.diminfo[1].shape; if (unlikely(__pyx_t_132 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_132 >= __pyx_pybuffernd_D.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 657; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_133 = __pyx_v_w0; __pyx_t_134 = __pyx_v_w2; __pyx_t_8 = -1; if (__pyx_t_133 < 0) { __pyx_t_133 += __pyx_pybuffernd_D.diminfo[0].shape; if (unlikely(__pyx_t_133 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_133 >= __pyx_pybuffernd_D.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_134 < 0) { __pyx_t_134 += __pyx_pybuffernd_D.diminfo[1].shape; if (unlikely(__pyx_t_134 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_134 >= __pyx_pybuffernd_D.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 657; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "nipy/algorithms/statistics/intvol.pyx":658 * * l1 = l1 + m * mu1_tet(D[w0,w0], D[w0,w1], D[w0,w2], * D[w0,w3], D[w1,w1], D[w1,w2], # <<<<<<<<<<<<<< * D[w1,w3], D[w2,w2], D[w2,w3], * D[w3,w3]) */ __pyx_t_135 = __pyx_v_w0; __pyx_t_136 = __pyx_v_w3; __pyx_t_8 = -1; if (__pyx_t_135 < 0) { __pyx_t_135 += __pyx_pybuffernd_D.diminfo[0].shape; if (unlikely(__pyx_t_135 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_135 >= __pyx_pybuffernd_D.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_136 < 0) { __pyx_t_136 += __pyx_pybuffernd_D.diminfo[1].shape; if (unlikely(__pyx_t_136 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_136 >= __pyx_pybuffernd_D.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 658; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_137 = __pyx_v_w1; __pyx_t_138 = __pyx_v_w1; __pyx_t_8 = -1; if (__pyx_t_137 < 0) { __pyx_t_137 += __pyx_pybuffernd_D.diminfo[0].shape; if (unlikely(__pyx_t_137 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_137 >= __pyx_pybuffernd_D.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_138 < 0) { __pyx_t_138 += __pyx_pybuffernd_D.diminfo[1].shape; if (unlikely(__pyx_t_138 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_138 >= __pyx_pybuffernd_D.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 658; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_139 = __pyx_v_w1; __pyx_t_140 = __pyx_v_w2; __pyx_t_8 = -1; if (__pyx_t_139 < 0) { __pyx_t_139 += __pyx_pybuffernd_D.diminfo[0].shape; if (unlikely(__pyx_t_139 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_139 >= __pyx_pybuffernd_D.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_140 < 0) { __pyx_t_140 += __pyx_pybuffernd_D.diminfo[1].shape; if (unlikely(__pyx_t_140 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_140 >= __pyx_pybuffernd_D.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 658; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "nipy/algorithms/statistics/intvol.pyx":659 * l1 = l1 + m * mu1_tet(D[w0,w0], D[w0,w1], D[w0,w2], * D[w0,w3], D[w1,w1], D[w1,w2], * D[w1,w3], D[w2,w2], D[w2,w3], # <<<<<<<<<<<<<< * D[w3,w3]) * */ __pyx_t_141 = __pyx_v_w1; __pyx_t_142 = __pyx_v_w3; 
__pyx_t_8 = -1; if (__pyx_t_141 < 0) { __pyx_t_141 += __pyx_pybuffernd_D.diminfo[0].shape; if (unlikely(__pyx_t_141 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_141 >= __pyx_pybuffernd_D.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_142 < 0) { __pyx_t_142 += __pyx_pybuffernd_D.diminfo[1].shape; if (unlikely(__pyx_t_142 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_142 >= __pyx_pybuffernd_D.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 659; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_143 = __pyx_v_w2; __pyx_t_144 = __pyx_v_w2; __pyx_t_8 = -1; if (__pyx_t_143 < 0) { __pyx_t_143 += __pyx_pybuffernd_D.diminfo[0].shape; if (unlikely(__pyx_t_143 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_143 >= __pyx_pybuffernd_D.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_144 < 0) { __pyx_t_144 += __pyx_pybuffernd_D.diminfo[1].shape; if (unlikely(__pyx_t_144 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_144 >= __pyx_pybuffernd_D.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 659; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_145 = __pyx_v_w2; __pyx_t_146 = __pyx_v_w3; __pyx_t_8 = -1; if (__pyx_t_145 < 0) { __pyx_t_145 += __pyx_pybuffernd_D.diminfo[0].shape; if (unlikely(__pyx_t_145 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_145 >= __pyx_pybuffernd_D.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_146 < 0) { __pyx_t_146 += __pyx_pybuffernd_D.diminfo[1].shape; if (unlikely(__pyx_t_146 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_146 >= __pyx_pybuffernd_D.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 659; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "nipy/algorithms/statistics/intvol.pyx":660 * D[w0,w3], D[w1,w1], D[w1,w2], * D[w1,w3], D[w2,w2], D[w2,w3], * D[w3,w3]) # <<<<<<<<<<<<<< * * l0 = l0 - m */ __pyx_t_147 = __pyx_v_w3; __pyx_t_148 = __pyx_v_w3; __pyx_t_8 = -1; if (__pyx_t_147 < 0) { __pyx_t_147 += __pyx_pybuffernd_D.diminfo[0].shape; if (unlikely(__pyx_t_147 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_147 >= __pyx_pybuffernd_D.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_148 < 0) { __pyx_t_148 += __pyx_pybuffernd_D.diminfo[1].shape; if (unlikely(__pyx_t_148 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_148 >= __pyx_pybuffernd_D.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_l1 = (__pyx_v_l1 + (__pyx_v_m * __pyx_f_4nipy_10algorithms_10statistics_6intvol_mu1_tet((*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_129, __pyx_pybuffernd_D.diminfo[0].strides, __pyx_t_130, __pyx_pybuffernd_D.diminfo[1].strides)), (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_131, __pyx_pybuffernd_D.diminfo[0].strides, __pyx_t_132, __pyx_pybuffernd_D.diminfo[1].strides)), (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_133, __pyx_pybuffernd_D.diminfo[0].strides, __pyx_t_134, __pyx_pybuffernd_D.diminfo[1].strides)), (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_135, 
__pyx_pybuffernd_D.diminfo[0].strides, __pyx_t_136, __pyx_pybuffernd_D.diminfo[1].strides)), (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_137, __pyx_pybuffernd_D.diminfo[0].strides, __pyx_t_138, __pyx_pybuffernd_D.diminfo[1].strides)), (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_139, __pyx_pybuffernd_D.diminfo[0].strides, __pyx_t_140, __pyx_pybuffernd_D.diminfo[1].strides)), (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_141, __pyx_pybuffernd_D.diminfo[0].strides, __pyx_t_142, __pyx_pybuffernd_D.diminfo[1].strides)), (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_143, __pyx_pybuffernd_D.diminfo[0].strides, __pyx_t_144, __pyx_pybuffernd_D.diminfo[1].strides)), (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_145, __pyx_pybuffernd_D.diminfo[0].strides, __pyx_t_146, __pyx_pybuffernd_D.diminfo[1].strides)), (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_147, __pyx_pybuffernd_D.diminfo[0].strides, __pyx_t_148, __pyx_pybuffernd_D.diminfo[1].strides)), 0))); /* "nipy/algorithms/statistics/intvol.pyx":662 * D[w3,w3]) * * l0 = l0 - m # <<<<<<<<<<<<<< * * for l in range(ds3): */ __pyx_v_l0 = (__pyx_v_l0 - __pyx_v_m); goto __pyx_L40; } __pyx_L40:; } /* "nipy/algorithms/statistics/intvol.pyx":664 * l0 = l0 - m * * for l in range(ds3): # <<<<<<<<<<<<<< * v0 = pindex + d3[l,0] * w0 = d3[l,3] */ __pyx_t_51 = __pyx_v_ds3; for (__pyx_t_55 = 0; __pyx_t_55 < __pyx_t_51; __pyx_t_55+=1) { __pyx_v_l = __pyx_t_55; /* "nipy/algorithms/statistics/intvol.pyx":665 * * for l in range(ds3): * v0 = pindex + d3[l,0] # <<<<<<<<<<<<<< * w0 = d3[l,3] * m = fpmask[v0] */ __pyx_t_149 = __pyx_v_l; __pyx_t_150 = 0; __pyx_t_8 = -1; if (__pyx_t_149 < 0) { __pyx_t_149 += __pyx_pybuffernd_d3.diminfo[0].shape; if (unlikely(__pyx_t_149 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_149 >= __pyx_pybuffernd_d3.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_150 < 0) { __pyx_t_150 += __pyx_pybuffernd_d3.diminfo[1].shape; if (unlikely(__pyx_t_150 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_150 >= __pyx_pybuffernd_d3.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 665; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_v0 = (__pyx_v_pindex + (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_d3.rcbuffer->pybuffer.buf, __pyx_t_149, __pyx_pybuffernd_d3.diminfo[0].strides, __pyx_t_150, __pyx_pybuffernd_d3.diminfo[1].strides))); /* "nipy/algorithms/statistics/intvol.pyx":666 * for l in range(ds3): * v0 = pindex + d3[l,0] * w0 = d3[l,3] # <<<<<<<<<<<<<< * m = fpmask[v0] * if m: */ __pyx_t_151 = __pyx_v_l; __pyx_t_152 = 3; __pyx_t_8 = -1; if (__pyx_t_151 < 0) { __pyx_t_151 += __pyx_pybuffernd_d3.diminfo[0].shape; if (unlikely(__pyx_t_151 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_151 >= __pyx_pybuffernd_d3.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_152 < 0) { __pyx_t_152 += __pyx_pybuffernd_d3.diminfo[1].shape; if (unlikely(__pyx_t_152 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_152 >= __pyx_pybuffernd_d3.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 666; __pyx_clineno = 
__LINE__; goto __pyx_L1_error;} } __pyx_v_w0 = (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_d3.rcbuffer->pybuffer.buf, __pyx_t_151, __pyx_pybuffernd_d3.diminfo[0].strides, __pyx_t_152, __pyx_pybuffernd_d3.diminfo[1].strides)); /* "nipy/algorithms/statistics/intvol.pyx":667 * v0 = pindex + d3[l,0] * w0 = d3[l,3] * m = fpmask[v0] # <<<<<<<<<<<<<< * if m: * v1 = pindex + d3[l,1] */ __pyx_t_153 = __pyx_v_v0; __pyx_t_8 = -1; if (__pyx_t_153 < 0) { __pyx_t_153 += __pyx_pybuffernd_fpmask.diminfo[0].shape; if (unlikely(__pyx_t_153 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_153 >= __pyx_pybuffernd_fpmask.diminfo[0].shape)) __pyx_t_8 = 0; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 667; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_m = (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_fpmask.rcbuffer->pybuffer.buf, __pyx_t_153, __pyx_pybuffernd_fpmask.diminfo[0].strides)); /* "nipy/algorithms/statistics/intvol.pyx":668 * w0 = d3[l,3] * m = fpmask[v0] * if m: # <<<<<<<<<<<<<< * v1 = pindex + d3[l,1] * v2 = pindex + d3[l,2] */ if (__pyx_v_m) { /* "nipy/algorithms/statistics/intvol.pyx":669 * m = fpmask[v0] * if m: * v1 = pindex + d3[l,1] # <<<<<<<<<<<<<< * v2 = pindex + d3[l,2] * w1 = d3[l,4] */ __pyx_t_154 = __pyx_v_l; __pyx_t_155 = 1; __pyx_t_8 = -1; if (__pyx_t_154 < 0) { __pyx_t_154 += __pyx_pybuffernd_d3.diminfo[0].shape; if (unlikely(__pyx_t_154 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_154 >= __pyx_pybuffernd_d3.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_155 < 0) { __pyx_t_155 += __pyx_pybuffernd_d3.diminfo[1].shape; if (unlikely(__pyx_t_155 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_155 >= __pyx_pybuffernd_d3.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 669; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_v1 = (__pyx_v_pindex + (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_d3.rcbuffer->pybuffer.buf, __pyx_t_154, __pyx_pybuffernd_d3.diminfo[0].strides, __pyx_t_155, __pyx_pybuffernd_d3.diminfo[1].strides))); /* "nipy/algorithms/statistics/intvol.pyx":670 * if m: * v1 = pindex + d3[l,1] * v2 = pindex + d3[l,2] # <<<<<<<<<<<<<< * w1 = d3[l,4] * w2 = d3[l,5] */ __pyx_t_156 = __pyx_v_l; __pyx_t_157 = 2; __pyx_t_8 = -1; if (__pyx_t_156 < 0) { __pyx_t_156 += __pyx_pybuffernd_d3.diminfo[0].shape; if (unlikely(__pyx_t_156 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_156 >= __pyx_pybuffernd_d3.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_157 < 0) { __pyx_t_157 += __pyx_pybuffernd_d3.diminfo[1].shape; if (unlikely(__pyx_t_157 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_157 >= __pyx_pybuffernd_d3.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 670; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_v2 = (__pyx_v_pindex + (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_d3.rcbuffer->pybuffer.buf, __pyx_t_156, __pyx_pybuffernd_d3.diminfo[0].strides, __pyx_t_157, __pyx_pybuffernd_d3.diminfo[1].strides))); /* "nipy/algorithms/statistics/intvol.pyx":671 * v1 = pindex + d3[l,1] * v2 = pindex + d3[l,2] * w1 = d3[l,4] # <<<<<<<<<<<<<< * w2 = d3[l,5] * */ __pyx_t_158 = __pyx_v_l; __pyx_t_159 = 4; __pyx_t_8 = -1; if (__pyx_t_158 < 0) { __pyx_t_158 += __pyx_pybuffernd_d3.diminfo[0].shape; if 
(unlikely(__pyx_t_158 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_158 >= __pyx_pybuffernd_d3.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_159 < 0) { __pyx_t_159 += __pyx_pybuffernd_d3.diminfo[1].shape; if (unlikely(__pyx_t_159 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_159 >= __pyx_pybuffernd_d3.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 671; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_w1 = (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_d3.rcbuffer->pybuffer.buf, __pyx_t_158, __pyx_pybuffernd_d3.diminfo[0].strides, __pyx_t_159, __pyx_pybuffernd_d3.diminfo[1].strides)); /* "nipy/algorithms/statistics/intvol.pyx":672 * v2 = pindex + d3[l,2] * w1 = d3[l,4] * w2 = d3[l,5] # <<<<<<<<<<<<<< * * m = m * fpmask[v1] * fpmask[v2] */ __pyx_t_160 = __pyx_v_l; __pyx_t_161 = 5; __pyx_t_8 = -1; if (__pyx_t_160 < 0) { __pyx_t_160 += __pyx_pybuffernd_d3.diminfo[0].shape; if (unlikely(__pyx_t_160 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_160 >= __pyx_pybuffernd_d3.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_161 < 0) { __pyx_t_161 += __pyx_pybuffernd_d3.diminfo[1].shape; if (unlikely(__pyx_t_161 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_161 >= __pyx_pybuffernd_d3.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 672; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_w2 = (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_d3.rcbuffer->pybuffer.buf, __pyx_t_160, __pyx_pybuffernd_d3.diminfo[0].strides, __pyx_t_161, __pyx_pybuffernd_d3.diminfo[1].strides)); /* "nipy/algorithms/statistics/intvol.pyx":674 * w2 = d3[l,5] * * m = m * fpmask[v1] * fpmask[v2] # <<<<<<<<<<<<<< * l2 = l2 + m * mu2_tri(D[w0,w0], D[w0,w1], D[w0,w2], * D[w1,w1], D[w1,w2], D[w2,w2]) */ __pyx_t_162 = __pyx_v_v1; __pyx_t_8 = -1; if (__pyx_t_162 < 0) { __pyx_t_162 += __pyx_pybuffernd_fpmask.diminfo[0].shape; if (unlikely(__pyx_t_162 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_162 >= __pyx_pybuffernd_fpmask.diminfo[0].shape)) __pyx_t_8 = 0; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 674; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_163 = __pyx_v_v2; __pyx_t_8 = -1; if (__pyx_t_163 < 0) { __pyx_t_163 += __pyx_pybuffernd_fpmask.diminfo[0].shape; if (unlikely(__pyx_t_163 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_163 >= __pyx_pybuffernd_fpmask.diminfo[0].shape)) __pyx_t_8 = 0; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 674; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_m = ((__pyx_v_m * (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_fpmask.rcbuffer->pybuffer.buf, __pyx_t_162, __pyx_pybuffernd_fpmask.diminfo[0].strides))) * (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_fpmask.rcbuffer->pybuffer.buf, __pyx_t_163, __pyx_pybuffernd_fpmask.diminfo[0].strides))); /* "nipy/algorithms/statistics/intvol.pyx":675 * * m = m * fpmask[v1] * fpmask[v2] * l2 = l2 + m * mu2_tri(D[w0,w0], D[w0,w1], D[w0,w2], # <<<<<<<<<<<<<< * D[w1,w1], D[w1,w2], D[w2,w2]) * */ __pyx_t_164 = __pyx_v_w0; __pyx_t_165 = __pyx_v_w0; __pyx_t_8 = -1; if (__pyx_t_164 < 0) { __pyx_t_164 += __pyx_pybuffernd_D.diminfo[0].shape; if (unlikely(__pyx_t_164 < 0)) __pyx_t_8 = 0; } else 
if (unlikely(__pyx_t_164 >= __pyx_pybuffernd_D.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_165 < 0) { __pyx_t_165 += __pyx_pybuffernd_D.diminfo[1].shape; if (unlikely(__pyx_t_165 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_165 >= __pyx_pybuffernd_D.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 675; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_166 = __pyx_v_w0; __pyx_t_167 = __pyx_v_w1; __pyx_t_8 = -1; if (__pyx_t_166 < 0) { __pyx_t_166 += __pyx_pybuffernd_D.diminfo[0].shape; if (unlikely(__pyx_t_166 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_166 >= __pyx_pybuffernd_D.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_167 < 0) { __pyx_t_167 += __pyx_pybuffernd_D.diminfo[1].shape; if (unlikely(__pyx_t_167 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_167 >= __pyx_pybuffernd_D.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 675; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_168 = __pyx_v_w0; __pyx_t_169 = __pyx_v_w2; __pyx_t_8 = -1; if (__pyx_t_168 < 0) { __pyx_t_168 += __pyx_pybuffernd_D.diminfo[0].shape; if (unlikely(__pyx_t_168 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_168 >= __pyx_pybuffernd_D.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_169 < 0) { __pyx_t_169 += __pyx_pybuffernd_D.diminfo[1].shape; if (unlikely(__pyx_t_169 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_169 >= __pyx_pybuffernd_D.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 675; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "nipy/algorithms/statistics/intvol.pyx":676 * m = m * fpmask[v1] * fpmask[v2] * l2 = l2 + m * mu2_tri(D[w0,w0], D[w0,w1], D[w0,w2], * D[w1,w1], D[w1,w2], D[w2,w2]) # <<<<<<<<<<<<<< * * l1 = l1 - m * mu1_tri(D[w0,w0], D[w0,w1], D[w0,w2], */ __pyx_t_170 = __pyx_v_w1; __pyx_t_171 = __pyx_v_w1; __pyx_t_8 = -1; if (__pyx_t_170 < 0) { __pyx_t_170 += __pyx_pybuffernd_D.diminfo[0].shape; if (unlikely(__pyx_t_170 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_170 >= __pyx_pybuffernd_D.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_171 < 0) { __pyx_t_171 += __pyx_pybuffernd_D.diminfo[1].shape; if (unlikely(__pyx_t_171 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_171 >= __pyx_pybuffernd_D.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 676; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_172 = __pyx_v_w1; __pyx_t_173 = __pyx_v_w2; __pyx_t_8 = -1; if (__pyx_t_172 < 0) { __pyx_t_172 += __pyx_pybuffernd_D.diminfo[0].shape; if (unlikely(__pyx_t_172 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_172 >= __pyx_pybuffernd_D.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_173 < 0) { __pyx_t_173 += __pyx_pybuffernd_D.diminfo[1].shape; if (unlikely(__pyx_t_173 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_173 >= __pyx_pybuffernd_D.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 676; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_174 = __pyx_v_w2; __pyx_t_175 = __pyx_v_w2; __pyx_t_8 = -1; if (__pyx_t_174 < 0) { __pyx_t_174 += __pyx_pybuffernd_D.diminfo[0].shape; if (unlikely(__pyx_t_174 < 0)) __pyx_t_8 = 0; } else if 
(unlikely(__pyx_t_174 >= __pyx_pybuffernd_D.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_175 < 0) { __pyx_t_175 += __pyx_pybuffernd_D.diminfo[1].shape; if (unlikely(__pyx_t_175 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_175 >= __pyx_pybuffernd_D.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 676; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_l2 = (__pyx_v_l2 + (__pyx_v_m * __pyx_f_4nipy_10algorithms_10statistics_6intvol_mu2_tri((*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_164, __pyx_pybuffernd_D.diminfo[0].strides, __pyx_t_165, __pyx_pybuffernd_D.diminfo[1].strides)), (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_166, __pyx_pybuffernd_D.diminfo[0].strides, __pyx_t_167, __pyx_pybuffernd_D.diminfo[1].strides)), (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_168, __pyx_pybuffernd_D.diminfo[0].strides, __pyx_t_169, __pyx_pybuffernd_D.diminfo[1].strides)), (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_170, __pyx_pybuffernd_D.diminfo[0].strides, __pyx_t_171, __pyx_pybuffernd_D.diminfo[1].strides)), (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_172, __pyx_pybuffernd_D.diminfo[0].strides, __pyx_t_173, __pyx_pybuffernd_D.diminfo[1].strides)), (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_174, __pyx_pybuffernd_D.diminfo[0].strides, __pyx_t_175, __pyx_pybuffernd_D.diminfo[1].strides)), 0))); /* "nipy/algorithms/statistics/intvol.pyx":678 * D[w1,w1], D[w1,w2], D[w2,w2]) * * l1 = l1 - m * mu1_tri(D[w0,w0], D[w0,w1], D[w0,w2], # <<<<<<<<<<<<<< * D[w1,w1], D[w1,w2], D[w2,w2]) * */ __pyx_t_176 = __pyx_v_w0; __pyx_t_177 = __pyx_v_w0; __pyx_t_8 = -1; if (__pyx_t_176 < 0) { __pyx_t_176 += __pyx_pybuffernd_D.diminfo[0].shape; if (unlikely(__pyx_t_176 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_176 >= __pyx_pybuffernd_D.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_177 < 0) { __pyx_t_177 += __pyx_pybuffernd_D.diminfo[1].shape; if (unlikely(__pyx_t_177 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_177 >= __pyx_pybuffernd_D.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 678; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_178 = __pyx_v_w0; __pyx_t_179 = __pyx_v_w1; __pyx_t_8 = -1; if (__pyx_t_178 < 0) { __pyx_t_178 += __pyx_pybuffernd_D.diminfo[0].shape; if (unlikely(__pyx_t_178 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_178 >= __pyx_pybuffernd_D.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_179 < 0) { __pyx_t_179 += __pyx_pybuffernd_D.diminfo[1].shape; if (unlikely(__pyx_t_179 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_179 >= __pyx_pybuffernd_D.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 678; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_180 = __pyx_v_w0; __pyx_t_181 = __pyx_v_w2; __pyx_t_8 = -1; if (__pyx_t_180 < 0) { __pyx_t_180 += __pyx_pybuffernd_D.diminfo[0].shape; if (unlikely(__pyx_t_180 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_180 >= __pyx_pybuffernd_D.diminfo[0].shape)) __pyx_t_8 = 0; if 
(__pyx_t_181 < 0) { __pyx_t_181 += __pyx_pybuffernd_D.diminfo[1].shape; if (unlikely(__pyx_t_181 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_181 >= __pyx_pybuffernd_D.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 678; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "nipy/algorithms/statistics/intvol.pyx":679 * * l1 = l1 - m * mu1_tri(D[w0,w0], D[w0,w1], D[w0,w2], * D[w1,w1], D[w1,w2], D[w2,w2]) # <<<<<<<<<<<<<< * * l0 = l0 + m */ __pyx_t_182 = __pyx_v_w1; __pyx_t_183 = __pyx_v_w1; __pyx_t_8 = -1; if (__pyx_t_182 < 0) { __pyx_t_182 += __pyx_pybuffernd_D.diminfo[0].shape; if (unlikely(__pyx_t_182 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_182 >= __pyx_pybuffernd_D.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_183 < 0) { __pyx_t_183 += __pyx_pybuffernd_D.diminfo[1].shape; if (unlikely(__pyx_t_183 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_183 >= __pyx_pybuffernd_D.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 679; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_184 = __pyx_v_w1; __pyx_t_185 = __pyx_v_w2; __pyx_t_8 = -1; if (__pyx_t_184 < 0) { __pyx_t_184 += __pyx_pybuffernd_D.diminfo[0].shape; if (unlikely(__pyx_t_184 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_184 >= __pyx_pybuffernd_D.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_185 < 0) { __pyx_t_185 += __pyx_pybuffernd_D.diminfo[1].shape; if (unlikely(__pyx_t_185 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_185 >= __pyx_pybuffernd_D.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 679; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_186 = __pyx_v_w2; __pyx_t_187 = __pyx_v_w2; __pyx_t_8 = -1; if (__pyx_t_186 < 0) { __pyx_t_186 += __pyx_pybuffernd_D.diminfo[0].shape; if (unlikely(__pyx_t_186 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_186 >= __pyx_pybuffernd_D.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_187 < 0) { __pyx_t_187 += __pyx_pybuffernd_D.diminfo[1].shape; if (unlikely(__pyx_t_187 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_187 >= __pyx_pybuffernd_D.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 679; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_l1 = (__pyx_v_l1 - (__pyx_v_m * __pyx_f_4nipy_10algorithms_10statistics_6intvol_mu1_tri((*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_176, __pyx_pybuffernd_D.diminfo[0].strides, __pyx_t_177, __pyx_pybuffernd_D.diminfo[1].strides)), (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_178, __pyx_pybuffernd_D.diminfo[0].strides, __pyx_t_179, __pyx_pybuffernd_D.diminfo[1].strides)), (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_180, __pyx_pybuffernd_D.diminfo[0].strides, __pyx_t_181, __pyx_pybuffernd_D.diminfo[1].strides)), (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_182, __pyx_pybuffernd_D.diminfo[0].strides, __pyx_t_183, __pyx_pybuffernd_D.diminfo[1].strides)), (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_184, 
__pyx_pybuffernd_D.diminfo[0].strides, __pyx_t_185, __pyx_pybuffernd_D.diminfo[1].strides)), (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_186, __pyx_pybuffernd_D.diminfo[0].strides, __pyx_t_187, __pyx_pybuffernd_D.diminfo[1].strides)), 0))); /* "nipy/algorithms/statistics/intvol.pyx":681 * D[w1,w1], D[w1,w2], D[w2,w2]) * * l0 = l0 + m # <<<<<<<<<<<<<< * * for l in range(ds2): */ __pyx_v_l0 = (__pyx_v_l0 + __pyx_v_m); goto __pyx_L43; } __pyx_L43:; } /* "nipy/algorithms/statistics/intvol.pyx":683 * l0 = l0 + m * * for l in range(ds2): # <<<<<<<<<<<<<< * v0 = pindex + d2[l,0] * w0 = d2[l,2] */ __pyx_t_51 = __pyx_v_ds2; for (__pyx_t_55 = 0; __pyx_t_55 < __pyx_t_51; __pyx_t_55+=1) { __pyx_v_l = __pyx_t_55; /* "nipy/algorithms/statistics/intvol.pyx":684 * * for l in range(ds2): * v0 = pindex + d2[l,0] # <<<<<<<<<<<<<< * w0 = d2[l,2] * m = fpmask[v0] */ __pyx_t_188 = __pyx_v_l; __pyx_t_189 = 0; __pyx_t_8 = -1; if (__pyx_t_188 < 0) { __pyx_t_188 += __pyx_pybuffernd_d2.diminfo[0].shape; if (unlikely(__pyx_t_188 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_188 >= __pyx_pybuffernd_d2.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_189 < 0) { __pyx_t_189 += __pyx_pybuffernd_d2.diminfo[1].shape; if (unlikely(__pyx_t_189 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_189 >= __pyx_pybuffernd_d2.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 684; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_v0 = (__pyx_v_pindex + (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_d2.rcbuffer->pybuffer.buf, __pyx_t_188, __pyx_pybuffernd_d2.diminfo[0].strides, __pyx_t_189, __pyx_pybuffernd_d2.diminfo[1].strides))); /* "nipy/algorithms/statistics/intvol.pyx":685 * for l in range(ds2): * v0 = pindex + d2[l,0] * w0 = d2[l,2] # <<<<<<<<<<<<<< * m = fpmask[v0] * if m: */ __pyx_t_190 = __pyx_v_l; __pyx_t_191 = 2; __pyx_t_8 = -1; if (__pyx_t_190 < 0) { __pyx_t_190 += __pyx_pybuffernd_d2.diminfo[0].shape; if (unlikely(__pyx_t_190 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_190 >= __pyx_pybuffernd_d2.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_191 < 0) { __pyx_t_191 += __pyx_pybuffernd_d2.diminfo[1].shape; if (unlikely(__pyx_t_191 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_191 >= __pyx_pybuffernd_d2.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 685; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_w0 = (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_d2.rcbuffer->pybuffer.buf, __pyx_t_190, __pyx_pybuffernd_d2.diminfo[0].strides, __pyx_t_191, __pyx_pybuffernd_d2.diminfo[1].strides)); /* "nipy/algorithms/statistics/intvol.pyx":686 * v0 = pindex + d2[l,0] * w0 = d2[l,2] * m = fpmask[v0] # <<<<<<<<<<<<<< * if m: * v1 = pindex + d2[l,1] */ __pyx_t_192 = __pyx_v_v0; __pyx_t_8 = -1; if (__pyx_t_192 < 0) { __pyx_t_192 += __pyx_pybuffernd_fpmask.diminfo[0].shape; if (unlikely(__pyx_t_192 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_192 >= __pyx_pybuffernd_fpmask.diminfo[0].shape)) __pyx_t_8 = 0; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 686; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_m = (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_fpmask.rcbuffer->pybuffer.buf, __pyx_t_192, 
__pyx_pybuffernd_fpmask.diminfo[0].strides)); /* "nipy/algorithms/statistics/intvol.pyx":687 * w0 = d2[l,2] * m = fpmask[v0] * if m: # <<<<<<<<<<<<<< * v1 = pindex + d2[l,1] * w1 = d2[l,3] */ if (__pyx_v_m) { /* "nipy/algorithms/statistics/intvol.pyx":688 * m = fpmask[v0] * if m: * v1 = pindex + d2[l,1] # <<<<<<<<<<<<<< * w1 = d2[l,3] * m = m * fpmask[v1] */ __pyx_t_193 = __pyx_v_l; __pyx_t_194 = 1; __pyx_t_8 = -1; if (__pyx_t_193 < 0) { __pyx_t_193 += __pyx_pybuffernd_d2.diminfo[0].shape; if (unlikely(__pyx_t_193 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_193 >= __pyx_pybuffernd_d2.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_194 < 0) { __pyx_t_194 += __pyx_pybuffernd_d2.diminfo[1].shape; if (unlikely(__pyx_t_194 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_194 >= __pyx_pybuffernd_d2.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 688; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_v1 = (__pyx_v_pindex + (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_d2.rcbuffer->pybuffer.buf, __pyx_t_193, __pyx_pybuffernd_d2.diminfo[0].strides, __pyx_t_194, __pyx_pybuffernd_d2.diminfo[1].strides))); /* "nipy/algorithms/statistics/intvol.pyx":689 * if m: * v1 = pindex + d2[l,1] * w1 = d2[l,3] # <<<<<<<<<<<<<< * m = m * fpmask[v1] * l1 = l1 + m * mu1_edge(D[w0,w0], D[w0,w1], D[w1,w1]) */ __pyx_t_195 = __pyx_v_l; __pyx_t_196 = 3; __pyx_t_8 = -1; if (__pyx_t_195 < 0) { __pyx_t_195 += __pyx_pybuffernd_d2.diminfo[0].shape; if (unlikely(__pyx_t_195 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_195 >= __pyx_pybuffernd_d2.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_196 < 0) { __pyx_t_196 += __pyx_pybuffernd_d2.diminfo[1].shape; if (unlikely(__pyx_t_196 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_196 >= __pyx_pybuffernd_d2.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 689; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_w1 = (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_d2.rcbuffer->pybuffer.buf, __pyx_t_195, __pyx_pybuffernd_d2.diminfo[0].strides, __pyx_t_196, __pyx_pybuffernd_d2.diminfo[1].strides)); /* "nipy/algorithms/statistics/intvol.pyx":690 * v1 = pindex + d2[l,1] * w1 = d2[l,3] * m = m * fpmask[v1] # <<<<<<<<<<<<<< * l1 = l1 + m * mu1_edge(D[w0,w0], D[w0,w1], D[w1,w1]) * */ __pyx_t_197 = __pyx_v_v1; __pyx_t_8 = -1; if (__pyx_t_197 < 0) { __pyx_t_197 += __pyx_pybuffernd_fpmask.diminfo[0].shape; if (unlikely(__pyx_t_197 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_197 >= __pyx_pybuffernd_fpmask.diminfo[0].shape)) __pyx_t_8 = 0; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 690; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_m = (__pyx_v_m * (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_fpmask.rcbuffer->pybuffer.buf, __pyx_t_197, __pyx_pybuffernd_fpmask.diminfo[0].strides))); /* "nipy/algorithms/statistics/intvol.pyx":691 * w1 = d2[l,3] * m = m * fpmask[v1] * l1 = l1 + m * mu1_edge(D[w0,w0], D[w0,w1], D[w1,w1]) # <<<<<<<<<<<<<< * * l0 = l0 - m */ __pyx_t_198 = __pyx_v_w0; __pyx_t_199 = __pyx_v_w0; __pyx_t_8 = -1; if (__pyx_t_198 < 0) { __pyx_t_198 += __pyx_pybuffernd_D.diminfo[0].shape; if (unlikely(__pyx_t_198 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_198 >= __pyx_pybuffernd_D.diminfo[0].shape)) __pyx_t_8 = 0; 
if (__pyx_t_199 < 0) { __pyx_t_199 += __pyx_pybuffernd_D.diminfo[1].shape; if (unlikely(__pyx_t_199 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_199 >= __pyx_pybuffernd_D.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 691; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_200 = __pyx_v_w0; __pyx_t_201 = __pyx_v_w1; __pyx_t_8 = -1; if (__pyx_t_200 < 0) { __pyx_t_200 += __pyx_pybuffernd_D.diminfo[0].shape; if (unlikely(__pyx_t_200 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_200 >= __pyx_pybuffernd_D.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_201 < 0) { __pyx_t_201 += __pyx_pybuffernd_D.diminfo[1].shape; if (unlikely(__pyx_t_201 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_201 >= __pyx_pybuffernd_D.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 691; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_202 = __pyx_v_w1; __pyx_t_203 = __pyx_v_w1; __pyx_t_8 = -1; if (__pyx_t_202 < 0) { __pyx_t_202 += __pyx_pybuffernd_D.diminfo[0].shape; if (unlikely(__pyx_t_202 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_202 >= __pyx_pybuffernd_D.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_203 < 0) { __pyx_t_203 += __pyx_pybuffernd_D.diminfo[1].shape; if (unlikely(__pyx_t_203 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_203 >= __pyx_pybuffernd_D.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 691; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_l1 = (__pyx_v_l1 + (__pyx_v_m * __pyx_f_4nipy_10algorithms_10statistics_6intvol_mu1_edge((*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_198, __pyx_pybuffernd_D.diminfo[0].strides, __pyx_t_199, __pyx_pybuffernd_D.diminfo[1].strides)), (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_200, __pyx_pybuffernd_D.diminfo[0].strides, __pyx_t_201, __pyx_pybuffernd_D.diminfo[1].strides)), (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_202, __pyx_pybuffernd_D.diminfo[0].strides, __pyx_t_203, __pyx_pybuffernd_D.diminfo[1].strides)), 0))); /* "nipy/algorithms/statistics/intvol.pyx":693 * l1 = l1 + m * mu1_edge(D[w0,w0], D[w0,w1], D[w1,w1]) * * l0 = l0 - m # <<<<<<<<<<<<<< * * l0 += mask.sum() */ __pyx_v_l0 = (__pyx_v_l0 - __pyx_v_m); goto __pyx_L46; } __pyx_L46:; } } } } /* "nipy/algorithms/statistics/intvol.pyx":695 * l0 = l0 - m * * l0 += mask.sum() # <<<<<<<<<<<<<< * return np.array([l0, l1, l2, l3]) * */ __pyx_t_5 = PyFloat_FromDouble(__pyx_v_l0); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 695; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_38 = PyObject_GetAttr(__pyx_v_mask, __pyx_n_s__sum); if (unlikely(!__pyx_t_38)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 695; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_38); __pyx_t_1 = PyObject_Call(__pyx_t_38, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 695; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_38); __pyx_t_38 = 0; __pyx_t_38 = PyNumber_InPlaceAdd(__pyx_t_5, __pyx_t_1); if (unlikely(!__pyx_t_38)) {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 695; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_38); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_204 = __pyx_PyFloat_AsDouble(__pyx_t_38); if (unlikely((__pyx_t_204 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 695; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_38); __pyx_t_38 = 0; __pyx_v_l0 = __pyx_t_204; /* "nipy/algorithms/statistics/intvol.pyx":696 * * l0 += mask.sum() * return np.array([l0, l1, l2, l3]) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_38 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_38)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 696; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_38); __pyx_t_1 = PyObject_GetAttr(__pyx_t_38, __pyx_n_s__array); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 696; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_38); __pyx_t_38 = 0; __pyx_t_38 = PyFloat_FromDouble(__pyx_v_l0); if (unlikely(!__pyx_t_38)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 696; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_38); __pyx_t_5 = PyFloat_FromDouble(__pyx_v_l1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 696; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_37 = PyFloat_FromDouble(__pyx_v_l2); if (unlikely(!__pyx_t_37)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 696; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_37); __pyx_t_36 = PyFloat_FromDouble(__pyx_v_l3); if (unlikely(!__pyx_t_36)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 696; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_36); __pyx_t_35 = PyList_New(4); if (unlikely(!__pyx_t_35)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 696; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_35); PyList_SET_ITEM(__pyx_t_35, 0, __pyx_t_38); __Pyx_GIVEREF(__pyx_t_38); PyList_SET_ITEM(__pyx_t_35, 1, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); PyList_SET_ITEM(__pyx_t_35, 2, __pyx_t_37); __Pyx_GIVEREF(__pyx_t_37); PyList_SET_ITEM(__pyx_t_35, 3, __pyx_t_36); __Pyx_GIVEREF(__pyx_t_36); __pyx_t_38 = 0; __pyx_t_5 = 0; __pyx_t_37 = 0; __pyx_t_36 = 0; __pyx_t_36 = PyTuple_New(1); if (unlikely(!__pyx_t_36)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 696; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_36); PyTuple_SET_ITEM(__pyx_t_36, 0, ((PyObject *)__pyx_t_35)); __Pyx_GIVEREF(((PyObject *)__pyx_t_35)); __pyx_t_35 = 0; __pyx_t_35 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_36), NULL); if (unlikely(!__pyx_t_35)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 696; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_35); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_36)); __pyx_t_36 = 0; __pyx_r = __pyx_t_35; __pyx_t_35 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_20); __Pyx_XDECREF(__pyx_t_34); __Pyx_XDECREF(__pyx_t_35); __Pyx_XDECREF(__pyx_t_36); __Pyx_XDECREF(__pyx_t_37); __Pyx_XDECREF(__pyx_t_38); { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_D.rcbuffer->pybuffer); 
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_coords_c.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_cvertices.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_d2.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_d3.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_d4.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_dstrides.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_fcoords.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_fmask.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_fpmask.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_m2.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_m3.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_m4.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_mask_c.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_pmask.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_strides.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("nipy.algorithms.statistics.intvol.Lips3d", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_D.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_coords_c.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_cvertices.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_d2.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_d3.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_d4.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_dstrides.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_fcoords.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_fmask.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_fpmask.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_m2.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_m3.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_m4.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_mask_c.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_pmask.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_strides.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XDECREF(__pyx_v_value); __Pyx_XDECREF((PyObject *)__pyx_v_coords_c); __Pyx_XDECREF((PyObject *)__pyx_v_mask_c); __Pyx_XDECREF((PyObject *)__pyx_v_fcoords); __Pyx_XDECREF((PyObject *)__pyx_v_D); __Pyx_XDECREF((PyObject *)__pyx_v_fmask); __Pyx_XDECREF((PyObject *)__pyx_v_fpmask); __Pyx_XDECREF((PyObject *)__pyx_v_pmask); __Pyx_XDECREF((PyObject *)__pyx_v_d4); __Pyx_XDECREF((PyObject *)__pyx_v_m4); __Pyx_XDECREF((PyObject *)__pyx_v_d3); __Pyx_XDECREF((PyObject *)__pyx_v_m3); __Pyx_XDECREF((PyObject *)__pyx_v_d2); __Pyx_XDECREF((PyObject *)__pyx_v_m2); __Pyx_XDECREF((PyObject *)__pyx_v_cvertices); __Pyx_XDECREF(__pyx_v_pmask_shape); __Pyx_XDECREF((PyObject *)__pyx_v_strides); __Pyx_XDECREF((PyObject *)__pyx_v_dstrides); __Pyx_XDECREF(__pyx_v_ss0d); __Pyx_XDECREF(__pyx_v_ss1d); __Pyx_XDECREF(__pyx_v_ss2d); __Pyx_XDECREF(__pyx_v_verts); __Pyx_XDECREF(__pyx_v_union); __Pyx_XDECREF(__pyx_v_c); __Pyx_XDECREF(__pyx_v_v); __Pyx_XDECREF(__pyx_v_coords); __Pyx_XDECREF(__pyx_v_mask); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_10algorithms_10statistics_6intvol_19_convert_stride3(PyObject *__pyx_self, PyObject 
*__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_10algorithms_10statistics_6intvol_18_convert_stride3[] = "\n Take a voxel, expressed as in index in stride1 and\n re-express it as an index in stride2\n "; static PyMethodDef __pyx_mdef_4nipy_10algorithms_10statistics_6intvol_19_convert_stride3 = {__Pyx_NAMESTR("_convert_stride3"), (PyCFunction)__pyx_pw_4nipy_10algorithms_10statistics_6intvol_19_convert_stride3, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4nipy_10algorithms_10statistics_6intvol_18_convert_stride3)}; static PyObject *__pyx_pw_4nipy_10algorithms_10statistics_6intvol_19_convert_stride3(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_v = 0; PyObject *__pyx_v_stride1 = 0; PyObject *__pyx_v_stride2 = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("_convert_stride3 (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__v,&__pyx_n_s__stride1,&__pyx_n_s__stride2,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__v)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__stride1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_convert_stride3", 1, 3, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 699; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__stride2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_convert_stride3", 1, 3, 3, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 699; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "_convert_stride3") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 699; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v_v = values[0]; __pyx_v_stride1 = values[1]; __pyx_v_stride2 = values[2]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("_convert_stride3", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 699; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.algorithms.statistics.intvol._convert_stride3", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4nipy_10algorithms_10statistics_6intvol_18_convert_stride3(__pyx_self, __pyx_v_v, __pyx_v_stride1, __pyx_v_stride2); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/algorithms/statistics/intvol.pyx":699 * * * def _convert_stride3(v, stride1, stride2): # <<<<<<<<<<<<<< * """ * Take a voxel, expressed as in index in stride1 and */ static PyObject 
*__pyx_pf_4nipy_10algorithms_10statistics_6intvol_18_convert_stride3(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_v, PyObject *__pyx_v_stride1, PyObject *__pyx_v_stride2) { PyObject *__pyx_v_v0 = NULL; PyObject *__pyx_v_v1 = NULL; PyObject *__pyx_v_v2 = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_convert_stride3", 0); __Pyx_INCREF(__pyx_v_v); /* "nipy/algorithms/statistics/intvol.pyx":704 * re-express it as an index in stride2 * """ * v0 = v // stride1[0] # <<<<<<<<<<<<<< * v -= v0 * stride1[0] * v1 = v // stride1[1] */ __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_stride1, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 704; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyNumber_FloorDivide(__pyx_v_v, __pyx_t_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 704; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_v0 = __pyx_t_2; __pyx_t_2 = 0; /* "nipy/algorithms/statistics/intvol.pyx":705 * """ * v0 = v // stride1[0] * v -= v0 * stride1[0] # <<<<<<<<<<<<<< * v1 = v // stride1[1] * v2 = v - v1 * stride1[1] */ __pyx_t_2 = __Pyx_GetItemInt(__pyx_v_stride1, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 705; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = PyNumber_Multiply(__pyx_v_v0, __pyx_t_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 705; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyNumber_InPlaceSubtract(__pyx_v_v, __pyx_t_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 705; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_v_v); __pyx_v_v = __pyx_t_2; __pyx_t_2 = 0; /* "nipy/algorithms/statistics/intvol.pyx":706 * v0 = v // stride1[0] * v -= v0 * stride1[0] * v1 = v // stride1[1] # <<<<<<<<<<<<<< * v2 = v - v1 * stride1[1] * return v0*stride2[0] + v1*stride2[1] + v2*stride2[2] */ __pyx_t_2 = __Pyx_GetItemInt(__pyx_v_stride1, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 706; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = PyNumber_FloorDivide(__pyx_v_v, __pyx_t_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 706; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v_v1 = __pyx_t_1; __pyx_t_1 = 0; /* "nipy/algorithms/statistics/intvol.pyx":707 * v -= v0 * stride1[0] * v1 = v // stride1[1] * v2 = v - v1 * stride1[1] # <<<<<<<<<<<<<< * return v0*stride2[0] + v1*stride2[1] + v2*stride2[2] * */ __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_stride1, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 707; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyNumber_Multiply(__pyx_v_v1, __pyx_t_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 707; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); 
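/* Worked example of the _convert_stride3 logic being generated here (values are
 * illustrative only, not taken from the library): for a C-contiguous array of
 * shape (3, 4, 5) the element strides are stride1 = (20, 5, 1).  A flat index
 * v = 47 decomposes as
 *     v0 = 47 // 20 = 2   (remainder 7)
 *     v1 =  7 //  5 = 1
 *     v2 =  7 -  5  = 2
 * i.e. voxel (2, 1, 2).  Re-expressed with the strides of, say, the zero-padded
 * shape (4, 5, 6), stride2 = (30, 6, 1), the same voxel has flat index
 * 2*30 + 1*6 + 2*1 = 68.
 */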
__pyx_t_1 = 0; __pyx_t_1 = PyNumber_Subtract(__pyx_v_v, __pyx_t_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 707; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v_v2 = __pyx_t_1; __pyx_t_1 = 0; /* "nipy/algorithms/statistics/intvol.pyx":708 * v1 = v // stride1[1] * v2 = v - v1 * stride1[1] * return v0*stride2[0] + v1*stride2[1] + v2*stride2[2] # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_stride2, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 708; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyNumber_Multiply(__pyx_v_v0, __pyx_t_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 708; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_stride2, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 708; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = PyNumber_Multiply(__pyx_v_v1, __pyx_t_1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 708; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyNumber_Add(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 708; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_GetItemInt(__pyx_v_stride2, 2, sizeof(long), PyInt_FromLong); if (!__pyx_t_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 708; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyNumber_Multiply(__pyx_v_v2, __pyx_t_3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 708; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyNumber_Add(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 708; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("nipy.algorithms.statistics.intvol._convert_stride3", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_v0); __Pyx_XDECREF(__pyx_v_v1); __Pyx_XDECREF(__pyx_v_v2); __Pyx_XDECREF(__pyx_v_v); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_10algorithms_10statistics_6intvol_21_convert_stride2(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_10algorithms_10statistics_6intvol_20_convert_stride2[] = "\n Take a voxel, expressed as in index in stride1 and\n re-express it as an index in stride2\n "; static PyMethodDef __pyx_mdef_4nipy_10algorithms_10statistics_6intvol_21_convert_stride2 = {__Pyx_NAMESTR("_convert_stride2"), (PyCFunction)__pyx_pw_4nipy_10algorithms_10statistics_6intvol_21_convert_stride2, 
METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4nipy_10algorithms_10statistics_6intvol_20_convert_stride2)}; static PyObject *__pyx_pw_4nipy_10algorithms_10statistics_6intvol_21_convert_stride2(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_v = 0; PyObject *__pyx_v_stride1 = 0; PyObject *__pyx_v_stride2 = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("_convert_stride2 (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__v,&__pyx_n_s__stride1,&__pyx_n_s__stride2,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__v)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__stride1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_convert_stride2", 1, 3, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 711; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__stride2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_convert_stride2", 1, 3, 3, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 711; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "_convert_stride2") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 711; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v_v = values[0]; __pyx_v_stride1 = values[1]; __pyx_v_stride2 = values[2]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("_convert_stride2", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 711; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.algorithms.statistics.intvol._convert_stride2", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4nipy_10algorithms_10statistics_6intvol_20_convert_stride2(__pyx_self, __pyx_v_v, __pyx_v_stride1, __pyx_v_stride2); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/algorithms/statistics/intvol.pyx":711 * * * def _convert_stride2(v, stride1, stride2): # <<<<<<<<<<<<<< * """ * Take a voxel, expressed as in index in stride1 and */ static PyObject *__pyx_pf_4nipy_10algorithms_10statistics_6intvol_20_convert_stride2(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_v, PyObject *__pyx_v_stride1, PyObject *__pyx_v_stride2) { PyObject *__pyx_v_v0 = NULL; PyObject *__pyx_v_v1 = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_convert_stride2", 0); 
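/* _convert_stride2 below (and _convert_stride1 after it) are the 2-d and 1-d
 * analogues of _convert_stride3: decompose the flat index v with the first stride
 * set, then recombine the per-axis indices with the second.  A small hypothetical
 * 2-d example: stride1 = (5, 1) for shape (4, 5) and v = 13 give v0 = 2 and
 * v1 = 3; with stride2 = (6, 1) the re-expressed index is 2*6 + 3 = 15.
 */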
/* "nipy/algorithms/statistics/intvol.pyx":716 * re-express it as an index in stride2 * """ * v0 = v // stride1[0] # <<<<<<<<<<<<<< * v1 = v - v0 * stride1[0] * return v0*stride2[0] + v1*stride2[1] */ __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_stride1, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 716; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyNumber_FloorDivide(__pyx_v_v, __pyx_t_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 716; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_v0 = __pyx_t_2; __pyx_t_2 = 0; /* "nipy/algorithms/statistics/intvol.pyx":717 * """ * v0 = v // stride1[0] * v1 = v - v0 * stride1[0] # <<<<<<<<<<<<<< * return v0*stride2[0] + v1*stride2[1] * */ __pyx_t_2 = __Pyx_GetItemInt(__pyx_v_stride1, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 717; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = PyNumber_Multiply(__pyx_v_v0, __pyx_t_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 717; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyNumber_Subtract(__pyx_v_v, __pyx_t_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 717; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_v1 = __pyx_t_2; __pyx_t_2 = 0; /* "nipy/algorithms/statistics/intvol.pyx":718 * v0 = v // stride1[0] * v1 = v - v0 * stride1[0] * return v0*stride2[0] + v1*stride2[1] # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_GetItemInt(__pyx_v_stride2, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 718; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = PyNumber_Multiply(__pyx_v_v0, __pyx_t_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 718; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_GetItemInt(__pyx_v_stride2, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 718; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_Multiply(__pyx_v_v1, __pyx_t_2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 718; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyNumber_Add(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 718; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("nipy.algorithms.statistics.intvol._convert_stride2", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_v0); __Pyx_XDECREF(__pyx_v_v1); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject 
*__pyx_pw_4nipy_10algorithms_10statistics_6intvol_23_convert_stride1(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_10algorithms_10statistics_6intvol_22_convert_stride1[] = "\n Take a voxel, expressed as in index in stride1 and\n re-express it as an index in stride2\n "; static PyMethodDef __pyx_mdef_4nipy_10algorithms_10statistics_6intvol_23_convert_stride1 = {__Pyx_NAMESTR("_convert_stride1"), (PyCFunction)__pyx_pw_4nipy_10algorithms_10statistics_6intvol_23_convert_stride1, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4nipy_10algorithms_10statistics_6intvol_22_convert_stride1)}; static PyObject *__pyx_pw_4nipy_10algorithms_10statistics_6intvol_23_convert_stride1(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_v = 0; PyObject *__pyx_v_stride1 = 0; PyObject *__pyx_v_stride2 = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("_convert_stride1 (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__v,&__pyx_n_s__stride1,&__pyx_n_s__stride2,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__v)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__stride1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_convert_stride1", 1, 3, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 721; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__stride2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_convert_stride1", 1, 3, 3, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 721; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "_convert_stride1") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 721; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v_v = values[0]; __pyx_v_stride1 = values[1]; __pyx_v_stride2 = values[2]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("_convert_stride1", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 721; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.algorithms.statistics.intvol._convert_stride1", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4nipy_10algorithms_10statistics_6intvol_22_convert_stride1(__pyx_self, __pyx_v_v, __pyx_v_stride1, __pyx_v_stride2); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/algorithms/statistics/intvol.pyx":721 * * * def _convert_stride1(v, stride1, stride2): # <<<<<<<<<<<<<< * """ * Take a voxel, expressed as in index in 
stride1 and */ static PyObject *__pyx_pf_4nipy_10algorithms_10statistics_6intvol_22_convert_stride1(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_v, PyObject *__pyx_v_stride1, PyObject *__pyx_v_stride2) { PyObject *__pyx_v_v0 = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_convert_stride1", 0); /* "nipy/algorithms/statistics/intvol.pyx":726 * re-express it as an index in stride2 * """ * v0 = v // stride1[0] # <<<<<<<<<<<<<< * return v0 * stride2[0] * */ __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_stride1, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 726; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyNumber_FloorDivide(__pyx_v_v, __pyx_t_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 726; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_v0 = __pyx_t_2; __pyx_t_2 = 0; /* "nipy/algorithms/statistics/intvol.pyx":727 * """ * v0 = v // stride1[0] * return v0 * stride2[0] # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_GetItemInt(__pyx_v_stride2, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 727; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = PyNumber_Multiply(__pyx_v_v0, __pyx_t_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 727; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("nipy.algorithms.statistics.intvol._convert_stride1", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_v0); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_10algorithms_10statistics_6intvol_25Lips2d(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_10algorithms_10statistics_6intvol_24Lips2d[] = " Estimate intrinsic volumes for 2d region in `mask` given `coords`\n\n Given a 2d `mask` and coordinates `coords`, estimate the intrinsic volumes\n of the masked region. The region is broken up into triangles / edges /\n vertices, which are included based on whether all voxels in the triangle /\n edge / vertex are in the mask or not.\n\n Parameters\n ----------\n coords : ndarray((N,i,j,k))\n Coordinates for the voxels in the mask. ``N`` will often be 2 (for 2\n dimensional coordinates, but can be any integer > 0\n mask : ndarray((i,j), np.int)\n Binary mask determining whether or not a voxel is in the mask.\n\n Returns\n -------\n mu : ndarray\n Array of intrinsic volumes [mu0, mu1, mu2], being, respectively:\n #. Euler characteristic\n #. 2 * mean caliper diameter\n #. Area.\n\n Notes\n -----\n The array mask is assumed to be binary. At the time of writing, it\n is not clear how to get cython to use np.bool arrays.\n\n References\n ----------\n Taylor, J.E. & Worsley, K.J. (2007). 
\"Detecting sparse signal in random fields,\n with an application to brain mapping.\"\n Journal of the American Statistical Association, 102(479):913-928.\n "; static PyMethodDef __pyx_mdef_4nipy_10algorithms_10statistics_6intvol_25Lips2d = {__Pyx_NAMESTR("Lips2d"), (PyCFunction)__pyx_pw_4nipy_10algorithms_10statistics_6intvol_25Lips2d, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4nipy_10algorithms_10statistics_6intvol_24Lips2d)}; static PyObject *__pyx_pw_4nipy_10algorithms_10statistics_6intvol_25Lips2d(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_coords = 0; PyObject *__pyx_v_mask = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("Lips2d (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__coords,&__pyx_n_s__mask,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__coords)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__mask)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("Lips2d", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 730; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "Lips2d") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 730; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_coords = values[0]; __pyx_v_mask = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("Lips2d", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 730; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.algorithms.statistics.intvol.Lips2d", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4nipy_10algorithms_10statistics_6intvol_24Lips2d(__pyx_self, __pyx_v_coords, __pyx_v_mask); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/algorithms/statistics/intvol.pyx":730 * * * def Lips2d(coords, mask): # <<<<<<<<<<<<<< * """ Estimate intrinsic volumes for 2d region in `mask` given `coords` * */ static PyObject *__pyx_pf_4nipy_10algorithms_10statistics_6intvol_24Lips2d(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_coords, PyObject *__pyx_v_mask) { PyObject *__pyx_v_value = NULL; CYTHON_UNUSED PyArrayObject *__pyx_v_coords_c = 0; PyArrayObject *__pyx_v_mask_c = 0; PyArrayObject *__pyx_v_fcoords = 0; PyArrayObject *__pyx_v_D = 0; PyArrayObject *__pyx_v_fmask = 0; PyArrayObject *__pyx_v_fpmask = 0; PyArrayObject *__pyx_v_pmask = 0; PyArrayObject *__pyx_v_d3 = 0; PyArrayObject *__pyx_v_d2 = 0; PyArrayObject *__pyx_v_cvertices = 0; npy_intp __pyx_v_i; npy_intp __pyx_v_j; npy_intp __pyx_v_l; npy_intp __pyx_v_r; npy_intp __pyx_v_s; npy_intp __pyx_v_rr; npy_intp __pyx_v_ss; npy_intp __pyx_v_mr; 
npy_intp __pyx_v_ms; npy_intp __pyx_v_s0; npy_intp __pyx_v_s1; npy_intp __pyx_v_ds2; npy_intp __pyx_v_ds3; npy_intp __pyx_v_index; npy_intp __pyx_v_m; npy_intp __pyx_v_npix; npy_intp __pyx_v_ss0; npy_intp __pyx_v_ss1; npy_intp __pyx_v_ss0d; npy_intp __pyx_v_ss1d; npy_intp __pyx_v_v0; npy_intp __pyx_v_v1; npy_intp __pyx_v_v2; double __pyx_v_l0; double __pyx_v_l1; double __pyx_v_l2; double __pyx_v_res; PyObject *__pyx_v_pmask_shape = NULL; PyArrayObject *__pyx_v_strides = 0; PyArrayObject *__pyx_v_dstrides = 0; PyObject *__pyx_v_verts = NULL; PyObject *__pyx_v_union = NULL; PyObject *__pyx_v_c = NULL; PyObject *__pyx_v_m3 = NULL; PyObject *__pyx_v_m2 = NULL; PyObject *__pyx_v_pindex = NULL; PyObject *__pyx_v_w0 = NULL; PyObject *__pyx_v_w1 = NULL; PyObject *__pyx_v_w2 = NULL; PyObject *__pyx_v_v = NULL; __Pyx_LocalBuf_ND __pyx_pybuffernd_D; __Pyx_Buffer __pyx_pybuffer_D; __Pyx_LocalBuf_ND __pyx_pybuffernd_coords_c; __Pyx_Buffer __pyx_pybuffer_coords_c; __Pyx_LocalBuf_ND __pyx_pybuffernd_cvertices; __Pyx_Buffer __pyx_pybuffer_cvertices; __Pyx_LocalBuf_ND __pyx_pybuffernd_d2; __Pyx_Buffer __pyx_pybuffer_d2; __Pyx_LocalBuf_ND __pyx_pybuffernd_d3; __Pyx_Buffer __pyx_pybuffer_d3; __Pyx_LocalBuf_ND __pyx_pybuffernd_dstrides; __Pyx_Buffer __pyx_pybuffer_dstrides; __Pyx_LocalBuf_ND __pyx_pybuffernd_fcoords; __Pyx_Buffer __pyx_pybuffer_fcoords; __Pyx_LocalBuf_ND __pyx_pybuffernd_fmask; __Pyx_Buffer __pyx_pybuffer_fmask; __Pyx_LocalBuf_ND __pyx_pybuffernd_fpmask; __Pyx_Buffer __pyx_pybuffer_fpmask; __Pyx_LocalBuf_ND __pyx_pybuffernd_mask_c; __Pyx_Buffer __pyx_pybuffer_mask_c; __Pyx_LocalBuf_ND __pyx_pybuffernd_pmask; __Pyx_Buffer __pyx_pybuffer_pmask; __Pyx_LocalBuf_ND __pyx_pybuffernd_strides; __Pyx_Buffer __pyx_pybuffer_strides; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; int __pyx_t_6; PyArrayObject *__pyx_t_7 = NULL; int __pyx_t_8; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; PyArrayObject *__pyx_t_12 = NULL; PyArrayObject *__pyx_t_13 = NULL; npy_intp __pyx_t_14; npy_intp __pyx_t_15; PyArrayObject *__pyx_t_16 = NULL; PyArrayObject *__pyx_t_17 = NULL; PyArrayObject *__pyx_t_18 = NULL; PyObject *__pyx_t_19 = NULL; PyArrayObject *__pyx_t_20 = NULL; PyArrayObject *__pyx_t_21 = NULL; long __pyx_t_22; __pyx_t_5numpy_intp_t __pyx_t_23; long __pyx_t_24; __pyx_t_5numpy_intp_t __pyx_t_25; long __pyx_t_26; long __pyx_t_27; int __pyx_t_28; PyArrayObject *__pyx_t_29 = NULL; PyObject *__pyx_t_30 = NULL; long __pyx_t_31; Py_ssize_t __pyx_t_32; PyObject *(*__pyx_t_33)(PyObject *); PyObject *__pyx_t_34 = NULL; PyArrayObject *__pyx_t_35 = NULL; PyArrayObject *__pyx_t_36 = NULL; PyArrayObject *__pyx_t_37 = NULL; long __pyx_t_38; npy_intp __pyx_t_39; npy_intp __pyx_t_40; npy_intp __pyx_t_41; long __pyx_t_42; npy_intp __pyx_t_43; npy_intp __pyx_t_44; npy_intp __pyx_t_45; npy_intp __pyx_t_46; npy_intp __pyx_t_47; npy_intp __pyx_t_48; npy_intp __pyx_t_49; npy_intp __pyx_t_50; npy_intp __pyx_t_51; npy_intp __pyx_t_52; npy_intp __pyx_t_53; npy_intp __pyx_t_54; npy_intp __pyx_t_55; npy_intp __pyx_t_56; npy_intp __pyx_t_57; npy_intp __pyx_t_58; npy_intp __pyx_t_59; long __pyx_t_60; npy_intp __pyx_t_61; npy_intp __pyx_t_62; long __pyx_t_63; npy_intp __pyx_t_64; long __pyx_t_65; npy_intp __pyx_t_66; long __pyx_t_67; npy_intp __pyx_t_68; long __pyx_t_69; npy_intp __pyx_t_70; npy_intp __pyx_t_71; double __pyx_t_72; double __pyx_t_73; double __pyx_t_74; 
double __pyx_t_75; double __pyx_t_76; double __pyx_t_77; npy_intp __pyx_t_78; long __pyx_t_79; npy_intp __pyx_t_80; long __pyx_t_81; npy_intp __pyx_t_82; npy_intp __pyx_t_83; long __pyx_t_84; npy_intp __pyx_t_85; long __pyx_t_86; npy_intp __pyx_t_87; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("Lips2d", 0); __Pyx_INCREF(__pyx_v_coords); __Pyx_INCREF(__pyx_v_mask); __pyx_pybuffer_coords_c.pybuffer.buf = NULL; __pyx_pybuffer_coords_c.refcount = 0; __pyx_pybuffernd_coords_c.data = NULL; __pyx_pybuffernd_coords_c.rcbuffer = &__pyx_pybuffer_coords_c; __pyx_pybuffer_mask_c.pybuffer.buf = NULL; __pyx_pybuffer_mask_c.refcount = 0; __pyx_pybuffernd_mask_c.data = NULL; __pyx_pybuffernd_mask_c.rcbuffer = &__pyx_pybuffer_mask_c; __pyx_pybuffer_fcoords.pybuffer.buf = NULL; __pyx_pybuffer_fcoords.refcount = 0; __pyx_pybuffernd_fcoords.data = NULL; __pyx_pybuffernd_fcoords.rcbuffer = &__pyx_pybuffer_fcoords; __pyx_pybuffer_D.pybuffer.buf = NULL; __pyx_pybuffer_D.refcount = 0; __pyx_pybuffernd_D.data = NULL; __pyx_pybuffernd_D.rcbuffer = &__pyx_pybuffer_D; __pyx_pybuffer_fmask.pybuffer.buf = NULL; __pyx_pybuffer_fmask.refcount = 0; __pyx_pybuffernd_fmask.data = NULL; __pyx_pybuffernd_fmask.rcbuffer = &__pyx_pybuffer_fmask; __pyx_pybuffer_fpmask.pybuffer.buf = NULL; __pyx_pybuffer_fpmask.refcount = 0; __pyx_pybuffernd_fpmask.data = NULL; __pyx_pybuffernd_fpmask.rcbuffer = &__pyx_pybuffer_fpmask; __pyx_pybuffer_pmask.pybuffer.buf = NULL; __pyx_pybuffer_pmask.refcount = 0; __pyx_pybuffernd_pmask.data = NULL; __pyx_pybuffernd_pmask.rcbuffer = &__pyx_pybuffer_pmask; __pyx_pybuffer_d3.pybuffer.buf = NULL; __pyx_pybuffer_d3.refcount = 0; __pyx_pybuffernd_d3.data = NULL; __pyx_pybuffernd_d3.rcbuffer = &__pyx_pybuffer_d3; __pyx_pybuffer_d2.pybuffer.buf = NULL; __pyx_pybuffer_d2.refcount = 0; __pyx_pybuffernd_d2.data = NULL; __pyx_pybuffernd_d2.rcbuffer = &__pyx_pybuffer_d2; __pyx_pybuffer_cvertices.pybuffer.buf = NULL; __pyx_pybuffer_cvertices.refcount = 0; __pyx_pybuffernd_cvertices.data = NULL; __pyx_pybuffernd_cvertices.rcbuffer = &__pyx_pybuffer_cvertices; __pyx_pybuffer_strides.pybuffer.buf = NULL; __pyx_pybuffer_strides.refcount = 0; __pyx_pybuffernd_strides.data = NULL; __pyx_pybuffernd_strides.rcbuffer = &__pyx_pybuffer_strides; __pyx_pybuffer_dstrides.pybuffer.buf = NULL; __pyx_pybuffer_dstrides.refcount = 0; __pyx_pybuffernd_dstrides.data = NULL; __pyx_pybuffernd_dstrides.rcbuffer = &__pyx_pybuffer_dstrides; /* "nipy/algorithms/statistics/intvol.pyx":765 * Journal of the American Statistical Association, 102(479):913-928. 
* """ * if mask.shape != coords.shape[1:]: # <<<<<<<<<<<<<< * raise ValueError('shape of mask does not match coordinates') * # if the data can be squeezed, we must use the lower dimensional function */ __pyx_t_1 = PyObject_GetAttr(__pyx_v_mask, __pyx_n_s__shape); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 765; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_GetAttr(__pyx_v_coords, __pyx_n_s__shape); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 765; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PySequence_GetSlice(__pyx_t_2, 1, PY_SSIZE_T_MAX); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 765; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyObject_RichCompare(__pyx_t_1, __pyx_t_3, Py_NE); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 765; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 765; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (__pyx_t_4) { /* "nipy/algorithms/statistics/intvol.pyx":766 * """ * if mask.shape != coords.shape[1:]: * raise ValueError('shape of mask does not match coordinates') # <<<<<<<<<<<<<< * # if the data can be squeezed, we must use the lower dimensional function * mask = np.squeeze(mask) */ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_40), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 766; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; {__pyx_filename = __pyx_f[0]; __pyx_lineno = 766; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L3; } __pyx_L3:; /* "nipy/algorithms/statistics/intvol.pyx":768 * raise ValueError('shape of mask does not match coordinates') * # if the data can be squeezed, we must use the lower dimensional function * mask = np.squeeze(mask) # <<<<<<<<<<<<<< * if mask.ndim == 1: * value = np.zeros(3) */ __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 768; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__squeeze); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 768; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 768; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_v_mask); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_mask); __Pyx_GIVEREF(__pyx_v_mask); __pyx_t_1 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 768; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_v_mask); __pyx_v_mask = __pyx_t_1; __pyx_t_1 = 0; /* "nipy/algorithms/statistics/intvol.pyx":769 
* # if the data can be squeezed, we must use the lower dimensional function * mask = np.squeeze(mask) * if mask.ndim == 1: # <<<<<<<<<<<<<< * value = np.zeros(3) * coords = coords.reshape((coords.shape[0],) + mask.shape) */ __pyx_t_1 = PyObject_GetAttr(__pyx_v_mask, __pyx_n_s__ndim); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 769; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_RichCompare(__pyx_t_1, __pyx_int_1, Py_EQ); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 769; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 769; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (__pyx_t_4) { /* "nipy/algorithms/statistics/intvol.pyx":770 * mask = np.squeeze(mask) * if mask.ndim == 1: * value = np.zeros(3) # <<<<<<<<<<<<<< * coords = coords.reshape((coords.shape[0],) + mask.shape) * value[:2] = Lips1d(coords, mask) */ __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 770; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__zeros); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 770; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_k_tuple_41), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 770; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_value = __pyx_t_2; __pyx_t_2 = 0; /* "nipy/algorithms/statistics/intvol.pyx":771 * if mask.ndim == 1: * value = np.zeros(3) * coords = coords.reshape((coords.shape[0],) + mask.shape) # <<<<<<<<<<<<<< * value[:2] = Lips1d(coords, mask) * return value */ __pyx_t_2 = PyObject_GetAttr(__pyx_v_coords, __pyx_n_s__reshape); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 771; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = PyObject_GetAttr(__pyx_v_coords, __pyx_n_s__shape); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 771; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 771; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 771; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyObject_GetAttr(__pyx_v_mask, __pyx_n_s__shape); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 771; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyNumber_Add(((PyObject *)__pyx_t_1), __pyx_t_3); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 771; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; 
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 771; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 771; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_v_coords); __pyx_v_coords = __pyx_t_5; __pyx_t_5 = 0; /* "nipy/algorithms/statistics/intvol.pyx":772 * value = np.zeros(3) * coords = coords.reshape((coords.shape[0],) + mask.shape) * value[:2] = Lips1d(coords, mask) # <<<<<<<<<<<<<< * return value * */ __pyx_t_5 = __Pyx_GetName(__pyx_m, __pyx_n_s__Lips1d); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 772; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 772; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_coords); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_coords); __Pyx_GIVEREF(__pyx_v_coords); __Pyx_INCREF(__pyx_v_mask); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_v_mask); __Pyx_GIVEREF(__pyx_v_mask); __pyx_t_2 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 772; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; if (__Pyx_PySequence_SetSlice(__pyx_v_value, 0, 2, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 772; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "nipy/algorithms/statistics/intvol.pyx":773 * coords = coords.reshape((coords.shape[0],) + mask.shape) * value[:2] = Lips1d(coords, mask) * return value # <<<<<<<<<<<<<< * * if not set(np.unique(mask)).issubset([0,1]): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_value); __pyx_r = __pyx_v_value; goto __pyx_L0; goto __pyx_L4; } __pyx_L4:; /* "nipy/algorithms/statistics/intvol.pyx":775 * return value * * if not set(np.unique(mask)).issubset([0,1]): # <<<<<<<<<<<<<< * raise ValueError('mask should be filled with 0/1 ' * 'values, but be of type np.int') */ __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 775; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__unique); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 775; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 775; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_v_mask); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_mask); __Pyx_GIVEREF(__pyx_v_mask); __pyx_t_5 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 775; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; 
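/* Input handling in Lips2d (intvol.pyx lines 765-776, quoted in the comments
 * around this code): the mask shape must equal coords.shape[1:], a mask that
 * np.squeeze reduces to 1-d is delegated to Lips1d with the third intrinsic
 * volume left at zero, and the mask may only contain 0/1 values.  For example
 * (hypothetical shapes), a (1, 7) mask squeezes to (7,), so Lips2d would return
 * array([mu0, mu1, 0.0]) with mu0 and mu1 computed by Lips1d.
 */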
__Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 775; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyObject_Call(((PyObject *)((PyObject*)(&PySet_Type))), ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 775; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; __pyx_t_2 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__issubset); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 775; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyList_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 775; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(__pyx_int_0); PyList_SET_ITEM(__pyx_t_5, 0, __pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); __Pyx_INCREF(__pyx_int_1); PyList_SET_ITEM(__pyx_t_5, 1, __pyx_int_1); __Pyx_GIVEREF(__pyx_int_1); __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 775; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_t_5)); __Pyx_GIVEREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; __pyx_t_5 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 775; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 775; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_6 = (!__pyx_t_4); if (__pyx_t_6) { /* "nipy/algorithms/statistics/intvol.pyx":776 * * if not set(np.unique(mask)).issubset([0,1]): * raise ValueError('mask should be filled with 0/1 ' # <<<<<<<<<<<<<< * 'values, but be of type np.int') * */ __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_42), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 776; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; {__pyx_filename = __pyx_f[0]; __pyx_lineno = 776; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L5; } __pyx_L5:; /* "nipy/algorithms/statistics/intvol.pyx":803 * double res * * coords_c = coords # <<<<<<<<<<<<<< * mask_c = mask * l0 = 0; l1 = 0; l2 = 0 */ if (!(likely(((__pyx_v_coords) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_coords, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_7 = ((PyArrayObject *)__pyx_v_coords); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_coords_c.rcbuffer->pybuffer); __pyx_t_8 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_coords_c.rcbuffer->pybuffer, (PyObject*)__pyx_t_7, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES, 3, 0, __pyx_stack); if (unlikely(__pyx_t_8 < 0)) { PyErr_Fetch(&__pyx_t_9, &__pyx_t_10, &__pyx_t_11); if 
(unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_coords_c.rcbuffer->pybuffer, (PyObject*)__pyx_v_coords_c, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES, 3, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_11); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_9, __pyx_t_10, __pyx_t_11); } } __pyx_pybuffernd_coords_c.diminfo[0].strides = __pyx_pybuffernd_coords_c.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_coords_c.diminfo[0].shape = __pyx_pybuffernd_coords_c.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_coords_c.diminfo[1].strides = __pyx_pybuffernd_coords_c.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_coords_c.diminfo[1].shape = __pyx_pybuffernd_coords_c.rcbuffer->pybuffer.shape[1]; __pyx_pybuffernd_coords_c.diminfo[2].strides = __pyx_pybuffernd_coords_c.rcbuffer->pybuffer.strides[2]; __pyx_pybuffernd_coords_c.diminfo[2].shape = __pyx_pybuffernd_coords_c.rcbuffer->pybuffer.shape[2]; if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_7 = 0; __Pyx_INCREF(__pyx_v_coords); __pyx_v_coords_c = ((PyArrayObject *)__pyx_v_coords); /* "nipy/algorithms/statistics/intvol.pyx":804 * * coords_c = coords * mask_c = mask # <<<<<<<<<<<<<< * l0 = 0; l1 = 0; l2 = 0 * */ if (!(likely(((__pyx_v_mask) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_mask, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 804; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_12 = ((PyArrayObject *)__pyx_v_mask); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_mask_c.rcbuffer->pybuffer); __pyx_t_8 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_mask_c.rcbuffer->pybuffer, (PyObject*)__pyx_t_12, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack); if (unlikely(__pyx_t_8 < 0)) { PyErr_Fetch(&__pyx_t_11, &__pyx_t_10, &__pyx_t_9); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_mask_c.rcbuffer->pybuffer, (PyObject*)__pyx_v_mask_c, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_9); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_11, __pyx_t_10, __pyx_t_9); } } __pyx_pybuffernd_mask_c.diminfo[0].strides = __pyx_pybuffernd_mask_c.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_mask_c.diminfo[0].shape = __pyx_pybuffernd_mask_c.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_mask_c.diminfo[1].strides = __pyx_pybuffernd_mask_c.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_mask_c.diminfo[1].shape = __pyx_pybuffernd_mask_c.rcbuffer->pybuffer.shape[1]; if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 804; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_12 = 0; __Pyx_INCREF(__pyx_v_mask); __pyx_v_mask_c = ((PyArrayObject *)__pyx_v_mask); /* "nipy/algorithms/statistics/intvol.pyx":805 * coords_c = coords * mask_c = mask * l0 = 0; l1 = 0; l2 = 0 # <<<<<<<<<<<<<< * * pmask_shape = np.array(mask.shape) + 1 */ __pyx_v_l0 = 0.0; __pyx_v_l1 = 0.0; __pyx_v_l2 = 0.0; /* "nipy/algorithms/statistics/intvol.pyx":807 * l0 = 0; l1 = 0; l2 = 0 * * pmask_shape = np.array(mask.shape) + 1 # <<<<<<<<<<<<<< * pmask = np.zeros(pmask_shape, np.int) * pmask[:-1,:-1] = mask_c */ __pyx_t_5 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 807; 
__pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__array); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 807; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyObject_GetAttr(__pyx_v_mask, __pyx_n_s__shape); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 807; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 807; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 807; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; __pyx_t_2 = PyNumber_Add(__pyx_t_5, __pyx_int_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 807; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_pmask_shape = __pyx_t_2; __pyx_t_2 = 0; /* "nipy/algorithms/statistics/intvol.pyx":808 * * pmask_shape = np.array(mask.shape) + 1 * pmask = np.zeros(pmask_shape, np.int) # <<<<<<<<<<<<<< * pmask[:-1,:-1] = mask_c * */ __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 808; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_5 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__zeros); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 808; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 808; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__int); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 808; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 808; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_v_pmask_shape); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_pmask_shape); __Pyx_GIVEREF(__pyx_v_pmask_shape); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 808; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 808; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_13 = ((PyArrayObject *)__pyx_t_3); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_pmask.rcbuffer->pybuffer); __pyx_t_8 = 
__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_pmask.rcbuffer->pybuffer, (PyObject*)__pyx_t_13, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack); if (unlikely(__pyx_t_8 < 0)) { PyErr_Fetch(&__pyx_t_9, &__pyx_t_10, &__pyx_t_11); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_pmask.rcbuffer->pybuffer, (PyObject*)__pyx_v_pmask, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_11); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_9, __pyx_t_10, __pyx_t_11); } } __pyx_pybuffernd_pmask.diminfo[0].strides = __pyx_pybuffernd_pmask.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_pmask.diminfo[0].shape = __pyx_pybuffernd_pmask.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_pmask.diminfo[1].strides = __pyx_pybuffernd_pmask.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_pmask.diminfo[1].shape = __pyx_pybuffernd_pmask.rcbuffer->pybuffer.shape[1]; if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 808; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_13 = 0; __pyx_v_pmask = ((PyArrayObject *)__pyx_t_3); __pyx_t_3 = 0; /* "nipy/algorithms/statistics/intvol.pyx":809 * pmask_shape = np.array(mask.shape) + 1 * pmask = np.zeros(pmask_shape, np.int) * pmask[:-1,:-1] = mask_c # <<<<<<<<<<<<<< * * s0, s1 = pmask.shape[0], pmask.shape[1] */ if (PyObject_SetItem(((PyObject *)__pyx_v_pmask), ((PyObject *)__pyx_k_tuple_45), ((PyObject *)__pyx_v_mask_c)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 809; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/algorithms/statistics/intvol.pyx":811 * pmask[:-1,:-1] = mask_c * * s0, s1 = pmask.shape[0], pmask.shape[1] # <<<<<<<<<<<<<< * * fpmask = pmask.reshape(-1) */ __pyx_t_14 = (__pyx_v_pmask->dimensions[0]); __pyx_t_15 = (__pyx_v_pmask->dimensions[1]); __pyx_v_s0 = __pyx_t_14; __pyx_v_s1 = __pyx_t_15; /* "nipy/algorithms/statistics/intvol.pyx":813 * s0, s1 = pmask.shape[0], pmask.shape[1] * * fpmask = pmask.reshape(-1) # <<<<<<<<<<<<<< * fmask = mask_c.reshape(-1) * fcoords = coords.reshape((coords.shape[0], -1)) */ __pyx_t_3 = PyObject_GetAttr(((PyObject *)__pyx_v_pmask), __pyx_n_s__reshape); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_k_tuple_46), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_16 = ((PyArrayObject *)__pyx_t_2); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_fpmask.rcbuffer->pybuffer); __pyx_t_8 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_fpmask.rcbuffer->pybuffer, (PyObject*)__pyx_t_16, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_8 < 0)) { PyErr_Fetch(&__pyx_t_11, &__pyx_t_10, &__pyx_t_9); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_fpmask.rcbuffer->pybuffer, (PyObject*)__pyx_v_fpmask, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { 
Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_9); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_11, __pyx_t_10, __pyx_t_9); } } __pyx_pybuffernd_fpmask.diminfo[0].strides = __pyx_pybuffernd_fpmask.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_fpmask.diminfo[0].shape = __pyx_pybuffernd_fpmask.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_16 = 0; __pyx_v_fpmask = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "nipy/algorithms/statistics/intvol.pyx":814 * * fpmask = pmask.reshape(-1) * fmask = mask_c.reshape(-1) # <<<<<<<<<<<<<< * fcoords = coords.reshape((coords.shape[0], -1)) * */ __pyx_t_2 = PyObject_GetAttr(((PyObject *)__pyx_v_mask_c), __pyx_n_s__reshape); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 814; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_k_tuple_47), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 814; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 814; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_17 = ((PyArrayObject *)__pyx_t_3); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_fmask.rcbuffer->pybuffer); __pyx_t_8 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_fmask.rcbuffer->pybuffer, (PyObject*)__pyx_t_17, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_8 < 0)) { PyErr_Fetch(&__pyx_t_9, &__pyx_t_10, &__pyx_t_11); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_fmask.rcbuffer->pybuffer, (PyObject*)__pyx_v_fmask, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_11); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_9, __pyx_t_10, __pyx_t_11); } } __pyx_pybuffernd_fmask.diminfo[0].strides = __pyx_pybuffernd_fmask.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_fmask.diminfo[0].shape = __pyx_pybuffernd_fmask.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 814; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_17 = 0; __pyx_v_fmask = ((PyArrayObject *)__pyx_t_3); __pyx_t_3 = 0; /* "nipy/algorithms/statistics/intvol.pyx":815 * fpmask = pmask.reshape(-1) * fmask = mask_c.reshape(-1) * fcoords = coords.reshape((coords.shape[0], -1)) # <<<<<<<<<<<<<< * * # First do the interior contributions. 
*/ __pyx_t_3 = PyObject_GetAttr(__pyx_v_coords, __pyx_n_s__reshape); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 815; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyObject_GetAttr(__pyx_v_coords, __pyx_n_s__shape); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 815; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_5 = __Pyx_GetItemInt(__pyx_t_2, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 815; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 815; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __Pyx_INCREF(__pyx_int_neg_1); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_int_neg_1); __Pyx_GIVEREF(__pyx_int_neg_1); __pyx_t_5 = 0; __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 815; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_t_2)); __Pyx_GIVEREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 815; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 815; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_18 = ((PyArrayObject *)__pyx_t_2); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_fcoords.rcbuffer->pybuffer); __pyx_t_8 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_fcoords.rcbuffer->pybuffer, (PyObject*)__pyx_t_18, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack); if (unlikely(__pyx_t_8 < 0)) { PyErr_Fetch(&__pyx_t_11, &__pyx_t_10, &__pyx_t_9); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_fcoords.rcbuffer->pybuffer, (PyObject*)__pyx_v_fcoords, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_9); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_11, __pyx_t_10, __pyx_t_9); } } __pyx_pybuffernd_fcoords.diminfo[0].strides = __pyx_pybuffernd_fcoords.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_fcoords.diminfo[0].shape = __pyx_pybuffernd_fcoords.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_fcoords.diminfo[1].strides = __pyx_pybuffernd_fcoords.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_fcoords.diminfo[1].shape = __pyx_pybuffernd_fcoords.rcbuffer->pybuffer.shape[1]; if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 815; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_18 = 0; __pyx_v_fcoords = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "nipy/algorithms/statistics/intvol.pyx":826 * np.ndarray[np.intp_t, ndim=1] strides * np.ndarray[np.intp_t, ndim=1] dstrides * strides = np.array(strides_from(pmask_shape, np.bool), dtype=np.intp) # <<<<<<<<<<<<<< * dstrides = np.array(strides_from(mask.shape, 
np.bool), dtype=np.intp) * ss0, ss1 = strides[0], strides[1] */ __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_5 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__array); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__strides_from); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_1 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__bool); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_pmask_shape); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_pmask_shape); __Pyx_GIVEREF(__pyx_v_pmask_shape); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_1)); __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_19 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__intp); if (unlikely(!__pyx_t_19)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_19); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__dtype), __pyx_t_19) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_19); __pyx_t_19 = 0; __pyx_t_19 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_3), ((PyObject *)__pyx_t_1)); if (unlikely(!__pyx_t_19)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_19); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; if (!(likely(((__pyx_t_19) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_19, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 826; __pyx_clineno = 
__LINE__; goto __pyx_L1_error;} __pyx_t_20 = ((PyArrayObject *)__pyx_t_19); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_strides.rcbuffer->pybuffer); __pyx_t_8 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_strides.rcbuffer->pybuffer, (PyObject*)__pyx_t_20, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_8 < 0)) { PyErr_Fetch(&__pyx_t_9, &__pyx_t_10, &__pyx_t_11); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_strides.rcbuffer->pybuffer, (PyObject*)__pyx_v_strides, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_11); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_9, __pyx_t_10, __pyx_t_11); } } __pyx_pybuffernd_strides.diminfo[0].strides = __pyx_pybuffernd_strides.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_strides.diminfo[0].shape = __pyx_pybuffernd_strides.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_20 = 0; __pyx_v_strides = ((PyArrayObject *)__pyx_t_19); __pyx_t_19 = 0; /* "nipy/algorithms/statistics/intvol.pyx":827 * np.ndarray[np.intp_t, ndim=1] dstrides * strides = np.array(strides_from(pmask_shape, np.bool), dtype=np.intp) * dstrides = np.array(strides_from(mask.shape, np.bool), dtype=np.intp) # <<<<<<<<<<<<<< * ss0, ss1 = strides[0], strides[1] * ss0d, ss1d = dstrides[0], dstrides[1] */ __pyx_t_19 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_19)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_19); __pyx_t_1 = PyObject_GetAttr(__pyx_t_19, __pyx_n_s__array); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_19); __pyx_t_19 = 0; __pyx_t_19 = __Pyx_GetName(__pyx_m, __pyx_n_s__strides_from); if (unlikely(!__pyx_t_19)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_19); __pyx_t_3 = PyObject_GetAttr(__pyx_v_mask, __pyx_n_s__shape); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_2 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__bool); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_3 = 0; __pyx_t_2 = 0; __pyx_t_2 = PyObject_Call(__pyx_t_19, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_19); __pyx_t_19 = 0; __Pyx_DECREF(((PyObject 
*)__pyx_t_5)); __pyx_t_5 = 0; __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_2)); __pyx_t_19 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_19)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_19); __pyx_t_3 = PyObject_GetAttr(__pyx_t_19, __pyx_n_s__intp); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_19); __pyx_t_19 = 0; if (PyDict_SetItem(__pyx_t_2, ((PyObject *)__pyx_n_s__dtype), __pyx_t_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_5), ((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_21 = ((PyArrayObject *)__pyx_t_3); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_dstrides.rcbuffer->pybuffer); __pyx_t_8 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_dstrides.rcbuffer->pybuffer, (PyObject*)__pyx_t_21, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_8 < 0)) { PyErr_Fetch(&__pyx_t_11, &__pyx_t_10, &__pyx_t_9); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_dstrides.rcbuffer->pybuffer, (PyObject*)__pyx_v_dstrides, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_9); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_11, __pyx_t_10, __pyx_t_9); } } __pyx_pybuffernd_dstrides.diminfo[0].strides = __pyx_pybuffernd_dstrides.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_dstrides.diminfo[0].shape = __pyx_pybuffernd_dstrides.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_21 = 0; __pyx_v_dstrides = ((PyArrayObject *)__pyx_t_3); __pyx_t_3 = 0; /* "nipy/algorithms/statistics/intvol.pyx":828 * strides = np.array(strides_from(pmask_shape, np.bool), dtype=np.intp) * dstrides = np.array(strides_from(mask.shape, np.bool), dtype=np.intp) * ss0, ss1 = strides[0], strides[1] # <<<<<<<<<<<<<< * ss0d, ss1d = dstrides[0], dstrides[1] * verts = [] */ __pyx_t_22 = 0; __pyx_t_8 = -1; if (__pyx_t_22 < 0) { __pyx_t_22 += __pyx_pybuffernd_strides.diminfo[0].shape; if (unlikely(__pyx_t_22 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_22 >= __pyx_pybuffernd_strides.diminfo[0].shape)) __pyx_t_8 = 0; if (unlikely(__pyx_t_8 != -1)) { 
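/* The buffer indexing around here implements intvol.pyx lines 826-829 as
 * quoted above: strides computed from the padded mask shape give ss0, ss1,
 * and dstrides computed from the original mask shape give ss0d, ss1d.
 * The two small loops that follow (pyx lines 830-833) fill verts with
 * ss0d*i + ss1d*j for i, j in (0, 1) -- presumably the four linear offsets
 * of one pixel's corners -- and pyx line 834 sorts them into cvertices. */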
__Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_23 = (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_strides.rcbuffer->pybuffer.buf, __pyx_t_22, __pyx_pybuffernd_strides.diminfo[0].strides)); __pyx_t_24 = 1; __pyx_t_8 = -1; if (__pyx_t_24 < 0) { __pyx_t_24 += __pyx_pybuffernd_strides.diminfo[0].shape; if (unlikely(__pyx_t_24 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_24 >= __pyx_pybuffernd_strides.diminfo[0].shape)) __pyx_t_8 = 0; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_25 = (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_strides.rcbuffer->pybuffer.buf, __pyx_t_24, __pyx_pybuffernd_strides.diminfo[0].strides)); __pyx_v_ss0 = __pyx_t_23; __pyx_v_ss1 = __pyx_t_25; /* "nipy/algorithms/statistics/intvol.pyx":829 * dstrides = np.array(strides_from(mask.shape, np.bool), dtype=np.intp) * ss0, ss1 = strides[0], strides[1] * ss0d, ss1d = dstrides[0], dstrides[1] # <<<<<<<<<<<<<< * verts = [] * for i in range(2): */ __pyx_t_26 = 0; __pyx_t_8 = -1; if (__pyx_t_26 < 0) { __pyx_t_26 += __pyx_pybuffernd_dstrides.diminfo[0].shape; if (unlikely(__pyx_t_26 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_26 >= __pyx_pybuffernd_dstrides.diminfo[0].shape)) __pyx_t_8 = 0; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_25 = (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_dstrides.rcbuffer->pybuffer.buf, __pyx_t_26, __pyx_pybuffernd_dstrides.diminfo[0].strides)); __pyx_t_27 = 1; __pyx_t_8 = -1; if (__pyx_t_27 < 0) { __pyx_t_27 += __pyx_pybuffernd_dstrides.diminfo[0].shape; if (unlikely(__pyx_t_27 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_27 >= __pyx_pybuffernd_dstrides.diminfo[0].shape)) __pyx_t_8 = 0; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_23 = (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_dstrides.rcbuffer->pybuffer.buf, __pyx_t_27, __pyx_pybuffernd_dstrides.diminfo[0].strides)); __pyx_v_ss0d = __pyx_t_25; __pyx_v_ss1d = __pyx_t_23; /* "nipy/algorithms/statistics/intvol.pyx":830 * ss0, ss1 = strides[0], strides[1] * ss0d, ss1d = dstrides[0], dstrides[1] * verts = [] # <<<<<<<<<<<<<< * for i in range(2): * for j in range(2): */ __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_v_verts = __pyx_t_3; __pyx_t_3 = 0; /* "nipy/algorithms/statistics/intvol.pyx":831 * ss0d, ss1d = dstrides[0], dstrides[1] * verts = [] * for i in range(2): # <<<<<<<<<<<<<< * for j in range(2): * verts.append(ss0d * i + ss1d * j) */ for (__pyx_t_15 = 0; __pyx_t_15 < 2; __pyx_t_15+=1) { __pyx_v_i = __pyx_t_15; /* "nipy/algorithms/statistics/intvol.pyx":832 * verts = [] * for i in range(2): * for j in range(2): # <<<<<<<<<<<<<< * verts.append(ss0d * i + ss1d * j) * cvertices = np.array(sorted(verts), np.intp) */ for (__pyx_t_14 = 0; __pyx_t_14 < 2; __pyx_t_14+=1) { __pyx_v_j = __pyx_t_14; /* "nipy/algorithms/statistics/intvol.pyx":833 * for i in range(2): * for j in range(2): * verts.append(ss0d * i + ss1d * j) # 
<<<<<<<<<<<<<< * cvertices = np.array(sorted(verts), np.intp) * */ __pyx_t_3 = PyInt_FromLong(((__pyx_v_ss0d * __pyx_v_i) + (__pyx_v_ss1d * __pyx_v_j))); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_28 = PyList_Append(__pyx_v_verts, __pyx_t_3); if (unlikely(__pyx_t_28 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } } /* "nipy/algorithms/statistics/intvol.pyx":834 * for j in range(2): * verts.append(ss0d * i + ss1d * j) * cvertices = np.array(sorted(verts), np.intp) # <<<<<<<<<<<<<< * * union = join_complexes(*[cube_with_strides_center((0,1), strides), */ __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__array); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(((PyObject *)__pyx_v_verts)); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_verts)); __Pyx_GIVEREF(((PyObject *)__pyx_v_verts)); __pyx_t_5 = PyObject_Call(__pyx_builtin_sorted, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_1 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__intp); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_5 = 0; __pyx_t_1 = 0; __pyx_t_1 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_29 = ((PyArrayObject *)__pyx_t_1); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_cvertices.rcbuffer->pybuffer); __pyx_t_8 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_cvertices.rcbuffer->pybuffer, (PyObject*)__pyx_t_29, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_8 < 0)) { PyErr_Fetch(&__pyx_t_9, 
&__pyx_t_10, &__pyx_t_11); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_cvertices.rcbuffer->pybuffer, (PyObject*)__pyx_v_cvertices, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_11); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_9, __pyx_t_10, __pyx_t_11); } } __pyx_pybuffernd_cvertices.diminfo[0].strides = __pyx_pybuffernd_cvertices.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_cvertices.diminfo[0].shape = __pyx_pybuffernd_cvertices.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_29 = 0; __pyx_v_cvertices = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/statistics/intvol.pyx":836 * cvertices = np.array(sorted(verts), np.intp) * * union = join_complexes(*[cube_with_strides_center((0,1), strides), # <<<<<<<<<<<<<< * cube_with_strides_center((1,0), strides), * cube_with_strides_center((1,1), strides)]) */ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__join_complexes); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s_8); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(((PyObject *)__pyx_k_tuple_48)); PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_k_tuple_48)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_48)); __Pyx_INCREF(((PyObject *)__pyx_v_strides)); PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)__pyx_v_strides)); __Pyx_GIVEREF(((PyObject *)__pyx_v_strides)); __pyx_t_5 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; /* "nipy/algorithms/statistics/intvol.pyx":837 * * union = join_complexes(*[cube_with_strides_center((0,1), strides), * cube_with_strides_center((1,0), strides), # <<<<<<<<<<<<<< * cube_with_strides_center((1,1), strides)]) * */ __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s_8); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(((PyObject *)__pyx_k_tuple_49)); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_k_tuple_49)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_49)); __Pyx_INCREF(((PyObject *)__pyx_v_strides)); PyTuple_SET_ITEM(__pyx_t_3, 1, ((PyObject *)__pyx_v_strides)); __Pyx_GIVEREF(((PyObject *)__pyx_v_strides)); __pyx_t_19 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_19)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_19); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; /* 
"nipy/algorithms/statistics/intvol.pyx":838 * union = join_complexes(*[cube_with_strides_center((0,1), strides), * cube_with_strides_center((1,0), strides), * cube_with_strides_center((1,1), strides)]) # <<<<<<<<<<<<<< * * c = cube_with_strides_center((0,0), strides) */ __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s_8); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(((PyObject *)__pyx_k_tuple_50)); PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_k_tuple_50)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_50)); __Pyx_INCREF(((PyObject *)__pyx_v_strides)); PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)__pyx_v_strides)); __Pyx_GIVEREF(((PyObject *)__pyx_v_strides)); __pyx_t_30 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_30)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_30); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; __pyx_t_2 = PyList_New(3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); PyList_SET_ITEM(__pyx_t_2, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); PyList_SET_ITEM(__pyx_t_2, 1, __pyx_t_19); __Pyx_GIVEREF(__pyx_t_19); PyList_SET_ITEM(__pyx_t_2, 2, __pyx_t_30); __Pyx_GIVEREF(__pyx_t_30); __pyx_t_5 = 0; __pyx_t_19 = 0; __pyx_t_30 = 0; __pyx_t_30 = PySequence_Tuple(((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_30)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_30)); __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; __pyx_t_2 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_30), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_30)); __pyx_t_30 = 0; __pyx_v_union = __pyx_t_2; __pyx_t_2 = 0; /* "nipy/algorithms/statistics/intvol.pyx":840 * cube_with_strides_center((1,1), strides)]) * * c = cube_with_strides_center((0,0), strides) # <<<<<<<<<<<<<< * m3 = np.array(list(c[3].difference(union[3]))) * m2 = np.array(list(c[2].difference(union[2]))) */ __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s_8); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_30 = PyTuple_New(2); if (unlikely(!__pyx_t_30)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_30); __Pyx_INCREF(((PyObject *)__pyx_k_tuple_51)); PyTuple_SET_ITEM(__pyx_t_30, 0, ((PyObject *)__pyx_k_tuple_51)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_51)); __Pyx_INCREF(((PyObject *)__pyx_v_strides)); PyTuple_SET_ITEM(__pyx_t_30, 1, ((PyObject *)__pyx_v_strides)); __Pyx_GIVEREF(((PyObject *)__pyx_v_strides)); __pyx_t_1 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_30), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; 
__Pyx_DECREF(((PyObject *)__pyx_t_30)); __pyx_t_30 = 0; __pyx_v_c = __pyx_t_1; __pyx_t_1 = 0; /* "nipy/algorithms/statistics/intvol.pyx":841 * * c = cube_with_strides_center((0,0), strides) * m3 = np.array(list(c[3].difference(union[3]))) # <<<<<<<<<<<<<< * m2 = np.array(list(c[2].difference(union[2]))) * */ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_30 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__array); if (unlikely(!__pyx_t_30)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_30); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_c, 3, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__difference); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_union, 3, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_19 = PyTuple_New(1); if (unlikely(!__pyx_t_19)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_19); PyTuple_SET_ITEM(__pyx_t_19, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_19), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_19)); __pyx_t_19 = 0; __pyx_t_19 = PyTuple_New(1); if (unlikely(!__pyx_t_19)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_19); PyTuple_SET_ITEM(__pyx_t_19, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyObject_Call(((PyObject *)((PyObject*)(&PyList_Type))), ((PyObject *)__pyx_t_19), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(((PyObject *)__pyx_t_19)); __pyx_t_19 = 0; __pyx_t_19 = PyTuple_New(1); if (unlikely(!__pyx_t_19)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_19); PyTuple_SET_ITEM(__pyx_t_19, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyObject_Call(__pyx_t_30, ((PyObject *)__pyx_t_19), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_30); __pyx_t_30 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_19)); __pyx_t_19 = 0; __pyx_v_m3 = __pyx_t_1; __pyx_t_1 = 0; /* "nipy/algorithms/statistics/intvol.pyx":842 * c = cube_with_strides_center((0,0), strides) * m3 = np.array(list(c[3].difference(union[3]))) * m2 = np.array(list(c[2].difference(union[2]))) # <<<<<<<<<<<<<< * * d3 = np.array([[_convert_stride2(v, strides, (2,1)) for v in m3[i]] for i in range(m3.shape[0])]) */ __pyx_t_1 = 
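/* This region corresponds to intvol.pyx lines 836-842 quoted nearby:
 * union is built by join_complexes() from the cubes centred at (0,1),
 * (1,0) and (1,1) with the given strides; c is the cube at (0,0); then
 * m3 = np.array(list(c[3].difference(union[3]))) and the analogous
 * expression with index 2 gives m2, i.e. the elements of c that do not
 * also appear in union (apparently the 3- and 2-vertex simplices). */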
__Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_19 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__array); if (unlikely(!__pyx_t_19)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_19); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_c, 2, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_30 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__difference); if (unlikely(!__pyx_t_30)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_30); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_union, 2, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyObject_Call(__pyx_t_30, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_30); __pyx_t_30 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyObject_Call(((PyObject *)((PyObject*)(&PyList_Type))), ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyObject_Call(__pyx_t_19, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_19); __pyx_t_19 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; __pyx_v_m2 = __pyx_t_1; __pyx_t_1 = 0; /* "nipy/algorithms/statistics/intvol.pyx":844 * m2 = np.array(list(c[2].difference(union[2]))) * * d3 = np.array([[_convert_stride2(v, strides, (2,1)) for v in m3[i]] for i in range(m3.shape[0])]) # <<<<<<<<<<<<<< * d3 = np.hstack([m3, d3]) * ds3 = d3.shape[0] */ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__array); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); 
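/* The nested loops generated below are the expansion of the intvol.pyx
 * list comprehension at line 844,
 *   d3 = np.array([[_convert_stride2(v, strides, (2,1)) for v in m3[i]]
 *                  for i in range(m3.shape[0])]),
 * which re-codes every vertex of m3 in the (2,1) stride system; d3 is then
 * extended with np.hstack([m3, d3]) and ds3 = d3.shape[0] is recorded
 * (pyx lines 845-846). */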
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_19 = PyObject_GetAttr(__pyx_v_m3, __pyx_n_s__shape); if (unlikely(!__pyx_t_19)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_19); __pyx_t_30 = __Pyx_GetItemInt(__pyx_t_19, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_30) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_30); __Pyx_DECREF(__pyx_t_19); __pyx_t_19 = 0; __pyx_t_31 = __Pyx_PyInt_AsLong(__pyx_t_30); if (unlikely((__pyx_t_31 == (long)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_30); __pyx_t_30 = 0; for (__pyx_t_15 = 0; __pyx_t_15 < __pyx_t_31; __pyx_t_15+=1) { __pyx_v_i = __pyx_t_15; __pyx_t_30 = PyList_New(0); if (unlikely(!__pyx_t_30)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_30); __pyx_t_19 = __Pyx_GetItemInt(__pyx_v_m3, __pyx_v_i, sizeof(npy_intp), __Pyx_PyInt_to_py_Py_intptr_t); if (!__pyx_t_19) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_19); if (PyList_CheckExact(__pyx_t_19) || PyTuple_CheckExact(__pyx_t_19)) { __pyx_t_5 = __pyx_t_19; __Pyx_INCREF(__pyx_t_5); __pyx_t_32 = 0; __pyx_t_33 = NULL; } else { __pyx_t_32 = -1; __pyx_t_5 = PyObject_GetIter(__pyx_t_19); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_33 = Py_TYPE(__pyx_t_5)->tp_iternext; } __Pyx_DECREF(__pyx_t_19); __pyx_t_19 = 0; for (;;) { if (!__pyx_t_33 && PyList_CheckExact(__pyx_t_5)) { if (__pyx_t_32 >= PyList_GET_SIZE(__pyx_t_5)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_19 = PyList_GET_ITEM(__pyx_t_5, __pyx_t_32); __Pyx_INCREF(__pyx_t_19); __pyx_t_32++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else __pyx_t_19 = PySequence_ITEM(__pyx_t_5, __pyx_t_32); __pyx_t_32++; if (unlikely(!__pyx_t_19)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else if (!__pyx_t_33 && PyTuple_CheckExact(__pyx_t_5)) { if (__pyx_t_32 >= PyTuple_GET_SIZE(__pyx_t_5)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_19 = PyTuple_GET_ITEM(__pyx_t_5, __pyx_t_32); __Pyx_INCREF(__pyx_t_19); __pyx_t_32++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else __pyx_t_19 = PySequence_ITEM(__pyx_t_5, __pyx_t_32); __pyx_t_32++; if (unlikely(!__pyx_t_19)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else { __pyx_t_19 = __pyx_t_33(__pyx_t_5); if (unlikely(!__pyx_t_19)) { if (PyErr_Occurred()) { if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) PyErr_Clear(); else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } break; } __Pyx_GOTREF(__pyx_t_19); } __Pyx_XDECREF(__pyx_v_v); __pyx_v_v = __pyx_t_19; __pyx_t_19 = 0; __pyx_t_19 = __Pyx_GetName(__pyx_m, __pyx_n_s___convert_stride2); if (unlikely(!__pyx_t_19)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 844; 
__pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_19); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_v); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_v); __Pyx_GIVEREF(__pyx_v_v); __Pyx_INCREF(((PyObject *)__pyx_v_strides)); PyTuple_SET_ITEM(__pyx_t_3, 1, ((PyObject *)__pyx_v_strides)); __Pyx_GIVEREF(((PyObject *)__pyx_v_strides)); __Pyx_INCREF(((PyObject *)__pyx_k_tuple_52)); PyTuple_SET_ITEM(__pyx_t_3, 2, ((PyObject *)__pyx_k_tuple_52)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_52)); __pyx_t_34 = PyObject_Call(__pyx_t_19, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_34)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_34); __Pyx_DECREF(__pyx_t_19); __pyx_t_19 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; if (unlikely(__Pyx_PyList_Append(__pyx_t_30, (PyObject*)__pyx_t_34))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_34); __pyx_t_34 = 0; } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(__Pyx_PyList_Append(__pyx_t_1, (PyObject*)__pyx_t_30))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(((PyObject *)__pyx_t_30)); __pyx_t_30 = 0; } __pyx_t_30 = PyTuple_New(1); if (unlikely(!__pyx_t_30)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_30); __Pyx_INCREF(((PyObject *)__pyx_t_1)); PyTuple_SET_ITEM(__pyx_t_30, 0, ((PyObject *)__pyx_t_1)); __Pyx_GIVEREF(((PyObject *)__pyx_t_1)); __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __pyx_t_1 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_30), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_30)); __pyx_t_30 = 0; if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_35 = ((PyArrayObject *)__pyx_t_1); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_d3.rcbuffer->pybuffer); __pyx_t_8 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_d3.rcbuffer->pybuffer, (PyObject*)__pyx_t_35, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack); if (unlikely(__pyx_t_8 < 0)) { PyErr_Fetch(&__pyx_t_11, &__pyx_t_10, &__pyx_t_9); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_d3.rcbuffer->pybuffer, (PyObject*)__pyx_v_d3, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_9); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_11, __pyx_t_10, __pyx_t_9); } } __pyx_pybuffernd_d3.diminfo[0].strides = __pyx_pybuffernd_d3.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_d3.diminfo[0].shape = __pyx_pybuffernd_d3.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_d3.diminfo[1].strides = __pyx_pybuffernd_d3.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_d3.diminfo[1].shape = __pyx_pybuffernd_d3.rcbuffer->pybuffer.shape[1]; if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_35 = 0; __pyx_v_d3 = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/statistics/intvol.pyx":845 * * d3 = np.array([[_convert_stride2(v, strides, (2,1)) for v in m3[i]] for i in range(m3.shape[0])]) * d3 = np.hstack([m3, d3]) # <<<<<<<<<<<<<< * ds3 = d3.shape[0] * */ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 845; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_30 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__hstack); if (unlikely(!__pyx_t_30)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 845; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_30); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyList_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 845; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_m3); PyList_SET_ITEM(__pyx_t_1, 0, __pyx_v_m3); __Pyx_GIVEREF(__pyx_v_m3); __Pyx_INCREF(((PyObject *)__pyx_v_d3)); PyList_SET_ITEM(__pyx_t_1, 1, ((PyObject *)__pyx_v_d3)); __Pyx_GIVEREF(((PyObject *)__pyx_v_d3)); __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 845; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_t_1)); __Pyx_GIVEREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __pyx_t_1 = PyObject_Call(__pyx_t_30, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 845; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_30); __pyx_t_30 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 845; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_35 = ((PyArrayObject *)__pyx_t_1); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_d3.rcbuffer->pybuffer); __pyx_t_8 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_d3.rcbuffer->pybuffer, (PyObject*)__pyx_t_35, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack); if (unlikely(__pyx_t_8 < 0)) { PyErr_Fetch(&__pyx_t_9, &__pyx_t_10, &__pyx_t_11); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_d3.rcbuffer->pybuffer, (PyObject*)__pyx_v_d3, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_11); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_9, __pyx_t_10, __pyx_t_11); } } __pyx_pybuffernd_d3.diminfo[0].strides = __pyx_pybuffernd_d3.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_d3.diminfo[0].shape = __pyx_pybuffernd_d3.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_d3.diminfo[1].strides = __pyx_pybuffernd_d3.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_d3.diminfo[1].shape = __pyx_pybuffernd_d3.rcbuffer->pybuffer.shape[1]; if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 845; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_35 = 0; __Pyx_DECREF(((PyObject *)__pyx_v_d3)); __pyx_v_d3 = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/statistics/intvol.pyx":846 * d3 = np.array([[_convert_stride2(v, strides, (2,1)) for v in 
m3[i]] for i in range(m3.shape[0])]) * d3 = np.hstack([m3, d3]) * ds3 = d3.shape[0] # <<<<<<<<<<<<<< * * d2 = np.array([[_convert_stride2(v, strides, (2,1)) for v in m2[i]] for i in range(m2.shape[0])]) */ __pyx_v_ds3 = (__pyx_v_d3->dimensions[0]); /* "nipy/algorithms/statistics/intvol.pyx":848 * ds3 = d3.shape[0] * * d2 = np.array([[_convert_stride2(v, strides, (2,1)) for v in m2[i]] for i in range(m2.shape[0])]) # <<<<<<<<<<<<<< * d2 = np.hstack([m2, d2]) * ds2 = d2.shape[0] */ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 848; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__array); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 848; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 848; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_30 = PyObject_GetAttr(__pyx_v_m2, __pyx_n_s__shape); if (unlikely(!__pyx_t_30)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 848; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_30); __pyx_t_5 = __Pyx_GetItemInt(__pyx_t_30, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 848; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_30); __pyx_t_30 = 0; __pyx_t_31 = __Pyx_PyInt_AsLong(__pyx_t_5); if (unlikely((__pyx_t_31 == (long)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 848; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; for (__pyx_t_15 = 0; __pyx_t_15 < __pyx_t_31; __pyx_t_15+=1) { __pyx_v_i = __pyx_t_15; __pyx_t_5 = PyList_New(0); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 848; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_30 = __Pyx_GetItemInt(__pyx_v_m2, __pyx_v_i, sizeof(npy_intp), __Pyx_PyInt_to_py_Py_intptr_t); if (!__pyx_t_30) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 848; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_30); if (PyList_CheckExact(__pyx_t_30) || PyTuple_CheckExact(__pyx_t_30)) { __pyx_t_34 = __pyx_t_30; __Pyx_INCREF(__pyx_t_34); __pyx_t_32 = 0; __pyx_t_33 = NULL; } else { __pyx_t_32 = -1; __pyx_t_34 = PyObject_GetIter(__pyx_t_30); if (unlikely(!__pyx_t_34)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 848; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_34); __pyx_t_33 = Py_TYPE(__pyx_t_34)->tp_iternext; } __Pyx_DECREF(__pyx_t_30); __pyx_t_30 = 0; for (;;) { if (!__pyx_t_33 && PyList_CheckExact(__pyx_t_34)) { if (__pyx_t_32 >= PyList_GET_SIZE(__pyx_t_34)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_30 = PyList_GET_ITEM(__pyx_t_34, __pyx_t_32); __Pyx_INCREF(__pyx_t_30); __pyx_t_32++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 848; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else __pyx_t_30 = PySequence_ITEM(__pyx_t_34, __pyx_t_32); __pyx_t_32++; if (unlikely(!__pyx_t_30)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 848; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else if (!__pyx_t_33 && PyTuple_CheckExact(__pyx_t_34)) { if (__pyx_t_32 >= PyTuple_GET_SIZE(__pyx_t_34)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_30 = PyTuple_GET_ITEM(__pyx_t_34, 
__pyx_t_32); __Pyx_INCREF(__pyx_t_30); __pyx_t_32++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 848; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else __pyx_t_30 = PySequence_ITEM(__pyx_t_34, __pyx_t_32); __pyx_t_32++; if (unlikely(!__pyx_t_30)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 848; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else { __pyx_t_30 = __pyx_t_33(__pyx_t_34); if (unlikely(!__pyx_t_30)) { if (PyErr_Occurred()) { if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) PyErr_Clear(); else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 848; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } break; } __Pyx_GOTREF(__pyx_t_30); } __Pyx_XDECREF(__pyx_v_v); __pyx_v_v = __pyx_t_30; __pyx_t_30 = 0; __pyx_t_30 = __Pyx_GetName(__pyx_m, __pyx_n_s___convert_stride2); if (unlikely(!__pyx_t_30)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 848; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_30); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 848; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_v); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_v); __Pyx_GIVEREF(__pyx_v_v); __Pyx_INCREF(((PyObject *)__pyx_v_strides)); PyTuple_SET_ITEM(__pyx_t_3, 1, ((PyObject *)__pyx_v_strides)); __Pyx_GIVEREF(((PyObject *)__pyx_v_strides)); __Pyx_INCREF(((PyObject *)__pyx_k_tuple_53)); PyTuple_SET_ITEM(__pyx_t_3, 2, ((PyObject *)__pyx_k_tuple_53)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_53)); __pyx_t_19 = PyObject_Call(__pyx_t_30, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_19)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 848; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_19); __Pyx_DECREF(__pyx_t_30); __pyx_t_30 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; if (unlikely(__Pyx_PyList_Append(__pyx_t_5, (PyObject*)__pyx_t_19))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 848; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_19); __pyx_t_19 = 0; } __Pyx_DECREF(__pyx_t_34); __pyx_t_34 = 0; if (unlikely(__Pyx_PyList_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 848; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; } __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 848; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(((PyObject *)__pyx_t_1)); PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_t_1)); __Pyx_GIVEREF(((PyObject *)__pyx_t_1)); __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __pyx_t_1 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 848; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 848; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_36 = ((PyArrayObject *)__pyx_t_1); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_d2.rcbuffer->pybuffer); __pyx_t_8 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_d2.rcbuffer->pybuffer, (PyObject*)__pyx_t_36, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| 
PyBUF_STRIDES, 2, 0, __pyx_stack); if (unlikely(__pyx_t_8 < 0)) { PyErr_Fetch(&__pyx_t_11, &__pyx_t_10, &__pyx_t_9); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_d2.rcbuffer->pybuffer, (PyObject*)__pyx_v_d2, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_9); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_11, __pyx_t_10, __pyx_t_9); } } __pyx_pybuffernd_d2.diminfo[0].strides = __pyx_pybuffernd_d2.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_d2.diminfo[0].shape = __pyx_pybuffernd_d2.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_d2.diminfo[1].strides = __pyx_pybuffernd_d2.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_d2.diminfo[1].shape = __pyx_pybuffernd_d2.rcbuffer->pybuffer.shape[1]; if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 848; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_36 = 0; __pyx_v_d2 = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/statistics/intvol.pyx":849 * * d2 = np.array([[_convert_stride2(v, strides, (2,1)) for v in m2[i]] for i in range(m2.shape[0])]) * d2 = np.hstack([m2, d2]) # <<<<<<<<<<<<<< * ds2 = d2.shape[0] * */ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 849; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__hstack); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 849; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyList_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 849; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_m2); PyList_SET_ITEM(__pyx_t_1, 0, __pyx_v_m2); __Pyx_GIVEREF(__pyx_v_m2); __Pyx_INCREF(((PyObject *)__pyx_v_d2)); PyList_SET_ITEM(__pyx_t_1, 1, ((PyObject *)__pyx_v_d2)); __Pyx_GIVEREF(((PyObject *)__pyx_v_d2)); __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 849; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_t_1)); __Pyx_GIVEREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __pyx_t_1 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 849; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 849; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_36 = ((PyArrayObject *)__pyx_t_1); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_d2.rcbuffer->pybuffer); __pyx_t_8 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_d2.rcbuffer->pybuffer, (PyObject*)__pyx_t_36, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack); if (unlikely(__pyx_t_8 < 0)) { PyErr_Fetch(&__pyx_t_9, &__pyx_t_10, &__pyx_t_11); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_d2.rcbuffer->pybuffer, (PyObject*)__pyx_v_d2, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) 
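/* The comprehension just expanded converts m2 the same way as m3 (pyx
 * line 848). Below, following the quoted pyx lines 849-862, the code sets
 * d2 = np.hstack([m2, d2]), ds2 = d2.shape[0], a 4x4 workspace
 * D = np.zeros((4,4)) and npix = mask.size, then enters the double loop
 * over interior pixels, computing pindex = i*ss0 + j*ss1,
 * index = i*ss0d + j*ss1d and, for each corner r,
 * rr = (index + cvertices[r]) % npix with mr = fmask[rr]. */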
== -1)) { Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_11); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_9, __pyx_t_10, __pyx_t_11); } } __pyx_pybuffernd_d2.diminfo[0].strides = __pyx_pybuffernd_d2.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_d2.diminfo[0].shape = __pyx_pybuffernd_d2.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_d2.diminfo[1].strides = __pyx_pybuffernd_d2.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_d2.diminfo[1].shape = __pyx_pybuffernd_d2.rcbuffer->pybuffer.shape[1]; if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 849; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_36 = 0; __Pyx_DECREF(((PyObject *)__pyx_v_d2)); __pyx_v_d2 = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/statistics/intvol.pyx":850 * d2 = np.array([[_convert_stride2(v, strides, (2,1)) for v in m2[i]] for i in range(m2.shape[0])]) * d2 = np.hstack([m2, d2]) * ds2 = d2.shape[0] # <<<<<<<<<<<<<< * * D = np.zeros((4,4)) */ __pyx_v_ds2 = (__pyx_v_d2->dimensions[0]); /* "nipy/algorithms/statistics/intvol.pyx":852 * ds2 = d2.shape[0] * * D = np.zeros((4,4)) # <<<<<<<<<<<<<< * * npix = mask.size */ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 852; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__zeros); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 852; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_k_tuple_55), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 852; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 852; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_37 = ((PyArrayObject *)__pyx_t_1); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_D.rcbuffer->pybuffer); __pyx_t_8 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_D.rcbuffer->pybuffer, (PyObject*)__pyx_t_37, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack); if (unlikely(__pyx_t_8 < 0)) { PyErr_Fetch(&__pyx_t_11, &__pyx_t_10, &__pyx_t_9); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_D.rcbuffer->pybuffer, (PyObject*)__pyx_v_D, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_9); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_11, __pyx_t_10, __pyx_t_9); } } __pyx_pybuffernd_D.diminfo[0].strides = __pyx_pybuffernd_D.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_D.diminfo[0].shape = __pyx_pybuffernd_D.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_D.diminfo[1].strides = __pyx_pybuffernd_D.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_D.diminfo[1].shape = __pyx_pybuffernd_D.rcbuffer->pybuffer.shape[1]; if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 852; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_37 = 0; __pyx_v_D = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/statistics/intvol.pyx":854 * D = 
np.zeros((4,4)) * * npix = mask.size # <<<<<<<<<<<<<< * * for i in range(s0-1): */ __pyx_t_1 = PyObject_GetAttr(__pyx_v_mask, __pyx_n_s__size); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 854; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_15 = __Pyx_PyInt_from_py_Py_intptr_t(__pyx_t_1); if (unlikely((__pyx_t_15 == (npy_intp)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 854; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_npix = __pyx_t_15; /* "nipy/algorithms/statistics/intvol.pyx":856 * npix = mask.size * * for i in range(s0-1): # <<<<<<<<<<<<<< * for j in range(s1-1): * pindex = i*ss0+j*ss1 */ __pyx_t_31 = (__pyx_v_s0 - 1); for (__pyx_t_15 = 0; __pyx_t_15 < __pyx_t_31; __pyx_t_15+=1) { __pyx_v_i = __pyx_t_15; /* "nipy/algorithms/statistics/intvol.pyx":857 * * for i in range(s0-1): * for j in range(s1-1): # <<<<<<<<<<<<<< * pindex = i*ss0+j*ss1 * index = i*ss0d+j*ss1d */ __pyx_t_38 = (__pyx_v_s1 - 1); for (__pyx_t_14 = 0; __pyx_t_14 < __pyx_t_38; __pyx_t_14+=1) { __pyx_v_j = __pyx_t_14; /* "nipy/algorithms/statistics/intvol.pyx":858 * for i in range(s0-1): * for j in range(s1-1): * pindex = i*ss0+j*ss1 # <<<<<<<<<<<<<< * index = i*ss0d+j*ss1d * for r in range(4): */ __pyx_t_1 = PyInt_FromLong(((__pyx_v_i * __pyx_v_ss0) + (__pyx_v_j * __pyx_v_ss1))); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 858; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_XDECREF(__pyx_v_pindex); __pyx_v_pindex = __pyx_t_1; __pyx_t_1 = 0; /* "nipy/algorithms/statistics/intvol.pyx":859 * for j in range(s1-1): * pindex = i*ss0+j*ss1 * index = i*ss0d+j*ss1d # <<<<<<<<<<<<<< * for r in range(4): * rr = (index+cvertices[r]) % npix */ __pyx_v_index = ((__pyx_v_i * __pyx_v_ss0d) + (__pyx_v_j * __pyx_v_ss1d)); /* "nipy/algorithms/statistics/intvol.pyx":860 * pindex = i*ss0+j*ss1 * index = i*ss0d+j*ss1d * for r in range(4): # <<<<<<<<<<<<<< * rr = (index+cvertices[r]) % npix * mr = fmask[rr] */ for (__pyx_t_39 = 0; __pyx_t_39 < 4; __pyx_t_39+=1) { __pyx_v_r = __pyx_t_39; /* "nipy/algorithms/statistics/intvol.pyx":861 * index = i*ss0d+j*ss1d * for r in range(4): * rr = (index+cvertices[r]) % npix # <<<<<<<<<<<<<< * mr = fmask[rr] * for s in range(r+1): */ __pyx_t_40 = __pyx_v_r; __pyx_t_8 = -1; if (__pyx_t_40 < 0) { __pyx_t_40 += __pyx_pybuffernd_cvertices.diminfo[0].shape; if (unlikely(__pyx_t_40 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_40 >= __pyx_pybuffernd_cvertices.diminfo[0].shape)) __pyx_t_8 = 0; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 861; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_8 = (__pyx_v_index + (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_cvertices.rcbuffer->pybuffer.buf, __pyx_t_40, __pyx_pybuffernd_cvertices.diminfo[0].strides))); if (unlikely(__pyx_v_npix == 0)) { PyErr_Format(PyExc_ZeroDivisionError, "integer division or modulo by zero"); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 861; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_rr = __Pyx_mod_int(__pyx_t_8, __pyx_v_npix); /* "nipy/algorithms/statistics/intvol.pyx":862 * for r in range(4): * rr = (index+cvertices[r]) % npix * mr = fmask[rr] # <<<<<<<<<<<<<< * for s in range(r+1): * res = 0 */ __pyx_t_41 = __pyx_v_rr; __pyx_t_8 = -1; if (__pyx_t_41 < 0) { __pyx_t_41 += __pyx_pybuffernd_fmask.diminfo[0].shape; if (unlikely(__pyx_t_41 
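/* Two flat index systems are used in this loop nest: pindex (and the v0/v1
 * offsets further down) address the flattened zero-padded mask fpmask, while
 * index, rr and ss address the flattened data arrays fmask and fcoords; the
 * "% npix" keeps the latter indices within bounds.
 */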
< 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_41 >= __pyx_pybuffernd_fmask.diminfo[0].shape)) __pyx_t_8 = 0; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 862; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_mr = (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_fmask.rcbuffer->pybuffer.buf, __pyx_t_41, __pyx_pybuffernd_fmask.diminfo[0].strides)); /* "nipy/algorithms/statistics/intvol.pyx":863 * rr = (index+cvertices[r]) % npix * mr = fmask[rr] * for s in range(r+1): # <<<<<<<<<<<<<< * res = 0 * ss = (index+cvertices[s]) % npix */ __pyx_t_42 = (__pyx_v_r + 1); for (__pyx_t_43 = 0; __pyx_t_43 < __pyx_t_42; __pyx_t_43+=1) { __pyx_v_s = __pyx_t_43; /* "nipy/algorithms/statistics/intvol.pyx":864 * mr = fmask[rr] * for s in range(r+1): * res = 0 # <<<<<<<<<<<<<< * ss = (index+cvertices[s]) % npix * ms = fmask[ss] */ __pyx_v_res = 0.0; /* "nipy/algorithms/statistics/intvol.pyx":865 * for s in range(r+1): * res = 0 * ss = (index+cvertices[s]) % npix # <<<<<<<<<<<<<< * ms = fmask[ss] * if mr * ms: */ __pyx_t_44 = __pyx_v_s; __pyx_t_8 = -1; if (__pyx_t_44 < 0) { __pyx_t_44 += __pyx_pybuffernd_cvertices.diminfo[0].shape; if (unlikely(__pyx_t_44 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_44 >= __pyx_pybuffernd_cvertices.diminfo[0].shape)) __pyx_t_8 = 0; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 865; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_8 = (__pyx_v_index + (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_cvertices.rcbuffer->pybuffer.buf, __pyx_t_44, __pyx_pybuffernd_cvertices.diminfo[0].strides))); if (unlikely(__pyx_v_npix == 0)) { PyErr_Format(PyExc_ZeroDivisionError, "integer division or modulo by zero"); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 865; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_ss = __Pyx_mod_int(__pyx_t_8, __pyx_v_npix); /* "nipy/algorithms/statistics/intvol.pyx":866 * res = 0 * ss = (index+cvertices[s]) % npix * ms = fmask[ss] # <<<<<<<<<<<<<< * if mr * ms: * for l in range(fcoords.shape[0]): */ __pyx_t_45 = __pyx_v_ss; __pyx_t_8 = -1; if (__pyx_t_45 < 0) { __pyx_t_45 += __pyx_pybuffernd_fmask.diminfo[0].shape; if (unlikely(__pyx_t_45 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_45 >= __pyx_pybuffernd_fmask.diminfo[0].shape)) __pyx_t_8 = 0; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 866; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_ms = (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_fmask.rcbuffer->pybuffer.buf, __pyx_t_45, __pyx_pybuffernd_fmask.diminfo[0].strides)); /* "nipy/algorithms/statistics/intvol.pyx":867 * ss = (index+cvertices[s]) % npix * ms = fmask[ss] * if mr * ms: # <<<<<<<<<<<<<< * for l in range(fcoords.shape[0]): * res += fcoords[l,ss] * fcoords[l,rr] */ __pyx_t_8 = (__pyx_v_mr * __pyx_v_ms); if (__pyx_t_8) { /* "nipy/algorithms/statistics/intvol.pyx":868 * ms = fmask[ss] * if mr * ms: * for l in range(fcoords.shape[0]): # <<<<<<<<<<<<<< * res += fcoords[l,ss] * fcoords[l,rr] * D[r,s] = res */ __pyx_t_46 = (__pyx_v_fcoords->dimensions[0]); for (__pyx_t_47 = 0; __pyx_t_47 < __pyx_t_46; __pyx_t_47+=1) { __pyx_v_l = __pyx_t_47; /* "nipy/algorithms/statistics/intvol.pyx":869 * if mr * ms: * for l in range(fcoords.shape[0]): * res += fcoords[l,ss] * fcoords[l,rr] # <<<<<<<<<<<<<< * D[r,s] = res * D[s,r] = res */ __pyx_t_48 
= __pyx_v_l; __pyx_t_49 = __pyx_v_ss; __pyx_t_8 = -1; if (__pyx_t_48 < 0) { __pyx_t_48 += __pyx_pybuffernd_fcoords.diminfo[0].shape; if (unlikely(__pyx_t_48 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_48 >= __pyx_pybuffernd_fcoords.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_49 < 0) { __pyx_t_49 += __pyx_pybuffernd_fcoords.diminfo[1].shape; if (unlikely(__pyx_t_49 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_49 >= __pyx_pybuffernd_fcoords.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 869; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_50 = __pyx_v_l; __pyx_t_51 = __pyx_v_rr; __pyx_t_8 = -1; if (__pyx_t_50 < 0) { __pyx_t_50 += __pyx_pybuffernd_fcoords.diminfo[0].shape; if (unlikely(__pyx_t_50 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_50 >= __pyx_pybuffernd_fcoords.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_51 < 0) { __pyx_t_51 += __pyx_pybuffernd_fcoords.diminfo[1].shape; if (unlikely(__pyx_t_51 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_51 >= __pyx_pybuffernd_fcoords.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 869; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_res = (__pyx_v_res + ((*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_fcoords.rcbuffer->pybuffer.buf, __pyx_t_48, __pyx_pybuffernd_fcoords.diminfo[0].strides, __pyx_t_49, __pyx_pybuffernd_fcoords.diminfo[1].strides)) * (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_fcoords.rcbuffer->pybuffer.buf, __pyx_t_50, __pyx_pybuffernd_fcoords.diminfo[0].strides, __pyx_t_51, __pyx_pybuffernd_fcoords.diminfo[1].strides)))); /* "nipy/algorithms/statistics/intvol.pyx":870 * for l in range(fcoords.shape[0]): * res += fcoords[l,ss] * fcoords[l,rr] * D[r,s] = res # <<<<<<<<<<<<<< * D[s,r] = res * else: */ __pyx_t_52 = __pyx_v_r; __pyx_t_53 = __pyx_v_s; __pyx_t_8 = -1; if (__pyx_t_52 < 0) { __pyx_t_52 += __pyx_pybuffernd_D.diminfo[0].shape; if (unlikely(__pyx_t_52 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_52 >= __pyx_pybuffernd_D.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_53 < 0) { __pyx_t_53 += __pyx_pybuffernd_D.diminfo[1].shape; if (unlikely(__pyx_t_53 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_53 >= __pyx_pybuffernd_D.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 870; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } *__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_52, __pyx_pybuffernd_D.diminfo[0].strides, __pyx_t_53, __pyx_pybuffernd_D.diminfo[1].strides) = __pyx_v_res; /* "nipy/algorithms/statistics/intvol.pyx":871 * res += fcoords[l,ss] * fcoords[l,rr] * D[r,s] = res * D[s,r] = res # <<<<<<<<<<<<<< * else: * D[r,s] = 0 */ __pyx_t_54 = __pyx_v_s; __pyx_t_55 = __pyx_v_r; __pyx_t_8 = -1; if (__pyx_t_54 < 0) { __pyx_t_54 += __pyx_pybuffernd_D.diminfo[0].shape; if (unlikely(__pyx_t_54 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_54 >= __pyx_pybuffernd_D.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_55 < 0) { __pyx_t_55 += __pyx_pybuffernd_D.diminfo[1].shape; if (unlikely(__pyx_t_55 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_55 >= __pyx_pybuffernd_D.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 871; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } *__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_54, __pyx_pybuffernd_D.diminfo[0].strides, __pyx_t_55, __pyx_pybuffernd_D.diminfo[1].strides) = __pyx_v_res; } goto __pyx_L26; } /*else*/ { /* "nipy/algorithms/statistics/intvol.pyx":873 * D[s,r] = res * else: * D[r,s] = 0 # <<<<<<<<<<<<<< * D[s,r] = 0 * */ __pyx_t_46 = __pyx_v_r; __pyx_t_47 = __pyx_v_s; __pyx_t_8 = -1; if (__pyx_t_46 < 0) { __pyx_t_46 += __pyx_pybuffernd_D.diminfo[0].shape; if (unlikely(__pyx_t_46 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_46 >= __pyx_pybuffernd_D.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_47 < 0) { __pyx_t_47 += __pyx_pybuffernd_D.diminfo[1].shape; if (unlikely(__pyx_t_47 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_47 >= __pyx_pybuffernd_D.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 873; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } *__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_46, __pyx_pybuffernd_D.diminfo[0].strides, __pyx_t_47, __pyx_pybuffernd_D.diminfo[1].strides) = 0.0; /* "nipy/algorithms/statistics/intvol.pyx":874 * else: * D[r,s] = 0 * D[s,r] = 0 # <<<<<<<<<<<<<< * * for l in range(ds3): */ __pyx_t_56 = __pyx_v_s; __pyx_t_57 = __pyx_v_r; __pyx_t_8 = -1; if (__pyx_t_56 < 0) { __pyx_t_56 += __pyx_pybuffernd_D.diminfo[0].shape; if (unlikely(__pyx_t_56 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_56 >= __pyx_pybuffernd_D.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_57 < 0) { __pyx_t_57 += __pyx_pybuffernd_D.diminfo[1].shape; if (unlikely(__pyx_t_57 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_57 >= __pyx_pybuffernd_D.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 874; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } *__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_56, __pyx_pybuffernd_D.diminfo[0].strides, __pyx_t_57, __pyx_pybuffernd_D.diminfo[1].strides) = 0.0; } __pyx_L26:; } } /* "nipy/algorithms/statistics/intvol.pyx":876 * D[s,r] = 0 * * for l in range(ds3): # <<<<<<<<<<<<<< * v0 = pindex + d3[l,0] * w0 = d3[l,3] */ __pyx_t_39 = __pyx_v_ds3; for (__pyx_t_43 = 0; __pyx_t_43 < __pyx_t_39; __pyx_t_43+=1) { __pyx_v_l = __pyx_t_43; /* "nipy/algorithms/statistics/intvol.pyx":877 * * for l in range(ds3): * v0 = pindex + d3[l,0] # <<<<<<<<<<<<<< * w0 = d3[l,3] * m = fpmask[v0] */ __pyx_t_58 = __pyx_v_l; __pyx_t_42 = 0; __pyx_t_8 = -1; if (__pyx_t_58 < 0) { __pyx_t_58 += __pyx_pybuffernd_d3.diminfo[0].shape; if (unlikely(__pyx_t_58 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_58 >= __pyx_pybuffernd_d3.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_42 < 0) { __pyx_t_42 += __pyx_pybuffernd_d3.diminfo[1].shape; if (unlikely(__pyx_t_42 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_42 >= __pyx_pybuffernd_d3.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 877; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_1 = __Pyx_PyInt_to_py_Py_intptr_t((*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_d3.rcbuffer->pybuffer.buf, __pyx_t_58, __pyx_pybuffernd_d3.diminfo[0].strides, __pyx_t_42, 
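/* The r/s loops above fill D with inner products of vertex coordinate
 * columns: for the four vertices of the current square, D[r,s] is
 * sum_l fcoords[l,ss] * fcoords[l,rr] when both vertices lie inside the mask,
 * and 0 otherwise. A rough NumPy sketch of the same computation, reusing the
 * variable names from the embedded source (illustrative only, not the
 * generated code path):
 *
 *     verts = (index + cvertices) % npix        # flat indices of the 4 vertices
 *     cols = fcoords[:, verts] * fmask[verts]   # zero out masked-out vertices
 *     D = np.dot(cols.T, cols)                  # 4x4 matrix of inner products
 */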
__pyx_pybuffernd_d3.diminfo[1].strides))); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 877; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyNumber_Add(__pyx_v_pindex, __pyx_t_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 877; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_59 = __Pyx_PyInt_from_py_Py_intptr_t(__pyx_t_2); if (unlikely((__pyx_t_59 == (npy_intp)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 877; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v_v0 = __pyx_t_59; /* "nipy/algorithms/statistics/intvol.pyx":878 * for l in range(ds3): * v0 = pindex + d3[l,0] * w0 = d3[l,3] # <<<<<<<<<<<<<< * m = fpmask[v0] * if m: */ __pyx_t_59 = __pyx_v_l; __pyx_t_60 = 3; __pyx_t_8 = -1; if (__pyx_t_59 < 0) { __pyx_t_59 += __pyx_pybuffernd_d3.diminfo[0].shape; if (unlikely(__pyx_t_59 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_59 >= __pyx_pybuffernd_d3.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_60 < 0) { __pyx_t_60 += __pyx_pybuffernd_d3.diminfo[1].shape; if (unlikely(__pyx_t_60 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_60 >= __pyx_pybuffernd_d3.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 878; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_2 = __Pyx_PyInt_to_py_Py_intptr_t((*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_d3.rcbuffer->pybuffer.buf, __pyx_t_59, __pyx_pybuffernd_d3.diminfo[0].strides, __pyx_t_60, __pyx_pybuffernd_d3.diminfo[1].strides))); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 878; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_XDECREF(__pyx_v_w0); __pyx_v_w0 = __pyx_t_2; __pyx_t_2 = 0; /* "nipy/algorithms/statistics/intvol.pyx":879 * v0 = pindex + d3[l,0] * w0 = d3[l,3] * m = fpmask[v0] # <<<<<<<<<<<<<< * if m: * v1 = pindex + d3[l,1] */ __pyx_t_61 = __pyx_v_v0; __pyx_t_8 = -1; if (__pyx_t_61 < 0) { __pyx_t_61 += __pyx_pybuffernd_fpmask.diminfo[0].shape; if (unlikely(__pyx_t_61 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_61 >= __pyx_pybuffernd_fpmask.diminfo[0].shape)) __pyx_t_8 = 0; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 879; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_m = (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_fpmask.rcbuffer->pybuffer.buf, __pyx_t_61, __pyx_pybuffernd_fpmask.diminfo[0].strides)); /* "nipy/algorithms/statistics/intvol.pyx":880 * w0 = d3[l,3] * m = fpmask[v0] * if m: # <<<<<<<<<<<<<< * v1 = pindex + d3[l,1] * v2 = pindex + d3[l,2] */ if (__pyx_v_m) { /* "nipy/algorithms/statistics/intvol.pyx":881 * m = fpmask[v0] * if m: * v1 = pindex + d3[l,1] # <<<<<<<<<<<<<< * v2 = pindex + d3[l,2] * w1 = d3[l,4] */ __pyx_t_62 = __pyx_v_l; __pyx_t_63 = 1; __pyx_t_8 = -1; if (__pyx_t_62 < 0) { __pyx_t_62 += __pyx_pybuffernd_d3.diminfo[0].shape; if (unlikely(__pyx_t_62 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_62 >= __pyx_pybuffernd_d3.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_63 < 0) { __pyx_t_63 += __pyx_pybuffernd_d3.diminfo[1].shape; if (unlikely(__pyx_t_63 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_63 >= __pyx_pybuffernd_d3.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { 
__Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 881; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_2 = __Pyx_PyInt_to_py_Py_intptr_t((*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_d3.rcbuffer->pybuffer.buf, __pyx_t_62, __pyx_pybuffernd_d3.diminfo[0].strides, __pyx_t_63, __pyx_pybuffernd_d3.diminfo[1].strides))); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 881; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = PyNumber_Add(__pyx_v_pindex, __pyx_t_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 881; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_64 = __Pyx_PyInt_from_py_Py_intptr_t(__pyx_t_1); if (unlikely((__pyx_t_64 == (npy_intp)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 881; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_v1 = __pyx_t_64; /* "nipy/algorithms/statistics/intvol.pyx":882 * if m: * v1 = pindex + d3[l,1] * v2 = pindex + d3[l,2] # <<<<<<<<<<<<<< * w1 = d3[l,4] * w2 = d3[l,5] */ __pyx_t_64 = __pyx_v_l; __pyx_t_65 = 2; __pyx_t_8 = -1; if (__pyx_t_64 < 0) { __pyx_t_64 += __pyx_pybuffernd_d3.diminfo[0].shape; if (unlikely(__pyx_t_64 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_64 >= __pyx_pybuffernd_d3.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_65 < 0) { __pyx_t_65 += __pyx_pybuffernd_d3.diminfo[1].shape; if (unlikely(__pyx_t_65 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_65 >= __pyx_pybuffernd_d3.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 882; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_1 = __Pyx_PyInt_to_py_Py_intptr_t((*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_d3.rcbuffer->pybuffer.buf, __pyx_t_64, __pyx_pybuffernd_d3.diminfo[0].strides, __pyx_t_65, __pyx_pybuffernd_d3.diminfo[1].strides))); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 882; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyNumber_Add(__pyx_v_pindex, __pyx_t_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 882; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_66 = __Pyx_PyInt_from_py_Py_intptr_t(__pyx_t_2); if (unlikely((__pyx_t_66 == (npy_intp)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 882; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v_v2 = __pyx_t_66; /* "nipy/algorithms/statistics/intvol.pyx":883 * v1 = pindex + d3[l,1] * v2 = pindex + d3[l,2] * w1 = d3[l,4] # <<<<<<<<<<<<<< * w2 = d3[l,5] * m = m * fpmask[v1] * fpmask[v2] */ __pyx_t_66 = __pyx_v_l; __pyx_t_67 = 4; __pyx_t_8 = -1; if (__pyx_t_66 < 0) { __pyx_t_66 += __pyx_pybuffernd_d3.diminfo[0].shape; if (unlikely(__pyx_t_66 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_66 >= __pyx_pybuffernd_d3.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_67 < 0) { __pyx_t_67 += __pyx_pybuffernd_d3.diminfo[1].shape; if (unlikely(__pyx_t_67 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_67 >= __pyx_pybuffernd_d3.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 883; 
__pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_2 = __Pyx_PyInt_to_py_Py_intptr_t((*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_d3.rcbuffer->pybuffer.buf, __pyx_t_66, __pyx_pybuffernd_d3.diminfo[0].strides, __pyx_t_67, __pyx_pybuffernd_d3.diminfo[1].strides))); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 883; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_XDECREF(__pyx_v_w1); __pyx_v_w1 = __pyx_t_2; __pyx_t_2 = 0; /* "nipy/algorithms/statistics/intvol.pyx":884 * v2 = pindex + d3[l,2] * w1 = d3[l,4] * w2 = d3[l,5] # <<<<<<<<<<<<<< * m = m * fpmask[v1] * fpmask[v2] * l2 = l2 + mu2_tri(D[w0,w0], D[w0,w1], D[w0,w2], */ __pyx_t_68 = __pyx_v_l; __pyx_t_69 = 5; __pyx_t_8 = -1; if (__pyx_t_68 < 0) { __pyx_t_68 += __pyx_pybuffernd_d3.diminfo[0].shape; if (unlikely(__pyx_t_68 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_68 >= __pyx_pybuffernd_d3.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_69 < 0) { __pyx_t_69 += __pyx_pybuffernd_d3.diminfo[1].shape; if (unlikely(__pyx_t_69 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_69 >= __pyx_pybuffernd_d3.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 884; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_2 = __Pyx_PyInt_to_py_Py_intptr_t((*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_d3.rcbuffer->pybuffer.buf, __pyx_t_68, __pyx_pybuffernd_d3.diminfo[0].strides, __pyx_t_69, __pyx_pybuffernd_d3.diminfo[1].strides))); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 884; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_XDECREF(__pyx_v_w2); __pyx_v_w2 = __pyx_t_2; __pyx_t_2 = 0; /* "nipy/algorithms/statistics/intvol.pyx":885 * w1 = d3[l,4] * w2 = d3[l,5] * m = m * fpmask[v1] * fpmask[v2] # <<<<<<<<<<<<<< * l2 = l2 + mu2_tri(D[w0,w0], D[w0,w1], D[w0,w2], * D[w1,w1], D[w1,w2], D[w2,w2]) * m */ __pyx_t_70 = __pyx_v_v1; __pyx_t_8 = -1; if (__pyx_t_70 < 0) { __pyx_t_70 += __pyx_pybuffernd_fpmask.diminfo[0].shape; if (unlikely(__pyx_t_70 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_70 >= __pyx_pybuffernd_fpmask.diminfo[0].shape)) __pyx_t_8 = 0; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 885; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_71 = __pyx_v_v2; __pyx_t_8 = -1; if (__pyx_t_71 < 0) { __pyx_t_71 += __pyx_pybuffernd_fpmask.diminfo[0].shape; if (unlikely(__pyx_t_71 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_71 >= __pyx_pybuffernd_fpmask.diminfo[0].shape)) __pyx_t_8 = 0; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 885; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_m = ((__pyx_v_m * (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_fpmask.rcbuffer->pybuffer.buf, __pyx_t_70, __pyx_pybuffernd_fpmask.diminfo[0].strides))) * (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_fpmask.rcbuffer->pybuffer.buf, __pyx_t_71, __pyx_pybuffernd_fpmask.diminfo[0].strides))); /* "nipy/algorithms/statistics/intvol.pyx":886 * w2 = d3[l,5] * m = m * fpmask[v1] * fpmask[v2] * l2 = l2 + mu2_tri(D[w0,w0], D[w0,w1], D[w0,w2], # <<<<<<<<<<<<<< * D[w1,w1], D[w1,w2], D[w2,w2]) * m * l1 = l1 - mu1_tri(D[w0,w0], D[w0,w1], D[w0,w2], */ __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 886; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_v_w0); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_w0); __Pyx_GIVEREF(__pyx_v_w0); __Pyx_INCREF(__pyx_v_w0); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_v_w0); __Pyx_GIVEREF(__pyx_v_w0); __pyx_t_1 = PyObject_GetItem(((PyObject *)__pyx_v_D), ((PyObject *)__pyx_t_2)); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 886; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; __pyx_t_72 = __pyx_PyFloat_AsDouble(__pyx_t_1); if (unlikely((__pyx_t_72 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 886; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 886; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_w0); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_w0); __Pyx_GIVEREF(__pyx_v_w0); __Pyx_INCREF(__pyx_v_w1); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_v_w1); __Pyx_GIVEREF(__pyx_v_w1); __pyx_t_2 = PyObject_GetItem(((PyObject *)__pyx_v_D), ((PyObject *)__pyx_t_1)); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 886; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __pyx_t_73 = __pyx_PyFloat_AsDouble(__pyx_t_2); if (unlikely((__pyx_t_73 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 886; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 886; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_v_w0); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_w0); __Pyx_GIVEREF(__pyx_v_w0); __Pyx_INCREF(__pyx_v_w2); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_v_w2); __Pyx_GIVEREF(__pyx_v_w2); __pyx_t_1 = PyObject_GetItem(((PyObject *)__pyx_v_D), ((PyObject *)__pyx_t_2)); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 886; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; __pyx_t_74 = __pyx_PyFloat_AsDouble(__pyx_t_1); if (unlikely((__pyx_t_74 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 886; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/statistics/intvol.pyx":887 * m = m * fpmask[v1] * fpmask[v2] * l2 = l2 + mu2_tri(D[w0,w0], D[w0,w1], D[w0,w2], * D[w1,w1], D[w1,w2], D[w2,w2]) * m # <<<<<<<<<<<<<< * l1 = l1 - mu1_tri(D[w0,w0], D[w0,w1], D[w0,w2], * D[w1,w1], D[w1,w2], D[w2,w2]) * m */ __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 887; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_w1); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_w1); __Pyx_GIVEREF(__pyx_v_w1); __Pyx_INCREF(__pyx_v_w1); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_v_w1); __Pyx_GIVEREF(__pyx_v_w1); __pyx_t_2 = PyObject_GetItem(((PyObject *)__pyx_v_D), ((PyObject *)__pyx_t_1)); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 887; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __pyx_t_75 = 
__pyx_PyFloat_AsDouble(__pyx_t_2); if (unlikely((__pyx_t_75 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 887; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 887; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_v_w1); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_w1); __Pyx_GIVEREF(__pyx_v_w1); __Pyx_INCREF(__pyx_v_w2); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_v_w2); __Pyx_GIVEREF(__pyx_v_w2); __pyx_t_1 = PyObject_GetItem(((PyObject *)__pyx_v_D), ((PyObject *)__pyx_t_2)); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 887; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; __pyx_t_76 = __pyx_PyFloat_AsDouble(__pyx_t_1); if (unlikely((__pyx_t_76 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 887; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 887; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_w2); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_w2); __Pyx_GIVEREF(__pyx_v_w2); __Pyx_INCREF(__pyx_v_w2); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_v_w2); __Pyx_GIVEREF(__pyx_v_w2); __pyx_t_2 = PyObject_GetItem(((PyObject *)__pyx_v_D), ((PyObject *)__pyx_t_1)); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 887; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __pyx_t_77 = __pyx_PyFloat_AsDouble(__pyx_t_2); if (unlikely((__pyx_t_77 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 887; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v_l2 = (__pyx_v_l2 + (__pyx_f_4nipy_10algorithms_10statistics_6intvol_mu2_tri(__pyx_t_72, __pyx_t_73, __pyx_t_74, __pyx_t_75, __pyx_t_76, __pyx_t_77, 0) * __pyx_v_m)); /* "nipy/algorithms/statistics/intvol.pyx":888 * l2 = l2 + mu2_tri(D[w0,w0], D[w0,w1], D[w0,w2], * D[w1,w1], D[w1,w2], D[w2,w2]) * m * l1 = l1 - mu1_tri(D[w0,w0], D[w0,w1], D[w0,w2], # <<<<<<<<<<<<<< * D[w1,w1], D[w1,w2], D[w2,w2]) * m * l0 = l0 + m */ __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 888; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_v_w0); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_w0); __Pyx_GIVEREF(__pyx_v_w0); __Pyx_INCREF(__pyx_v_w0); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_v_w0); __Pyx_GIVEREF(__pyx_v_w0); __pyx_t_1 = PyObject_GetItem(((PyObject *)__pyx_v_D), ((PyObject *)__pyx_t_2)); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 888; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; __pyx_t_77 = __pyx_PyFloat_AsDouble(__pyx_t_1); if (unlikely((__pyx_t_77 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 888; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 888; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_w0); 
PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_w0); __Pyx_GIVEREF(__pyx_v_w0); __Pyx_INCREF(__pyx_v_w1); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_v_w1); __Pyx_GIVEREF(__pyx_v_w1); __pyx_t_2 = PyObject_GetItem(((PyObject *)__pyx_v_D), ((PyObject *)__pyx_t_1)); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 888; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __pyx_t_76 = __pyx_PyFloat_AsDouble(__pyx_t_2); if (unlikely((__pyx_t_76 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 888; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 888; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_v_w0); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_w0); __Pyx_GIVEREF(__pyx_v_w0); __Pyx_INCREF(__pyx_v_w2); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_v_w2); __Pyx_GIVEREF(__pyx_v_w2); __pyx_t_1 = PyObject_GetItem(((PyObject *)__pyx_v_D), ((PyObject *)__pyx_t_2)); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 888; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; __pyx_t_75 = __pyx_PyFloat_AsDouble(__pyx_t_1); if (unlikely((__pyx_t_75 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 888; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/statistics/intvol.pyx":889 * D[w1,w1], D[w1,w2], D[w2,w2]) * m * l1 = l1 - mu1_tri(D[w0,w0], D[w0,w1], D[w0,w2], * D[w1,w1], D[w1,w2], D[w2,w2]) * m # <<<<<<<<<<<<<< * l0 = l0 + m * */ __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 889; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_w1); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_w1); __Pyx_GIVEREF(__pyx_v_w1); __Pyx_INCREF(__pyx_v_w1); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_v_w1); __Pyx_GIVEREF(__pyx_v_w1); __pyx_t_2 = PyObject_GetItem(((PyObject *)__pyx_v_D), ((PyObject *)__pyx_t_1)); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 889; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __pyx_t_74 = __pyx_PyFloat_AsDouble(__pyx_t_2); if (unlikely((__pyx_t_74 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 889; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 889; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_v_w1); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_w1); __Pyx_GIVEREF(__pyx_v_w1); __Pyx_INCREF(__pyx_v_w2); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_v_w2); __Pyx_GIVEREF(__pyx_v_w2); __pyx_t_1 = PyObject_GetItem(((PyObject *)__pyx_v_D), ((PyObject *)__pyx_t_2)); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 889; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; __pyx_t_73 = __pyx_PyFloat_AsDouble(__pyx_t_1); if (unlikely((__pyx_t_73 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 889; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); 
__pyx_t_1 = 0; __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 889; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_w2); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_w2); __Pyx_GIVEREF(__pyx_v_w2); __Pyx_INCREF(__pyx_v_w2); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_v_w2); __Pyx_GIVEREF(__pyx_v_w2); __pyx_t_2 = PyObject_GetItem(((PyObject *)__pyx_v_D), ((PyObject *)__pyx_t_1)); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 889; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __pyx_t_72 = __pyx_PyFloat_AsDouble(__pyx_t_2); if (unlikely((__pyx_t_72 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 889; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v_l1 = (__pyx_v_l1 - (__pyx_f_4nipy_10algorithms_10statistics_6intvol_mu1_tri(__pyx_t_77, __pyx_t_76, __pyx_t_75, __pyx_t_74, __pyx_t_73, __pyx_t_72, 0) * __pyx_v_m)); /* "nipy/algorithms/statistics/intvol.pyx":890 * l1 = l1 - mu1_tri(D[w0,w0], D[w0,w1], D[w0,w2], * D[w1,w1], D[w1,w2], D[w2,w2]) * m * l0 = l0 + m # <<<<<<<<<<<<<< * * for l in range(ds2): */ __pyx_v_l0 = (__pyx_v_l0 + __pyx_v_m); goto __pyx_L31; } __pyx_L31:; } /* "nipy/algorithms/statistics/intvol.pyx":892 * l0 = l0 + m * * for l in range(ds2): # <<<<<<<<<<<<<< * v0 = pindex + d2[l,0] * w0 = d2[l,2] */ __pyx_t_39 = __pyx_v_ds2; for (__pyx_t_43 = 0; __pyx_t_43 < __pyx_t_39; __pyx_t_43+=1) { __pyx_v_l = __pyx_t_43; /* "nipy/algorithms/statistics/intvol.pyx":893 * * for l in range(ds2): * v0 = pindex + d2[l,0] # <<<<<<<<<<<<<< * w0 = d2[l,2] * m = fpmask[v0] */ __pyx_t_78 = __pyx_v_l; __pyx_t_79 = 0; __pyx_t_8 = -1; if (__pyx_t_78 < 0) { __pyx_t_78 += __pyx_pybuffernd_d2.diminfo[0].shape; if (unlikely(__pyx_t_78 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_78 >= __pyx_pybuffernd_d2.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_79 < 0) { __pyx_t_79 += __pyx_pybuffernd_d2.diminfo[1].shape; if (unlikely(__pyx_t_79 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_79 >= __pyx_pybuffernd_d2.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 893; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_2 = __Pyx_PyInt_to_py_Py_intptr_t((*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_d2.rcbuffer->pybuffer.buf, __pyx_t_78, __pyx_pybuffernd_d2.diminfo[0].strides, __pyx_t_79, __pyx_pybuffernd_d2.diminfo[1].strides))); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 893; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = PyNumber_Add(__pyx_v_pindex, __pyx_t_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 893; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_80 = __Pyx_PyInt_from_py_Py_intptr_t(__pyx_t_1); if (unlikely((__pyx_t_80 == (npy_intp)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 893; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_v0 = __pyx_t_80; /* "nipy/algorithms/statistics/intvol.pyx":894 * for l in range(ds2): * v0 = pindex + d2[l,0] * w0 = d2[l,2] # <<<<<<<<<<<<<< * m = fpmask[v0] * if m: */ __pyx_t_80 = __pyx_v_l; __pyx_t_81 = 2; __pyx_t_8 = -1; if (__pyx_t_80 < 0) { __pyx_t_80 
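/* Summary of the d3 loop above (pyx lines 876-890): each row of d3 describes
 * one triangle of the local square, giving its three vertex offsets into
 * fpmask (columns 0-2) and the matching local vertex labels for D (columns
 * 3-5). A triangle contributes only when all three vertices lie inside the
 * mask (m stays 1); it then adds mu2_tri(...) * m to l2, subtracts
 * mu1_tri(...) * m from l1, and adds m to l0.
 */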
+= __pyx_pybuffernd_d2.diminfo[0].shape; if (unlikely(__pyx_t_80 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_80 >= __pyx_pybuffernd_d2.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_81 < 0) { __pyx_t_81 += __pyx_pybuffernd_d2.diminfo[1].shape; if (unlikely(__pyx_t_81 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_81 >= __pyx_pybuffernd_d2.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 894; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_1 = __Pyx_PyInt_to_py_Py_intptr_t((*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_d2.rcbuffer->pybuffer.buf, __pyx_t_80, __pyx_pybuffernd_d2.diminfo[0].strides, __pyx_t_81, __pyx_pybuffernd_d2.diminfo[1].strides))); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 894; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_XDECREF(__pyx_v_w0); __pyx_v_w0 = __pyx_t_1; __pyx_t_1 = 0; /* "nipy/algorithms/statistics/intvol.pyx":895 * v0 = pindex + d2[l,0] * w0 = d2[l,2] * m = fpmask[v0] # <<<<<<<<<<<<<< * if m: * v1 = pindex + d2[l,1] */ __pyx_t_82 = __pyx_v_v0; __pyx_t_8 = -1; if (__pyx_t_82 < 0) { __pyx_t_82 += __pyx_pybuffernd_fpmask.diminfo[0].shape; if (unlikely(__pyx_t_82 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_82 >= __pyx_pybuffernd_fpmask.diminfo[0].shape)) __pyx_t_8 = 0; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 895; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_m = (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_fpmask.rcbuffer->pybuffer.buf, __pyx_t_82, __pyx_pybuffernd_fpmask.diminfo[0].strides)); /* "nipy/algorithms/statistics/intvol.pyx":896 * w0 = d2[l,2] * m = fpmask[v0] * if m: # <<<<<<<<<<<<<< * v1 = pindex + d2[l,1] * w1 = d2[l,3] */ if (__pyx_v_m) { /* "nipy/algorithms/statistics/intvol.pyx":897 * m = fpmask[v0] * if m: * v1 = pindex + d2[l,1] # <<<<<<<<<<<<<< * w1 = d2[l,3] * m = m * fpmask[v1] */ __pyx_t_83 = __pyx_v_l; __pyx_t_84 = 1; __pyx_t_8 = -1; if (__pyx_t_83 < 0) { __pyx_t_83 += __pyx_pybuffernd_d2.diminfo[0].shape; if (unlikely(__pyx_t_83 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_83 >= __pyx_pybuffernd_d2.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_84 < 0) { __pyx_t_84 += __pyx_pybuffernd_d2.diminfo[1].shape; if (unlikely(__pyx_t_84 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_84 >= __pyx_pybuffernd_d2.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 897; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_1 = __Pyx_PyInt_to_py_Py_intptr_t((*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_d2.rcbuffer->pybuffer.buf, __pyx_t_83, __pyx_pybuffernd_d2.diminfo[0].strides, __pyx_t_84, __pyx_pybuffernd_d2.diminfo[1].strides))); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 897; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyNumber_Add(__pyx_v_pindex, __pyx_t_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 897; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_85 = __Pyx_PyInt_from_py_Py_intptr_t(__pyx_t_2); if (unlikely((__pyx_t_85 == (npy_intp)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 897; __pyx_clineno = __LINE__; goto 
__pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v_v1 = __pyx_t_85; /* "nipy/algorithms/statistics/intvol.pyx":898 * if m: * v1 = pindex + d2[l,1] * w1 = d2[l,3] # <<<<<<<<<<<<<< * m = m * fpmask[v1] * l1 = l1 + m * mu1_edge(D[w0,w0], D[w0,w1], D[w1,w1]) */ __pyx_t_85 = __pyx_v_l; __pyx_t_86 = 3; __pyx_t_8 = -1; if (__pyx_t_85 < 0) { __pyx_t_85 += __pyx_pybuffernd_d2.diminfo[0].shape; if (unlikely(__pyx_t_85 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_85 >= __pyx_pybuffernd_d2.diminfo[0].shape)) __pyx_t_8 = 0; if (__pyx_t_86 < 0) { __pyx_t_86 += __pyx_pybuffernd_d2.diminfo[1].shape; if (unlikely(__pyx_t_86 < 0)) __pyx_t_8 = 1; } else if (unlikely(__pyx_t_86 >= __pyx_pybuffernd_d2.diminfo[1].shape)) __pyx_t_8 = 1; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 898; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_2 = __Pyx_PyInt_to_py_Py_intptr_t((*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_d2.rcbuffer->pybuffer.buf, __pyx_t_85, __pyx_pybuffernd_d2.diminfo[0].strides, __pyx_t_86, __pyx_pybuffernd_d2.diminfo[1].strides))); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 898; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_XDECREF(__pyx_v_w1); __pyx_v_w1 = __pyx_t_2; __pyx_t_2 = 0; /* "nipy/algorithms/statistics/intvol.pyx":899 * v1 = pindex + d2[l,1] * w1 = d2[l,3] * m = m * fpmask[v1] # <<<<<<<<<<<<<< * l1 = l1 + m * mu1_edge(D[w0,w0], D[w0,w1], D[w1,w1]) * l0 = l0 - m */ __pyx_t_87 = __pyx_v_v1; __pyx_t_8 = -1; if (__pyx_t_87 < 0) { __pyx_t_87 += __pyx_pybuffernd_fpmask.diminfo[0].shape; if (unlikely(__pyx_t_87 < 0)) __pyx_t_8 = 0; } else if (unlikely(__pyx_t_87 >= __pyx_pybuffernd_fpmask.diminfo[0].shape)) __pyx_t_8 = 0; if (unlikely(__pyx_t_8 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 899; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_m = (__pyx_v_m * (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_fpmask.rcbuffer->pybuffer.buf, __pyx_t_87, __pyx_pybuffernd_fpmask.diminfo[0].strides))); /* "nipy/algorithms/statistics/intvol.pyx":900 * w1 = d2[l,3] * m = m * fpmask[v1] * l1 = l1 + m * mu1_edge(D[w0,w0], D[w0,w1], D[w1,w1]) # <<<<<<<<<<<<<< * l0 = l0 - m * */ __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 900; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_v_w0); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_w0); __Pyx_GIVEREF(__pyx_v_w0); __Pyx_INCREF(__pyx_v_w0); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_v_w0); __Pyx_GIVEREF(__pyx_v_w0); __pyx_t_1 = PyObject_GetItem(((PyObject *)__pyx_v_D), ((PyObject *)__pyx_t_2)); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 900; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; __pyx_t_72 = __pyx_PyFloat_AsDouble(__pyx_t_1); if (unlikely((__pyx_t_72 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 900; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 900; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_w0); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_w0); __Pyx_GIVEREF(__pyx_v_w0); __Pyx_INCREF(__pyx_v_w1); 
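/* The d2 loop plays the same role for edges (pyx lines 892-901): when both
 * endpoints of an edge lie inside the mask (m stays 1), the edge contributes
 * m * mu1_edge(D[w0,w0], D[w0,w1], D[w1,w1]) to l1 and subtracts m from l0.
 */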
PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_v_w1); __Pyx_GIVEREF(__pyx_v_w1); __pyx_t_2 = PyObject_GetItem(((PyObject *)__pyx_v_D), ((PyObject *)__pyx_t_1)); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 900; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __pyx_t_73 = __pyx_PyFloat_AsDouble(__pyx_t_2); if (unlikely((__pyx_t_73 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 900; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 900; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_v_w1); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_w1); __Pyx_GIVEREF(__pyx_v_w1); __Pyx_INCREF(__pyx_v_w1); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_v_w1); __Pyx_GIVEREF(__pyx_v_w1); __pyx_t_1 = PyObject_GetItem(((PyObject *)__pyx_v_D), ((PyObject *)__pyx_t_2)); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 900; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; __pyx_t_74 = __pyx_PyFloat_AsDouble(__pyx_t_1); if (unlikely((__pyx_t_74 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 900; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_l1 = (__pyx_v_l1 + (__pyx_v_m * __pyx_f_4nipy_10algorithms_10statistics_6intvol_mu1_edge(__pyx_t_72, __pyx_t_73, __pyx_t_74, 0))); /* "nipy/algorithms/statistics/intvol.pyx":901 * m = m * fpmask[v1] * l1 = l1 + m * mu1_edge(D[w0,w0], D[w0,w1], D[w1,w1]) * l0 = l0 - m # <<<<<<<<<<<<<< * * l0 += mask.sum() */ __pyx_v_l0 = (__pyx_v_l0 - __pyx_v_m); goto __pyx_L34; } __pyx_L34:; } } } /* "nipy/algorithms/statistics/intvol.pyx":903 * l0 = l0 - m * * l0 += mask.sum() # <<<<<<<<<<<<<< * return np.array([l0,l1,l2]) * */ __pyx_t_1 = PyFloat_FromDouble(__pyx_v_l0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 903; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_GetAttr(__pyx_v_mask, __pyx_n_s__sum); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 903; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_5 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 903; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyNumber_InPlaceAdd(__pyx_t_1, __pyx_t_5); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 903; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_74 = __pyx_PyFloat_AsDouble(__pyx_t_2); if (unlikely((__pyx_t_74 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 903; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v_l0 = __pyx_t_74; /* "nipy/algorithms/statistics/intvol.pyx":904 * * l0 += mask.sum() * return np.array([l0,l1,l2]) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 904; __pyx_clineno = __LINE__; goto 
__pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_5 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__array); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 904; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyFloat_FromDouble(__pyx_v_l0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 904; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = PyFloat_FromDouble(__pyx_v_l1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 904; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_34 = PyFloat_FromDouble(__pyx_v_l2); if (unlikely(!__pyx_t_34)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 904; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_34); __pyx_t_19 = PyList_New(3); if (unlikely(!__pyx_t_19)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 904; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_19); PyList_SET_ITEM(__pyx_t_19, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); PyList_SET_ITEM(__pyx_t_19, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); PyList_SET_ITEM(__pyx_t_19, 2, __pyx_t_34); __Pyx_GIVEREF(__pyx_t_34); __pyx_t_2 = 0; __pyx_t_1 = 0; __pyx_t_34 = 0; __pyx_t_34 = PyTuple_New(1); if (unlikely(!__pyx_t_34)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 904; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_34); PyTuple_SET_ITEM(__pyx_t_34, 0, ((PyObject *)__pyx_t_19)); __Pyx_GIVEREF(((PyObject *)__pyx_t_19)); __pyx_t_19 = 0; __pyx_t_19 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_34), NULL); if (unlikely(!__pyx_t_19)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 904; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_19); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_34)); __pyx_t_34 = 0; __pyx_r = __pyx_t_19; __pyx_t_19 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_19); __Pyx_XDECREF(__pyx_t_30); __Pyx_XDECREF(__pyx_t_34); { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_D.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_coords_c.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_cvertices.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_d2.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_d3.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_dstrides.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_fcoords.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_fmask.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_fpmask.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_mask_c.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_pmask.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_strides.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("nipy.algorithms.statistics.intvol.Lips2d", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_D.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_coords_c.rcbuffer->pybuffer); 
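/* Epilogue of Lips2d (pyx lines 903-904): the vertex count mask.sum() is
 * added to l0, so l0 ends up as vertices - edges + triangles, and the
 * function returns np.array([l0, l1, l2]), the accumulated 2D intrinsic
 * volumes, with l0 the Euler characteristic of the masked region.
 */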
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_cvertices.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_d2.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_d3.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_dstrides.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_fcoords.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_fmask.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_fpmask.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_mask_c.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_pmask.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_strides.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XDECREF(__pyx_v_value); __Pyx_XDECREF((PyObject *)__pyx_v_coords_c); __Pyx_XDECREF((PyObject *)__pyx_v_mask_c); __Pyx_XDECREF((PyObject *)__pyx_v_fcoords); __Pyx_XDECREF((PyObject *)__pyx_v_D); __Pyx_XDECREF((PyObject *)__pyx_v_fmask); __Pyx_XDECREF((PyObject *)__pyx_v_fpmask); __Pyx_XDECREF((PyObject *)__pyx_v_pmask); __Pyx_XDECREF((PyObject *)__pyx_v_d3); __Pyx_XDECREF((PyObject *)__pyx_v_d2); __Pyx_XDECREF((PyObject *)__pyx_v_cvertices); __Pyx_XDECREF(__pyx_v_pmask_shape); __Pyx_XDECREF((PyObject *)__pyx_v_strides); __Pyx_XDECREF((PyObject *)__pyx_v_dstrides); __Pyx_XDECREF(__pyx_v_verts); __Pyx_XDECREF(__pyx_v_union); __Pyx_XDECREF(__pyx_v_c); __Pyx_XDECREF(__pyx_v_m3); __Pyx_XDECREF(__pyx_v_m2); __Pyx_XDECREF(__pyx_v_pindex); __Pyx_XDECREF(__pyx_v_w0); __Pyx_XDECREF(__pyx_v_w1); __Pyx_XDECREF(__pyx_v_w2); __Pyx_XDECREF(__pyx_v_v); __Pyx_XDECREF(__pyx_v_coords); __Pyx_XDECREF(__pyx_v_mask); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_10algorithms_10statistics_6intvol_27EC2d(PyObject *__pyx_self, PyObject *__pyx_v_mask); /*proto*/ static char __pyx_doc_4nipy_10algorithms_10statistics_6intvol_26EC2d[] = " Compute Euler characteristic of 2D region in `mask`\n\n Given a 2d `mask`, compute the 0th intrinsic volume (Euler characteristic)\n of the masked region. The region is broken up into triangles / edges /\n vertices, which are included based on whether all voxels in the triangle /\n edge / vertex are in the mask or not.\n\n Parameters\n ----------\n mask : ndarray((i,j), np.int)\n Binary mask determining whether or not a voxel is in the mask.\n\n Returns\n -------\n mu0 : int\n Euler characteristic\n\n Notes\n -----\n The array mask is assumed to be binary. At the time of writing, it\n is not clear how to get cython to use np.bool arrays.\n\n References\n ----------\n Taylor, J.E. & Worsley, K.J. (2007). 
\"Detecting sparse signal in random fields,\n with an application to brain mapping.\"\n Journal of the American Statistical Association, 102(479):913-928.\n "; static PyMethodDef __pyx_mdef_4nipy_10algorithms_10statistics_6intvol_27EC2d = {__Pyx_NAMESTR("EC2d"), (PyCFunction)__pyx_pw_4nipy_10algorithms_10statistics_6intvol_27EC2d, METH_O, __Pyx_DOCSTR(__pyx_doc_4nipy_10algorithms_10statistics_6intvol_26EC2d)}; static PyObject *__pyx_pw_4nipy_10algorithms_10statistics_6intvol_27EC2d(PyObject *__pyx_self, PyObject *__pyx_v_mask) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("EC2d (wrapper)", 0); __pyx_r = __pyx_pf_4nipy_10algorithms_10statistics_6intvol_26EC2d(__pyx_self, ((PyObject *)__pyx_v_mask)); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/algorithms/statistics/intvol.pyx":907 * * * def EC2d(mask): # <<<<<<<<<<<<<< * """ Compute Euler characteristic of 2D region in `mask` * */ static PyObject *__pyx_pf_4nipy_10algorithms_10statistics_6intvol_26EC2d(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_mask) { PyArrayObject *__pyx_v_mask_c = 0; PyArrayObject *__pyx_v_fpmask = 0; PyArrayObject *__pyx_v_d2 = 0; PyArrayObject *__pyx_v_d3 = 0; npy_intp __pyx_v_i; npy_intp __pyx_v_j; npy_intp __pyx_v_l; npy_intp __pyx_v_s0; npy_intp __pyx_v_s1; npy_intp __pyx_v_ds2; npy_intp __pyx_v_ds3; npy_intp __pyx_v_index; npy_intp __pyx_v_m; npy_intp __pyx_v_ss0; npy_intp __pyx_v_ss1; npy_intp __pyx_v_v0; npy_intp __pyx_v_v1; long __pyx_v_l0; PyObject *__pyx_v_pmask_shape = NULL; PyObject *__pyx_v_pmask = NULL; PyArrayObject *__pyx_v_strides = 0; PyObject *__pyx_v_union = NULL; PyObject *__pyx_v_c = NULL; PyObject *__pyx_v_v2 = NULL; __Pyx_LocalBuf_ND __pyx_pybuffernd_d2; __Pyx_Buffer __pyx_pybuffer_d2; __Pyx_LocalBuf_ND __pyx_pybuffernd_d3; __Pyx_Buffer __pyx_pybuffer_d3; __Pyx_LocalBuf_ND __pyx_pybuffernd_fpmask; __Pyx_Buffer __pyx_pybuffer_fpmask; __Pyx_LocalBuf_ND __pyx_pybuffernd_mask_c; __Pyx_Buffer __pyx_pybuffer_mask_c; __Pyx_LocalBuf_ND __pyx_pybuffernd_strides; __Pyx_Buffer __pyx_pybuffer_strides; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; int __pyx_t_5; PyArrayObject *__pyx_t_6 = NULL; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; npy_intp __pyx_t_11; npy_intp __pyx_t_12; PyArrayObject *__pyx_t_13 = NULL; PyObject *__pyx_t_14 = NULL; PyObject *__pyx_t_15 = NULL; PyArrayObject *__pyx_t_16 = NULL; long __pyx_t_17; __pyx_t_5numpy_intp_t __pyx_t_18; long __pyx_t_19; __pyx_t_5numpy_intp_t __pyx_t_20; PyObject *__pyx_t_21 = NULL; PyArrayObject *__pyx_t_22 = NULL; PyArrayObject *__pyx_t_23 = NULL; long __pyx_t_24; long __pyx_t_25; npy_intp __pyx_t_26; npy_intp __pyx_t_27; npy_intp __pyx_t_28; long __pyx_t_29; npy_intp __pyx_t_30; npy_intp __pyx_t_31; long __pyx_t_32; npy_intp __pyx_t_33; long __pyx_t_34; npy_intp __pyx_t_35; npy_intp __pyx_t_36; long __pyx_t_37; npy_intp __pyx_t_38; npy_intp __pyx_t_39; long __pyx_t_40; npy_intp __pyx_t_41; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("EC2d", 0); __pyx_pybuffer_mask_c.pybuffer.buf = NULL; __pyx_pybuffer_mask_c.refcount = 0; __pyx_pybuffernd_mask_c.data = NULL; __pyx_pybuffernd_mask_c.rcbuffer = &__pyx_pybuffer_mask_c; __pyx_pybuffer_fpmask.pybuffer.buf = NULL; __pyx_pybuffer_fpmask.refcount = 0; __pyx_pybuffernd_fpmask.data = NULL; __pyx_pybuffernd_fpmask.rcbuffer = 
&__pyx_pybuffer_fpmask; __pyx_pybuffer_d2.pybuffer.buf = NULL; __pyx_pybuffer_d2.refcount = 0; __pyx_pybuffernd_d2.data = NULL; __pyx_pybuffernd_d2.rcbuffer = &__pyx_pybuffer_d2; __pyx_pybuffer_d3.pybuffer.buf = NULL; __pyx_pybuffer_d3.refcount = 0; __pyx_pybuffernd_d3.data = NULL; __pyx_pybuffernd_d3.rcbuffer = &__pyx_pybuffer_d3; __pyx_pybuffer_strides.pybuffer.buf = NULL; __pyx_pybuffer_strides.refcount = 0; __pyx_pybuffernd_strides.data = NULL; __pyx_pybuffernd_strides.rcbuffer = &__pyx_pybuffer_strides; /* "nipy/algorithms/statistics/intvol.pyx":936 * Journal of the American Statistical Association, 102(479):913-928. * """ * if not set(np.unique(mask)).issubset([0,1]): # <<<<<<<<<<<<<< * raise ValueError('mask should be filled with 0/1 ' * 'values, but be of type np.int') */ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 936; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__unique); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 936; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 936; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_mask); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_mask); __Pyx_GIVEREF(__pyx_v_mask); __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 936; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 936; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyObject_Call(((PyObject *)((PyObject*)(&PySet_Type))), ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 936; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __pyx_t_1 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__issubset); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 936; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyList_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 936; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_int_0); PyList_SET_ITEM(__pyx_t_3, 0, __pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); __Pyx_INCREF(__pyx_int_1); PyList_SET_ITEM(__pyx_t_3, 1, __pyx_int_1); __Pyx_GIVEREF(__pyx_int_1); __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 936; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_t_3)); __Pyx_GIVEREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __pyx_t_3 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 936; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); 
__pyx_t_1 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 936; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_5 = (!__pyx_t_4); if (__pyx_t_5) { /* "nipy/algorithms/statistics/intvol.pyx":937 * """ * if not set(np.unique(mask)).issubset([0,1]): * raise ValueError('mask should be filled with 0/1 ' # <<<<<<<<<<<<<< * 'values, but be of type np.int') * cdef: */ __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_56), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 937; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[0]; __pyx_lineno = 937; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L3; } __pyx_L3:; /* "nipy/algorithms/statistics/intvol.pyx":952 * np.npy_intp ss0, ss1 # strides * np.npy_intp v0, v1 # vertices * long l0 = 0 # <<<<<<<<<<<<<< * * mask_c = mask */ __pyx_v_l0 = 0; /* "nipy/algorithms/statistics/intvol.pyx":954 * long l0 = 0 * * mask_c = mask # <<<<<<<<<<<<<< * * pmask_shape = np.array(mask.shape) + 1 */ if (!(likely(((__pyx_v_mask) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_mask, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 954; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_6 = ((PyArrayObject *)__pyx_v_mask); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_mask_c.rcbuffer->pybuffer); __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_mask_c.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack); if (unlikely(__pyx_t_7 < 0)) { PyErr_Fetch(&__pyx_t_8, &__pyx_t_9, &__pyx_t_10); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_mask_c.rcbuffer->pybuffer, (PyObject*)__pyx_v_mask_c, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_8); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_8, __pyx_t_9, __pyx_t_10); } } __pyx_pybuffernd_mask_c.diminfo[0].strides = __pyx_pybuffernd_mask_c.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_mask_c.diminfo[0].shape = __pyx_pybuffernd_mask_c.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_mask_c.diminfo[1].strides = __pyx_pybuffernd_mask_c.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_mask_c.diminfo[1].shape = __pyx_pybuffernd_mask_c.rcbuffer->pybuffer.shape[1]; if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 954; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_6 = 0; __Pyx_INCREF(__pyx_v_mask); __pyx_v_mask_c = ((PyArrayObject *)__pyx_v_mask); /* "nipy/algorithms/statistics/intvol.pyx":956 * mask_c = mask * * pmask_shape = np.array(mask.shape) + 1 # <<<<<<<<<<<<<< * pmask = np.zeros(pmask_shape, np.int) * pmask[:-1,:-1] = mask_c */ __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 956; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__array); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 956; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); 
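/* The surrounding code generates intvol.pyx:956-958: the mask is embedded in
 * an array one voxel larger along each dimension, so every voxel of the
 * original mask is bordered by zero padding on its high-index sides.  In the
 * .pyx source quoted in the comments here, that step reads:
 *
 *     pmask_shape = np.array(mask.shape) + 1
 *     pmask = np.zeros(pmask_shape, np.int)
 *     pmask[:-1, :-1] = mask_c
 */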
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyObject_GetAttr(__pyx_v_mask, __pyx_n_s__shape); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 956; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 956; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 956; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __pyx_t_1 = PyNumber_Add(__pyx_t_3, __pyx_int_1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 956; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pmask_shape = __pyx_t_1; __pyx_t_1 = 0; /* "nipy/algorithms/statistics/intvol.pyx":957 * * pmask_shape = np.array(mask.shape) + 1 * pmask = np.zeros(pmask_shape, np.int) # <<<<<<<<<<<<<< * pmask[:-1,:-1] = mask_c * */ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 957; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__zeros); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 957; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 957; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__int); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 957; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 957; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_pmask_shape); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_pmask_shape); __Pyx_GIVEREF(__pyx_v_pmask_shape); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 957; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __pyx_v_pmask = __pyx_t_2; __pyx_t_2 = 0; /* "nipy/algorithms/statistics/intvol.pyx":958 * pmask_shape = np.array(mask.shape) + 1 * pmask = np.zeros(pmask_shape, np.int) * pmask[:-1,:-1] = mask_c # <<<<<<<<<<<<<< * * s0, s1 = (pmask.shape[0], pmask.shape[1]) */ if (PyObject_SetItem(__pyx_v_pmask, ((PyObject *)__pyx_k_tuple_59), ((PyObject *)__pyx_v_mask_c)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 958; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/algorithms/statistics/intvol.pyx":960 * pmask[:-1,:-1] = mask_c * * s0, s1 = (pmask.shape[0], pmask.shape[1]) # <<<<<<<<<<<<<< * * fpmask = pmask.reshape(-1) */ __pyx_t_2 = 
PyObject_GetAttr(__pyx_v_pmask, __pyx_n_s__shape); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 960; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __Pyx_GetItemInt(__pyx_t_2, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 960; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_11 = __Pyx_PyInt_from_py_Py_intptr_t(__pyx_t_1); if (unlikely((__pyx_t_11 == (npy_intp)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 960; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyObject_GetAttr(__pyx_v_pmask, __pyx_n_s__shape); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 960; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_GetItemInt(__pyx_t_1, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 960; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_12 = __Pyx_PyInt_from_py_Py_intptr_t(__pyx_t_2); if (unlikely((__pyx_t_12 == (npy_intp)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 960; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v_s0 = __pyx_t_11; __pyx_v_s1 = __pyx_t_12; /* "nipy/algorithms/statistics/intvol.pyx":962 * s0, s1 = (pmask.shape[0], pmask.shape[1]) * * fpmask = pmask.reshape(-1) # <<<<<<<<<<<<<< * * cdef: */ __pyx_t_2 = PyObject_GetAttr(__pyx_v_pmask, __pyx_n_s__reshape); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 962; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_k_tuple_60), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 962; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 962; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_13 = ((PyArrayObject *)__pyx_t_1); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_fpmask.rcbuffer->pybuffer); __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_fpmask.rcbuffer->pybuffer, (PyObject*)__pyx_t_13, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_7 < 0)) { PyErr_Fetch(&__pyx_t_10, &__pyx_t_9, &__pyx_t_8); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_fpmask.rcbuffer->pybuffer, (PyObject*)__pyx_v_fpmask, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_8); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_10, __pyx_t_9, __pyx_t_8); } } __pyx_pybuffernd_fpmask.diminfo[0].strides = __pyx_pybuffernd_fpmask.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_fpmask.diminfo[0].shape = __pyx_pybuffernd_fpmask.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 962; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_13 = 0; __pyx_v_fpmask = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* 
"nipy/algorithms/statistics/intvol.pyx":966 * cdef: * np.ndarray[np.intp_t, ndim=1] strides * strides = np.array(strides_from(pmask_shape, np.bool), dtype=np.intp) # <<<<<<<<<<<<<< * ss0, ss1 = strides[0], strides[1] * */ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 966; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__array); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 966; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__strides_from); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 966; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 966; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_14 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__bool); if (unlikely(!__pyx_t_14)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 966; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_14); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 966; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_pmask_shape); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_pmask_shape); __Pyx_GIVEREF(__pyx_v_pmask_shape); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_14); __Pyx_GIVEREF(__pyx_t_14); __pyx_t_14 = 0; __pyx_t_14 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_14)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 966; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_14); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 966; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_14); __Pyx_GIVEREF(__pyx_t_14); __pyx_t_14 = 0; __pyx_t_14 = PyDict_New(); if (unlikely(!__pyx_t_14)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 966; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_14)); __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 966; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_15 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__intp); if (unlikely(!__pyx_t_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 966; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_15); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (PyDict_SetItem(__pyx_t_14, ((PyObject *)__pyx_n_s__dtype), __pyx_t_15) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 966; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_15); __pyx_t_15 = 0; __pyx_t_15 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_3), ((PyObject *)__pyx_t_14)); if (unlikely(!__pyx_t_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 966; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_15); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_14)); __pyx_t_14 = 0; if 
(!(likely(((__pyx_t_15) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_15, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 966; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_16 = ((PyArrayObject *)__pyx_t_15); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_strides.rcbuffer->pybuffer); __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_strides.rcbuffer->pybuffer, (PyObject*)__pyx_t_16, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_7 < 0)) { PyErr_Fetch(&__pyx_t_8, &__pyx_t_9, &__pyx_t_10); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_strides.rcbuffer->pybuffer, (PyObject*)__pyx_v_strides, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_8); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_8, __pyx_t_9, __pyx_t_10); } } __pyx_pybuffernd_strides.diminfo[0].strides = __pyx_pybuffernd_strides.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_strides.diminfo[0].shape = __pyx_pybuffernd_strides.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 966; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_16 = 0; __pyx_v_strides = ((PyArrayObject *)__pyx_t_15); __pyx_t_15 = 0; /* "nipy/algorithms/statistics/intvol.pyx":967 * np.ndarray[np.intp_t, ndim=1] strides * strides = np.array(strides_from(pmask_shape, np.bool), dtype=np.intp) * ss0, ss1 = strides[0], strides[1] # <<<<<<<<<<<<<< * * # First do the interior contributions. */ __pyx_t_17 = 0; __pyx_t_7 = -1; if (__pyx_t_17 < 0) { __pyx_t_17 += __pyx_pybuffernd_strides.diminfo[0].shape; if (unlikely(__pyx_t_17 < 0)) __pyx_t_7 = 0; } else if (unlikely(__pyx_t_17 >= __pyx_pybuffernd_strides.diminfo[0].shape)) __pyx_t_7 = 0; if (unlikely(__pyx_t_7 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 967; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_18 = (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_strides.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_strides.diminfo[0].strides)); __pyx_t_19 = 1; __pyx_t_7 = -1; if (__pyx_t_19 < 0) { __pyx_t_19 += __pyx_pybuffernd_strides.diminfo[0].shape; if (unlikely(__pyx_t_19 < 0)) __pyx_t_7 = 0; } else if (unlikely(__pyx_t_19 >= __pyx_pybuffernd_strides.diminfo[0].shape)) __pyx_t_7 = 0; if (unlikely(__pyx_t_7 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 967; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_20 = (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_strides.rcbuffer->pybuffer.buf, __pyx_t_19, __pyx_pybuffernd_strides.diminfo[0].strides)); __pyx_v_ss0 = __pyx_t_18; __pyx_v_ss1 = __pyx_t_20; /* "nipy/algorithms/statistics/intvol.pyx":972 * # We first figure out which vertices, edges, triangles, tetrahedra * # are uniquely associated with an interior voxel * union = join_complexes(*[cube_with_strides_center((0,1), strides), # <<<<<<<<<<<<<< * cube_with_strides_center((1,0), strides), * cube_with_strides_center((1,1), strides)]) */ __pyx_t_15 = __Pyx_GetName(__pyx_m, __pyx_n_s__join_complexes); if (unlikely(!__pyx_t_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 972; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_15); __pyx_t_14 = __Pyx_GetName(__pyx_m, __pyx_n_s_8); if 
(unlikely(!__pyx_t_14)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 972; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_14); __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 972; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(((PyObject *)__pyx_k_tuple_61)); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_k_tuple_61)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_61)); __Pyx_INCREF(((PyObject *)__pyx_v_strides)); PyTuple_SET_ITEM(__pyx_t_3, 1, ((PyObject *)__pyx_v_strides)); __Pyx_GIVEREF(((PyObject *)__pyx_v_strides)); __pyx_t_2 = PyObject_Call(__pyx_t_14, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 972; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; /* "nipy/algorithms/statistics/intvol.pyx":973 * # are uniquely associated with an interior voxel * union = join_complexes(*[cube_with_strides_center((0,1), strides), * cube_with_strides_center((1,0), strides), # <<<<<<<<<<<<<< * cube_with_strides_center((1,1), strides)]) * c = cube_with_strides_center((0,0), strides) */ __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s_8); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 973; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_14 = PyTuple_New(2); if (unlikely(!__pyx_t_14)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 973; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_14); __Pyx_INCREF(((PyObject *)__pyx_k_tuple_62)); PyTuple_SET_ITEM(__pyx_t_14, 0, ((PyObject *)__pyx_k_tuple_62)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_62)); __Pyx_INCREF(((PyObject *)__pyx_v_strides)); PyTuple_SET_ITEM(__pyx_t_14, 1, ((PyObject *)__pyx_v_strides)); __Pyx_GIVEREF(((PyObject *)__pyx_v_strides)); __pyx_t_1 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_14), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 973; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_14)); __pyx_t_14 = 0; /* "nipy/algorithms/statistics/intvol.pyx":974 * union = join_complexes(*[cube_with_strides_center((0,1), strides), * cube_with_strides_center((1,0), strides), * cube_with_strides_center((1,1), strides)]) # <<<<<<<<<<<<<< * c = cube_with_strides_center((0,0), strides) * */ __pyx_t_14 = __Pyx_GetName(__pyx_m, __pyx_n_s_8); if (unlikely(!__pyx_t_14)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 974; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_14); __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 974; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(((PyObject *)__pyx_k_tuple_63)); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_k_tuple_63)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_63)); __Pyx_INCREF(((PyObject *)__pyx_v_strides)); PyTuple_SET_ITEM(__pyx_t_3, 1, ((PyObject *)__pyx_v_strides)); __Pyx_GIVEREF(((PyObject *)__pyx_v_strides)); __pyx_t_21 = PyObject_Call(__pyx_t_14, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_21)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 974; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_21); __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_3)); 
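/* intvol.pyx:972-978, quoted in the comments around this point, build the
 * simplicial complexes used for the interior contributions: the union of the
 * complexes centred at the three neighbouring offsets is subtracted from the
 * complex centred at (0, 0), leaving the triangles (d3) and edges (d2) that
 * are uniquely associated with an interior voxel.  From the quoted source:
 *
 *     union = join_complexes(*[cube_with_strides_center((0,1), strides),
 *                              cube_with_strides_center((1,0), strides),
 *                              cube_with_strides_center((1,1), strides)])
 *     c = cube_with_strides_center((0,0), strides)
 *     d3 = np.array(list(c[3].difference(union[3])))
 *     d2 = np.array(list(c[2].difference(union[2])))
 */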
__pyx_t_3 = 0; __pyx_t_3 = PyList_New(3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 972; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyList_SET_ITEM(__pyx_t_3, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); PyList_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); PyList_SET_ITEM(__pyx_t_3, 2, __pyx_t_21); __Pyx_GIVEREF(__pyx_t_21); __pyx_t_2 = 0; __pyx_t_1 = 0; __pyx_t_21 = 0; __pyx_t_21 = PySequence_Tuple(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_21)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 972; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_21)); __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __pyx_t_3 = PyObject_Call(__pyx_t_15, ((PyObject *)__pyx_t_21), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 972; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_15); __pyx_t_15 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_21)); __pyx_t_21 = 0; __pyx_v_union = __pyx_t_3; __pyx_t_3 = 0; /* "nipy/algorithms/statistics/intvol.pyx":975 * cube_with_strides_center((1,0), strides), * cube_with_strides_center((1,1), strides)]) * c = cube_with_strides_center((0,0), strides) # <<<<<<<<<<<<<< * * d3 = np.array(list(c[3].difference(union[3]))) */ __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s_8); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 975; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_21 = PyTuple_New(2); if (unlikely(!__pyx_t_21)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 975; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_21); __Pyx_INCREF(((PyObject *)__pyx_k_tuple_64)); PyTuple_SET_ITEM(__pyx_t_21, 0, ((PyObject *)__pyx_k_tuple_64)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_64)); __Pyx_INCREF(((PyObject *)__pyx_v_strides)); PyTuple_SET_ITEM(__pyx_t_21, 1, ((PyObject *)__pyx_v_strides)); __Pyx_GIVEREF(((PyObject *)__pyx_v_strides)); __pyx_t_15 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_21), NULL); if (unlikely(!__pyx_t_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 975; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_15); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_21)); __pyx_t_21 = 0; __pyx_v_c = __pyx_t_15; __pyx_t_15 = 0; /* "nipy/algorithms/statistics/intvol.pyx":977 * c = cube_with_strides_center((0,0), strides) * * d3 = np.array(list(c[3].difference(union[3]))) # <<<<<<<<<<<<<< * d2 = np.array(list(c[2].difference(union[2]))) * */ __pyx_t_15 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 977; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_15); __pyx_t_21 = PyObject_GetAttr(__pyx_t_15, __pyx_n_s__array); if (unlikely(!__pyx_t_21)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 977; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_21); __Pyx_DECREF(__pyx_t_15); __pyx_t_15 = 0; __pyx_t_15 = __Pyx_GetItemInt(__pyx_v_c, 3, sizeof(long), PyInt_FromLong); if (!__pyx_t_15) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 977; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_15); __pyx_t_3 = PyObject_GetAttr(__pyx_t_15, __pyx_n_s__difference); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 977; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_15); __pyx_t_15 = 0; __pyx_t_15 = 
__Pyx_GetItemInt(__pyx_v_union, 3, sizeof(long), PyInt_FromLong); if (!__pyx_t_15) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 977; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_15); __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 977; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_15); __Pyx_GIVEREF(__pyx_t_15); __pyx_t_15 = 0; __pyx_t_15 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 977; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_15); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 977; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_15); __Pyx_GIVEREF(__pyx_t_15); __pyx_t_15 = 0; __pyx_t_15 = PyObject_Call(((PyObject *)((PyObject*)(&PyList_Type))), ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 977; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_15); __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 977; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_15); __Pyx_GIVEREF(__pyx_t_15); __pyx_t_15 = 0; __pyx_t_15 = PyObject_Call(__pyx_t_21, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 977; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_15); __Pyx_DECREF(__pyx_t_21); __pyx_t_21 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; if (!(likely(((__pyx_t_15) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_15, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 977; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_22 = ((PyArrayObject *)__pyx_t_15); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_d3.rcbuffer->pybuffer); __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_d3.rcbuffer->pybuffer, (PyObject*)__pyx_t_22, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack); if (unlikely(__pyx_t_7 < 0)) { PyErr_Fetch(&__pyx_t_10, &__pyx_t_9, &__pyx_t_8); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_d3.rcbuffer->pybuffer, (PyObject*)__pyx_v_d3, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_8); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_10, __pyx_t_9, __pyx_t_8); } } __pyx_pybuffernd_d3.diminfo[0].strides = __pyx_pybuffernd_d3.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_d3.diminfo[0].shape = __pyx_pybuffernd_d3.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_d3.diminfo[1].strides = __pyx_pybuffernd_d3.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_d3.diminfo[1].shape = __pyx_pybuffernd_d3.rcbuffer->pybuffer.shape[1]; if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 977; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_22 = 0; __pyx_v_d3 = ((PyArrayObject *)__pyx_t_15); __pyx_t_15 = 0; /* "nipy/algorithms/statistics/intvol.pyx":978 * 
* d3 = np.array(list(c[3].difference(union[3]))) * d2 = np.array(list(c[2].difference(union[2]))) # <<<<<<<<<<<<<< * * ds2 = d2.shape[0] */ __pyx_t_15 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 978; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_15); __pyx_t_1 = PyObject_GetAttr(__pyx_t_15, __pyx_n_s__array); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 978; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_15); __pyx_t_15 = 0; __pyx_t_15 = __Pyx_GetItemInt(__pyx_v_c, 2, sizeof(long), PyInt_FromLong); if (!__pyx_t_15) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 978; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_15); __pyx_t_21 = PyObject_GetAttr(__pyx_t_15, __pyx_n_s__difference); if (unlikely(!__pyx_t_21)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 978; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_21); __Pyx_DECREF(__pyx_t_15); __pyx_t_15 = 0; __pyx_t_15 = __Pyx_GetItemInt(__pyx_v_union, 2, sizeof(long), PyInt_FromLong); if (!__pyx_t_15) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 978; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_15); __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 978; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_15); __Pyx_GIVEREF(__pyx_t_15); __pyx_t_15 = 0; __pyx_t_15 = PyObject_Call(__pyx_t_21, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 978; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_15); __Pyx_DECREF(__pyx_t_21); __pyx_t_21 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 978; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_15); __Pyx_GIVEREF(__pyx_t_15); __pyx_t_15 = 0; __pyx_t_15 = PyObject_Call(((PyObject *)((PyObject*)(&PyList_Type))), ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 978; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_15); __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 978; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_15); __Pyx_GIVEREF(__pyx_t_15); __pyx_t_15 = 0; __pyx_t_15 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 978; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_15); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; if (!(likely(((__pyx_t_15) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_15, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 978; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_23 = ((PyArrayObject *)__pyx_t_15); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_d2.rcbuffer->pybuffer); __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_d2.rcbuffer->pybuffer, (PyObject*)__pyx_t_23, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 
0, __pyx_stack); if (unlikely(__pyx_t_7 < 0)) { PyErr_Fetch(&__pyx_t_8, &__pyx_t_9, &__pyx_t_10); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_d2.rcbuffer->pybuffer, (PyObject*)__pyx_v_d2, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_8); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_8, __pyx_t_9, __pyx_t_10); } } __pyx_pybuffernd_d2.diminfo[0].strides = __pyx_pybuffernd_d2.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_d2.diminfo[0].shape = __pyx_pybuffernd_d2.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_d2.diminfo[1].strides = __pyx_pybuffernd_d2.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_d2.diminfo[1].shape = __pyx_pybuffernd_d2.rcbuffer->pybuffer.shape[1]; if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 978; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_23 = 0; __pyx_v_d2 = ((PyArrayObject *)__pyx_t_15); __pyx_t_15 = 0; /* "nipy/algorithms/statistics/intvol.pyx":980 * d2 = np.array(list(c[2].difference(union[2]))) * * ds2 = d2.shape[0] # <<<<<<<<<<<<<< * ds3 = d3.shape[0] * */ __pyx_v_ds2 = (__pyx_v_d2->dimensions[0]); /* "nipy/algorithms/statistics/intvol.pyx":981 * * ds2 = d2.shape[0] * ds3 = d3.shape[0] # <<<<<<<<<<<<<< * * for i in range(s0-1): */ __pyx_v_ds3 = (__pyx_v_d3->dimensions[0]); /* "nipy/algorithms/statistics/intvol.pyx":983 * ds3 = d3.shape[0] * * for i in range(s0-1): # <<<<<<<<<<<<<< * for j in range(s1-1): * index = i*ss0+j*ss1 */ __pyx_t_24 = (__pyx_v_s0 - 1); for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_24; __pyx_t_12+=1) { __pyx_v_i = __pyx_t_12; /* "nipy/algorithms/statistics/intvol.pyx":984 * * for i in range(s0-1): * for j in range(s1-1): # <<<<<<<<<<<<<< * index = i*ss0+j*ss1 * for l in range(ds3): */ __pyx_t_25 = (__pyx_v_s1 - 1); for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_25; __pyx_t_11+=1) { __pyx_v_j = __pyx_t_11; /* "nipy/algorithms/statistics/intvol.pyx":985 * for i in range(s0-1): * for j in range(s1-1): * index = i*ss0+j*ss1 # <<<<<<<<<<<<<< * for l in range(ds3): * v0 = index + d3[l,0] */ __pyx_v_index = ((__pyx_v_i * __pyx_v_ss0) + (__pyx_v_j * __pyx_v_ss1)); /* "nipy/algorithms/statistics/intvol.pyx":986 * for j in range(s1-1): * index = i*ss0+j*ss1 * for l in range(ds3): # <<<<<<<<<<<<<< * v0 = index + d3[l,0] * m = fpmask[v0] */ __pyx_t_26 = __pyx_v_ds3; for (__pyx_t_27 = 0; __pyx_t_27 < __pyx_t_26; __pyx_t_27+=1) { __pyx_v_l = __pyx_t_27; /* "nipy/algorithms/statistics/intvol.pyx":987 * index = i*ss0+j*ss1 * for l in range(ds3): * v0 = index + d3[l,0] # <<<<<<<<<<<<<< * m = fpmask[v0] * if m and v0: */ __pyx_t_28 = __pyx_v_l; __pyx_t_29 = 0; __pyx_t_7 = -1; if (__pyx_t_28 < 0) { __pyx_t_28 += __pyx_pybuffernd_d3.diminfo[0].shape; if (unlikely(__pyx_t_28 < 0)) __pyx_t_7 = 0; } else if (unlikely(__pyx_t_28 >= __pyx_pybuffernd_d3.diminfo[0].shape)) __pyx_t_7 = 0; if (__pyx_t_29 < 0) { __pyx_t_29 += __pyx_pybuffernd_d3.diminfo[1].shape; if (unlikely(__pyx_t_29 < 0)) __pyx_t_7 = 1; } else if (unlikely(__pyx_t_29 >= __pyx_pybuffernd_d3.diminfo[1].shape)) __pyx_t_7 = 1; if (unlikely(__pyx_t_7 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 987; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_v0 = (__pyx_v_index + (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_d3.rcbuffer->pybuffer.buf, __pyx_t_28, __pyx_pybuffernd_d3.diminfo[0].strides, __pyx_t_29, 
__pyx_pybuffernd_d3.diminfo[1].strides))); /* "nipy/algorithms/statistics/intvol.pyx":988 * for l in range(ds3): * v0 = index + d3[l,0] * m = fpmask[v0] # <<<<<<<<<<<<<< * if m and v0: * v1 = index + d3[l,1] */ __pyx_t_30 = __pyx_v_v0; __pyx_t_7 = -1; if (__pyx_t_30 < 0) { __pyx_t_30 += __pyx_pybuffernd_fpmask.diminfo[0].shape; if (unlikely(__pyx_t_30 < 0)) __pyx_t_7 = 0; } else if (unlikely(__pyx_t_30 >= __pyx_pybuffernd_fpmask.diminfo[0].shape)) __pyx_t_7 = 0; if (unlikely(__pyx_t_7 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 988; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_m = (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_fpmask.rcbuffer->pybuffer.buf, __pyx_t_30, __pyx_pybuffernd_fpmask.diminfo[0].strides)); /* "nipy/algorithms/statistics/intvol.pyx":989 * v0 = index + d3[l,0] * m = fpmask[v0] * if m and v0: # <<<<<<<<<<<<<< * v1 = index + d3[l,1] * v2 = index + d3[l,2] */ if (__pyx_v_m) { __pyx_t_5 = __pyx_v_v0; } else { __pyx_t_5 = __pyx_v_m; } if (__pyx_t_5) { /* "nipy/algorithms/statistics/intvol.pyx":990 * m = fpmask[v0] * if m and v0: * v1 = index + d3[l,1] # <<<<<<<<<<<<<< * v2 = index + d3[l,2] * m = m * fpmask[v1] * fpmask[v2] */ __pyx_t_31 = __pyx_v_l; __pyx_t_32 = 1; __pyx_t_7 = -1; if (__pyx_t_31 < 0) { __pyx_t_31 += __pyx_pybuffernd_d3.diminfo[0].shape; if (unlikely(__pyx_t_31 < 0)) __pyx_t_7 = 0; } else if (unlikely(__pyx_t_31 >= __pyx_pybuffernd_d3.diminfo[0].shape)) __pyx_t_7 = 0; if (__pyx_t_32 < 0) { __pyx_t_32 += __pyx_pybuffernd_d3.diminfo[1].shape; if (unlikely(__pyx_t_32 < 0)) __pyx_t_7 = 1; } else if (unlikely(__pyx_t_32 >= __pyx_pybuffernd_d3.diminfo[1].shape)) __pyx_t_7 = 1; if (unlikely(__pyx_t_7 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 990; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_v1 = (__pyx_v_index + (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_d3.rcbuffer->pybuffer.buf, __pyx_t_31, __pyx_pybuffernd_d3.diminfo[0].strides, __pyx_t_32, __pyx_pybuffernd_d3.diminfo[1].strides))); /* "nipy/algorithms/statistics/intvol.pyx":991 * if m and v0: * v1 = index + d3[l,1] * v2 = index + d3[l,2] # <<<<<<<<<<<<<< * m = m * fpmask[v1] * fpmask[v2] * l0 = l0 + m */ __pyx_t_33 = __pyx_v_l; __pyx_t_34 = 2; __pyx_t_7 = -1; if (__pyx_t_33 < 0) { __pyx_t_33 += __pyx_pybuffernd_d3.diminfo[0].shape; if (unlikely(__pyx_t_33 < 0)) __pyx_t_7 = 0; } else if (unlikely(__pyx_t_33 >= __pyx_pybuffernd_d3.diminfo[0].shape)) __pyx_t_7 = 0; if (__pyx_t_34 < 0) { __pyx_t_34 += __pyx_pybuffernd_d3.diminfo[1].shape; if (unlikely(__pyx_t_34 < 0)) __pyx_t_7 = 1; } else if (unlikely(__pyx_t_34 >= __pyx_pybuffernd_d3.diminfo[1].shape)) __pyx_t_7 = 1; if (unlikely(__pyx_t_7 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 991; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_15 = PyInt_FromLong((__pyx_v_index + (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_d3.rcbuffer->pybuffer.buf, __pyx_t_33, __pyx_pybuffernd_d3.diminfo[0].strides, __pyx_t_34, __pyx_pybuffernd_d3.diminfo[1].strides)))); if (unlikely(!__pyx_t_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 991; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_15); __Pyx_XDECREF(__pyx_v_v2); __pyx_v_v2 = __pyx_t_15; __pyx_t_15 = 0; /* "nipy/algorithms/statistics/intvol.pyx":992 * v1 = index + d3[l,1] * v2 = index + d3[l,2] * m = m * fpmask[v1] * fpmask[v2] # <<<<<<<<<<<<<< * l0 = 
l0 + m * */ __pyx_t_35 = __pyx_v_v1; __pyx_t_7 = -1; if (__pyx_t_35 < 0) { __pyx_t_35 += __pyx_pybuffernd_fpmask.diminfo[0].shape; if (unlikely(__pyx_t_35 < 0)) __pyx_t_7 = 0; } else if (unlikely(__pyx_t_35 >= __pyx_pybuffernd_fpmask.diminfo[0].shape)) __pyx_t_7 = 0; if (unlikely(__pyx_t_7 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 992; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_15 = PyInt_FromLong((__pyx_v_m * (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_fpmask.rcbuffer->pybuffer.buf, __pyx_t_35, __pyx_pybuffernd_fpmask.diminfo[0].strides)))); if (unlikely(!__pyx_t_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 992; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_15); __pyx_t_3 = PyObject_GetItem(((PyObject *)__pyx_v_fpmask), __pyx_v_v2); if (!__pyx_t_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 992; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_1 = PyNumber_Multiply(__pyx_t_15, __pyx_t_3); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 992; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_15); __pyx_t_15 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_36 = __Pyx_PyInt_from_py_Py_intptr_t(__pyx_t_1); if (unlikely((__pyx_t_36 == (npy_intp)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 992; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_m = __pyx_t_36; /* "nipy/algorithms/statistics/intvol.pyx":993 * v2 = index + d3[l,2] * m = m * fpmask[v1] * fpmask[v2] * l0 = l0 + m # <<<<<<<<<<<<<< * * for l in range(ds2): */ __pyx_v_l0 = (__pyx_v_l0 + __pyx_v_m); goto __pyx_L10; } __pyx_L10:; } /* "nipy/algorithms/statistics/intvol.pyx":995 * l0 = l0 + m * * for l in range(ds2): # <<<<<<<<<<<<<< * v0 = index + d2[l,0] * m = fpmask[v0] */ __pyx_t_26 = __pyx_v_ds2; for (__pyx_t_27 = 0; __pyx_t_27 < __pyx_t_26; __pyx_t_27+=1) { __pyx_v_l = __pyx_t_27; /* "nipy/algorithms/statistics/intvol.pyx":996 * * for l in range(ds2): * v0 = index + d2[l,0] # <<<<<<<<<<<<<< * m = fpmask[v0] * if m: */ __pyx_t_36 = __pyx_v_l; __pyx_t_37 = 0; __pyx_t_7 = -1; if (__pyx_t_36 < 0) { __pyx_t_36 += __pyx_pybuffernd_d2.diminfo[0].shape; if (unlikely(__pyx_t_36 < 0)) __pyx_t_7 = 0; } else if (unlikely(__pyx_t_36 >= __pyx_pybuffernd_d2.diminfo[0].shape)) __pyx_t_7 = 0; if (__pyx_t_37 < 0) { __pyx_t_37 += __pyx_pybuffernd_d2.diminfo[1].shape; if (unlikely(__pyx_t_37 < 0)) __pyx_t_7 = 1; } else if (unlikely(__pyx_t_37 >= __pyx_pybuffernd_d2.diminfo[1].shape)) __pyx_t_7 = 1; if (unlikely(__pyx_t_7 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 996; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_v0 = (__pyx_v_index + (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_d2.rcbuffer->pybuffer.buf, __pyx_t_36, __pyx_pybuffernd_d2.diminfo[0].strides, __pyx_t_37, __pyx_pybuffernd_d2.diminfo[1].strides))); /* "nipy/algorithms/statistics/intvol.pyx":997 * for l in range(ds2): * v0 = index + d2[l,0] * m = fpmask[v0] # <<<<<<<<<<<<<< * if m: * v1 = index + d2[l,1] */ __pyx_t_38 = __pyx_v_v0; __pyx_t_7 = -1; if (__pyx_t_38 < 0) { __pyx_t_38 += __pyx_pybuffernd_fpmask.diminfo[0].shape; if (unlikely(__pyx_t_38 < 0)) __pyx_t_7 = 0; } else if (unlikely(__pyx_t_38 >= __pyx_pybuffernd_fpmask.diminfo[0].shape)) __pyx_t_7 = 0; if (unlikely(__pyx_t_7 != -1)) { 
__Pyx_RaiseBufferIndexError(__pyx_t_7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 997; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_m = (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_fpmask.rcbuffer->pybuffer.buf, __pyx_t_38, __pyx_pybuffernd_fpmask.diminfo[0].strides)); /* "nipy/algorithms/statistics/intvol.pyx":998 * v0 = index + d2[l,0] * m = fpmask[v0] * if m: # <<<<<<<<<<<<<< * v1 = index + d2[l,1] * m = m * fpmask[v1] */ if (__pyx_v_m) { /* "nipy/algorithms/statistics/intvol.pyx":999 * m = fpmask[v0] * if m: * v1 = index + d2[l,1] # <<<<<<<<<<<<<< * m = m * fpmask[v1] * l0 = l0 - m */ __pyx_t_39 = __pyx_v_l; __pyx_t_40 = 1; __pyx_t_7 = -1; if (__pyx_t_39 < 0) { __pyx_t_39 += __pyx_pybuffernd_d2.diminfo[0].shape; if (unlikely(__pyx_t_39 < 0)) __pyx_t_7 = 0; } else if (unlikely(__pyx_t_39 >= __pyx_pybuffernd_d2.diminfo[0].shape)) __pyx_t_7 = 0; if (__pyx_t_40 < 0) { __pyx_t_40 += __pyx_pybuffernd_d2.diminfo[1].shape; if (unlikely(__pyx_t_40 < 0)) __pyx_t_7 = 1; } else if (unlikely(__pyx_t_40 >= __pyx_pybuffernd_d2.diminfo[1].shape)) __pyx_t_7 = 1; if (unlikely(__pyx_t_7 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 999; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_v1 = (__pyx_v_index + (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_d2.rcbuffer->pybuffer.buf, __pyx_t_39, __pyx_pybuffernd_d2.diminfo[0].strides, __pyx_t_40, __pyx_pybuffernd_d2.diminfo[1].strides))); /* "nipy/algorithms/statistics/intvol.pyx":1000 * if m: * v1 = index + d2[l,1] * m = m * fpmask[v1] # <<<<<<<<<<<<<< * l0 = l0 - m * */ __pyx_t_41 = __pyx_v_v1; __pyx_t_7 = -1; if (__pyx_t_41 < 0) { __pyx_t_41 += __pyx_pybuffernd_fpmask.diminfo[0].shape; if (unlikely(__pyx_t_41 < 0)) __pyx_t_7 = 0; } else if (unlikely(__pyx_t_41 >= __pyx_pybuffernd_fpmask.diminfo[0].shape)) __pyx_t_7 = 0; if (unlikely(__pyx_t_7 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1000; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_m = (__pyx_v_m * (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_fpmask.rcbuffer->pybuffer.buf, __pyx_t_41, __pyx_pybuffernd_fpmask.diminfo[0].strides))); /* "nipy/algorithms/statistics/intvol.pyx":1001 * v1 = index + d2[l,1] * m = m * fpmask[v1] * l0 = l0 - m # <<<<<<<<<<<<<< * * l0 += mask.sum() */ __pyx_v_l0 = (__pyx_v_l0 - __pyx_v_m); goto __pyx_L13; } __pyx_L13:; } } } /* "nipy/algorithms/statistics/intvol.pyx":1003 * l0 = l0 - m * * l0 += mask.sum() # <<<<<<<<<<<<<< * return l0 * */ __pyx_t_1 = PyInt_FromLong(__pyx_v_l0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1003; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = PyObject_GetAttr(__pyx_v_mask, __pyx_n_s__sum); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1003; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_15 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1003; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_15); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyNumber_InPlaceAdd(__pyx_t_1, __pyx_t_15); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1003; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_15); __pyx_t_15 = 0; 
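/* intvol.pyx:1003-1004, generated around this point: after the loops add one
 * for each interior triangle whose three vertices lie in the padded mask and
 * subtract one for each such interior edge, the vertex count ``mask.sum()``
 * is added, so l0 amounts to an inclusion-exclusion of vertices, edges and
 * triangles and is returned as the Euler characteristic of the masked region.
 * Illustrative example (values assumed, not from the source):
 *
 *     mask = np.zeros((10, 10), dtype=np.intp)
 *     mask[1:4, 1:4] = 1
 *     mask[6:9, 6:9] = 1                 # two disjoint solid squares
 *     intvol.EC2d(mask)                  # expected: 2
 */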
__pyx_t_24 = __Pyx_PyInt_AsLong(__pyx_t_3); if (unlikely((__pyx_t_24 == (long)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1003; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_l0 = __pyx_t_24; /* "nipy/algorithms/statistics/intvol.pyx":1004 * * l0 += mask.sum() * return l0 # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = PyInt_FromLong(__pyx_v_l0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1004; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_14); __Pyx_XDECREF(__pyx_t_15); __Pyx_XDECREF(__pyx_t_21); { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_d2.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_d3.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_fpmask.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_mask_c.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_strides.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("nipy.algorithms.statistics.intvol.EC2d", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_d2.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_d3.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_fpmask.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_mask_c.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_strides.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_mask_c); __Pyx_XDECREF((PyObject *)__pyx_v_fpmask); __Pyx_XDECREF((PyObject *)__pyx_v_d2); __Pyx_XDECREF((PyObject *)__pyx_v_d3); __Pyx_XDECREF(__pyx_v_pmask_shape); __Pyx_XDECREF(__pyx_v_pmask); __Pyx_XDECREF((PyObject *)__pyx_v_strides); __Pyx_XDECREF(__pyx_v_union); __Pyx_XDECREF(__pyx_v_c); __Pyx_XDECREF(__pyx_v_v2); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_10algorithms_10statistics_6intvol_29Lips1d(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_10algorithms_10statistics_6intvol_28Lips1d[] = " Estimate intrinsic volumes for 1D region in `mask` given `coords`\n\n Given a 1d `mask` and coordinates `coords`, estimate the intrinsic volumes\n of the masked region. The region is broken up into edges / vertices, which\n are included based on whether all voxels in the edge / vertex are in the\n mask or not.\n\n Parameters\n ----------\n coords : ndarray((N,i,j,k))\n Coordinates for the voxels in the mask. ``N`` will often be 1 (for 1\n dimensional coordinates, but can be any integer > 0\n mask : ndarray((i,), np.int)\n Binary mask determining whether or not a voxel is in the mask.\n\n Returns\n -------\n mu : ndarray\n Array of intrinsic volumes [mu0, mu1], being, respectively:\n #. Euler characteristic\n #. Line segment length\n\n Notes\n -----\n The array mask is assumed to be binary. At the time of writing, it\n is not clear how to get cython to use np.bool arrays.\n\n References\n ----------\n Taylor, J.E. & Worsley, K.J. (2007). 
\"Detecting sparse signal in random fields,\n with an application to brain mapping.\"\n Journal of the American Statistical Association, 102(479):913-928.\n "; static PyMethodDef __pyx_mdef_4nipy_10algorithms_10statistics_6intvol_29Lips1d = {__Pyx_NAMESTR("Lips1d"), (PyCFunction)__pyx_pw_4nipy_10algorithms_10statistics_6intvol_29Lips1d, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4nipy_10algorithms_10statistics_6intvol_28Lips1d)}; static PyObject *__pyx_pw_4nipy_10algorithms_10statistics_6intvol_29Lips1d(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_coords = 0; PyArrayObject *__pyx_v_mask = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("Lips1d (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__coords,&__pyx_n_s__mask,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__coords)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__mask)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("Lips1d", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1007; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "Lips1d") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1007; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_coords = ((PyArrayObject *)values[0]); __pyx_v_mask = ((PyArrayObject *)values[1]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("Lips1d", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1007; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.algorithms.statistics.intvol.Lips1d", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_coords), __pyx_ptype_5numpy_ndarray, 1, "coords", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1007; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_mask), __pyx_ptype_5numpy_ndarray, 1, "mask", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1008; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_4nipy_10algorithms_10statistics_6intvol_28Lips1d(__pyx_self, __pyx_v_coords, __pyx_v_mask); goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/algorithms/statistics/intvol.pyx":1007 * * * def Lips1d(np.ndarray[np.float_t, ndim=2] coords, # <<<<<<<<<<<<<< * np.ndarray[np.intp_t, ndim=1] mask): * """ Estimate intrinsic volumes for 1D region in `mask` given `coords` */ static PyObject *__pyx_pf_4nipy_10algorithms_10statistics_6intvol_28Lips1d(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject 
*__pyx_v_coords, PyArrayObject *__pyx_v_mask) { npy_intp __pyx_v_i; npy_intp __pyx_v_l; npy_intp __pyx_v_r; npy_intp __pyx_v_s; npy_intp __pyx_v_rr; npy_intp __pyx_v_ss; npy_intp __pyx_v_mr; npy_intp __pyx_v_ms; npy_intp __pyx_v_s0; npy_intp __pyx_v_m; double __pyx_v_l0; double __pyx_v_l1; double __pyx_v_res; PyObject *__pyx_v_D = NULL; __Pyx_LocalBuf_ND __pyx_pybuffernd_coords; __Pyx_Buffer __pyx_pybuffer_coords; __Pyx_LocalBuf_ND __pyx_pybuffernd_mask; __Pyx_Buffer __pyx_pybuffer_mask; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; npy_intp __pyx_t_6; npy_intp __pyx_t_7; npy_intp __pyx_t_8; int __pyx_t_9; npy_intp __pyx_t_10; long __pyx_t_11; npy_intp __pyx_t_12; npy_intp __pyx_t_13; npy_intp __pyx_t_14; npy_intp __pyx_t_15; npy_intp __pyx_t_16; npy_intp __pyx_t_17; npy_intp __pyx_t_18; npy_intp __pyx_t_19; PyObject *__pyx_t_20 = NULL; long __pyx_t_21; double __pyx_t_22; double __pyx_t_23; double __pyx_t_24; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("Lips1d", 0); __pyx_pybuffer_coords.pybuffer.buf = NULL; __pyx_pybuffer_coords.refcount = 0; __pyx_pybuffernd_coords.data = NULL; __pyx_pybuffernd_coords.rcbuffer = &__pyx_pybuffer_coords; __pyx_pybuffer_mask.pybuffer.buf = NULL; __pyx_pybuffer_mask.refcount = 0; __pyx_pybuffernd_mask.data = NULL; __pyx_pybuffernd_mask.rcbuffer = &__pyx_pybuffer_mask; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_coords.rcbuffer->pybuffer, (PyObject*)__pyx_v_coords, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1007; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_coords.diminfo[0].strides = __pyx_pybuffernd_coords.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_coords.diminfo[0].shape = __pyx_pybuffernd_coords.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_coords.diminfo[1].strides = __pyx_pybuffernd_coords.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_coords.diminfo[1].shape = __pyx_pybuffernd_coords.rcbuffer->pybuffer.shape[1]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_mask.rcbuffer->pybuffer, (PyObject*)__pyx_v_mask, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1007; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_mask.diminfo[0].strides = __pyx_pybuffernd_mask.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_mask.diminfo[0].shape = __pyx_pybuffernd_mask.rcbuffer->pybuffer.shape[0]; /* "nipy/algorithms/statistics/intvol.pyx":1042 * Journal of the American Statistical Association, 102(479):913-928. 
* """ * if mask.shape[0] != coords.shape[1]: # <<<<<<<<<<<<<< * raise ValueError('shape of mask does not match coordinates') * if not set(np.unique(mask)).issubset([0,1]): */ __pyx_t_1 = ((__pyx_v_mask->dimensions[0]) != (__pyx_v_coords->dimensions[1])); if (__pyx_t_1) { /* "nipy/algorithms/statistics/intvol.pyx":1043 * """ * if mask.shape[0] != coords.shape[1]: * raise ValueError('shape of mask does not match coordinates') # <<<<<<<<<<<<<< * if not set(np.unique(mask)).issubset([0,1]): * raise ValueError('mask should be filled with 0/1 ' */ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_65), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1043; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1043; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L3; } __pyx_L3:; /* "nipy/algorithms/statistics/intvol.pyx":1044 * if mask.shape[0] != coords.shape[1]: * raise ValueError('shape of mask does not match coordinates') * if not set(np.unique(mask)).issubset([0,1]): # <<<<<<<<<<<<<< * raise ValueError('mask should be filled with 0/1 ' * 'values, but be of type np.int') */ __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1044; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__unique); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1044; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1044; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(((PyObject *)__pyx_v_mask)); PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_v_mask)); __Pyx_GIVEREF(((PyObject *)__pyx_v_mask)); __pyx_t_4 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1044; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1044; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyObject_Call(((PyObject *)((PyObject*)(&PySet_Type))), ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1044; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; __pyx_t_2 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__issubset); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1044; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyList_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1044; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_INCREF(__pyx_int_0); PyList_SET_ITEM(__pyx_t_4, 0, __pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); __Pyx_INCREF(__pyx_int_1); PyList_SET_ITEM(__pyx_t_4, 1, __pyx_int_1); 
__Pyx_GIVEREF(__pyx_int_1); __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1044; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_t_4)); __Pyx_GIVEREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; __pyx_t_4 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1044; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1044; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_5 = (!__pyx_t_1); if (__pyx_t_5) { /* "nipy/algorithms/statistics/intvol.pyx":1045 * raise ValueError('shape of mask does not match coordinates') * if not set(np.unique(mask)).issubset([0,1]): * raise ValueError('mask should be filled with 0/1 ' # <<<<<<<<<<<<<< * 'values, but be of type np.int') * cdef: */ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_66), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1045; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1045; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L4; } __pyx_L4:; /* "nipy/algorithms/statistics/intvol.pyx":1052 * double res * * l0 = 0; l1 = 0 # <<<<<<<<<<<<<< * s0 = mask.shape[0] * D = np.zeros((2,2)) */ __pyx_v_l0 = 0.0; __pyx_v_l1 = 0.0; /* "nipy/algorithms/statistics/intvol.pyx":1053 * * l0 = 0; l1 = 0 * s0 = mask.shape[0] # <<<<<<<<<<<<<< * D = np.zeros((2,2)) * */ __pyx_v_s0 = (__pyx_v_mask->dimensions[0]); /* "nipy/algorithms/statistics/intvol.pyx":1054 * l0 = 0; l1 = 0 * s0 = mask.shape[0] * D = np.zeros((2,2)) # <<<<<<<<<<<<<< * * for i in range(s0): */ __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1054; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__zeros); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1054; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_k_tuple_68), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1054; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_D = __pyx_t_4; __pyx_t_4 = 0; /* "nipy/algorithms/statistics/intvol.pyx":1056 * D = np.zeros((2,2)) * * for i in range(s0): # <<<<<<<<<<<<<< * for r in range(2): * rr = (i+r) % s0 */ __pyx_t_6 = __pyx_v_s0; for (__pyx_t_7 = 0; __pyx_t_7 < __pyx_t_6; __pyx_t_7+=1) { __pyx_v_i = __pyx_t_7; /* "nipy/algorithms/statistics/intvol.pyx":1057 * * for i in range(s0): * for r in range(2): # <<<<<<<<<<<<<< * rr = (i+r) % s0 * mr = mask[rr] */ for (__pyx_t_8 = 0; __pyx_t_8 < 2; __pyx_t_8+=1) { __pyx_v_r = __pyx_t_8; /* "nipy/algorithms/statistics/intvol.pyx":1058 * for i in range(s0): * for r in range(2): * rr = (i+r) % s0 # <<<<<<<<<<<<<< * mr = mask[rr] * for s in range(r+1): */ __pyx_t_9 = (__pyx_v_i + __pyx_v_r); if 
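/* Descriptive note: this compiles rr = (i+r) % s0 from intvol.pyx:1058.  The
   modulo keeps the buffer index in range, and the wrapped pair is discarded
   later by the ((i+r) < s0) factor in the condition at intvol.pyx:1064, so
   only true neighbours (i, i+1) inside the 1D mask contribute.  The
   zero-division guard that follows is emitted by Cython for the modulo. */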
(unlikely(__pyx_v_s0 == 0)) { PyErr_Format(PyExc_ZeroDivisionError, "integer division or modulo by zero"); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1058; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_rr = __Pyx_mod_int(__pyx_t_9, __pyx_v_s0); /* "nipy/algorithms/statistics/intvol.pyx":1059 * for r in range(2): * rr = (i+r) % s0 * mr = mask[rr] # <<<<<<<<<<<<<< * for s in range(r+1): * res = 0 */ __pyx_t_10 = __pyx_v_rr; __pyx_t_9 = -1; if (__pyx_t_10 < 0) { __pyx_t_10 += __pyx_pybuffernd_mask.diminfo[0].shape; if (unlikely(__pyx_t_10 < 0)) __pyx_t_9 = 0; } else if (unlikely(__pyx_t_10 >= __pyx_pybuffernd_mask.diminfo[0].shape)) __pyx_t_9 = 0; if (unlikely(__pyx_t_9 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_9); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1059; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_mr = (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_mask.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_mask.diminfo[0].strides)); /* "nipy/algorithms/statistics/intvol.pyx":1060 * rr = (i+r) % s0 * mr = mask[rr] * for s in range(r+1): # <<<<<<<<<<<<<< * res = 0 * ss = (i+s) % s0 */ __pyx_t_11 = (__pyx_v_r + 1); for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_11; __pyx_t_12+=1) { __pyx_v_s = __pyx_t_12; /* "nipy/algorithms/statistics/intvol.pyx":1061 * mr = mask[rr] * for s in range(r+1): * res = 0 # <<<<<<<<<<<<<< * ss = (i+s) % s0 * ms = mask[ss] */ __pyx_v_res = 0.0; /* "nipy/algorithms/statistics/intvol.pyx":1062 * for s in range(r+1): * res = 0 * ss = (i+s) % s0 # <<<<<<<<<<<<<< * ms = mask[ss] * if mr * ms * ((i+r) < s0) * ((i+s) < s0): */ __pyx_t_9 = (__pyx_v_i + __pyx_v_s); if (unlikely(__pyx_v_s0 == 0)) { PyErr_Format(PyExc_ZeroDivisionError, "integer division or modulo by zero"); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1062; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_ss = __Pyx_mod_int(__pyx_t_9, __pyx_v_s0); /* "nipy/algorithms/statistics/intvol.pyx":1063 * res = 0 * ss = (i+s) % s0 * ms = mask[ss] # <<<<<<<<<<<<<< * if mr * ms * ((i+r) < s0) * ((i+s) < s0): * for l in range(coords.shape[0]): */ __pyx_t_13 = __pyx_v_ss; __pyx_t_9 = -1; if (__pyx_t_13 < 0) { __pyx_t_13 += __pyx_pybuffernd_mask.diminfo[0].shape; if (unlikely(__pyx_t_13 < 0)) __pyx_t_9 = 0; } else if (unlikely(__pyx_t_13 >= __pyx_pybuffernd_mask.diminfo[0].shape)) __pyx_t_9 = 0; if (unlikely(__pyx_t_9 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_9); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1063; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_ms = (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_mask.rcbuffer->pybuffer.buf, __pyx_t_13, __pyx_pybuffernd_mask.diminfo[0].strides)); /* "nipy/algorithms/statistics/intvol.pyx":1064 * ss = (i+s) % s0 * ms = mask[ss] * if mr * ms * ((i+r) < s0) * ((i+s) < s0): # <<<<<<<<<<<<<< * for l in range(coords.shape[0]): * res += coords[l,ss] * coords[l,rr] */ __pyx_t_9 = (((__pyx_v_mr * __pyx_v_ms) * ((__pyx_v_i + __pyx_v_r) < __pyx_v_s0)) * ((__pyx_v_i + __pyx_v_s) < __pyx_v_s0)); if (__pyx_t_9) { /* "nipy/algorithms/statistics/intvol.pyx":1065 * ms = mask[ss] * if mr * ms * ((i+r) < s0) * ((i+s) < s0): * for l in range(coords.shape[0]): # <<<<<<<<<<<<<< * res += coords[l,ss] * coords[l,rr] * D[r,s] = res */ __pyx_t_14 = (__pyx_v_coords->dimensions[0]); for (__pyx_t_15 = 0; __pyx_t_15 < __pyx_t_14; __pyx_t_15+=1) { __pyx_v_l = __pyx_t_15; /* "nipy/algorithms/statistics/intvol.pyx":1066 * if mr * ms * ((i+r) < s0) * ((i+s) < s0): * for l in range(coords.shape[0]): * res += 
coords[l,ss] * coords[l,rr] # <<<<<<<<<<<<<< * D[r,s] = res * D[s,r] = res */ __pyx_t_16 = __pyx_v_l; __pyx_t_17 = __pyx_v_ss; __pyx_t_9 = -1; if (__pyx_t_16 < 0) { __pyx_t_16 += __pyx_pybuffernd_coords.diminfo[0].shape; if (unlikely(__pyx_t_16 < 0)) __pyx_t_9 = 0; } else if (unlikely(__pyx_t_16 >= __pyx_pybuffernd_coords.diminfo[0].shape)) __pyx_t_9 = 0; if (__pyx_t_17 < 0) { __pyx_t_17 += __pyx_pybuffernd_coords.diminfo[1].shape; if (unlikely(__pyx_t_17 < 0)) __pyx_t_9 = 1; } else if (unlikely(__pyx_t_17 >= __pyx_pybuffernd_coords.diminfo[1].shape)) __pyx_t_9 = 1; if (unlikely(__pyx_t_9 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_9); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1066; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_18 = __pyx_v_l; __pyx_t_19 = __pyx_v_rr; __pyx_t_9 = -1; if (__pyx_t_18 < 0) { __pyx_t_18 += __pyx_pybuffernd_coords.diminfo[0].shape; if (unlikely(__pyx_t_18 < 0)) __pyx_t_9 = 0; } else if (unlikely(__pyx_t_18 >= __pyx_pybuffernd_coords.diminfo[0].shape)) __pyx_t_9 = 0; if (__pyx_t_19 < 0) { __pyx_t_19 += __pyx_pybuffernd_coords.diminfo[1].shape; if (unlikely(__pyx_t_19 < 0)) __pyx_t_9 = 1; } else if (unlikely(__pyx_t_19 >= __pyx_pybuffernd_coords.diminfo[1].shape)) __pyx_t_9 = 1; if (unlikely(__pyx_t_9 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_9); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1066; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_res = (__pyx_v_res + ((*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_coords.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_coords.diminfo[0].strides, __pyx_t_17, __pyx_pybuffernd_coords.diminfo[1].strides)) * (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_coords.rcbuffer->pybuffer.buf, __pyx_t_18, __pyx_pybuffernd_coords.diminfo[0].strides, __pyx_t_19, __pyx_pybuffernd_coords.diminfo[1].strides)))); /* "nipy/algorithms/statistics/intvol.pyx":1067 * for l in range(coords.shape[0]): * res += coords[l,ss] * coords[l,rr] * D[r,s] = res # <<<<<<<<<<<<<< * D[s,r] = res * else: */ __pyx_t_4 = PyFloat_FromDouble(__pyx_v_res); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1067; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyInt_to_py_Py_intptr_t(__pyx_v_r); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1067; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyInt_to_py_Py_intptr_t(__pyx_v_s); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1067; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_20 = PyTuple_New(2); if (unlikely(!__pyx_t_20)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1067; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_20); PyTuple_SET_ITEM(__pyx_t_20, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_20, 1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_3 = 0; __pyx_t_2 = 0; if (PyObject_SetItem(__pyx_v_D, ((PyObject *)__pyx_t_20), __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1067; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(((PyObject *)__pyx_t_20)); __pyx_t_20 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "nipy/algorithms/statistics/intvol.pyx":1068 * res += coords[l,ss] * coords[l,rr] * D[r,s] = res * D[s,r] = res # <<<<<<<<<<<<<< * else: * D[r,s] = 0 */ __pyx_t_4 = PyFloat_FromDouble(__pyx_v_res); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1068; 
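/* Descriptive note: the statements around this point compile intvol.pyx
   lines 1066-1068.  For the current voxel pair, D is filled as the symmetric
   2x2 Gram matrix of the coordinate columns,
       D[r,s] = sum over l of coords[l, (i+s) % s0] * coords[l, (i+r) % s0],
   and is then passed to mu1_edge(D[0,0], D[0,1], D[1,1]) at intvol.pyx:1076
   to obtain the edge-length contribution to mu1. */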
__pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_20 = __Pyx_PyInt_to_py_Py_intptr_t(__pyx_v_s); if (unlikely(!__pyx_t_20)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1068; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_20); __pyx_t_2 = __Pyx_PyInt_to_py_Py_intptr_t(__pyx_v_r); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1068; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1068; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_20); __Pyx_GIVEREF(__pyx_t_20); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_20 = 0; __pyx_t_2 = 0; if (PyObject_SetItem(__pyx_v_D, ((PyObject *)__pyx_t_3), __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1068; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } goto __pyx_L11; } /*else*/ { /* "nipy/algorithms/statistics/intvol.pyx":1070 * D[s,r] = res * else: * D[r,s] = 0 # <<<<<<<<<<<<<< * D[s,r] = 0 * */ __pyx_t_4 = __Pyx_PyInt_to_py_Py_intptr_t(__pyx_v_r); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1070; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyInt_to_py_Py_intptr_t(__pyx_v_s); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1070; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1070; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_4 = 0; __pyx_t_3 = 0; if (PyObject_SetItem(__pyx_v_D, ((PyObject *)__pyx_t_2), __pyx_int_0) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1070; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; /* "nipy/algorithms/statistics/intvol.pyx":1071 * else: * D[r,s] = 0 * D[s,r] = 0 # <<<<<<<<<<<<<< * * m = mask[i] */ __pyx_t_2 = __Pyx_PyInt_to_py_Py_intptr_t(__pyx_v_s); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1071; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyInt_to_py_Py_intptr_t(__pyx_v_r); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1071; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1071; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_2 = 0; __pyx_t_3 = 0; if (PyObject_SetItem(__pyx_v_D, ((PyObject *)__pyx_t_4), __pyx_int_0) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1071; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; } __pyx_L11:; } } /* "nipy/algorithms/statistics/intvol.pyx":1073 * D[s,r] = 0 * * m = mask[i] # <<<<<<<<<<<<<< * if m: * m = m * (mask[(i+1) % s0] * ((i+1) < s0)) */ __pyx_t_8 = __pyx_v_i; __pyx_t_9 = -1; if (__pyx_t_8 < 
0) { __pyx_t_8 += __pyx_pybuffernd_mask.diminfo[0].shape; if (unlikely(__pyx_t_8 < 0)) __pyx_t_9 = 0; } else if (unlikely(__pyx_t_8 >= __pyx_pybuffernd_mask.diminfo[0].shape)) __pyx_t_9 = 0; if (unlikely(__pyx_t_9 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_9); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1073; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_m = (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_mask.rcbuffer->pybuffer.buf, __pyx_t_8, __pyx_pybuffernd_mask.diminfo[0].strides)); /* "nipy/algorithms/statistics/intvol.pyx":1074 * * m = mask[i] * if m: # <<<<<<<<<<<<<< * m = m * (mask[(i+1) % s0] * ((i+1) < s0)) * l1 = l1 + m * mu1_edge(D[0,0], D[0,1], D[1,1]) */ if (__pyx_v_m) { /* "nipy/algorithms/statistics/intvol.pyx":1075 * m = mask[i] * if m: * m = m * (mask[(i+1) % s0] * ((i+1) < s0)) # <<<<<<<<<<<<<< * l1 = l1 + m * mu1_edge(D[0,0], D[0,1], D[1,1]) * l0 = l0 - m */ __pyx_t_11 = (__pyx_v_i + 1); if (unlikely(__pyx_v_s0 == 0)) { PyErr_Format(PyExc_ZeroDivisionError, "integer division or modulo by zero"); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1075; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_21 = __Pyx_mod_long(__pyx_t_11, __pyx_v_s0); __pyx_t_9 = -1; if (__pyx_t_21 < 0) { __pyx_t_21 += __pyx_pybuffernd_mask.diminfo[0].shape; if (unlikely(__pyx_t_21 < 0)) __pyx_t_9 = 0; } else if (unlikely(__pyx_t_21 >= __pyx_pybuffernd_mask.diminfo[0].shape)) __pyx_t_9 = 0; if (unlikely(__pyx_t_9 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_9); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1075; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_m = (__pyx_v_m * ((*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_mask.rcbuffer->pybuffer.buf, __pyx_t_21, __pyx_pybuffernd_mask.diminfo[0].strides)) * ((__pyx_v_i + 1) < __pyx_v_s0))); /* "nipy/algorithms/statistics/intvol.pyx":1076 * if m: * m = m * (mask[(i+1) % s0] * ((i+1) < s0)) * l1 = l1 + m * mu1_edge(D[0,0], D[0,1], D[1,1]) # <<<<<<<<<<<<<< * l0 = l0 - m * */ __pyx_t_4 = PyObject_GetItem(__pyx_v_D, ((PyObject *)__pyx_k_tuple_69)); if (!__pyx_t_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1076; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_22 = __pyx_PyFloat_AsDouble(__pyx_t_4); if (unlikely((__pyx_t_22 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1076; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyObject_GetItem(__pyx_v_D, ((PyObject *)__pyx_k_tuple_70)); if (!__pyx_t_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1076; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_23 = __pyx_PyFloat_AsDouble(__pyx_t_4); if (unlikely((__pyx_t_23 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1076; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyObject_GetItem(__pyx_v_D, ((PyObject *)__pyx_k_tuple_71)); if (!__pyx_t_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1076; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_24 = __pyx_PyFloat_AsDouble(__pyx_t_4); if (unlikely((__pyx_t_24 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1076; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_v_l1 = (__pyx_v_l1 + (__pyx_v_m * __pyx_f_4nipy_10algorithms_10statistics_6intvol_mu1_edge(__pyx_t_22, __pyx_t_23, __pyx_t_24, 0))); /* 
"nipy/algorithms/statistics/intvol.pyx":1077 * m = m * (mask[(i+1) % s0] * ((i+1) < s0)) * l1 = l1 + m * mu1_edge(D[0,0], D[0,1], D[1,1]) * l0 = l0 - m # <<<<<<<<<<<<<< * * l0 += mask.sum() */ __pyx_v_l0 = (__pyx_v_l0 - __pyx_v_m); goto __pyx_L14; } __pyx_L14:; } /* "nipy/algorithms/statistics/intvol.pyx":1079 * l0 = l0 - m * * l0 += mask.sum() # <<<<<<<<<<<<<< * return np.array([l0,l1]) * */ __pyx_t_4 = PyFloat_FromDouble(__pyx_v_l0); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1079; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_GetAttr(((PyObject *)__pyx_v_mask), __pyx_n_s__sum); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1079; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1079; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyNumber_InPlaceAdd(__pyx_t_4, __pyx_t_2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1079; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_24 = __pyx_PyFloat_AsDouble(__pyx_t_3); if (unlikely((__pyx_t_24 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1079; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_l0 = __pyx_t_24; /* "nipy/algorithms/statistics/intvol.pyx":1080 * * l0 += mask.sum() * return np.array([l0,l1]) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1080; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__array); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1080; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyFloat_FromDouble(__pyx_v_l0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1080; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyFloat_FromDouble(__pyx_v_l1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1080; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_20 = PyList_New(2); if (unlikely(!__pyx_t_20)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1080; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_20); PyList_SET_ITEM(__pyx_t_20, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); PyList_SET_ITEM(__pyx_t_20, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1080; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_t_20)); __Pyx_GIVEREF(((PyObject *)__pyx_t_20)); __pyx_t_20 = 0; __pyx_t_20 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_20)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1080; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_20); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(((PyObject 
*)__pyx_t_4)); __pyx_t_4 = 0; __pyx_r = __pyx_t_20; __pyx_t_20 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_20); { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_coords.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_mask.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("nipy.algorithms.statistics.intvol.Lips1d", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_coords.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_mask.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XDECREF(__pyx_v_D); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_10algorithms_10statistics_6intvol_31EC1d(PyObject *__pyx_self, PyObject *__pyx_v_mask); /*proto*/ static char __pyx_doc_4nipy_10algorithms_10statistics_6intvol_30EC1d[] = " Compute Euler characteristic for 1d `mask`\n\n Given a 1d mask `mask`, compute the 0th intrinsic volume (Euler\n characteristic) of the masked region. The region is broken up into edges /\n vertices, which are included based on whether all voxels in the edge /\n vertex are in the mask or not.\n\n Parameters\n ----------\n mask : ndarray((i,), np.int)\n Binary mask determining whether or not a voxel is in the mask.\n\n Returns\n -------\n mu0 : int\n Euler characteristic\n\n Notes\n -----\n The array mask is assumed to be binary. At the time of writing, it\n is not clear how to get cython to use np.bool arrays.\n\n The 3d cubes are triangulated into 6 tetrahedra of equal volume, as\n described in the reference below.\n\n References\n ----------\n Taylor, J.E. & Worsley, K.J. (2007). 
\"Detecting sparse signal in random fields,\n with an application to brain mapping.\"\n Journal of the American Statistical Association, 102(479):913-928.\n "; static PyMethodDef __pyx_mdef_4nipy_10algorithms_10statistics_6intvol_31EC1d = {__Pyx_NAMESTR("EC1d"), (PyCFunction)__pyx_pw_4nipy_10algorithms_10statistics_6intvol_31EC1d, METH_O, __Pyx_DOCSTR(__pyx_doc_4nipy_10algorithms_10statistics_6intvol_30EC1d)}; static PyObject *__pyx_pw_4nipy_10algorithms_10statistics_6intvol_31EC1d(PyObject *__pyx_self, PyObject *__pyx_v_mask) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("EC1d (wrapper)", 0); if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_mask), __pyx_ptype_5numpy_ndarray, 1, "mask", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1083; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_4nipy_10algorithms_10statistics_6intvol_30EC1d(__pyx_self, ((PyArrayObject *)__pyx_v_mask)); goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/algorithms/statistics/intvol.pyx":1083 * * * def EC1d(np.ndarray[np.intp_t, ndim=1] mask): # <<<<<<<<<<<<<< * """ Compute Euler characteristic for 1d `mask` * */ static PyObject *__pyx_pf_4nipy_10algorithms_10statistics_6intvol_30EC1d(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_mask) { npy_intp __pyx_v_i; npy_intp __pyx_v_m; npy_intp __pyx_v_s0; double __pyx_v_l0; __Pyx_LocalBuf_ND __pyx_pybuffernd_mask; __Pyx_Buffer __pyx_pybuffer_mask; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; int __pyx_t_5; npy_intp __pyx_t_6; npy_intp __pyx_t_7; npy_intp __pyx_t_8; int __pyx_t_9; long __pyx_t_10; long __pyx_t_11; double __pyx_t_12; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("EC1d", 0); __pyx_pybuffer_mask.pybuffer.buf = NULL; __pyx_pybuffer_mask.refcount = 0; __pyx_pybuffernd_mask.data = NULL; __pyx_pybuffernd_mask.rcbuffer = &__pyx_pybuffer_mask; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_mask.rcbuffer->pybuffer, (PyObject*)__pyx_v_mask, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1083; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_mask.diminfo[0].strides = __pyx_pybuffernd_mask.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_mask.diminfo[0].shape = __pyx_pybuffernd_mask.rcbuffer->pybuffer.shape[0]; /* "nipy/algorithms/statistics/intvol.pyx":1115 * Journal of the American Statistical Association, 102(479):913-928. 
* """ * if not set(np.unique(mask)).issubset([0,1]): # <<<<<<<<<<<<<< * raise ValueError('mask should be filled with 0/1 ' * 'values, but be of type np.int') */ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1115; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__unique); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1115; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1115; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)__pyx_v_mask)); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_v_mask)); __Pyx_GIVEREF(((PyObject *)__pyx_v_mask)); __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1115; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1115; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyObject_Call(((PyObject *)((PyObject*)(&PySet_Type))), ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1115; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __pyx_t_1 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__issubset); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1115; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyList_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1115; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_int_0); PyList_SET_ITEM(__pyx_t_3, 0, __pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); __Pyx_INCREF(__pyx_int_1); PyList_SET_ITEM(__pyx_t_3, 1, __pyx_int_1); __Pyx_GIVEREF(__pyx_int_1); __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1115; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_t_3)); __Pyx_GIVEREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __pyx_t_3 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1115; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1115; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_5 = (!__pyx_t_4); if (__pyx_t_5) { /* "nipy/algorithms/statistics/intvol.pyx":1116 * """ * if not set(np.unique(mask)).issubset([0,1]): * raise ValueError('mask should be filled with 0/1 ' # <<<<<<<<<<<<<< * 'values, but be of type np.int') * cdef: */ __pyx_t_3 = 
PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_72), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1116; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1116; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L3; } __pyx_L3:; /* "nipy/algorithms/statistics/intvol.pyx":1120 * cdef: * np.npy_intp i, m, s0 * double l0 = 0 # <<<<<<<<<<<<<< * * s0 = mask.shape[0] */ __pyx_v_l0 = 0.0; /* "nipy/algorithms/statistics/intvol.pyx":1122 * double l0 = 0 * * s0 = mask.shape[0] # <<<<<<<<<<<<<< * for i in range(s0): * m = mask[i] */ __pyx_v_s0 = (__pyx_v_mask->dimensions[0]); /* "nipy/algorithms/statistics/intvol.pyx":1123 * * s0 = mask.shape[0] * for i in range(s0): # <<<<<<<<<<<<<< * m = mask[i] * if m: */ __pyx_t_6 = __pyx_v_s0; for (__pyx_t_7 = 0; __pyx_t_7 < __pyx_t_6; __pyx_t_7+=1) { __pyx_v_i = __pyx_t_7; /* "nipy/algorithms/statistics/intvol.pyx":1124 * s0 = mask.shape[0] * for i in range(s0): * m = mask[i] # <<<<<<<<<<<<<< * if m: * m = m * (mask[(i+1) % s0] * ((i+1) < s0)) */ __pyx_t_8 = __pyx_v_i; __pyx_t_9 = -1; if (__pyx_t_8 < 0) { __pyx_t_8 += __pyx_pybuffernd_mask.diminfo[0].shape; if (unlikely(__pyx_t_8 < 0)) __pyx_t_9 = 0; } else if (unlikely(__pyx_t_8 >= __pyx_pybuffernd_mask.diminfo[0].shape)) __pyx_t_9 = 0; if (unlikely(__pyx_t_9 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_9); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1124; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_m = (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_mask.rcbuffer->pybuffer.buf, __pyx_t_8, __pyx_pybuffernd_mask.diminfo[0].strides)); /* "nipy/algorithms/statistics/intvol.pyx":1125 * for i in range(s0): * m = mask[i] * if m: # <<<<<<<<<<<<<< * m = m * (mask[(i+1) % s0] * ((i+1) < s0)) * l0 = l0 - m */ if (__pyx_v_m) { /* "nipy/algorithms/statistics/intvol.pyx":1126 * m = mask[i] * if m: * m = m * (mask[(i+1) % s0] * ((i+1) < s0)) # <<<<<<<<<<<<<< * l0 = l0 - m * */ __pyx_t_10 = (__pyx_v_i + 1); if (unlikely(__pyx_v_s0 == 0)) { PyErr_Format(PyExc_ZeroDivisionError, "integer division or modulo by zero"); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1126; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_11 = __Pyx_mod_long(__pyx_t_10, __pyx_v_s0); __pyx_t_9 = -1; if (__pyx_t_11 < 0) { __pyx_t_11 += __pyx_pybuffernd_mask.diminfo[0].shape; if (unlikely(__pyx_t_11 < 0)) __pyx_t_9 = 0; } else if (unlikely(__pyx_t_11 >= __pyx_pybuffernd_mask.diminfo[0].shape)) __pyx_t_9 = 0; if (unlikely(__pyx_t_9 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_9); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1126; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_m = (__pyx_v_m * ((*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_intp_t *, __pyx_pybuffernd_mask.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_mask.diminfo[0].strides)) * ((__pyx_v_i + 1) < __pyx_v_s0))); /* "nipy/algorithms/statistics/intvol.pyx":1127 * if m: * m = m * (mask[(i+1) % s0] * ((i+1) < s0)) * l0 = l0 - m # <<<<<<<<<<<<<< * * l0 += mask.sum() */ __pyx_v_l0 = (__pyx_v_l0 - __pyx_v_m); goto __pyx_L6; } __pyx_L6:; } /* "nipy/algorithms/statistics/intvol.pyx":1129 * l0 = l0 - m * * l0 += mask.sum() # <<<<<<<<<<<<<< * return l0 */ __pyx_t_3 = PyFloat_FromDouble(__pyx_v_l0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1129; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); 
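/* Descriptive note: the loop compiled just above (intvol.pyx:1123-1127)
   subtracts one from l0 for every adjacent in-mask pair (i, i+1), and the code
   below adds mask.sum() (intvol.pyx:1129), so EC1d returns
       EC = number of in-mask vertices - number of in-mask edges,
   the Euler characteristic of the masked 1D region.  Illustrative check, not
   part of the generated module: mask = [1, 1, 0, 1] has 3 vertices and 1 edge,
   so EC1d would return 2.0, one per connected run of ones. */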
__pyx_t_2 = PyObject_GetAttr(((PyObject *)__pyx_v_mask), __pyx_n_s__sum); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1129; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1129; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyNumber_InPlaceAdd(__pyx_t_3, __pyx_t_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1129; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_12 = __pyx_PyFloat_AsDouble(__pyx_t_2); if (unlikely((__pyx_t_12 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1129; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v_l0 = __pyx_t_12; /* "nipy/algorithms/statistics/intvol.pyx":1130 * * l0 += mask.sum() * return l0 # <<<<<<<<<<<<<< */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = PyFloat_FromDouble(__pyx_v_l0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1130; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_mask.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("nipy.algorithms.statistics.intvol.EC1d", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_mask.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_pf_5numpy_7ndarray___getbuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":194 * # experimental exception made for __getbuffer__ and __releasebuffer__ * # -- the details of this may change. * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< * # This implementation of getbuffer is geared towards Cython * # requirements, and does not yet fullfill the PEP. 
*/ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_v_copy_shape; int __pyx_v_i; int __pyx_v_ndim; int __pyx_v_endian_detector; int __pyx_v_little_endian; int __pyx_v_t; char *__pyx_v_f; PyArray_Descr *__pyx_v_descr = 0; int __pyx_v_offset; int __pyx_v_hasfields; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; char *__pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getbuffer__", 0); if (__pyx_v_info != NULL) { __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); } /* "numpy.pxd":200 * # of flags * * if info == NULL: return # <<<<<<<<<<<<<< * * cdef int copy_shape, i, ndim */ __pyx_t_1 = (__pyx_v_info == NULL); if (__pyx_t_1) { __pyx_r = 0; goto __pyx_L0; goto __pyx_L3; } __pyx_L3:; /* "numpy.pxd":203 * * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((&endian_detector)[0] != 0) * */ __pyx_v_endian_detector = 1; /* "numpy.pxd":204 * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * * ndim = PyArray_NDIM(self) */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "numpy.pxd":206 * cdef bint little_endian = ((&endian_detector)[0] != 0) * * ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<< * * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_v_ndim = PyArray_NDIM(__pyx_v_self); /* "numpy.pxd":208 * ndim = PyArray_NDIM(self) * * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * copy_shape = 1 * else: */ __pyx_t_1 = ((sizeof(npy_intp)) != (sizeof(Py_ssize_t))); if (__pyx_t_1) { /* "numpy.pxd":209 * * if sizeof(npy_intp) != sizeof(Py_ssize_t): * copy_shape = 1 # <<<<<<<<<<<<<< * else: * copy_shape = 0 */ __pyx_v_copy_shape = 1; goto __pyx_L4; } /*else*/ { /* "numpy.pxd":211 * copy_shape = 1 * else: * copy_shape = 0 # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) */ __pyx_v_copy_shape = 0; } __pyx_L4:; /* "numpy.pxd":213 * copy_shape = 0 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS); if (__pyx_t_1) { /* "numpy.pxd":214 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not C contiguous") * */ __pyx_t_2 = (!PyArray_CHKFLAGS(__pyx_v_self, NPY_C_CONTIGUOUS)); __pyx_t_3 = __pyx_t_2; } else { __pyx_t_3 = __pyx_t_1; } if (__pyx_t_3) { /* "numpy.pxd":215 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_74), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; 
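/* Descriptive note: this function is Cython's standard __getbuffer__ support
   copied in from numpy.pxd (see the quoted "numpy.pxd" comments), not
   nipy-specific code.  It rejects PyBUF_C_CONTIGUOUS / PyBUF_F_CONTIGUOUS
   requests the ndarray cannot satisfy and then fills the Py_buffer fields
   (buf, ndim, shape, strides, itemsize, readonly, format) that the typed
   ndarray buffers used by the intvol functions rely on. */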
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L5; } __pyx_L5:; /* "numpy.pxd":217 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ __pyx_t_3 = ((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS); if (__pyx_t_3) { /* "numpy.pxd":218 * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not Fortran contiguous") * */ __pyx_t_1 = (!PyArray_CHKFLAGS(__pyx_v_self, NPY_F_CONTIGUOUS)); __pyx_t_2 = __pyx_t_1; } else { __pyx_t_2 = __pyx_t_3; } if (__pyx_t_2) { /* "numpy.pxd":219 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_76), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L6; } __pyx_L6:; /* "numpy.pxd":221 * raise ValueError(u"ndarray is not Fortran contiguous") * * info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<< * info.ndim = ndim * if copy_shape: */ __pyx_v_info->buf = PyArray_DATA(__pyx_v_self); /* "numpy.pxd":222 * * info.buf = PyArray_DATA(self) * info.ndim = ndim # <<<<<<<<<<<<<< * if copy_shape: * # Allocate new buffer for strides and shape info. */ __pyx_v_info->ndim = __pyx_v_ndim; /* "numpy.pxd":223 * info.buf = PyArray_DATA(self) * info.ndim = ndim * if copy_shape: # <<<<<<<<<<<<<< * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. */ if (__pyx_v_copy_shape) { /* "numpy.pxd":226 * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. * info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) # <<<<<<<<<<<<<< * info.shape = info.strides + ndim * for i in range(ndim): */ __pyx_v_info->strides = ((Py_ssize_t *)malloc((((sizeof(Py_ssize_t)) * ((size_t)__pyx_v_ndim)) * 2))); /* "numpy.pxd":227 * # This is allocated as one block, strides first. 
* info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) * info.shape = info.strides + ndim # <<<<<<<<<<<<<< * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] */ __pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim); /* "numpy.pxd":228 * info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) * info.shape = info.strides + ndim * for i in range(ndim): # <<<<<<<<<<<<<< * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] */ __pyx_t_5 = __pyx_v_ndim; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "numpy.pxd":229 * info.shape = info.strides + ndim * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<< * info.shape[i] = PyArray_DIMS(self)[i] * else: */ (__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]); /* "numpy.pxd":230 * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<< * else: * info.strides = PyArray_STRIDES(self) */ (__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]); } goto __pyx_L7; } /*else*/ { /* "numpy.pxd":232 * info.shape[i] = PyArray_DIMS(self)[i] * else: * info.strides = PyArray_STRIDES(self) # <<<<<<<<<<<<<< * info.shape = PyArray_DIMS(self) * info.suboffsets = NULL */ __pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self)); /* "numpy.pxd":233 * else: * info.strides = PyArray_STRIDES(self) * info.shape = PyArray_DIMS(self) # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) */ __pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(__pyx_v_self)); } __pyx_L7:; /* "numpy.pxd":234 * info.strides = PyArray_STRIDES(self) * info.shape = PyArray_DIMS(self) * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) */ __pyx_v_info->suboffsets = NULL; /* "numpy.pxd":235 * info.shape = PyArray_DIMS(self) * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<< * info.readonly = not PyArray_ISWRITEABLE(self) * */ __pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self); /* "numpy.pxd":236 * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<< * * cdef int t */ __pyx_v_info->readonly = (!PyArray_ISWRITEABLE(__pyx_v_self)); /* "numpy.pxd":239 * * cdef int t * cdef char* f = NULL # <<<<<<<<<<<<<< * cdef dtype descr = self.descr * cdef list stack */ __pyx_v_f = NULL; /* "numpy.pxd":240 * cdef int t * cdef char* f = NULL * cdef dtype descr = self.descr # <<<<<<<<<<<<<< * cdef list stack * cdef int offset */ __pyx_t_4 = ((PyObject *)__pyx_v_self->descr); __Pyx_INCREF(__pyx_t_4); __pyx_v_descr = ((PyArray_Descr *)__pyx_t_4); __pyx_t_4 = 0; /* "numpy.pxd":244 * cdef int offset * * cdef bint hasfields = PyDataType_HASFIELDS(descr) # <<<<<<<<<<<<<< * * if not hasfields and not copy_shape: */ __pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr); /* "numpy.pxd":246 * cdef bint hasfields = PyDataType_HASFIELDS(descr) * * if not hasfields and not copy_shape: # <<<<<<<<<<<<<< * # do not call releasebuffer * info.obj = None */ __pyx_t_2 = (!__pyx_v_hasfields); if (__pyx_t_2) { __pyx_t_3 = (!__pyx_v_copy_shape); __pyx_t_1 = __pyx_t_3; } else { __pyx_t_1 = __pyx_t_2; } if (__pyx_t_1) { /* "numpy.pxd":248 * if not hasfields and not copy_shape: * # do not call releasebuffer * info.obj = None # <<<<<<<<<<<<<< * else: * # need to call 
releasebuffer */ __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = Py_None; goto __pyx_L10; } /*else*/ { /* "numpy.pxd":251 * else: * # need to call releasebuffer * info.obj = self # <<<<<<<<<<<<<< * * if not hasfields: */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); } __pyx_L10:; /* "numpy.pxd":253 * info.obj = self * * if not hasfields: # <<<<<<<<<<<<<< * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or */ __pyx_t_1 = (!__pyx_v_hasfields); if (__pyx_t_1) { /* "numpy.pxd":254 * * if not hasfields: * t = descr.type_num # <<<<<<<<<<<<<< * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): */ __pyx_t_5 = __pyx_v_descr->type_num; __pyx_v_t = __pyx_t_5; /* "numpy.pxd":255 * if not hasfields: * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_1 = (__pyx_v_descr->byteorder == '>'); if (__pyx_t_1) { __pyx_t_2 = __pyx_v_little_endian; } else { __pyx_t_2 = __pyx_t_1; } if (!__pyx_t_2) { /* "numpy.pxd":256 * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" */ __pyx_t_1 = (__pyx_v_descr->byteorder == '<'); if (__pyx_t_1) { __pyx_t_3 = (!__pyx_v_little_endian); __pyx_t_7 = __pyx_t_3; } else { __pyx_t_7 = __pyx_t_1; } __pyx_t_1 = __pyx_t_7; } else { __pyx_t_1 = __pyx_t_2; } if (__pyx_t_1) { /* "numpy.pxd":257 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_78), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L12; } __pyx_L12:; /* "numpy.pxd":258 * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" */ __pyx_t_1 = (__pyx_v_t == NPY_BYTE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__b; goto __pyx_L13; } /* "numpy.pxd":259 * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" */ __pyx_t_1 = (__pyx_v_t == NPY_UBYTE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__B; goto __pyx_L13; } /* "numpy.pxd":260 * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" */ __pyx_t_1 = (__pyx_v_t == NPY_SHORT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__h; goto __pyx_L13; } /* "numpy.pxd":261 * elif t == NPY_UBYTE: f = "B" * elif t == 
NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" */ __pyx_t_1 = (__pyx_v_t == NPY_USHORT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__H; goto __pyx_L13; } /* "numpy.pxd":262 * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" */ __pyx_t_1 = (__pyx_v_t == NPY_INT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__i; goto __pyx_L13; } /* "numpy.pxd":263 * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" */ __pyx_t_1 = (__pyx_v_t == NPY_UINT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__I; goto __pyx_L13; } /* "numpy.pxd":264 * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" */ __pyx_t_1 = (__pyx_v_t == NPY_LONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__l; goto __pyx_L13; } /* "numpy.pxd":265 * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" */ __pyx_t_1 = (__pyx_v_t == NPY_ULONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__L; goto __pyx_L13; } /* "numpy.pxd":266 * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" */ __pyx_t_1 = (__pyx_v_t == NPY_LONGLONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__q; goto __pyx_L13; } /* "numpy.pxd":267 * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" */ __pyx_t_1 = (__pyx_v_t == NPY_ULONGLONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Q; goto __pyx_L13; } /* "numpy.pxd":268 * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" */ __pyx_t_1 = (__pyx_v_t == NPY_FLOAT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__f; goto __pyx_L13; } /* "numpy.pxd":269 * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" */ __pyx_t_1 = (__pyx_v_t == NPY_DOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__d; goto __pyx_L13; } /* "numpy.pxd":270 * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" */ __pyx_t_1 = (__pyx_v_t == NPY_LONGDOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__g; goto __pyx_L13; } /* "numpy.pxd":271 * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" */ __pyx_t_1 = (__pyx_v_t == NPY_CFLOAT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Zf; goto __pyx_L13; } /* "numpy.pxd":272 * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" */ __pyx_t_1 = (__pyx_v_t == NPY_CDOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Zd; goto __pyx_L13; } /* "numpy.pxd":273 * elif t 
== NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f = "O" * else: */ __pyx_t_1 = (__pyx_v_t == NPY_CLONGDOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Zg; goto __pyx_L13; } /* "numpy.pxd":274 * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ __pyx_t_1 = (__pyx_v_t == NPY_OBJECT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__O; goto __pyx_L13; } /*else*/ { /* "numpy.pxd":276 * elif t == NPY_OBJECT: f = "O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * info.format = f * return */ __pyx_t_4 = PyInt_FromLong(__pyx_v_t); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_8 = PyNumber_Remainder(((PyObject *)__pyx_kp_u_79), __pyx_t_4); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_8)); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_t_8)); __Pyx_GIVEREF(((PyObject *)__pyx_t_8)); __pyx_t_8 = 0; __pyx_t_8 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_8, 0, 0, 0); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L13:; /* "numpy.pxd":277 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f # <<<<<<<<<<<<<< * return * else: */ __pyx_v_info->format = __pyx_v_f; /* "numpy.pxd":278 * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f * return # <<<<<<<<<<<<<< * else: * info.format = stdlib.malloc(_buffer_format_string_len) */ __pyx_r = 0; goto __pyx_L0; goto __pyx_L11; } /*else*/ { /* "numpy.pxd":280 * return * else: * info.format = stdlib.malloc(_buffer_format_string_len) # <<<<<<<<<<<<<< * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 */ __pyx_v_info->format = ((char *)malloc(255)); /* "numpy.pxd":281 * else: * info.format = stdlib.malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment # <<<<<<<<<<<<<< * offset = 0 * f = _util_dtypestring(descr, info.format + 1, */ (__pyx_v_info->format[0]) = '^'; /* "numpy.pxd":282 * info.format = stdlib.malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 # <<<<<<<<<<<<<< * f = _util_dtypestring(descr, info.format + 1, * info.format + _buffer_format_string_len, */ __pyx_v_offset = 0; /* "numpy.pxd":285 * f = _util_dtypestring(descr, info.format + 1, * info.format + _buffer_format_string_len, * &offset) # <<<<<<<<<<<<<< * f[0] = c'\0' # Terminate format string * */ __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 255), (&__pyx_v_offset)); if (unlikely(__pyx_t_9 == NULL)) 
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 283; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_f = __pyx_t_9; /* "numpy.pxd":286 * info.format + _buffer_format_string_len, * &offset) * f[0] = c'\0' # Terminate format string # <<<<<<<<<<<<<< * * def __releasebuffer__(ndarray self, Py_buffer* info): */ (__pyx_v_f[0]) = '\x00'; } __pyx_L11:; __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { __Pyx_GOTREF(Py_None); __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; } __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_descr); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__releasebuffer__ (wrapper)", 0); __pyx_pf_5numpy_7ndarray_2__releasebuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info)); __Pyx_RefNannyFinishContext(); } /* "numpy.pxd":288 * f[0] = c'\0' # Terminate format string * * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< * if PyArray_HASFIELDS(self): * stdlib.free(info.format) */ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__releasebuffer__", 0); /* "numpy.pxd":289 * * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_t_1 = PyArray_HASFIELDS(__pyx_v_self); if (__pyx_t_1) { /* "numpy.pxd":290 * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): * stdlib.free(info.format) # <<<<<<<<<<<<<< * if sizeof(npy_intp) != sizeof(Py_ssize_t): * stdlib.free(info.strides) */ free(__pyx_v_info->format); goto __pyx_L3; } __pyx_L3:; /* "numpy.pxd":291 * if PyArray_HASFIELDS(self): * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * stdlib.free(info.strides) * # info.shape was stored after info.strides in the same block */ __pyx_t_1 = ((sizeof(npy_intp)) != (sizeof(Py_ssize_t))); if (__pyx_t_1) { /* "numpy.pxd":292 * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): * stdlib.free(info.strides) # <<<<<<<<<<<<<< * # info.shape was stored after info.strides in the same block * */ free(__pyx_v_info->strides); goto __pyx_L4; } __pyx_L4:; __Pyx_RefNannyFinishContext(); } /* "numpy.pxd":768 * ctypedef npy_cdouble complex_t * * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(1, a) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0); /* "numpy.pxd":769 * * cdef inline object PyArray_MultiIterNew1(a): * 
return PyArray_MultiIterNew(1, a) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew2(a, b): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 769; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":771 * return PyArray_MultiIterNew(1, a) * * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(2, a, b) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0); /* "numpy.pxd":772 * * cdef inline object PyArray_MultiIterNew2(a, b): * return PyArray_MultiIterNew(2, a, b) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew3(a, b, c): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 772; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":774 * return PyArray_MultiIterNew(2, a, b) * * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(3, a, b, c) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0); /* "numpy.pxd":775 * * cdef inline object PyArray_MultiIterNew3(a, b, c): * return PyArray_MultiIterNew(3, a, b, c) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 775; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":777 * return PyArray_MultiIterNew(3, a, b, c) * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(4, a, b, c, d) * */ static CYTHON_INLINE PyObject 
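/* The PyArray_MultiIterNew1 ... PyArray_MultiIterNew5 helpers in this block are
 * the small inline wrappers that Cython's numpy.pxd defines around NumPy's
 * PyArray_MultiIterNew C API -- the broadcasting iterator object that also
 * backs numpy.broadcast at the Python level -- specialised for a fixed number
 * of operands (1 to 5). Each one simply forwards its arguments and propagates
 * errors through the usual __pyx_L1_error path. */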
*__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0); /* "numpy.pxd":778 * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): * return PyArray_MultiIterNew(4, a, b, c, d) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 778; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":780 * return PyArray_MultiIterNew(4, a, b, c, d) * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(5, a, b, c, d, e) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0); /* "numpy.pxd":781 * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): * return PyArray_MultiIterNew(5, a, b, c, d, e) # <<<<<<<<<<<<<< * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 781; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":783 * return PyArray_MultiIterNew(5, a, b, c, d, e) * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< * # Recursive utility function used in __getbuffer__ to get format * # string. The new location in the format string is returned. 
*/ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) { PyArray_Descr *__pyx_v_child = 0; int __pyx_v_endian_detector; int __pyx_v_little_endian; PyObject *__pyx_v_fields = 0; PyObject *__pyx_v_childname = NULL; PyObject *__pyx_v_new_offset = NULL; PyObject *__pyx_v_t = NULL; char *__pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *(*__pyx_t_6)(PyObject *); int __pyx_t_7; int __pyx_t_8; int __pyx_t_9; int __pyx_t_10; long __pyx_t_11; char *__pyx_t_12; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_util_dtypestring", 0); /* "numpy.pxd":790 * cdef int delta_offset * cdef tuple i * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((&endian_detector)[0] != 0) * cdef tuple fields */ __pyx_v_endian_detector = 1; /* "numpy.pxd":791 * cdef tuple i * cdef int endian_detector = 1 * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * cdef tuple fields * */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "numpy.pxd":794 * cdef tuple fields * * for childname in descr.names: # <<<<<<<<<<<<<< * fields = descr.fields[childname] * child, new_offset = fields */ if (unlikely(((PyObject *)__pyx_v_descr->names) == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_1 = ((PyObject *)__pyx_v_descr->names); __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; for (;;) { if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif __Pyx_XDECREF(__pyx_v_childname); __pyx_v_childname = __pyx_t_3; __pyx_t_3 = 0; /* "numpy.pxd":795 * * for childname in descr.names: * fields = descr.fields[childname] # <<<<<<<<<<<<<< * child, new_offset = fields * */ __pyx_t_3 = PyObject_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (!__pyx_t_3) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected tuple, got %.200s", Py_TYPE(__pyx_t_3)->tp_name), 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_XDECREF(((PyObject *)__pyx_v_fields)); __pyx_v_fields = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "numpy.pxd":796 * for childname in descr.names: * fields = descr.fields[childname] * child, new_offset = fields # <<<<<<<<<<<<<< * * if (end - f) - (new_offset - offset[0]) < 15: */ if (likely(PyTuple_CheckExact(((PyObject *)__pyx_v_fields)))) { PyObject* sequence = ((PyObject *)__pyx_v_fields); #if CYTHON_COMPILING_IN_CPYTHON Py_ssize_t size = Py_SIZE(sequence); #else Py_ssize_t size = PySequence_Size(sequence); #endif if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else 
if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); #else __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else if (1) { __Pyx_RaiseNoneNotIterableError(); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } else { Py_ssize_t index = -1; __pyx_t_5 = PyObject_GetIter(((PyObject *)__pyx_v_fields)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = Py_TYPE(__pyx_t_5)->tp_iternext; index = 0; __pyx_t_3 = __pyx_t_6(__pyx_t_5); if (unlikely(!__pyx_t_3)) goto __pyx_L5_unpacking_failed; __Pyx_GOTREF(__pyx_t_3); index = 1; __pyx_t_4 = __pyx_t_6(__pyx_t_5); if (unlikely(!__pyx_t_4)) goto __pyx_L5_unpacking_failed; __Pyx_GOTREF(__pyx_t_4); if (__Pyx_IternextUnpackEndCheck(__pyx_t_6(__pyx_t_5), 2) < 0) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_6 = NULL; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L6_unpacking_done; __pyx_L5_unpacking_failed:; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_6 = NULL; if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_L6_unpacking_done:; } if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_XDECREF(((PyObject *)__pyx_v_child)); __pyx_v_child = ((PyArray_Descr *)__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_v_new_offset); __pyx_v_new_offset = __pyx_t_4; __pyx_t_4 = 0; /* "numpy.pxd":798 * child, new_offset = fields * * if (end - f) - (new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * */ __pyx_t_4 = PyInt_FromLong((__pyx_v_end - __pyx_v_f)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyInt_FromLong((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_3); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyNumber_Subtract(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyObject_RichCompare(__pyx_t_3, __pyx_int_15, Py_LT); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = 
__pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { /* "numpy.pxd":799 * * if (end - f) - (new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_t_5 = PyObject_Call(__pyx_builtin_RuntimeError, ((PyObject *)__pyx_k_tuple_81), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L7; } __pyx_L7:; /* "numpy.pxd":801 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_7 = (__pyx_v_child->byteorder == '>'); if (__pyx_t_7) { __pyx_t_8 = __pyx_v_little_endian; } else { __pyx_t_8 = __pyx_t_7; } if (!__pyx_t_8) { /* "numpy.pxd":802 * * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * # One could encode it in the format string and have Cython */ __pyx_t_7 = (__pyx_v_child->byteorder == '<'); if (__pyx_t_7) { __pyx_t_9 = (!__pyx_v_little_endian); __pyx_t_10 = __pyx_t_9; } else { __pyx_t_10 = __pyx_t_7; } __pyx_t_7 = __pyx_t_10; } else { __pyx_t_7 = __pyx_t_8; } if (__pyx_t_7) { /* "numpy.pxd":803 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_82), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L8; } __pyx_L8:; /* "numpy.pxd":813 * * # Output padding bytes * while offset[0] < new_offset: # <<<<<<<<<<<<<< * f[0] = 120 # "x"; pad byte * f += 1 */ while (1) { __pyx_t_5 = PyInt_FromLong((__pyx_v_offset[0])); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_t_5, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if 
(!__pyx_t_7) break; /* "numpy.pxd":814 * # Output padding bytes * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<< * f += 1 * offset[0] += 1 */ (__pyx_v_f[0]) = 120; /* "numpy.pxd":815 * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte * f += 1 # <<<<<<<<<<<<<< * offset[0] += 1 * */ __pyx_v_f = (__pyx_v_f + 1); /* "numpy.pxd":816 * f[0] = 120 # "x"; pad byte * f += 1 * offset[0] += 1 # <<<<<<<<<<<<<< * * offset[0] += child.itemsize */ __pyx_t_11 = 0; (__pyx_v_offset[__pyx_t_11]) = ((__pyx_v_offset[__pyx_t_11]) + 1); } /* "numpy.pxd":818 * offset[0] += 1 * * offset[0] += child.itemsize # <<<<<<<<<<<<<< * * if not PyDataType_HASFIELDS(child): */ __pyx_t_11 = 0; (__pyx_v_offset[__pyx_t_11]) = ((__pyx_v_offset[__pyx_t_11]) + __pyx_v_child->elsize); /* "numpy.pxd":820 * offset[0] += child.itemsize * * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< * t = child.type_num * if end - f < 5: */ __pyx_t_7 = (!PyDataType_HASFIELDS(__pyx_v_child)); if (__pyx_t_7) { /* "numpy.pxd":821 * * if not PyDataType_HASFIELDS(child): * t = child.type_num # <<<<<<<<<<<<<< * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") */ __pyx_t_3 = PyInt_FromLong(__pyx_v_child->type_num); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 821; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_XDECREF(__pyx_v_t); __pyx_v_t = __pyx_t_3; __pyx_t_3 = 0; /* "numpy.pxd":822 * if not PyDataType_HASFIELDS(child): * t = child.type_num * if end - f < 5: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short.") * */ __pyx_t_7 = ((__pyx_v_end - __pyx_v_f) < 5); if (__pyx_t_7) { /* "numpy.pxd":823 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_t_3 = PyObject_Call(__pyx_builtin_RuntimeError, ((PyObject *)__pyx_k_tuple_84), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L12; } __pyx_L12:; /* "numpy.pxd":826 * * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" */ __pyx_t_3 = PyInt_FromLong(NPY_BYTE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 98; goto __pyx_L13; } /* "numpy.pxd":827 * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" */ __pyx_t_5 = PyInt_FromLong(NPY_UBYTE); 
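/* Note on this if/elif chain: unlike the string-valued mapping used in
 * __getbuffer__ earlier in this file (f = "b", "B", "h", ...), the
 * _util_dtypestring version stores the ASCII value of each format character
 * directly into the output buffer (98 = 'b', 66 = 'B', 104 = 'h', 72 = 'H',
 * 105 = 'i', 73 = 'I', and so on), as explained by the "Until ticket #99 is
 * fixed, use integers to avoid warnings" comment quoted from numpy.pxd above. */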
if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 66; goto __pyx_L13; } /* "numpy.pxd":828 * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" */ __pyx_t_3 = PyInt_FromLong(NPY_SHORT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 104; goto __pyx_L13; } /* "numpy.pxd":829 * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" */ __pyx_t_5 = PyInt_FromLong(NPY_USHORT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 72; goto __pyx_L13; } /* "numpy.pxd":830 * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" */ __pyx_t_3 = PyInt_FromLong(NPY_INT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 105; goto __pyx_L13; } /* "numpy.pxd":831 * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t 
== NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" */ __pyx_t_5 = PyInt_FromLong(NPY_UINT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 73; goto __pyx_L13; } /* "numpy.pxd":832 * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" */ __pyx_t_3 = PyInt_FromLong(NPY_LONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 108; goto __pyx_L13; } /* "numpy.pxd":833 * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" */ __pyx_t_5 = PyInt_FromLong(NPY_ULONG); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 76; goto __pyx_L13; } /* "numpy.pxd":834 * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" */ __pyx_t_3 = PyInt_FromLong(NPY_LONGLONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; 
__pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 113; goto __pyx_L13; } /* "numpy.pxd":835 * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" */ __pyx_t_5 = PyInt_FromLong(NPY_ULONGLONG); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 81; goto __pyx_L13; } /* "numpy.pxd":836 * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" */ __pyx_t_3 = PyInt_FromLong(NPY_FLOAT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 102; goto __pyx_L13; } /* "numpy.pxd":837 * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf */ __pyx_t_5 = PyInt_FromLong(NPY_DOUBLE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 100; goto __pyx_L13; } /* "numpy.pxd":838 * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd */ __pyx_t_3 = PyInt_FromLong(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = 
PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 103; goto __pyx_L13; } /* "numpy.pxd":839 * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg */ __pyx_t_5 = PyInt_FromLong(NPY_CFLOAT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 102; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L13; } /* "numpy.pxd":840 * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" */ __pyx_t_3 = PyInt_FromLong(NPY_CDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 100; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L13; } /* "numpy.pxd":841 * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: */ __pyx_t_5 = PyInt_FromLong(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
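/* The complex branches (NPY_CFLOAT, NPY_CDOUBLE, NPY_CLONGDOUBLE) emit the
 * two-character codes "Zf", "Zd" and "Zg": they write 90 ('Z') into f[0] and
 * the base code into f[1], then advance f by one extra byte on top of the
 * common "f += 1" that follows the whole chain (the numpy.pxd:845 step below). */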
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 103; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L13; } /* "numpy.pxd":842 * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ __pyx_t_3 = PyInt_FromLong(NPY_OBJECT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 79; goto __pyx_L13; } /*else*/ { /* "numpy.pxd":844 * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * f += 1 * else: */ __pyx_t_5 = PyNumber_Remainder(((PyObject *)__pyx_kp_u_79), __pyx_v_t); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_5)); __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_t_5)); __Pyx_GIVEREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L13:; /* "numpy.pxd":845 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * f += 1 # <<<<<<<<<<<<<< * else: * # Cython ignores struct boundary information ("T{...}"), */ __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L11; } /*else*/ { /* "numpy.pxd":849 * # Cython ignores struct boundary information ("T{...}"), * # so don't output it * f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<< * return f * */ __pyx_t_12 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_12 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 849; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_f = __pyx_t_12; } __pyx_L11:; } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "numpy.pxd":850 * # so don't output it * f = _util_dtypestring(child, f, end, offset) * return f # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_f; goto __pyx_L0; __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_child); __Pyx_XDECREF(__pyx_v_fields); 
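/* Summary of the generated _util_dtypestring body above: for every named field
 * in descr.fields it unpacks (child, new_offset), rejects non-native byte
 * orders, writes 'x' (120) pad bytes until the field offset is reached,
 * recurses into nested structured dtypes, and returns the new write position
 * in the format buffer. The "Format string allocated too short" checks guard
 * the fixed-size buffer (_buffer_format_string_len bytes, 255 in this
 * translation unit) that __getbuffer__ allocates for structured dtypes.
 * As a rough illustration (not part of the generated code): a packed dtype
 * such as np.dtype([('a', np.int32), ('b', np.float64)]) should come out as
 * approximately "^id", with pad bytes appearing only where field offsets
 * leave gaps. */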
__Pyx_XDECREF(__pyx_v_childname); __Pyx_XDECREF(__pyx_v_new_offset); __Pyx_XDECREF(__pyx_v_t); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":965 * * * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< * cdef PyObject* baseptr * if base is None: */ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) { PyObject *__pyx_v_baseptr; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("set_array_base", 0); /* "numpy.pxd":967 * cdef inline void set_array_base(ndarray arr, object base): * cdef PyObject* baseptr * if base is None: # <<<<<<<<<<<<<< * baseptr = NULL * else: */ __pyx_t_1 = (__pyx_v_base == Py_None); if (__pyx_t_1) { /* "numpy.pxd":968 * cdef PyObject* baseptr * if base is None: * baseptr = NULL # <<<<<<<<<<<<<< * else: * Py_INCREF(base) # important to do this before decref below! */ __pyx_v_baseptr = NULL; goto __pyx_L3; } /*else*/ { /* "numpy.pxd":970 * baseptr = NULL * else: * Py_INCREF(base) # important to do this before decref below! # <<<<<<<<<<<<<< * baseptr = base * Py_XDECREF(arr.base) */ Py_INCREF(__pyx_v_base); /* "numpy.pxd":971 * else: * Py_INCREF(base) # important to do this before decref below! * baseptr = base # <<<<<<<<<<<<<< * Py_XDECREF(arr.base) * arr.base = baseptr */ __pyx_v_baseptr = ((PyObject *)__pyx_v_base); } __pyx_L3:; /* "numpy.pxd":972 * Py_INCREF(base) # important to do this before decref below! * baseptr = base * Py_XDECREF(arr.base) # <<<<<<<<<<<<<< * arr.base = baseptr * */ Py_XDECREF(__pyx_v_arr->base); /* "numpy.pxd":973 * baseptr = base * Py_XDECREF(arr.base) * arr.base = baseptr # <<<<<<<<<<<<<< * * cdef inline object get_array_base(ndarray arr): */ __pyx_v_arr->base = __pyx_v_baseptr; __Pyx_RefNannyFinishContext(); } /* "numpy.pxd":975 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("get_array_base", 0); /* "numpy.pxd":976 * * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: # <<<<<<<<<<<<<< * return None * else: */ __pyx_t_1 = (__pyx_v_arr->base == NULL); if (__pyx_t_1) { /* "numpy.pxd":977 * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: * return None # <<<<<<<<<<<<<< * else: * return arr.base */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_None); __pyx_r = Py_None; goto __pyx_L0; goto __pyx_L3; } /*else*/ { /* "numpy.pxd":979 * return None * else: * return arr.base # <<<<<<<<<<<<<< */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_arr->base)); __pyx_r = ((PyObject *)__pyx_v_arr->base); goto __pyx_L0; } __pyx_L3:; __pyx_r = Py_None; __Pyx_INCREF(Py_None); __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyMethodDef __pyx_methods[] = { {__Pyx_NAMESTR("mu3_tet"), (PyCFunction)__pyx_pw_4nipy_10algorithms_10statistics_6intvol_1mu3_tet, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4nipy_10algorithms_10statistics_6intvol_mu3_tet)}, {__Pyx_NAMESTR("mu2_tet"), (PyCFunction)__pyx_pw_4nipy_10algorithms_10statistics_6intvol_3mu2_tet, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4nipy_10algorithms_10statistics_6intvol_2mu2_tet)}, {__Pyx_NAMESTR("mu1_tet"), (PyCFunction)__pyx_pw_4nipy_10algorithms_10statistics_6intvol_5mu1_tet, METH_VARARGS|METH_KEYWORDS, 
__Pyx_DOCSTR(__pyx_doc_4nipy_10algorithms_10statistics_6intvol_4mu1_tet)}, {__Pyx_NAMESTR("_mu1_tetface"), (PyCFunction)__pyx_pw_4nipy_10algorithms_10statistics_6intvol_7_mu1_tetface, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(0)}, {__Pyx_NAMESTR("mu2_tri"), (PyCFunction)__pyx_pw_4nipy_10algorithms_10statistics_6intvol_9mu2_tri, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4nipy_10algorithms_10statistics_6intvol_8mu2_tri)}, {__Pyx_NAMESTR("mu1_tri"), (PyCFunction)__pyx_pw_4nipy_10algorithms_10statistics_6intvol_11mu1_tri, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4nipy_10algorithms_10statistics_6intvol_10mu1_tri)}, {__Pyx_NAMESTR("mu1_edge"), (PyCFunction)__pyx_pw_4nipy_10algorithms_10statistics_6intvol_13mu1_edge, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4nipy_10algorithms_10statistics_6intvol_12mu1_edge)}, {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 static struct PyModuleDef __pyx_moduledef = { PyModuleDef_HEAD_INIT, __Pyx_NAMESTR("intvol"), __Pyx_DOCSTR(__pyx_k_85), /* m_doc */ -1, /* m_size */ __pyx_methods /* m_methods */, NULL, /* m_reload */ NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_kp_s_1, __pyx_k_1, sizeof(__pyx_k_1), 0, 0, 1, 0}, {&__pyx_kp_s_17, __pyx_k_17, sizeof(__pyx_k_17), 0, 0, 1, 0}, {&__pyx_kp_u_73, __pyx_k_73, sizeof(__pyx_k_73), 0, 1, 0, 0}, {&__pyx_kp_u_75, __pyx_k_75, sizeof(__pyx_k_75), 0, 1, 0, 0}, {&__pyx_kp_u_77, __pyx_k_77, sizeof(__pyx_k_77), 0, 1, 0, 0}, {&__pyx_kp_u_79, __pyx_k_79, sizeof(__pyx_k_79), 0, 1, 0, 0}, {&__pyx_n_s_8, __pyx_k_8, sizeof(__pyx_k_8), 0, 0, 1, 1}, {&__pyx_kp_u_80, __pyx_k_80, sizeof(__pyx_k_80), 0, 1, 0, 0}, {&__pyx_kp_u_83, __pyx_k_83, sizeof(__pyx_k_83), 0, 1, 0, 0}, {&__pyx_n_s_86, __pyx_k_86, sizeof(__pyx_k_86), 0, 0, 1, 1}, {&__pyx_n_s_87, __pyx_k_87, sizeof(__pyx_k_87), 0, 0, 1, 1}, {&__pyx_kp_s_90, __pyx_k_90, sizeof(__pyx_k_90), 0, 0, 1, 0}, {&__pyx_n_s_91, __pyx_k_91, sizeof(__pyx_k_91), 0, 0, 1, 1}, {&__pyx_n_s__D, __pyx_k__D, sizeof(__pyx_k__D), 0, 0, 1, 1}, {&__pyx_n_s__D00, __pyx_k__D00, sizeof(__pyx_k__D00), 0, 0, 1, 1}, {&__pyx_n_s__D01, __pyx_k__D01, sizeof(__pyx_k__D01), 0, 0, 1, 1}, {&__pyx_n_s__D02, __pyx_k__D02, sizeof(__pyx_k__D02), 0, 0, 1, 1}, {&__pyx_n_s__D03, __pyx_k__D03, sizeof(__pyx_k__D03), 0, 0, 1, 1}, {&__pyx_n_s__D11, __pyx_k__D11, sizeof(__pyx_k__D11), 0, 0, 1, 1}, {&__pyx_n_s__D12, __pyx_k__D12, sizeof(__pyx_k__D12), 0, 0, 1, 1}, {&__pyx_n_s__D13, __pyx_k__D13, sizeof(__pyx_k__D13), 0, 0, 1, 1}, {&__pyx_n_s__D22, __pyx_k__D22, sizeof(__pyx_k__D22), 0, 0, 1, 1}, {&__pyx_n_s__D23, __pyx_k__D23, sizeof(__pyx_k__D23), 0, 0, 1, 1}, {&__pyx_n_s__D33, __pyx_k__D33, sizeof(__pyx_k__D33), 0, 0, 1, 1}, {&__pyx_n_s__Ds0s0, __pyx_k__Ds0s0, sizeof(__pyx_k__Ds0s0), 0, 0, 1, 1}, {&__pyx_n_s__Ds0s1, __pyx_k__Ds0s1, sizeof(__pyx_k__Ds0s1), 0, 0, 1, 1}, {&__pyx_n_s__Ds0t0, __pyx_k__Ds0t0, sizeof(__pyx_k__Ds0t0), 0, 0, 1, 1}, {&__pyx_n_s__Ds0t1, __pyx_k__Ds0t1, sizeof(__pyx_k__Ds0t1), 0, 0, 1, 1}, {&__pyx_n_s__Ds1s1, __pyx_k__Ds1s1, sizeof(__pyx_k__Ds1s1), 0, 0, 1, 1}, {&__pyx_n_s__Ds1t0, __pyx_k__Ds1t0, sizeof(__pyx_k__Ds1t0), 0, 0, 1, 1}, {&__pyx_n_s__Ds1t1, __pyx_k__Ds1t1, sizeof(__pyx_k__Ds1t1), 0, 0, 1, 1}, {&__pyx_n_s__Dt0t0, __pyx_k__Dt0t0, sizeof(__pyx_k__Dt0t0), 0, 0, 1, 1}, {&__pyx_n_s__Dt0t1, __pyx_k__Dt0t1, sizeof(__pyx_k__Dt0t1), 0, 0, 1, 1}, {&__pyx_n_s__Dt1t1, __pyx_k__Dt1t1, sizeof(__pyx_k__Dt1t1), 0, 0, 1, 1}, {&__pyx_n_s__EC1d, __pyx_k__EC1d, sizeof(__pyx_k__EC1d), 0, 0, 1, 1}, {&__pyx_n_s__EC2d, 
__pyx_k__EC2d, sizeof(__pyx_k__EC2d), 0, 0, 1, 1}, {&__pyx_n_s__EC3d, __pyx_k__EC3d, sizeof(__pyx_k__EC3d), 0, 0, 1, 1}, {&__pyx_n_s__Lips1d, __pyx_k__Lips1d, sizeof(__pyx_k__Lips1d), 0, 0, 1, 1}, {&__pyx_n_s__Lips2d, __pyx_k__Lips2d, sizeof(__pyx_k__Lips2d), 0, 0, 1, 1}, {&__pyx_n_s__Lips3d, __pyx_k__Lips3d, sizeof(__pyx_k__Lips3d), 0, 0, 1, 1}, {&__pyx_n_s__RuntimeError, __pyx_k__RuntimeError, sizeof(__pyx_k__RuntimeError), 0, 0, 1, 1}, {&__pyx_n_s__ValueError, __pyx_k__ValueError, sizeof(__pyx_k__ValueError), 0, 0, 1, 1}, {&__pyx_n_s____main__, __pyx_k____main__, sizeof(__pyx_k____main__), 0, 0, 1, 1}, {&__pyx_n_s____test__, __pyx_k____test__, sizeof(__pyx_k____test__), 0, 0, 1, 1}, {&__pyx_n_s___convert_stride1, __pyx_k___convert_stride1, sizeof(__pyx_k___convert_stride1), 0, 0, 1, 1}, {&__pyx_n_s___convert_stride2, __pyx_k___convert_stride2, sizeof(__pyx_k___convert_stride2), 0, 0, 1, 1}, {&__pyx_n_s___convert_stride3, __pyx_k___convert_stride3, sizeof(__pyx_k___convert_stride3), 0, 0, 1, 1}, {&__pyx_n_s__array, __pyx_k__array, sizeof(__pyx_k__array), 0, 0, 1, 1}, {&__pyx_n_s__bool, __pyx_k__bool, sizeof(__pyx_k__bool), 0, 0, 1, 1}, {&__pyx_n_s__c, __pyx_k__c, sizeof(__pyx_k__c), 0, 0, 1, 1}, {&__pyx_n_s__coords, __pyx_k__coords, sizeof(__pyx_k__coords), 0, 0, 1, 1}, {&__pyx_n_s__coords_c, __pyx_k__coords_c, sizeof(__pyx_k__coords_c), 0, 0, 1, 1}, {&__pyx_n_s__cvertices, __pyx_k__cvertices, sizeof(__pyx_k__cvertices), 0, 0, 1, 1}, {&__pyx_n_s__d2, __pyx_k__d2, sizeof(__pyx_k__d2), 0, 0, 1, 1}, {&__pyx_n_s__d3, __pyx_k__d3, sizeof(__pyx_k__d3), 0, 0, 1, 1}, {&__pyx_n_s__d4, __pyx_k__d4, sizeof(__pyx_k__d4), 0, 0, 1, 1}, {&__pyx_n_s__difference, __pyx_k__difference, sizeof(__pyx_k__difference), 0, 0, 1, 1}, {&__pyx_n_s__dok_matrix, __pyx_k__dok_matrix, sizeof(__pyx_k__dok_matrix), 0, 0, 1, 1}, {&__pyx_n_s__ds2, __pyx_k__ds2, sizeof(__pyx_k__ds2), 0, 0, 1, 1}, {&__pyx_n_s__ds3, __pyx_k__ds3, sizeof(__pyx_k__ds3), 0, 0, 1, 1}, {&__pyx_n_s__ds4, __pyx_k__ds4, sizeof(__pyx_k__ds4), 0, 0, 1, 1}, {&__pyx_n_s__dstrides, __pyx_k__dstrides, sizeof(__pyx_k__dstrides), 0, 0, 1, 1}, {&__pyx_n_s__dtype, __pyx_k__dtype, sizeof(__pyx_k__dtype), 0, 0, 1, 1}, {&__pyx_n_s__fcoords, __pyx_k__fcoords, sizeof(__pyx_k__fcoords), 0, 0, 1, 1}, {&__pyx_n_s__fmask, __pyx_k__fmask, sizeof(__pyx_k__fmask), 0, 0, 1, 1}, {&__pyx_n_s__fpmask, __pyx_k__fpmask, sizeof(__pyx_k__fpmask), 0, 0, 1, 1}, {&__pyx_n_s__hstack, __pyx_k__hstack, sizeof(__pyx_k__hstack), 0, 0, 1, 1}, {&__pyx_n_s__i, __pyx_k__i, sizeof(__pyx_k__i), 0, 0, 1, 1}, {&__pyx_n_s__index, __pyx_k__index, sizeof(__pyx_k__index), 0, 0, 1, 1}, {&__pyx_n_s__int, __pyx_k__int, sizeof(__pyx_k__int), 0, 0, 1, 1}, {&__pyx_n_s__intp, __pyx_k__intp, sizeof(__pyx_k__intp), 0, 0, 1, 1}, {&__pyx_n_s__issubset, __pyx_k__issubset, sizeof(__pyx_k__issubset), 0, 0, 1, 1}, {&__pyx_n_s__j, __pyx_k__j, sizeof(__pyx_k__j), 0, 0, 1, 1}, {&__pyx_n_s__join_complexes, __pyx_k__join_complexes, sizeof(__pyx_k__join_complexes), 0, 0, 1, 1}, {&__pyx_n_s__k, __pyx_k__k, sizeof(__pyx_k__k), 0, 0, 1, 1}, {&__pyx_n_s__l, __pyx_k__l, sizeof(__pyx_k__l), 0, 0, 1, 1}, {&__pyx_n_s__l0, __pyx_k__l0, sizeof(__pyx_k__l0), 0, 0, 1, 1}, {&__pyx_n_s__l1, __pyx_k__l1, sizeof(__pyx_k__l1), 0, 0, 1, 1}, {&__pyx_n_s__l2, __pyx_k__l2, sizeof(__pyx_k__l2), 0, 0, 1, 1}, {&__pyx_n_s__l3, __pyx_k__l3, sizeof(__pyx_k__l3), 0, 0, 1, 1}, {&__pyx_n_s__m, __pyx_k__m, sizeof(__pyx_k__m), 0, 0, 1, 1}, {&__pyx_n_s__m2, __pyx_k__m2, sizeof(__pyx_k__m2), 0, 0, 1, 1}, {&__pyx_n_s__m3, __pyx_k__m3, 
sizeof(__pyx_k__m3), 0, 0, 1, 1}, {&__pyx_n_s__m4, __pyx_k__m4, sizeof(__pyx_k__m4), 0, 0, 1, 1}, {&__pyx_n_s__mask, __pyx_k__mask, sizeof(__pyx_k__mask), 0, 0, 1, 1}, {&__pyx_n_s__mask_c, __pyx_k__mask_c, sizeof(__pyx_k__mask_c), 0, 0, 1, 1}, {&__pyx_n_s__mr, __pyx_k__mr, sizeof(__pyx_k__mr), 0, 0, 1, 1}, {&__pyx_n_s__ms, __pyx_k__ms, sizeof(__pyx_k__ms), 0, 0, 1, 1}, {&__pyx_n_s__ndim, __pyx_k__ndim, sizeof(__pyx_k__ndim), 0, 0, 1, 1}, {&__pyx_n_s__np, __pyx_k__np, sizeof(__pyx_k__np), 0, 0, 1, 1}, {&__pyx_n_s__npix, __pyx_k__npix, sizeof(__pyx_k__npix), 0, 0, 1, 1}, {&__pyx_n_s__numpy, __pyx_k__numpy, sizeof(__pyx_k__numpy), 0, 0, 1, 1}, {&__pyx_n_s__nvox, __pyx_k__nvox, sizeof(__pyx_k__nvox), 0, 0, 1, 1}, {&__pyx_n_s__pi, __pyx_k__pi, sizeof(__pyx_k__pi), 0, 0, 1, 1}, {&__pyx_n_s__pindex, __pyx_k__pindex, sizeof(__pyx_k__pindex), 0, 0, 1, 1}, {&__pyx_n_s__pmask, __pyx_k__pmask, sizeof(__pyx_k__pmask), 0, 0, 1, 1}, {&__pyx_n_s__pmask_shape, __pyx_k__pmask_shape, sizeof(__pyx_k__pmask_shape), 0, 0, 1, 1}, {&__pyx_n_s__r, __pyx_k__r, sizeof(__pyx_k__r), 0, 0, 1, 1}, {&__pyx_n_s__range, __pyx_k__range, sizeof(__pyx_k__range), 0, 0, 1, 1}, {&__pyx_n_s__res, __pyx_k__res, sizeof(__pyx_k__res), 0, 0, 1, 1}, {&__pyx_n_s__reshape, __pyx_k__reshape, sizeof(__pyx_k__reshape), 0, 0, 1, 1}, {&__pyx_n_s__rr, __pyx_k__rr, sizeof(__pyx_k__rr), 0, 0, 1, 1}, {&__pyx_n_s__s, __pyx_k__s, sizeof(__pyx_k__s), 0, 0, 1, 1}, {&__pyx_n_s__s0, __pyx_k__s0, sizeof(__pyx_k__s0), 0, 0, 1, 1}, {&__pyx_n_s__s1, __pyx_k__s1, sizeof(__pyx_k__s1), 0, 0, 1, 1}, {&__pyx_n_s__s2, __pyx_k__s2, sizeof(__pyx_k__s2), 0, 0, 1, 1}, {&__pyx_n_s__shape, __pyx_k__shape, sizeof(__pyx_k__shape), 0, 0, 1, 1}, {&__pyx_n_s__size, __pyx_k__size, sizeof(__pyx_k__size), 0, 0, 1, 1}, {&__pyx_n_s__sorted, __pyx_k__sorted, sizeof(__pyx_k__sorted), 0, 0, 1, 1}, {&__pyx_n_s__squeeze, __pyx_k__squeeze, sizeof(__pyx_k__squeeze), 0, 0, 1, 1}, {&__pyx_n_s__ss, __pyx_k__ss, sizeof(__pyx_k__ss), 0, 0, 1, 1}, {&__pyx_n_s__ss0, __pyx_k__ss0, sizeof(__pyx_k__ss0), 0, 0, 1, 1}, {&__pyx_n_s__ss0d, __pyx_k__ss0d, sizeof(__pyx_k__ss0d), 0, 0, 1, 1}, {&__pyx_n_s__ss1, __pyx_k__ss1, sizeof(__pyx_k__ss1), 0, 0, 1, 1}, {&__pyx_n_s__ss1d, __pyx_k__ss1d, sizeof(__pyx_k__ss1d), 0, 0, 1, 1}, {&__pyx_n_s__ss2, __pyx_k__ss2, sizeof(__pyx_k__ss2), 0, 0, 1, 1}, {&__pyx_n_s__ss2d, __pyx_k__ss2d, sizeof(__pyx_k__ss2d), 0, 0, 1, 1}, {&__pyx_n_s__stride1, __pyx_k__stride1, sizeof(__pyx_k__stride1), 0, 0, 1, 1}, {&__pyx_n_s__stride2, __pyx_k__stride2, sizeof(__pyx_k__stride2), 0, 0, 1, 1}, {&__pyx_n_s__strides, __pyx_k__strides, sizeof(__pyx_k__strides), 0, 0, 1, 1}, {&__pyx_n_s__strides_from, __pyx_k__strides_from, sizeof(__pyx_k__strides_from), 0, 0, 1, 1}, {&__pyx_n_s__sum, __pyx_k__sum, sizeof(__pyx_k__sum), 0, 0, 1, 1}, {&__pyx_n_s__union, __pyx_k__union, sizeof(__pyx_k__union), 0, 0, 1, 1}, {&__pyx_n_s__unique, __pyx_k__unique, sizeof(__pyx_k__unique), 0, 0, 1, 1}, {&__pyx_n_s__utils, __pyx_k__utils, sizeof(__pyx_k__utils), 0, 0, 1, 1}, {&__pyx_n_s__v, __pyx_k__v, sizeof(__pyx_k__v), 0, 0, 1, 1}, {&__pyx_n_s__v0, __pyx_k__v0, sizeof(__pyx_k__v0), 0, 0, 1, 1}, {&__pyx_n_s__v1, __pyx_k__v1, sizeof(__pyx_k__v1), 0, 0, 1, 1}, {&__pyx_n_s__v2, __pyx_k__v2, sizeof(__pyx_k__v2), 0, 0, 1, 1}, {&__pyx_n_s__v3, __pyx_k__v3, sizeof(__pyx_k__v3), 0, 0, 1, 1}, {&__pyx_n_s__value, __pyx_k__value, sizeof(__pyx_k__value), 0, 0, 1, 1}, {&__pyx_n_s__verts, __pyx_k__verts, sizeof(__pyx_k__verts), 0, 0, 1, 1}, {&__pyx_n_s__w0, __pyx_k__w0, sizeof(__pyx_k__w0), 0, 0, 1, 1}, 
{&__pyx_n_s__w1, __pyx_k__w1, sizeof(__pyx_k__w1), 0, 0, 1, 1}, {&__pyx_n_s__w2, __pyx_k__w2, sizeof(__pyx_k__w2), 0, 0, 1, 1}, {&__pyx_n_s__w3, __pyx_k__w3, sizeof(__pyx_k__w3), 0, 0, 1, 1}, {&__pyx_n_s__zeros, __pyx_k__zeros, sizeof(__pyx_k__zeros), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_ValueError = __Pyx_GetName(__pyx_b, __pyx_n_s__ValueError); if (!__pyx_builtin_ValueError) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 373; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_range = __Pyx_GetName(__pyx_b, __pyx_n_s__range); if (!__pyx_builtin_range) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 430; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_sorted = __Pyx_GetName(__pyx_b, __pyx_n_s__sorted); if (!__pyx_builtin_sorted) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 581; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_RuntimeError = __Pyx_GetName(__pyx_b, __pyx_n_s__RuntimeError); if (!__pyx_builtin_RuntimeError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} return 0; __pyx_L1_error:; return -1; } static int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "nipy/algorithms/statistics/intvol.pyx":373 * """ * if not set(np.unique(mask)).issubset([0,1]): * raise ValueError('mask should be filled with 0/1 ' # <<<<<<<<<<<<<< * 'values, but be of type np.int') * cdef: */ __pyx_k_tuple_2 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 373; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_2); __Pyx_INCREF(((PyObject *)__pyx_kp_s_1)); PyTuple_SET_ITEM(__pyx_k_tuple_2, 0, ((PyObject *)__pyx_kp_s_1)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_1)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_2)); /* "nipy/algorithms/statistics/intvol.pyx":395 * pmask_shape = np.array(mask.shape) + 1 * pmask = np.zeros(pmask_shape, np.int) * pmask[:-1,:-1,:-1] = mask_c # <<<<<<<<<<<<<< * * s0, s1, s2 = (pmask.shape[0], pmask.shape[1], pmask.shape[2]) */ __pyx_k_slice_3 = PySlice_New(Py_None, __pyx_int_neg_1, Py_None); if (unlikely(!__pyx_k_slice_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 395; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_slice_3); __Pyx_GIVEREF(__pyx_k_slice_3); __pyx_k_slice_4 = PySlice_New(Py_None, __pyx_int_neg_1, Py_None); if (unlikely(!__pyx_k_slice_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 395; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_slice_4); __Pyx_GIVEREF(__pyx_k_slice_4); __pyx_k_slice_5 = PySlice_New(Py_None, __pyx_int_neg_1, Py_None); if (unlikely(!__pyx_k_slice_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 395; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_slice_5); __Pyx_GIVEREF(__pyx_k_slice_5); __pyx_k_tuple_6 = PyTuple_New(3); if (unlikely(!__pyx_k_tuple_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 395; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_6); __Pyx_INCREF(__pyx_k_slice_3); PyTuple_SET_ITEM(__pyx_k_tuple_6, 0, __pyx_k_slice_3); __Pyx_GIVEREF(__pyx_k_slice_3); __Pyx_INCREF(__pyx_k_slice_4); PyTuple_SET_ITEM(__pyx_k_tuple_6, 1, __pyx_k_slice_4); __Pyx_GIVEREF(__pyx_k_slice_4); __Pyx_INCREF(__pyx_k_slice_5); PyTuple_SET_ITEM(__pyx_k_tuple_6, 2, __pyx_k_slice_5); __Pyx_GIVEREF(__pyx_k_slice_5); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_6)); /* 
"nipy/algorithms/statistics/intvol.pyx":399 * s0, s1, s2 = (pmask.shape[0], pmask.shape[1], pmask.shape[2]) * * fpmask = pmask.reshape(-1) # <<<<<<<<<<<<<< * cdef: * np.ndarray[np.intp_t, ndim=1] strides */ __pyx_k_tuple_7 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 399; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_7); __Pyx_INCREF(__pyx_int_neg_1); PyTuple_SET_ITEM(__pyx_k_tuple_7, 0, __pyx_int_neg_1); __Pyx_GIVEREF(__pyx_int_neg_1); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_7)); /* "nipy/algorithms/statistics/intvol.pyx":407 * # We first figure out which vertices, edges, triangles, tetrahedra * # are uniquely associated with an interior voxel * union = join_complexes(*[cube_with_strides_center((0,0,1), strides), # <<<<<<<<<<<<<< * cube_with_strides_center((0,1,0), strides), * cube_with_strides_center((0,1,1), strides), */ __pyx_k_tuple_9 = PyTuple_New(3); if (unlikely(!__pyx_k_tuple_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 407; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_9); __Pyx_INCREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_k_tuple_9, 0, __pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); __Pyx_INCREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_k_tuple_9, 1, __pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); __Pyx_INCREF(__pyx_int_1); PyTuple_SET_ITEM(__pyx_k_tuple_9, 2, __pyx_int_1); __Pyx_GIVEREF(__pyx_int_1); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_9)); /* "nipy/algorithms/statistics/intvol.pyx":408 * # are uniquely associated with an interior voxel * union = join_complexes(*[cube_with_strides_center((0,0,1), strides), * cube_with_strides_center((0,1,0), strides), # <<<<<<<<<<<<<< * cube_with_strides_center((0,1,1), strides), * cube_with_strides_center((1,0,0), strides), */ __pyx_k_tuple_10 = PyTuple_New(3); if (unlikely(!__pyx_k_tuple_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 408; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_10); __Pyx_INCREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_k_tuple_10, 0, __pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); __Pyx_INCREF(__pyx_int_1); PyTuple_SET_ITEM(__pyx_k_tuple_10, 1, __pyx_int_1); __Pyx_GIVEREF(__pyx_int_1); __Pyx_INCREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_k_tuple_10, 2, __pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_10)); /* "nipy/algorithms/statistics/intvol.pyx":409 * union = join_complexes(*[cube_with_strides_center((0,0,1), strides), * cube_with_strides_center((0,1,0), strides), * cube_with_strides_center((0,1,1), strides), # <<<<<<<<<<<<<< * cube_with_strides_center((1,0,0), strides), * cube_with_strides_center((1,0,1), strides), */ __pyx_k_tuple_11 = PyTuple_New(3); if (unlikely(!__pyx_k_tuple_11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 409; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_11); __Pyx_INCREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_k_tuple_11, 0, __pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); __Pyx_INCREF(__pyx_int_1); PyTuple_SET_ITEM(__pyx_k_tuple_11, 1, __pyx_int_1); __Pyx_GIVEREF(__pyx_int_1); __Pyx_INCREF(__pyx_int_1); PyTuple_SET_ITEM(__pyx_k_tuple_11, 2, __pyx_int_1); __Pyx_GIVEREF(__pyx_int_1); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_11)); /* "nipy/algorithms/statistics/intvol.pyx":410 * cube_with_strides_center((0,1,0), strides), * cube_with_strides_center((0,1,1), strides), * cube_with_strides_center((1,0,0), strides), # <<<<<<<<<<<<<< * cube_with_strides_center((1,0,1), strides), * cube_with_strides_center((1,1,0), strides), */ 
__pyx_k_tuple_12 = PyTuple_New(3); if (unlikely(!__pyx_k_tuple_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 410; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_12); __Pyx_INCREF(__pyx_int_1); PyTuple_SET_ITEM(__pyx_k_tuple_12, 0, __pyx_int_1); __Pyx_GIVEREF(__pyx_int_1); __Pyx_INCREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_k_tuple_12, 1, __pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); __Pyx_INCREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_k_tuple_12, 2, __pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_12)); /* "nipy/algorithms/statistics/intvol.pyx":411 * cube_with_strides_center((0,1,1), strides), * cube_with_strides_center((1,0,0), strides), * cube_with_strides_center((1,0,1), strides), # <<<<<<<<<<<<<< * cube_with_strides_center((1,1,0), strides), * cube_with_strides_center((1,1,1), strides)]) */ __pyx_k_tuple_13 = PyTuple_New(3); if (unlikely(!__pyx_k_tuple_13)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 411; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_13); __Pyx_INCREF(__pyx_int_1); PyTuple_SET_ITEM(__pyx_k_tuple_13, 0, __pyx_int_1); __Pyx_GIVEREF(__pyx_int_1); __Pyx_INCREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_k_tuple_13, 1, __pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); __Pyx_INCREF(__pyx_int_1); PyTuple_SET_ITEM(__pyx_k_tuple_13, 2, __pyx_int_1); __Pyx_GIVEREF(__pyx_int_1); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_13)); /* "nipy/algorithms/statistics/intvol.pyx":412 * cube_with_strides_center((1,0,0), strides), * cube_with_strides_center((1,0,1), strides), * cube_with_strides_center((1,1,0), strides), # <<<<<<<<<<<<<< * cube_with_strides_center((1,1,1), strides)]) * c = cube_with_strides_center((0,0,0), strides) */ __pyx_k_tuple_14 = PyTuple_New(3); if (unlikely(!__pyx_k_tuple_14)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 412; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_14); __Pyx_INCREF(__pyx_int_1); PyTuple_SET_ITEM(__pyx_k_tuple_14, 0, __pyx_int_1); __Pyx_GIVEREF(__pyx_int_1); __Pyx_INCREF(__pyx_int_1); PyTuple_SET_ITEM(__pyx_k_tuple_14, 1, __pyx_int_1); __Pyx_GIVEREF(__pyx_int_1); __Pyx_INCREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_k_tuple_14, 2, __pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_14)); /* "nipy/algorithms/statistics/intvol.pyx":413 * cube_with_strides_center((1,0,1), strides), * cube_with_strides_center((1,1,0), strides), * cube_with_strides_center((1,1,1), strides)]) # <<<<<<<<<<<<<< * c = cube_with_strides_center((0,0,0), strides) * */ __pyx_k_tuple_15 = PyTuple_New(3); if (unlikely(!__pyx_k_tuple_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 413; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_15); __Pyx_INCREF(__pyx_int_1); PyTuple_SET_ITEM(__pyx_k_tuple_15, 0, __pyx_int_1); __Pyx_GIVEREF(__pyx_int_1); __Pyx_INCREF(__pyx_int_1); PyTuple_SET_ITEM(__pyx_k_tuple_15, 1, __pyx_int_1); __Pyx_GIVEREF(__pyx_int_1); __Pyx_INCREF(__pyx_int_1); PyTuple_SET_ITEM(__pyx_k_tuple_15, 2, __pyx_int_1); __Pyx_GIVEREF(__pyx_int_1); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_15)); /* "nipy/algorithms/statistics/intvol.pyx":414 * cube_with_strides_center((1,1,0), strides), * cube_with_strides_center((1,1,1), strides)]) * c = cube_with_strides_center((0,0,0), strides) # <<<<<<<<<<<<<< * * d4 = np.array(list(c[4].difference(union[4]))) */ __pyx_k_tuple_16 = PyTuple_New(3); if (unlikely(!__pyx_k_tuple_16)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 414; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
__Pyx_GOTREF(__pyx_k_tuple_16); __Pyx_INCREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_k_tuple_16, 0, __pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); __Pyx_INCREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_k_tuple_16, 1, __pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); __Pyx_INCREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_k_tuple_16, 2, __pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_16)); /* "nipy/algorithms/statistics/intvol.pyx":505 * """ * if mask.shape != coords.shape[1:]: * raise ValueError('shape of mask does not match coordinates') # <<<<<<<<<<<<<< * # if the data can be squeezed, we must use the lower dimensional function * mask = np.squeeze(mask) */ __pyx_k_tuple_18 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_18)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 505; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_18); __Pyx_INCREF(((PyObject *)__pyx_kp_s_17)); PyTuple_SET_ITEM(__pyx_k_tuple_18, 0, ((PyObject *)__pyx_kp_s_17)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_17)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_18)); /* "nipy/algorithms/statistics/intvol.pyx":509 * mask = np.squeeze(mask) * if mask.ndim < 3: * value = np.zeros(4) # <<<<<<<<<<<<<< * coords = coords.reshape((coords.shape[0],) + mask.shape) * if mask.ndim == 2: */ __pyx_k_tuple_19 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_19)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 509; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_19); __Pyx_INCREF(__pyx_int_4); PyTuple_SET_ITEM(__pyx_k_tuple_19, 0, __pyx_int_4); __Pyx_GIVEREF(__pyx_int_4); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_19)); /* "nipy/algorithms/statistics/intvol.pyx":518 * * if not set(np.unique(mask)).issubset([0,1]): * raise ValueError('mask should be filled with 0/1 ' # <<<<<<<<<<<<<< * 'values, but be of type np.int') * cdef: */ __pyx_k_tuple_20 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_20)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 518; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_20); __Pyx_INCREF(((PyObject *)__pyx_kp_s_1)); PyTuple_SET_ITEM(__pyx_k_tuple_20, 0, ((PyObject *)__pyx_kp_s_1)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_1)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_20)); /* "nipy/algorithms/statistics/intvol.pyx":555 * pmask_shape = np.array(mask.shape) + 1 * pmask = np.zeros(pmask_shape, np.int) * pmask[:-1,:-1,:-1] = mask_c # <<<<<<<<<<<<<< * * s0, s1, s2 = (pmask.shape[0], pmask.shape[1], pmask.shape[2]) */ __pyx_k_slice_21 = PySlice_New(Py_None, __pyx_int_neg_1, Py_None); if (unlikely(!__pyx_k_slice_21)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 555; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_slice_21); __Pyx_GIVEREF(__pyx_k_slice_21); __pyx_k_slice_22 = PySlice_New(Py_None, __pyx_int_neg_1, Py_None); if (unlikely(!__pyx_k_slice_22)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 555; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_slice_22); __Pyx_GIVEREF(__pyx_k_slice_22); __pyx_k_slice_23 = PySlice_New(Py_None, __pyx_int_neg_1, Py_None); if (unlikely(!__pyx_k_slice_23)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 555; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_slice_23); __Pyx_GIVEREF(__pyx_k_slice_23); __pyx_k_tuple_24 = PyTuple_New(3); if (unlikely(!__pyx_k_tuple_24)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 555; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_24); __Pyx_INCREF(__pyx_k_slice_21); PyTuple_SET_ITEM(__pyx_k_tuple_24, 0, 
__pyx_k_slice_21); __Pyx_GIVEREF(__pyx_k_slice_21); __Pyx_INCREF(__pyx_k_slice_22); PyTuple_SET_ITEM(__pyx_k_tuple_24, 1, __pyx_k_slice_22); __Pyx_GIVEREF(__pyx_k_slice_22); __Pyx_INCREF(__pyx_k_slice_23); PyTuple_SET_ITEM(__pyx_k_tuple_24, 2, __pyx_k_slice_23); __Pyx_GIVEREF(__pyx_k_slice_23); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_24)); /* "nipy/algorithms/statistics/intvol.pyx":559 * s0, s1, s2 = (pmask.shape[0], pmask.shape[1], pmask.shape[2]) * * fpmask = pmask.reshape(-1) # <<<<<<<<<<<<<< * fmask = mask_c.reshape(-1) * fcoords = coords_c.reshape((coords_c.shape[0], -1)) */ __pyx_k_tuple_25 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_25)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 559; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_25); __Pyx_INCREF(__pyx_int_neg_1); PyTuple_SET_ITEM(__pyx_k_tuple_25, 0, __pyx_int_neg_1); __Pyx_GIVEREF(__pyx_int_neg_1); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_25)); /* "nipy/algorithms/statistics/intvol.pyx":560 * * fpmask = pmask.reshape(-1) * fmask = mask_c.reshape(-1) # <<<<<<<<<<<<<< * fcoords = coords_c.reshape((coords_c.shape[0], -1)) * */ __pyx_k_tuple_26 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_26)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 560; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_26); __Pyx_INCREF(__pyx_int_neg_1); PyTuple_SET_ITEM(__pyx_k_tuple_26, 0, __pyx_int_neg_1); __Pyx_GIVEREF(__pyx_int_neg_1); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_26)); /* "nipy/algorithms/statistics/intvol.pyx":583 * cvertices = np.array(sorted(verts), np.intp) * * union = join_complexes(*[cube_with_strides_center((0,0,1), strides), # <<<<<<<<<<<<<< * cube_with_strides_center((0,1,0), strides), * cube_with_strides_center((0,1,1), strides), */ __pyx_k_tuple_27 = PyTuple_New(3); if (unlikely(!__pyx_k_tuple_27)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 583; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_27); __Pyx_INCREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_k_tuple_27, 0, __pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); __Pyx_INCREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_k_tuple_27, 1, __pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); __Pyx_INCREF(__pyx_int_1); PyTuple_SET_ITEM(__pyx_k_tuple_27, 2, __pyx_int_1); __Pyx_GIVEREF(__pyx_int_1); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_27)); /* "nipy/algorithms/statistics/intvol.pyx":584 * * union = join_complexes(*[cube_with_strides_center((0,0,1), strides), * cube_with_strides_center((0,1,0), strides), # <<<<<<<<<<<<<< * cube_with_strides_center((0,1,1), strides), * cube_with_strides_center((1,0,0), strides), */ __pyx_k_tuple_28 = PyTuple_New(3); if (unlikely(!__pyx_k_tuple_28)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 584; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_28); __Pyx_INCREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_k_tuple_28, 0, __pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); __Pyx_INCREF(__pyx_int_1); PyTuple_SET_ITEM(__pyx_k_tuple_28, 1, __pyx_int_1); __Pyx_GIVEREF(__pyx_int_1); __Pyx_INCREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_k_tuple_28, 2, __pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_28)); /* "nipy/algorithms/statistics/intvol.pyx":585 * union = join_complexes(*[cube_with_strides_center((0,0,1), strides), * cube_with_strides_center((0,1,0), strides), * cube_with_strides_center((0,1,1), strides), # <<<<<<<<<<<<<< * cube_with_strides_center((1,0,0), strides), * cube_with_strides_center((1,0,1), strides), */ __pyx_k_tuple_29 = PyTuple_New(3); if 
(unlikely(!__pyx_k_tuple_29)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 585; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_29); __Pyx_INCREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_k_tuple_29, 0, __pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); __Pyx_INCREF(__pyx_int_1); PyTuple_SET_ITEM(__pyx_k_tuple_29, 1, __pyx_int_1); __Pyx_GIVEREF(__pyx_int_1); __Pyx_INCREF(__pyx_int_1); PyTuple_SET_ITEM(__pyx_k_tuple_29, 2, __pyx_int_1); __Pyx_GIVEREF(__pyx_int_1); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_29)); /* "nipy/algorithms/statistics/intvol.pyx":586 * cube_with_strides_center((0,1,0), strides), * cube_with_strides_center((0,1,1), strides), * cube_with_strides_center((1,0,0), strides), # <<<<<<<<<<<<<< * cube_with_strides_center((1,0,1), strides), * cube_with_strides_center((1,1,0), strides), */ __pyx_k_tuple_30 = PyTuple_New(3); if (unlikely(!__pyx_k_tuple_30)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 586; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_30); __Pyx_INCREF(__pyx_int_1); PyTuple_SET_ITEM(__pyx_k_tuple_30, 0, __pyx_int_1); __Pyx_GIVEREF(__pyx_int_1); __Pyx_INCREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_k_tuple_30, 1, __pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); __Pyx_INCREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_k_tuple_30, 2, __pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_30)); /* "nipy/algorithms/statistics/intvol.pyx":587 * cube_with_strides_center((0,1,1), strides), * cube_with_strides_center((1,0,0), strides), * cube_with_strides_center((1,0,1), strides), # <<<<<<<<<<<<<< * cube_with_strides_center((1,1,0), strides), * cube_with_strides_center((1,1,1), strides)]) */ __pyx_k_tuple_31 = PyTuple_New(3); if (unlikely(!__pyx_k_tuple_31)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 587; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_31); __Pyx_INCREF(__pyx_int_1); PyTuple_SET_ITEM(__pyx_k_tuple_31, 0, __pyx_int_1); __Pyx_GIVEREF(__pyx_int_1); __Pyx_INCREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_k_tuple_31, 1, __pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); __Pyx_INCREF(__pyx_int_1); PyTuple_SET_ITEM(__pyx_k_tuple_31, 2, __pyx_int_1); __Pyx_GIVEREF(__pyx_int_1); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_31)); /* "nipy/algorithms/statistics/intvol.pyx":588 * cube_with_strides_center((1,0,0), strides), * cube_with_strides_center((1,0,1), strides), * cube_with_strides_center((1,1,0), strides), # <<<<<<<<<<<<<< * cube_with_strides_center((1,1,1), strides)]) * c = cube_with_strides_center((0,0,0), strides) */ __pyx_k_tuple_32 = PyTuple_New(3); if (unlikely(!__pyx_k_tuple_32)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_32); __Pyx_INCREF(__pyx_int_1); PyTuple_SET_ITEM(__pyx_k_tuple_32, 0, __pyx_int_1); __Pyx_GIVEREF(__pyx_int_1); __Pyx_INCREF(__pyx_int_1); PyTuple_SET_ITEM(__pyx_k_tuple_32, 1, __pyx_int_1); __Pyx_GIVEREF(__pyx_int_1); __Pyx_INCREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_k_tuple_32, 2, __pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_32)); /* "nipy/algorithms/statistics/intvol.pyx":589 * cube_with_strides_center((1,0,1), strides), * cube_with_strides_center((1,1,0), strides), * cube_with_strides_center((1,1,1), strides)]) # <<<<<<<<<<<<<< * c = cube_with_strides_center((0,0,0), strides) * m4 = np.array(list(c[4].difference(union[4]))) */ __pyx_k_tuple_33 = PyTuple_New(3); if (unlikely(!__pyx_k_tuple_33)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 589; 
__pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_33); __Pyx_INCREF(__pyx_int_1); PyTuple_SET_ITEM(__pyx_k_tuple_33, 0, __pyx_int_1); __Pyx_GIVEREF(__pyx_int_1); __Pyx_INCREF(__pyx_int_1); PyTuple_SET_ITEM(__pyx_k_tuple_33, 1, __pyx_int_1); __Pyx_GIVEREF(__pyx_int_1); __Pyx_INCREF(__pyx_int_1); PyTuple_SET_ITEM(__pyx_k_tuple_33, 2, __pyx_int_1); __Pyx_GIVEREF(__pyx_int_1); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_33)); /* "nipy/algorithms/statistics/intvol.pyx":590 * cube_with_strides_center((1,1,0), strides), * cube_with_strides_center((1,1,1), strides)]) * c = cube_with_strides_center((0,0,0), strides) # <<<<<<<<<<<<<< * m4 = np.array(list(c[4].difference(union[4]))) * m3 = np.array(list(c[3].difference(union[3]))) */ __pyx_k_tuple_34 = PyTuple_New(3); if (unlikely(!__pyx_k_tuple_34)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 590; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_34); __Pyx_INCREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_k_tuple_34, 0, __pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); __Pyx_INCREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_k_tuple_34, 1, __pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); __Pyx_INCREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_k_tuple_34, 2, __pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_34)); /* "nipy/algorithms/statistics/intvol.pyx":595 * m2 = np.array(list(c[2].difference(union[2]))) * * d4 = np.array([[_convert_stride3(v, strides, (4,2,1)) for v in m4[i]] for i in range(m4.shape[0])]) # <<<<<<<<<<<<<< * d4 = np.hstack([m4, d4]) * ds4 = d4.shape[0] */ __pyx_k_tuple_35 = PyTuple_New(3); if (unlikely(!__pyx_k_tuple_35)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 595; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_35); __Pyx_INCREF(__pyx_int_4); PyTuple_SET_ITEM(__pyx_k_tuple_35, 0, __pyx_int_4); __Pyx_GIVEREF(__pyx_int_4); __Pyx_INCREF(__pyx_int_2); PyTuple_SET_ITEM(__pyx_k_tuple_35, 1, __pyx_int_2); __Pyx_GIVEREF(__pyx_int_2); __Pyx_INCREF(__pyx_int_1); PyTuple_SET_ITEM(__pyx_k_tuple_35, 2, __pyx_int_1); __Pyx_GIVEREF(__pyx_int_1); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_35)); /* "nipy/algorithms/statistics/intvol.pyx":599 * ds4 = d4.shape[0] * * d3 = np.array([[_convert_stride3(v, strides, (4,2,1)) for v in m3[i]] for i in range(m3.shape[0])]) # <<<<<<<<<<<<<< * d3 = np.hstack([m3, d3]) * ds3 = d3.shape[0] */ __pyx_k_tuple_36 = PyTuple_New(3); if (unlikely(!__pyx_k_tuple_36)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 599; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_36); __Pyx_INCREF(__pyx_int_4); PyTuple_SET_ITEM(__pyx_k_tuple_36, 0, __pyx_int_4); __Pyx_GIVEREF(__pyx_int_4); __Pyx_INCREF(__pyx_int_2); PyTuple_SET_ITEM(__pyx_k_tuple_36, 1, __pyx_int_2); __Pyx_GIVEREF(__pyx_int_2); __Pyx_INCREF(__pyx_int_1); PyTuple_SET_ITEM(__pyx_k_tuple_36, 2, __pyx_int_1); __Pyx_GIVEREF(__pyx_int_1); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_36)); /* "nipy/algorithms/statistics/intvol.pyx":603 * ds3 = d3.shape[0] * * d2 = np.array([[_convert_stride3(v, strides, (4,2,1)) for v in m2[i]] for i in range(m2.shape[0])]) # <<<<<<<<<<<<<< * d2 = np.hstack([m2, d2]) * ds2 = d2.shape[0] */ __pyx_k_tuple_37 = PyTuple_New(3); if (unlikely(!__pyx_k_tuple_37)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 603; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_37); __Pyx_INCREF(__pyx_int_4); PyTuple_SET_ITEM(__pyx_k_tuple_37, 0, __pyx_int_4); __Pyx_GIVEREF(__pyx_int_4); __Pyx_INCREF(__pyx_int_2); 
PyTuple_SET_ITEM(__pyx_k_tuple_37, 1, __pyx_int_2); __Pyx_GIVEREF(__pyx_int_2); __Pyx_INCREF(__pyx_int_1); PyTuple_SET_ITEM(__pyx_k_tuple_37, 2, __pyx_int_1); __Pyx_GIVEREF(__pyx_int_1); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_37)); /* "nipy/algorithms/statistics/intvol.pyx":609 * nvox = mask.size * * D = np.zeros((8,8)) # <<<<<<<<<<<<<< * * for i in range(s0-1): */ __pyx_k_tuple_38 = PyTuple_New(2); if (unlikely(!__pyx_k_tuple_38)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 609; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_38); __Pyx_INCREF(__pyx_int_8); PyTuple_SET_ITEM(__pyx_k_tuple_38, 0, __pyx_int_8); __Pyx_GIVEREF(__pyx_int_8); __Pyx_INCREF(__pyx_int_8); PyTuple_SET_ITEM(__pyx_k_tuple_38, 1, __pyx_int_8); __Pyx_GIVEREF(__pyx_int_8); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_38)); __pyx_k_tuple_39 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_39)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 609; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_39); __Pyx_INCREF(((PyObject *)__pyx_k_tuple_38)); PyTuple_SET_ITEM(__pyx_k_tuple_39, 0, ((PyObject *)__pyx_k_tuple_38)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_38)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_39)); /* "nipy/algorithms/statistics/intvol.pyx":766 * """ * if mask.shape != coords.shape[1:]: * raise ValueError('shape of mask does not match coordinates') # <<<<<<<<<<<<<< * # if the data can be squeezed, we must use the lower dimensional function * mask = np.squeeze(mask) */ __pyx_k_tuple_40 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_40)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 766; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_40); __Pyx_INCREF(((PyObject *)__pyx_kp_s_17)); PyTuple_SET_ITEM(__pyx_k_tuple_40, 0, ((PyObject *)__pyx_kp_s_17)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_17)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_40)); /* "nipy/algorithms/statistics/intvol.pyx":770 * mask = np.squeeze(mask) * if mask.ndim == 1: * value = np.zeros(3) # <<<<<<<<<<<<<< * coords = coords.reshape((coords.shape[0],) + mask.shape) * value[:2] = Lips1d(coords, mask) */ __pyx_k_tuple_41 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_41)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 770; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_41); __Pyx_INCREF(__pyx_int_3); PyTuple_SET_ITEM(__pyx_k_tuple_41, 0, __pyx_int_3); __Pyx_GIVEREF(__pyx_int_3); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_41)); /* "nipy/algorithms/statistics/intvol.pyx":776 * * if not set(np.unique(mask)).issubset([0,1]): * raise ValueError('mask should be filled with 0/1 ' # <<<<<<<<<<<<<< * 'values, but be of type np.int') * */ __pyx_k_tuple_42 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_42)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 776; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_42); __Pyx_INCREF(((PyObject *)__pyx_kp_s_1)); PyTuple_SET_ITEM(__pyx_k_tuple_42, 0, ((PyObject *)__pyx_kp_s_1)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_1)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_42)); /* "nipy/algorithms/statistics/intvol.pyx":809 * pmask_shape = np.array(mask.shape) + 1 * pmask = np.zeros(pmask_shape, np.int) * pmask[:-1,:-1] = mask_c # <<<<<<<<<<<<<< * * s0, s1 = pmask.shape[0], pmask.shape[1] */ __pyx_k_slice_43 = PySlice_New(Py_None, __pyx_int_neg_1, Py_None); if (unlikely(!__pyx_k_slice_43)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 809; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_slice_43); 
__Pyx_GIVEREF(__pyx_k_slice_43); __pyx_k_slice_44 = PySlice_New(Py_None, __pyx_int_neg_1, Py_None); if (unlikely(!__pyx_k_slice_44)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 809; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_slice_44); __Pyx_GIVEREF(__pyx_k_slice_44); __pyx_k_tuple_45 = PyTuple_New(2); if (unlikely(!__pyx_k_tuple_45)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 809; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_45); __Pyx_INCREF(__pyx_k_slice_43); PyTuple_SET_ITEM(__pyx_k_tuple_45, 0, __pyx_k_slice_43); __Pyx_GIVEREF(__pyx_k_slice_43); __Pyx_INCREF(__pyx_k_slice_44); PyTuple_SET_ITEM(__pyx_k_tuple_45, 1, __pyx_k_slice_44); __Pyx_GIVEREF(__pyx_k_slice_44); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_45)); /* "nipy/algorithms/statistics/intvol.pyx":813 * s0, s1 = pmask.shape[0], pmask.shape[1] * * fpmask = pmask.reshape(-1) # <<<<<<<<<<<<<< * fmask = mask_c.reshape(-1) * fcoords = coords.reshape((coords.shape[0], -1)) */ __pyx_k_tuple_46 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_46)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_46); __Pyx_INCREF(__pyx_int_neg_1); PyTuple_SET_ITEM(__pyx_k_tuple_46, 0, __pyx_int_neg_1); __Pyx_GIVEREF(__pyx_int_neg_1); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_46)); /* "nipy/algorithms/statistics/intvol.pyx":814 * * fpmask = pmask.reshape(-1) * fmask = mask_c.reshape(-1) # <<<<<<<<<<<<<< * fcoords = coords.reshape((coords.shape[0], -1)) * */ __pyx_k_tuple_47 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_47)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 814; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_47); __Pyx_INCREF(__pyx_int_neg_1); PyTuple_SET_ITEM(__pyx_k_tuple_47, 0, __pyx_int_neg_1); __Pyx_GIVEREF(__pyx_int_neg_1); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_47)); /* "nipy/algorithms/statistics/intvol.pyx":836 * cvertices = np.array(sorted(verts), np.intp) * * union = join_complexes(*[cube_with_strides_center((0,1), strides), # <<<<<<<<<<<<<< * cube_with_strides_center((1,0), strides), * cube_with_strides_center((1,1), strides)]) */ __pyx_k_tuple_48 = PyTuple_New(2); if (unlikely(!__pyx_k_tuple_48)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_48); __Pyx_INCREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_k_tuple_48, 0, __pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); __Pyx_INCREF(__pyx_int_1); PyTuple_SET_ITEM(__pyx_k_tuple_48, 1, __pyx_int_1); __Pyx_GIVEREF(__pyx_int_1); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_48)); /* "nipy/algorithms/statistics/intvol.pyx":837 * * union = join_complexes(*[cube_with_strides_center((0,1), strides), * cube_with_strides_center((1,0), strides), # <<<<<<<<<<<<<< * cube_with_strides_center((1,1), strides)]) * */ __pyx_k_tuple_49 = PyTuple_New(2); if (unlikely(!__pyx_k_tuple_49)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_49); __Pyx_INCREF(__pyx_int_1); PyTuple_SET_ITEM(__pyx_k_tuple_49, 0, __pyx_int_1); __Pyx_GIVEREF(__pyx_int_1); __Pyx_INCREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_k_tuple_49, 1, __pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_49)); /* "nipy/algorithms/statistics/intvol.pyx":838 * union = join_complexes(*[cube_with_strides_center((0,1), strides), * cube_with_strides_center((1,0), strides), * cube_with_strides_center((1,1), strides)]) # <<<<<<<<<<<<<< 
* * c = cube_with_strides_center((0,0), strides) */ __pyx_k_tuple_50 = PyTuple_New(2); if (unlikely(!__pyx_k_tuple_50)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_50); __Pyx_INCREF(__pyx_int_1); PyTuple_SET_ITEM(__pyx_k_tuple_50, 0, __pyx_int_1); __Pyx_GIVEREF(__pyx_int_1); __Pyx_INCREF(__pyx_int_1); PyTuple_SET_ITEM(__pyx_k_tuple_50, 1, __pyx_int_1); __Pyx_GIVEREF(__pyx_int_1); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_50)); /* "nipy/algorithms/statistics/intvol.pyx":840 * cube_with_strides_center((1,1), strides)]) * * c = cube_with_strides_center((0,0), strides) # <<<<<<<<<<<<<< * m3 = np.array(list(c[3].difference(union[3]))) * m2 = np.array(list(c[2].difference(union[2]))) */ __pyx_k_tuple_51 = PyTuple_New(2); if (unlikely(!__pyx_k_tuple_51)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_51); __Pyx_INCREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_k_tuple_51, 0, __pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); __Pyx_INCREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_k_tuple_51, 1, __pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_51)); /* "nipy/algorithms/statistics/intvol.pyx":844 * m2 = np.array(list(c[2].difference(union[2]))) * * d3 = np.array([[_convert_stride2(v, strides, (2,1)) for v in m3[i]] for i in range(m3.shape[0])]) # <<<<<<<<<<<<<< * d3 = np.hstack([m3, d3]) * ds3 = d3.shape[0] */ __pyx_k_tuple_52 = PyTuple_New(2); if (unlikely(!__pyx_k_tuple_52)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_52); __Pyx_INCREF(__pyx_int_2); PyTuple_SET_ITEM(__pyx_k_tuple_52, 0, __pyx_int_2); __Pyx_GIVEREF(__pyx_int_2); __Pyx_INCREF(__pyx_int_1); PyTuple_SET_ITEM(__pyx_k_tuple_52, 1, __pyx_int_1); __Pyx_GIVEREF(__pyx_int_1); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_52)); /* "nipy/algorithms/statistics/intvol.pyx":848 * ds3 = d3.shape[0] * * d2 = np.array([[_convert_stride2(v, strides, (2,1)) for v in m2[i]] for i in range(m2.shape[0])]) # <<<<<<<<<<<<<< * d2 = np.hstack([m2, d2]) * ds2 = d2.shape[0] */ __pyx_k_tuple_53 = PyTuple_New(2); if (unlikely(!__pyx_k_tuple_53)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 848; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_53); __Pyx_INCREF(__pyx_int_2); PyTuple_SET_ITEM(__pyx_k_tuple_53, 0, __pyx_int_2); __Pyx_GIVEREF(__pyx_int_2); __Pyx_INCREF(__pyx_int_1); PyTuple_SET_ITEM(__pyx_k_tuple_53, 1, __pyx_int_1); __Pyx_GIVEREF(__pyx_int_1); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_53)); /* "nipy/algorithms/statistics/intvol.pyx":852 * ds2 = d2.shape[0] * * D = np.zeros((4,4)) # <<<<<<<<<<<<<< * * npix = mask.size */ __pyx_k_tuple_54 = PyTuple_New(2); if (unlikely(!__pyx_k_tuple_54)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 852; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_54); __Pyx_INCREF(__pyx_int_4); PyTuple_SET_ITEM(__pyx_k_tuple_54, 0, __pyx_int_4); __Pyx_GIVEREF(__pyx_int_4); __Pyx_INCREF(__pyx_int_4); PyTuple_SET_ITEM(__pyx_k_tuple_54, 1, __pyx_int_4); __Pyx_GIVEREF(__pyx_int_4); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_54)); __pyx_k_tuple_55 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_55)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 852; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_55); __Pyx_INCREF(((PyObject *)__pyx_k_tuple_54)); PyTuple_SET_ITEM(__pyx_k_tuple_55, 0, ((PyObject *)__pyx_k_tuple_54)); 
__Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_54)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_55)); /* "nipy/algorithms/statistics/intvol.pyx":937 * """ * if not set(np.unique(mask)).issubset([0,1]): * raise ValueError('mask should be filled with 0/1 ' # <<<<<<<<<<<<<< * 'values, but be of type np.int') * cdef: */ __pyx_k_tuple_56 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_56)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 937; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_56); __Pyx_INCREF(((PyObject *)__pyx_kp_s_1)); PyTuple_SET_ITEM(__pyx_k_tuple_56, 0, ((PyObject *)__pyx_kp_s_1)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_1)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_56)); /* "nipy/algorithms/statistics/intvol.pyx":958 * pmask_shape = np.array(mask.shape) + 1 * pmask = np.zeros(pmask_shape, np.int) * pmask[:-1,:-1] = mask_c # <<<<<<<<<<<<<< * * s0, s1 = (pmask.shape[0], pmask.shape[1]) */ __pyx_k_slice_57 = PySlice_New(Py_None, __pyx_int_neg_1, Py_None); if (unlikely(!__pyx_k_slice_57)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 958; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_slice_57); __Pyx_GIVEREF(__pyx_k_slice_57); __pyx_k_slice_58 = PySlice_New(Py_None, __pyx_int_neg_1, Py_None); if (unlikely(!__pyx_k_slice_58)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 958; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_slice_58); __Pyx_GIVEREF(__pyx_k_slice_58); __pyx_k_tuple_59 = PyTuple_New(2); if (unlikely(!__pyx_k_tuple_59)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 958; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_59); __Pyx_INCREF(__pyx_k_slice_57); PyTuple_SET_ITEM(__pyx_k_tuple_59, 0, __pyx_k_slice_57); __Pyx_GIVEREF(__pyx_k_slice_57); __Pyx_INCREF(__pyx_k_slice_58); PyTuple_SET_ITEM(__pyx_k_tuple_59, 1, __pyx_k_slice_58); __Pyx_GIVEREF(__pyx_k_slice_58); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_59)); /* "nipy/algorithms/statistics/intvol.pyx":962 * s0, s1 = (pmask.shape[0], pmask.shape[1]) * * fpmask = pmask.reshape(-1) # <<<<<<<<<<<<<< * * cdef: */ __pyx_k_tuple_60 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_60)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 962; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_60); __Pyx_INCREF(__pyx_int_neg_1); PyTuple_SET_ITEM(__pyx_k_tuple_60, 0, __pyx_int_neg_1); __Pyx_GIVEREF(__pyx_int_neg_1); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_60)); /* "nipy/algorithms/statistics/intvol.pyx":972 * # We first figure out which vertices, edges, triangles, tetrahedra * # are uniquely associated with an interior voxel * union = join_complexes(*[cube_with_strides_center((0,1), strides), # <<<<<<<<<<<<<< * cube_with_strides_center((1,0), strides), * cube_with_strides_center((1,1), strides)]) */ __pyx_k_tuple_61 = PyTuple_New(2); if (unlikely(!__pyx_k_tuple_61)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 972; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_61); __Pyx_INCREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_k_tuple_61, 0, __pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); __Pyx_INCREF(__pyx_int_1); PyTuple_SET_ITEM(__pyx_k_tuple_61, 1, __pyx_int_1); __Pyx_GIVEREF(__pyx_int_1); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_61)); /* "nipy/algorithms/statistics/intvol.pyx":973 * # are uniquely associated with an interior voxel * union = join_complexes(*[cube_with_strides_center((0,1), strides), * cube_with_strides_center((1,0), strides), # <<<<<<<<<<<<<< * cube_with_strides_center((1,1), strides)]) * c = 
cube_with_strides_center((0,0), strides) */ __pyx_k_tuple_62 = PyTuple_New(2); if (unlikely(!__pyx_k_tuple_62)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 973; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_62); __Pyx_INCREF(__pyx_int_1); PyTuple_SET_ITEM(__pyx_k_tuple_62, 0, __pyx_int_1); __Pyx_GIVEREF(__pyx_int_1); __Pyx_INCREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_k_tuple_62, 1, __pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_62)); /* "nipy/algorithms/statistics/intvol.pyx":974 * union = join_complexes(*[cube_with_strides_center((0,1), strides), * cube_with_strides_center((1,0), strides), * cube_with_strides_center((1,1), strides)]) # <<<<<<<<<<<<<< * c = cube_with_strides_center((0,0), strides) * */ __pyx_k_tuple_63 = PyTuple_New(2); if (unlikely(!__pyx_k_tuple_63)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 974; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_63); __Pyx_INCREF(__pyx_int_1); PyTuple_SET_ITEM(__pyx_k_tuple_63, 0, __pyx_int_1); __Pyx_GIVEREF(__pyx_int_1); __Pyx_INCREF(__pyx_int_1); PyTuple_SET_ITEM(__pyx_k_tuple_63, 1, __pyx_int_1); __Pyx_GIVEREF(__pyx_int_1); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_63)); /* "nipy/algorithms/statistics/intvol.pyx":975 * cube_with_strides_center((1,0), strides), * cube_with_strides_center((1,1), strides)]) * c = cube_with_strides_center((0,0), strides) # <<<<<<<<<<<<<< * * d3 = np.array(list(c[3].difference(union[3]))) */ __pyx_k_tuple_64 = PyTuple_New(2); if (unlikely(!__pyx_k_tuple_64)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 975; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_64); __Pyx_INCREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_k_tuple_64, 0, __pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); __Pyx_INCREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_k_tuple_64, 1, __pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_64)); /* "nipy/algorithms/statistics/intvol.pyx":1043 * """ * if mask.shape[0] != coords.shape[1]: * raise ValueError('shape of mask does not match coordinates') # <<<<<<<<<<<<<< * if not set(np.unique(mask)).issubset([0,1]): * raise ValueError('mask should be filled with 0/1 ' */ __pyx_k_tuple_65 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_65)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1043; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_65); __Pyx_INCREF(((PyObject *)__pyx_kp_s_17)); PyTuple_SET_ITEM(__pyx_k_tuple_65, 0, ((PyObject *)__pyx_kp_s_17)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_17)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_65)); /* "nipy/algorithms/statistics/intvol.pyx":1045 * raise ValueError('shape of mask does not match coordinates') * if not set(np.unique(mask)).issubset([0,1]): * raise ValueError('mask should be filled with 0/1 ' # <<<<<<<<<<<<<< * 'values, but be of type np.int') * cdef: */ __pyx_k_tuple_66 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_66)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1045; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_66); __Pyx_INCREF(((PyObject *)__pyx_kp_s_1)); PyTuple_SET_ITEM(__pyx_k_tuple_66, 0, ((PyObject *)__pyx_kp_s_1)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_1)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_66)); /* "nipy/algorithms/statistics/intvol.pyx":1054 * l0 = 0; l1 = 0 * s0 = mask.shape[0] * D = np.zeros((2,2)) # <<<<<<<<<<<<<< * * for i in range(s0): */ __pyx_k_tuple_67 = PyTuple_New(2); if (unlikely(!__pyx_k_tuple_67)) {__pyx_filename = __pyx_f[0]; 
__pyx_lineno = 1054; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_67); __Pyx_INCREF(__pyx_int_2); PyTuple_SET_ITEM(__pyx_k_tuple_67, 0, __pyx_int_2); __Pyx_GIVEREF(__pyx_int_2); __Pyx_INCREF(__pyx_int_2); PyTuple_SET_ITEM(__pyx_k_tuple_67, 1, __pyx_int_2); __Pyx_GIVEREF(__pyx_int_2); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_67)); __pyx_k_tuple_68 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_68)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1054; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_68); __Pyx_INCREF(((PyObject *)__pyx_k_tuple_67)); PyTuple_SET_ITEM(__pyx_k_tuple_68, 0, ((PyObject *)__pyx_k_tuple_67)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_67)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_68)); /* "nipy/algorithms/statistics/intvol.pyx":1076 * if m: * m = m * (mask[(i+1) % s0] * ((i+1) < s0)) * l1 = l1 + m * mu1_edge(D[0,0], D[0,1], D[1,1]) # <<<<<<<<<<<<<< * l0 = l0 - m * */ __pyx_k_tuple_69 = PyTuple_New(2); if (unlikely(!__pyx_k_tuple_69)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1076; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_69); __Pyx_INCREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_k_tuple_69, 0, __pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); __Pyx_INCREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_k_tuple_69, 1, __pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_69)); __pyx_k_tuple_70 = PyTuple_New(2); if (unlikely(!__pyx_k_tuple_70)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1076; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_70); __Pyx_INCREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_k_tuple_70, 0, __pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); __Pyx_INCREF(__pyx_int_1); PyTuple_SET_ITEM(__pyx_k_tuple_70, 1, __pyx_int_1); __Pyx_GIVEREF(__pyx_int_1); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_70)); __pyx_k_tuple_71 = PyTuple_New(2); if (unlikely(!__pyx_k_tuple_71)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1076; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_71); __Pyx_INCREF(__pyx_int_1); PyTuple_SET_ITEM(__pyx_k_tuple_71, 0, __pyx_int_1); __Pyx_GIVEREF(__pyx_int_1); __Pyx_INCREF(__pyx_int_1); PyTuple_SET_ITEM(__pyx_k_tuple_71, 1, __pyx_int_1); __Pyx_GIVEREF(__pyx_int_1); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_71)); /* "nipy/algorithms/statistics/intvol.pyx":1116 * """ * if not set(np.unique(mask)).issubset([0,1]): * raise ValueError('mask should be filled with 0/1 ' # <<<<<<<<<<<<<< * 'values, but be of type np.int') * cdef: */ __pyx_k_tuple_72 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_72)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1116; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_72); __Pyx_INCREF(((PyObject *)__pyx_kp_s_1)); PyTuple_SET_ITEM(__pyx_k_tuple_72, 0, ((PyObject *)__pyx_kp_s_1)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_1)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_72)); /* "numpy.pxd":215 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_k_tuple_74 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_74)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_74); __Pyx_INCREF(((PyObject *)__pyx_kp_u_73)); PyTuple_SET_ITEM(__pyx_k_tuple_74, 0, ((PyObject *)__pyx_kp_u_73)); 
__Pyx_GIVEREF(((PyObject *)__pyx_kp_u_73)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_74)); /* "numpy.pxd":219 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_k_tuple_76 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_76)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_76); __Pyx_INCREF(((PyObject *)__pyx_kp_u_75)); PyTuple_SET_ITEM(__pyx_k_tuple_76, 0, ((PyObject *)__pyx_kp_u_75)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_75)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_76)); /* "numpy.pxd":257 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_k_tuple_78 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_78)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_78); __Pyx_INCREF(((PyObject *)__pyx_kp_u_77)); PyTuple_SET_ITEM(__pyx_k_tuple_78, 0, ((PyObject *)__pyx_kp_u_77)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_77)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_78)); /* "numpy.pxd":799 * * if (end - f) - (new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_k_tuple_81 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_81)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_81); __Pyx_INCREF(((PyObject *)__pyx_kp_u_80)); PyTuple_SET_ITEM(__pyx_k_tuple_81, 0, ((PyObject *)__pyx_kp_u_80)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_80)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_81)); /* "numpy.pxd":803 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_k_tuple_82 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_82)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_82); __Pyx_INCREF(((PyObject *)__pyx_kp_u_77)); PyTuple_SET_ITEM(__pyx_k_tuple_82, 0, ((PyObject *)__pyx_kp_u_77)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_77)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_82)); /* "numpy.pxd":823 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_k_tuple_84 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_84)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_84); __Pyx_INCREF(((PyObject *)__pyx_kp_u_83)); PyTuple_SET_ITEM(__pyx_k_tuple_84, 0, ((PyObject *)__pyx_kp_u_83)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_83)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_84)); /* "nipy/algorithms/statistics/intvol.pyx":340 * * * def EC3d(mask): # <<<<<<<<<<<<<< * """ Compute Euler characteristic of region within `mask` * */ __pyx_k_tuple_88 = PyTuple_New(32); if 
(unlikely(!__pyx_k_tuple_88)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 340; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_88); __Pyx_INCREF(((PyObject *)__pyx_n_s__mask)); PyTuple_SET_ITEM(__pyx_k_tuple_88, 0, ((PyObject *)__pyx_n_s__mask)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__mask)); __Pyx_INCREF(((PyObject *)__pyx_n_s__mask_c)); PyTuple_SET_ITEM(__pyx_k_tuple_88, 1, ((PyObject *)__pyx_n_s__mask_c)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__mask_c)); __Pyx_INCREF(((PyObject *)__pyx_n_s__fpmask)); PyTuple_SET_ITEM(__pyx_k_tuple_88, 2, ((PyObject *)__pyx_n_s__fpmask)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__fpmask)); __Pyx_INCREF(((PyObject *)__pyx_n_s__d2)); PyTuple_SET_ITEM(__pyx_k_tuple_88, 3, ((PyObject *)__pyx_n_s__d2)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__d2)); __Pyx_INCREF(((PyObject *)__pyx_n_s__d3)); PyTuple_SET_ITEM(__pyx_k_tuple_88, 4, ((PyObject *)__pyx_n_s__d3)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__d3)); __Pyx_INCREF(((PyObject *)__pyx_n_s__d4)); PyTuple_SET_ITEM(__pyx_k_tuple_88, 5, ((PyObject *)__pyx_n_s__d4)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__d4)); __Pyx_INCREF(((PyObject *)__pyx_n_s__i)); PyTuple_SET_ITEM(__pyx_k_tuple_88, 6, ((PyObject *)__pyx_n_s__i)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__i)); __Pyx_INCREF(((PyObject *)__pyx_n_s__j)); PyTuple_SET_ITEM(__pyx_k_tuple_88, 7, ((PyObject *)__pyx_n_s__j)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__j)); __Pyx_INCREF(((PyObject *)__pyx_n_s__k)); PyTuple_SET_ITEM(__pyx_k_tuple_88, 8, ((PyObject *)__pyx_n_s__k)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__k)); __Pyx_INCREF(((PyObject *)__pyx_n_s__l)); PyTuple_SET_ITEM(__pyx_k_tuple_88, 9, ((PyObject *)__pyx_n_s__l)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__l)); __Pyx_INCREF(((PyObject *)__pyx_n_s__s0)); PyTuple_SET_ITEM(__pyx_k_tuple_88, 10, ((PyObject *)__pyx_n_s__s0)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__s0)); __Pyx_INCREF(((PyObject *)__pyx_n_s__s1)); PyTuple_SET_ITEM(__pyx_k_tuple_88, 11, ((PyObject *)__pyx_n_s__s1)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__s1)); __Pyx_INCREF(((PyObject *)__pyx_n_s__s2)); PyTuple_SET_ITEM(__pyx_k_tuple_88, 12, ((PyObject *)__pyx_n_s__s2)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__s2)); __Pyx_INCREF(((PyObject *)__pyx_n_s__ds2)); PyTuple_SET_ITEM(__pyx_k_tuple_88, 13, ((PyObject *)__pyx_n_s__ds2)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__ds2)); __Pyx_INCREF(((PyObject *)__pyx_n_s__ds3)); PyTuple_SET_ITEM(__pyx_k_tuple_88, 14, ((PyObject *)__pyx_n_s__ds3)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__ds3)); __Pyx_INCREF(((PyObject *)__pyx_n_s__ds4)); PyTuple_SET_ITEM(__pyx_k_tuple_88, 15, ((PyObject *)__pyx_n_s__ds4)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__ds4)); __Pyx_INCREF(((PyObject *)__pyx_n_s__index)); PyTuple_SET_ITEM(__pyx_k_tuple_88, 16, ((PyObject *)__pyx_n_s__index)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__index)); __Pyx_INCREF(((PyObject *)__pyx_n_s__m)); PyTuple_SET_ITEM(__pyx_k_tuple_88, 17, ((PyObject *)__pyx_n_s__m)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__m)); __Pyx_INCREF(((PyObject *)__pyx_n_s__nvox)); PyTuple_SET_ITEM(__pyx_k_tuple_88, 18, ((PyObject *)__pyx_n_s__nvox)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__nvox)); __Pyx_INCREF(((PyObject *)__pyx_n_s__ss0)); PyTuple_SET_ITEM(__pyx_k_tuple_88, 19, ((PyObject *)__pyx_n_s__ss0)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__ss0)); __Pyx_INCREF(((PyObject *)__pyx_n_s__ss1)); PyTuple_SET_ITEM(__pyx_k_tuple_88, 20, ((PyObject *)__pyx_n_s__ss1)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__ss1)); __Pyx_INCREF(((PyObject *)__pyx_n_s__ss2)); PyTuple_SET_ITEM(__pyx_k_tuple_88, 21, 
((PyObject *)__pyx_n_s__ss2)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__ss2)); __Pyx_INCREF(((PyObject *)__pyx_n_s__v0)); PyTuple_SET_ITEM(__pyx_k_tuple_88, 22, ((PyObject *)__pyx_n_s__v0)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__v0)); __Pyx_INCREF(((PyObject *)__pyx_n_s__v1)); PyTuple_SET_ITEM(__pyx_k_tuple_88, 23, ((PyObject *)__pyx_n_s__v1)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__v1)); __Pyx_INCREF(((PyObject *)__pyx_n_s__v2)); PyTuple_SET_ITEM(__pyx_k_tuple_88, 24, ((PyObject *)__pyx_n_s__v2)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__v2)); __Pyx_INCREF(((PyObject *)__pyx_n_s__v3)); PyTuple_SET_ITEM(__pyx_k_tuple_88, 25, ((PyObject *)__pyx_n_s__v3)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__v3)); __Pyx_INCREF(((PyObject *)__pyx_n_s__l0)); PyTuple_SET_ITEM(__pyx_k_tuple_88, 26, ((PyObject *)__pyx_n_s__l0)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__l0)); __Pyx_INCREF(((PyObject *)__pyx_n_s__pmask_shape)); PyTuple_SET_ITEM(__pyx_k_tuple_88, 27, ((PyObject *)__pyx_n_s__pmask_shape)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__pmask_shape)); __Pyx_INCREF(((PyObject *)__pyx_n_s__pmask)); PyTuple_SET_ITEM(__pyx_k_tuple_88, 28, ((PyObject *)__pyx_n_s__pmask)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__pmask)); __Pyx_INCREF(((PyObject *)__pyx_n_s__strides)); PyTuple_SET_ITEM(__pyx_k_tuple_88, 29, ((PyObject *)__pyx_n_s__strides)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__strides)); __Pyx_INCREF(((PyObject *)__pyx_n_s__union)); PyTuple_SET_ITEM(__pyx_k_tuple_88, 30, ((PyObject *)__pyx_n_s__union)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__union)); __Pyx_INCREF(((PyObject *)__pyx_n_s__c)); PyTuple_SET_ITEM(__pyx_k_tuple_88, 31, ((PyObject *)__pyx_n_s__c)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__c)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_88)); __pyx_k_codeobj_89 = (PyObject*)__Pyx_PyCode_New(1, 0, 32, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_88, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_90, __pyx_n_s__EC3d, 340, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_89)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 340; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/algorithms/statistics/intvol.pyx":464 * * * def Lips3d(coords, mask): # <<<<<<<<<<<<<< * """ Estimated intrinsic volumes within masked region given coordinates * */ __pyx_k_tuple_92 = PyTuple_New(63); if (unlikely(!__pyx_k_tuple_92)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 464; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_92); __Pyx_INCREF(((PyObject *)__pyx_n_s__coords)); PyTuple_SET_ITEM(__pyx_k_tuple_92, 0, ((PyObject *)__pyx_n_s__coords)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__coords)); __Pyx_INCREF(((PyObject *)__pyx_n_s__mask)); PyTuple_SET_ITEM(__pyx_k_tuple_92, 1, ((PyObject *)__pyx_n_s__mask)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__mask)); __Pyx_INCREF(((PyObject *)__pyx_n_s__value)); PyTuple_SET_ITEM(__pyx_k_tuple_92, 2, ((PyObject *)__pyx_n_s__value)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__value)); __Pyx_INCREF(((PyObject *)__pyx_n_s__coords_c)); PyTuple_SET_ITEM(__pyx_k_tuple_92, 3, ((PyObject *)__pyx_n_s__coords_c)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__coords_c)); __Pyx_INCREF(((PyObject *)__pyx_n_s__mask_c)); PyTuple_SET_ITEM(__pyx_k_tuple_92, 4, ((PyObject *)__pyx_n_s__mask_c)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__mask_c)); __Pyx_INCREF(((PyObject *)__pyx_n_s__fcoords)); PyTuple_SET_ITEM(__pyx_k_tuple_92, 5, ((PyObject *)__pyx_n_s__fcoords)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__fcoords)); __Pyx_INCREF(((PyObject *)__pyx_n_s__D)); 
PyTuple_SET_ITEM(__pyx_k_tuple_92, 6, ((PyObject *)__pyx_n_s__D)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__D)); __Pyx_INCREF(((PyObject *)__pyx_n_s__fmask)); PyTuple_SET_ITEM(__pyx_k_tuple_92, 7, ((PyObject *)__pyx_n_s__fmask)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__fmask)); __Pyx_INCREF(((PyObject *)__pyx_n_s__fpmask)); PyTuple_SET_ITEM(__pyx_k_tuple_92, 8, ((PyObject *)__pyx_n_s__fpmask)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__fpmask)); __Pyx_INCREF(((PyObject *)__pyx_n_s__pmask)); PyTuple_SET_ITEM(__pyx_k_tuple_92, 9, ((PyObject *)__pyx_n_s__pmask)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__pmask)); __Pyx_INCREF(((PyObject *)__pyx_n_s__d4)); PyTuple_SET_ITEM(__pyx_k_tuple_92, 10, ((PyObject *)__pyx_n_s__d4)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__d4)); __Pyx_INCREF(((PyObject *)__pyx_n_s__m4)); PyTuple_SET_ITEM(__pyx_k_tuple_92, 11, ((PyObject *)__pyx_n_s__m4)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__m4)); __Pyx_INCREF(((PyObject *)__pyx_n_s__d3)); PyTuple_SET_ITEM(__pyx_k_tuple_92, 12, ((PyObject *)__pyx_n_s__d3)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__d3)); __Pyx_INCREF(((PyObject *)__pyx_n_s__m3)); PyTuple_SET_ITEM(__pyx_k_tuple_92, 13, ((PyObject *)__pyx_n_s__m3)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__m3)); __Pyx_INCREF(((PyObject *)__pyx_n_s__d2)); PyTuple_SET_ITEM(__pyx_k_tuple_92, 14, ((PyObject *)__pyx_n_s__d2)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__d2)); __Pyx_INCREF(((PyObject *)__pyx_n_s__m2)); PyTuple_SET_ITEM(__pyx_k_tuple_92, 15, ((PyObject *)__pyx_n_s__m2)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__m2)); __Pyx_INCREF(((PyObject *)__pyx_n_s__cvertices)); PyTuple_SET_ITEM(__pyx_k_tuple_92, 16, ((PyObject *)__pyx_n_s__cvertices)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__cvertices)); __Pyx_INCREF(((PyObject *)__pyx_n_s__i)); PyTuple_SET_ITEM(__pyx_k_tuple_92, 17, ((PyObject *)__pyx_n_s__i)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__i)); __Pyx_INCREF(((PyObject *)__pyx_n_s__j)); PyTuple_SET_ITEM(__pyx_k_tuple_92, 18, ((PyObject *)__pyx_n_s__j)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__j)); __Pyx_INCREF(((PyObject *)__pyx_n_s__k)); PyTuple_SET_ITEM(__pyx_k_tuple_92, 19, ((PyObject *)__pyx_n_s__k)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__k)); __Pyx_INCREF(((PyObject *)__pyx_n_s__l)); PyTuple_SET_ITEM(__pyx_k_tuple_92, 20, ((PyObject *)__pyx_n_s__l)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__l)); __Pyx_INCREF(((PyObject *)__pyx_n_s__s0)); PyTuple_SET_ITEM(__pyx_k_tuple_92, 21, ((PyObject *)__pyx_n_s__s0)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__s0)); __Pyx_INCREF(((PyObject *)__pyx_n_s__s1)); PyTuple_SET_ITEM(__pyx_k_tuple_92, 22, ((PyObject *)__pyx_n_s__s1)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__s1)); __Pyx_INCREF(((PyObject *)__pyx_n_s__s2)); PyTuple_SET_ITEM(__pyx_k_tuple_92, 23, ((PyObject *)__pyx_n_s__s2)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__s2)); __Pyx_INCREF(((PyObject *)__pyx_n_s__ds4)); PyTuple_SET_ITEM(__pyx_k_tuple_92, 24, ((PyObject *)__pyx_n_s__ds4)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__ds4)); __Pyx_INCREF(((PyObject *)__pyx_n_s__ds3)); PyTuple_SET_ITEM(__pyx_k_tuple_92, 25, ((PyObject *)__pyx_n_s__ds3)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__ds3)); __Pyx_INCREF(((PyObject *)__pyx_n_s__ds2)); PyTuple_SET_ITEM(__pyx_k_tuple_92, 26, ((PyObject *)__pyx_n_s__ds2)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__ds2)); __Pyx_INCREF(((PyObject *)__pyx_n_s__index)); PyTuple_SET_ITEM(__pyx_k_tuple_92, 27, ((PyObject *)__pyx_n_s__index)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__index)); __Pyx_INCREF(((PyObject *)__pyx_n_s__pindex)); PyTuple_SET_ITEM(__pyx_k_tuple_92, 28, ((PyObject 
*)__pyx_n_s__pindex)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__pindex)); __Pyx_INCREF(((PyObject *)__pyx_n_s__m)); PyTuple_SET_ITEM(__pyx_k_tuple_92, 29, ((PyObject *)__pyx_n_s__m)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__m)); __Pyx_INCREF(((PyObject *)__pyx_n_s__nvox)); PyTuple_SET_ITEM(__pyx_k_tuple_92, 30, ((PyObject *)__pyx_n_s__nvox)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__nvox)); __Pyx_INCREF(((PyObject *)__pyx_n_s__r)); PyTuple_SET_ITEM(__pyx_k_tuple_92, 31, ((PyObject *)__pyx_n_s__r)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__r)); __Pyx_INCREF(((PyObject *)__pyx_n_s__s)); PyTuple_SET_ITEM(__pyx_k_tuple_92, 32, ((PyObject *)__pyx_n_s__s)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__s)); __Pyx_INCREF(((PyObject *)__pyx_n_s__rr)); PyTuple_SET_ITEM(__pyx_k_tuple_92, 33, ((PyObject *)__pyx_n_s__rr)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__rr)); __Pyx_INCREF(((PyObject *)__pyx_n_s__ss)); PyTuple_SET_ITEM(__pyx_k_tuple_92, 34, ((PyObject *)__pyx_n_s__ss)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__ss)); __Pyx_INCREF(((PyObject *)__pyx_n_s__mr)); PyTuple_SET_ITEM(__pyx_k_tuple_92, 35, ((PyObject *)__pyx_n_s__mr)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__mr)); __Pyx_INCREF(((PyObject *)__pyx_n_s__ms)); PyTuple_SET_ITEM(__pyx_k_tuple_92, 36, ((PyObject *)__pyx_n_s__ms)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__ms)); __Pyx_INCREF(((PyObject *)__pyx_n_s__ss0)); PyTuple_SET_ITEM(__pyx_k_tuple_92, 37, ((PyObject *)__pyx_n_s__ss0)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__ss0)); __Pyx_INCREF(((PyObject *)__pyx_n_s__ss1)); PyTuple_SET_ITEM(__pyx_k_tuple_92, 38, ((PyObject *)__pyx_n_s__ss1)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__ss1)); __Pyx_INCREF(((PyObject *)__pyx_n_s__ss2)); PyTuple_SET_ITEM(__pyx_k_tuple_92, 39, ((PyObject *)__pyx_n_s__ss2)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__ss2)); __Pyx_INCREF(((PyObject *)__pyx_n_s__v0)); PyTuple_SET_ITEM(__pyx_k_tuple_92, 40, ((PyObject *)__pyx_n_s__v0)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__v0)); __Pyx_INCREF(((PyObject *)__pyx_n_s__v1)); PyTuple_SET_ITEM(__pyx_k_tuple_92, 41, ((PyObject *)__pyx_n_s__v1)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__v1)); __Pyx_INCREF(((PyObject *)__pyx_n_s__v2)); PyTuple_SET_ITEM(__pyx_k_tuple_92, 42, ((PyObject *)__pyx_n_s__v2)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__v2)); __Pyx_INCREF(((PyObject *)__pyx_n_s__v3)); PyTuple_SET_ITEM(__pyx_k_tuple_92, 43, ((PyObject *)__pyx_n_s__v3)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__v3)); __Pyx_INCREF(((PyObject *)__pyx_n_s__w0)); PyTuple_SET_ITEM(__pyx_k_tuple_92, 44, ((PyObject *)__pyx_n_s__w0)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__w0)); __Pyx_INCREF(((PyObject *)__pyx_n_s__w1)); PyTuple_SET_ITEM(__pyx_k_tuple_92, 45, ((PyObject *)__pyx_n_s__w1)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__w1)); __Pyx_INCREF(((PyObject *)__pyx_n_s__w2)); PyTuple_SET_ITEM(__pyx_k_tuple_92, 46, ((PyObject *)__pyx_n_s__w2)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__w2)); __Pyx_INCREF(((PyObject *)__pyx_n_s__w3)); PyTuple_SET_ITEM(__pyx_k_tuple_92, 47, ((PyObject *)__pyx_n_s__w3)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__w3)); __Pyx_INCREF(((PyObject *)__pyx_n_s__l0)); PyTuple_SET_ITEM(__pyx_k_tuple_92, 48, ((PyObject *)__pyx_n_s__l0)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__l0)); __Pyx_INCREF(((PyObject *)__pyx_n_s__l1)); PyTuple_SET_ITEM(__pyx_k_tuple_92, 49, ((PyObject *)__pyx_n_s__l1)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__l1)); __Pyx_INCREF(((PyObject *)__pyx_n_s__l2)); PyTuple_SET_ITEM(__pyx_k_tuple_92, 50, ((PyObject *)__pyx_n_s__l2)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__l2)); __Pyx_INCREF(((PyObject *)__pyx_n_s__l3)); 
PyTuple_SET_ITEM(__pyx_k_tuple_92, 51, ((PyObject *)__pyx_n_s__l3)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__l3)); __Pyx_INCREF(((PyObject *)__pyx_n_s__res)); PyTuple_SET_ITEM(__pyx_k_tuple_92, 52, ((PyObject *)__pyx_n_s__res)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__res)); __Pyx_INCREF(((PyObject *)__pyx_n_s__pmask_shape)); PyTuple_SET_ITEM(__pyx_k_tuple_92, 53, ((PyObject *)__pyx_n_s__pmask_shape)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__pmask_shape)); __Pyx_INCREF(((PyObject *)__pyx_n_s__strides)); PyTuple_SET_ITEM(__pyx_k_tuple_92, 54, ((PyObject *)__pyx_n_s__strides)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__strides)); __Pyx_INCREF(((PyObject *)__pyx_n_s__dstrides)); PyTuple_SET_ITEM(__pyx_k_tuple_92, 55, ((PyObject *)__pyx_n_s__dstrides)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__dstrides)); __Pyx_INCREF(((PyObject *)__pyx_n_s__ss0d)); PyTuple_SET_ITEM(__pyx_k_tuple_92, 56, ((PyObject *)__pyx_n_s__ss0d)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__ss0d)); __Pyx_INCREF(((PyObject *)__pyx_n_s__ss1d)); PyTuple_SET_ITEM(__pyx_k_tuple_92, 57, ((PyObject *)__pyx_n_s__ss1d)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__ss1d)); __Pyx_INCREF(((PyObject *)__pyx_n_s__ss2d)); PyTuple_SET_ITEM(__pyx_k_tuple_92, 58, ((PyObject *)__pyx_n_s__ss2d)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__ss2d)); __Pyx_INCREF(((PyObject *)__pyx_n_s__verts)); PyTuple_SET_ITEM(__pyx_k_tuple_92, 59, ((PyObject *)__pyx_n_s__verts)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__verts)); __Pyx_INCREF(((PyObject *)__pyx_n_s__union)); PyTuple_SET_ITEM(__pyx_k_tuple_92, 60, ((PyObject *)__pyx_n_s__union)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__union)); __Pyx_INCREF(((PyObject *)__pyx_n_s__c)); PyTuple_SET_ITEM(__pyx_k_tuple_92, 61, ((PyObject *)__pyx_n_s__c)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__c)); __Pyx_INCREF(((PyObject *)__pyx_n_s__v)); PyTuple_SET_ITEM(__pyx_k_tuple_92, 62, ((PyObject *)__pyx_n_s__v)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__v)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_92)); __pyx_k_codeobj_93 = (PyObject*)__Pyx_PyCode_New(2, 0, 63, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_92, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_90, __pyx_n_s__Lips3d, 464, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_93)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 464; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/algorithms/statistics/intvol.pyx":699 * * * def _convert_stride3(v, stride1, stride2): # <<<<<<<<<<<<<< * """ * Take a voxel, expressed as in index in stride1 and */ __pyx_k_tuple_94 = PyTuple_New(6); if (unlikely(!__pyx_k_tuple_94)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 699; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_94); __Pyx_INCREF(((PyObject *)__pyx_n_s__v)); PyTuple_SET_ITEM(__pyx_k_tuple_94, 0, ((PyObject *)__pyx_n_s__v)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__v)); __Pyx_INCREF(((PyObject *)__pyx_n_s__stride1)); PyTuple_SET_ITEM(__pyx_k_tuple_94, 1, ((PyObject *)__pyx_n_s__stride1)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__stride1)); __Pyx_INCREF(((PyObject *)__pyx_n_s__stride2)); PyTuple_SET_ITEM(__pyx_k_tuple_94, 2, ((PyObject *)__pyx_n_s__stride2)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__stride2)); __Pyx_INCREF(((PyObject *)__pyx_n_s__v0)); PyTuple_SET_ITEM(__pyx_k_tuple_94, 3, ((PyObject *)__pyx_n_s__v0)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__v0)); __Pyx_INCREF(((PyObject *)__pyx_n_s__v1)); PyTuple_SET_ITEM(__pyx_k_tuple_94, 4, ((PyObject *)__pyx_n_s__v1)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__v1)); __Pyx_INCREF(((PyObject 
*)__pyx_n_s__v2)); PyTuple_SET_ITEM(__pyx_k_tuple_94, 5, ((PyObject *)__pyx_n_s__v2)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__v2)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_94)); __pyx_k_codeobj_95 = (PyObject*)__Pyx_PyCode_New(3, 0, 6, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_94, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_90, __pyx_n_s___convert_stride3, 699, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_95)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 699; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/algorithms/statistics/intvol.pyx":711 * * * def _convert_stride2(v, stride1, stride2): # <<<<<<<<<<<<<< * """ * Take a voxel, expressed as in index in stride1 and */ __pyx_k_tuple_96 = PyTuple_New(5); if (unlikely(!__pyx_k_tuple_96)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 711; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_96); __Pyx_INCREF(((PyObject *)__pyx_n_s__v)); PyTuple_SET_ITEM(__pyx_k_tuple_96, 0, ((PyObject *)__pyx_n_s__v)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__v)); __Pyx_INCREF(((PyObject *)__pyx_n_s__stride1)); PyTuple_SET_ITEM(__pyx_k_tuple_96, 1, ((PyObject *)__pyx_n_s__stride1)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__stride1)); __Pyx_INCREF(((PyObject *)__pyx_n_s__stride2)); PyTuple_SET_ITEM(__pyx_k_tuple_96, 2, ((PyObject *)__pyx_n_s__stride2)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__stride2)); __Pyx_INCREF(((PyObject *)__pyx_n_s__v0)); PyTuple_SET_ITEM(__pyx_k_tuple_96, 3, ((PyObject *)__pyx_n_s__v0)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__v0)); __Pyx_INCREF(((PyObject *)__pyx_n_s__v1)); PyTuple_SET_ITEM(__pyx_k_tuple_96, 4, ((PyObject *)__pyx_n_s__v1)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__v1)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_96)); __pyx_k_codeobj_97 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_96, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_90, __pyx_n_s___convert_stride2, 711, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_97)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 711; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/algorithms/statistics/intvol.pyx":721 * * * def _convert_stride1(v, stride1, stride2): # <<<<<<<<<<<<<< * """ * Take a voxel, expressed as in index in stride1 and */ __pyx_k_tuple_98 = PyTuple_New(4); if (unlikely(!__pyx_k_tuple_98)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 721; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_98); __Pyx_INCREF(((PyObject *)__pyx_n_s__v)); PyTuple_SET_ITEM(__pyx_k_tuple_98, 0, ((PyObject *)__pyx_n_s__v)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__v)); __Pyx_INCREF(((PyObject *)__pyx_n_s__stride1)); PyTuple_SET_ITEM(__pyx_k_tuple_98, 1, ((PyObject *)__pyx_n_s__stride1)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__stride1)); __Pyx_INCREF(((PyObject *)__pyx_n_s__stride2)); PyTuple_SET_ITEM(__pyx_k_tuple_98, 2, ((PyObject *)__pyx_n_s__stride2)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__stride2)); __Pyx_INCREF(((PyObject *)__pyx_n_s__v0)); PyTuple_SET_ITEM(__pyx_k_tuple_98, 3, ((PyObject *)__pyx_n_s__v0)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__v0)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_98)); __pyx_k_codeobj_99 = (PyObject*)__Pyx_PyCode_New(3, 0, 4, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_98, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_90, __pyx_n_s___convert_stride1, 721, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_99)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 
721; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/algorithms/statistics/intvol.pyx":730 * * * def Lips2d(coords, mask): # <<<<<<<<<<<<<< * """ Estimate intrinsic volumes for 2d region in `mask` given `coords` * */ __pyx_k_tuple_100 = PyTuple_New(54); if (unlikely(!__pyx_k_tuple_100)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 730; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_100); __Pyx_INCREF(((PyObject *)__pyx_n_s__coords)); PyTuple_SET_ITEM(__pyx_k_tuple_100, 0, ((PyObject *)__pyx_n_s__coords)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__coords)); __Pyx_INCREF(((PyObject *)__pyx_n_s__mask)); PyTuple_SET_ITEM(__pyx_k_tuple_100, 1, ((PyObject *)__pyx_n_s__mask)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__mask)); __Pyx_INCREF(((PyObject *)__pyx_n_s__value)); PyTuple_SET_ITEM(__pyx_k_tuple_100, 2, ((PyObject *)__pyx_n_s__value)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__value)); __Pyx_INCREF(((PyObject *)__pyx_n_s__coords_c)); PyTuple_SET_ITEM(__pyx_k_tuple_100, 3, ((PyObject *)__pyx_n_s__coords_c)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__coords_c)); __Pyx_INCREF(((PyObject *)__pyx_n_s__mask_c)); PyTuple_SET_ITEM(__pyx_k_tuple_100, 4, ((PyObject *)__pyx_n_s__mask_c)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__mask_c)); __Pyx_INCREF(((PyObject *)__pyx_n_s__fcoords)); PyTuple_SET_ITEM(__pyx_k_tuple_100, 5, ((PyObject *)__pyx_n_s__fcoords)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__fcoords)); __Pyx_INCREF(((PyObject *)__pyx_n_s__D)); PyTuple_SET_ITEM(__pyx_k_tuple_100, 6, ((PyObject *)__pyx_n_s__D)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__D)); __Pyx_INCREF(((PyObject *)__pyx_n_s__fmask)); PyTuple_SET_ITEM(__pyx_k_tuple_100, 7, ((PyObject *)__pyx_n_s__fmask)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__fmask)); __Pyx_INCREF(((PyObject *)__pyx_n_s__fpmask)); PyTuple_SET_ITEM(__pyx_k_tuple_100, 8, ((PyObject *)__pyx_n_s__fpmask)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__fpmask)); __Pyx_INCREF(((PyObject *)__pyx_n_s__pmask)); PyTuple_SET_ITEM(__pyx_k_tuple_100, 9, ((PyObject *)__pyx_n_s__pmask)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__pmask)); __Pyx_INCREF(((PyObject *)__pyx_n_s__d3)); PyTuple_SET_ITEM(__pyx_k_tuple_100, 10, ((PyObject *)__pyx_n_s__d3)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__d3)); __Pyx_INCREF(((PyObject *)__pyx_n_s__d2)); PyTuple_SET_ITEM(__pyx_k_tuple_100, 11, ((PyObject *)__pyx_n_s__d2)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__d2)); __Pyx_INCREF(((PyObject *)__pyx_n_s__cvertices)); PyTuple_SET_ITEM(__pyx_k_tuple_100, 12, ((PyObject *)__pyx_n_s__cvertices)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__cvertices)); __Pyx_INCREF(((PyObject *)__pyx_n_s__i)); PyTuple_SET_ITEM(__pyx_k_tuple_100, 13, ((PyObject *)__pyx_n_s__i)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__i)); __Pyx_INCREF(((PyObject *)__pyx_n_s__j)); PyTuple_SET_ITEM(__pyx_k_tuple_100, 14, ((PyObject *)__pyx_n_s__j)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__j)); __Pyx_INCREF(((PyObject *)__pyx_n_s__k)); PyTuple_SET_ITEM(__pyx_k_tuple_100, 15, ((PyObject *)__pyx_n_s__k)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__k)); __Pyx_INCREF(((PyObject *)__pyx_n_s__l)); PyTuple_SET_ITEM(__pyx_k_tuple_100, 16, ((PyObject *)__pyx_n_s__l)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__l)); __Pyx_INCREF(((PyObject *)__pyx_n_s__r)); PyTuple_SET_ITEM(__pyx_k_tuple_100, 17, ((PyObject *)__pyx_n_s__r)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__r)); __Pyx_INCREF(((PyObject *)__pyx_n_s__s)); PyTuple_SET_ITEM(__pyx_k_tuple_100, 18, ((PyObject *)__pyx_n_s__s)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__s)); __Pyx_INCREF(((PyObject *)__pyx_n_s__rr)); 
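/* A 2D analogue, hedged the same way: Lips2d(coords, mask) ("intvol.pyx":730,
   whose local names are collected in the tuple built here) and EC2d(mask)
   ("intvol.pyx":907, set up just below). Shapes and dtypes are again
   illustrative assumptions rather than requirements taken from this file:

       import numpy as np
       from nipy.algorithms.statistics.intvol import EC2d, Lips2d

       mask = np.zeros((32, 32), dtype=np.intp)   # hypothetical 2D mask
       mask[8:24, 8:24] = 1

       ec = EC2d(mask)                            # Euler characteristic
       coords = np.indices(mask.shape).astype(np.float64)
       vols = Lips2d(coords, mask)                # intrinsic volumes (cf. locals l0, l1, l2)
*/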
PyTuple_SET_ITEM(__pyx_k_tuple_100, 19, ((PyObject *)__pyx_n_s__rr)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__rr)); __Pyx_INCREF(((PyObject *)__pyx_n_s__ss)); PyTuple_SET_ITEM(__pyx_k_tuple_100, 20, ((PyObject *)__pyx_n_s__ss)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__ss)); __Pyx_INCREF(((PyObject *)__pyx_n_s__mr)); PyTuple_SET_ITEM(__pyx_k_tuple_100, 21, ((PyObject *)__pyx_n_s__mr)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__mr)); __Pyx_INCREF(((PyObject *)__pyx_n_s__ms)); PyTuple_SET_ITEM(__pyx_k_tuple_100, 22, ((PyObject *)__pyx_n_s__ms)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__ms)); __Pyx_INCREF(((PyObject *)__pyx_n_s__s0)); PyTuple_SET_ITEM(__pyx_k_tuple_100, 23, ((PyObject *)__pyx_n_s__s0)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__s0)); __Pyx_INCREF(((PyObject *)__pyx_n_s__s1)); PyTuple_SET_ITEM(__pyx_k_tuple_100, 24, ((PyObject *)__pyx_n_s__s1)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__s1)); __Pyx_INCREF(((PyObject *)__pyx_n_s__ds2)); PyTuple_SET_ITEM(__pyx_k_tuple_100, 25, ((PyObject *)__pyx_n_s__ds2)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__ds2)); __Pyx_INCREF(((PyObject *)__pyx_n_s__ds3)); PyTuple_SET_ITEM(__pyx_k_tuple_100, 26, ((PyObject *)__pyx_n_s__ds3)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__ds3)); __Pyx_INCREF(((PyObject *)__pyx_n_s__index)); PyTuple_SET_ITEM(__pyx_k_tuple_100, 27, ((PyObject *)__pyx_n_s__index)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__index)); __Pyx_INCREF(((PyObject *)__pyx_n_s__m)); PyTuple_SET_ITEM(__pyx_k_tuple_100, 28, ((PyObject *)__pyx_n_s__m)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__m)); __Pyx_INCREF(((PyObject *)__pyx_n_s__npix)); PyTuple_SET_ITEM(__pyx_k_tuple_100, 29, ((PyObject *)__pyx_n_s__npix)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__npix)); __Pyx_INCREF(((PyObject *)__pyx_n_s__ss0)); PyTuple_SET_ITEM(__pyx_k_tuple_100, 30, ((PyObject *)__pyx_n_s__ss0)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__ss0)); __Pyx_INCREF(((PyObject *)__pyx_n_s__ss1)); PyTuple_SET_ITEM(__pyx_k_tuple_100, 31, ((PyObject *)__pyx_n_s__ss1)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__ss1)); __Pyx_INCREF(((PyObject *)__pyx_n_s__ss0d)); PyTuple_SET_ITEM(__pyx_k_tuple_100, 32, ((PyObject *)__pyx_n_s__ss0d)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__ss0d)); __Pyx_INCREF(((PyObject *)__pyx_n_s__ss1d)); PyTuple_SET_ITEM(__pyx_k_tuple_100, 33, ((PyObject *)__pyx_n_s__ss1d)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__ss1d)); __Pyx_INCREF(((PyObject *)__pyx_n_s__v0)); PyTuple_SET_ITEM(__pyx_k_tuple_100, 34, ((PyObject *)__pyx_n_s__v0)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__v0)); __Pyx_INCREF(((PyObject *)__pyx_n_s__v1)); PyTuple_SET_ITEM(__pyx_k_tuple_100, 35, ((PyObject *)__pyx_n_s__v1)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__v1)); __Pyx_INCREF(((PyObject *)__pyx_n_s__v2)); PyTuple_SET_ITEM(__pyx_k_tuple_100, 36, ((PyObject *)__pyx_n_s__v2)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__v2)); __Pyx_INCREF(((PyObject *)__pyx_n_s__l0)); PyTuple_SET_ITEM(__pyx_k_tuple_100, 37, ((PyObject *)__pyx_n_s__l0)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__l0)); __Pyx_INCREF(((PyObject *)__pyx_n_s__l1)); PyTuple_SET_ITEM(__pyx_k_tuple_100, 38, ((PyObject *)__pyx_n_s__l1)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__l1)); __Pyx_INCREF(((PyObject *)__pyx_n_s__l2)); PyTuple_SET_ITEM(__pyx_k_tuple_100, 39, ((PyObject *)__pyx_n_s__l2)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__l2)); __Pyx_INCREF(((PyObject *)__pyx_n_s__res)); PyTuple_SET_ITEM(__pyx_k_tuple_100, 40, ((PyObject *)__pyx_n_s__res)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__res)); __Pyx_INCREF(((PyObject *)__pyx_n_s__pmask_shape)); PyTuple_SET_ITEM(__pyx_k_tuple_100, 41, ((PyObject 
*)__pyx_n_s__pmask_shape)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__pmask_shape)); __Pyx_INCREF(((PyObject *)__pyx_n_s__strides)); PyTuple_SET_ITEM(__pyx_k_tuple_100, 42, ((PyObject *)__pyx_n_s__strides)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__strides)); __Pyx_INCREF(((PyObject *)__pyx_n_s__dstrides)); PyTuple_SET_ITEM(__pyx_k_tuple_100, 43, ((PyObject *)__pyx_n_s__dstrides)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__dstrides)); __Pyx_INCREF(((PyObject *)__pyx_n_s__verts)); PyTuple_SET_ITEM(__pyx_k_tuple_100, 44, ((PyObject *)__pyx_n_s__verts)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__verts)); __Pyx_INCREF(((PyObject *)__pyx_n_s__union)); PyTuple_SET_ITEM(__pyx_k_tuple_100, 45, ((PyObject *)__pyx_n_s__union)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__union)); __Pyx_INCREF(((PyObject *)__pyx_n_s__c)); PyTuple_SET_ITEM(__pyx_k_tuple_100, 46, ((PyObject *)__pyx_n_s__c)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__c)); __Pyx_INCREF(((PyObject *)__pyx_n_s__m3)); PyTuple_SET_ITEM(__pyx_k_tuple_100, 47, ((PyObject *)__pyx_n_s__m3)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__m3)); __Pyx_INCREF(((PyObject *)__pyx_n_s__m2)); PyTuple_SET_ITEM(__pyx_k_tuple_100, 48, ((PyObject *)__pyx_n_s__m2)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__m2)); __Pyx_INCREF(((PyObject *)__pyx_n_s__pindex)); PyTuple_SET_ITEM(__pyx_k_tuple_100, 49, ((PyObject *)__pyx_n_s__pindex)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__pindex)); __Pyx_INCREF(((PyObject *)__pyx_n_s__w0)); PyTuple_SET_ITEM(__pyx_k_tuple_100, 50, ((PyObject *)__pyx_n_s__w0)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__w0)); __Pyx_INCREF(((PyObject *)__pyx_n_s__w1)); PyTuple_SET_ITEM(__pyx_k_tuple_100, 51, ((PyObject *)__pyx_n_s__w1)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__w1)); __Pyx_INCREF(((PyObject *)__pyx_n_s__w2)); PyTuple_SET_ITEM(__pyx_k_tuple_100, 52, ((PyObject *)__pyx_n_s__w2)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__w2)); __Pyx_INCREF(((PyObject *)__pyx_n_s__v)); PyTuple_SET_ITEM(__pyx_k_tuple_100, 53, ((PyObject *)__pyx_n_s__v)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__v)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_100)); __pyx_k_codeobj_101 = (PyObject*)__Pyx_PyCode_New(2, 0, 54, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_100, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_90, __pyx_n_s__Lips2d, 730, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_101)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 730; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/algorithms/statistics/intvol.pyx":907 * * * def EC2d(mask): # <<<<<<<<<<<<<< * """ Compute Euler characteristic of 2D region in `mask` * */ __pyx_k_tuple_102 = PyTuple_New(26); if (unlikely(!__pyx_k_tuple_102)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 907; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_102); __Pyx_INCREF(((PyObject *)__pyx_n_s__mask)); PyTuple_SET_ITEM(__pyx_k_tuple_102, 0, ((PyObject *)__pyx_n_s__mask)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__mask)); __Pyx_INCREF(((PyObject *)__pyx_n_s__mask_c)); PyTuple_SET_ITEM(__pyx_k_tuple_102, 1, ((PyObject *)__pyx_n_s__mask_c)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__mask_c)); __Pyx_INCREF(((PyObject *)__pyx_n_s__fpmask)); PyTuple_SET_ITEM(__pyx_k_tuple_102, 2, ((PyObject *)__pyx_n_s__fpmask)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__fpmask)); __Pyx_INCREF(((PyObject *)__pyx_n_s__d2)); PyTuple_SET_ITEM(__pyx_k_tuple_102, 3, ((PyObject *)__pyx_n_s__d2)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__d2)); __Pyx_INCREF(((PyObject *)__pyx_n_s__d3)); PyTuple_SET_ITEM(__pyx_k_tuple_102, 4, ((PyObject 
*)__pyx_n_s__d3)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__d3)); __Pyx_INCREF(((PyObject *)__pyx_n_s__i)); PyTuple_SET_ITEM(__pyx_k_tuple_102, 5, ((PyObject *)__pyx_n_s__i)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__i)); __Pyx_INCREF(((PyObject *)__pyx_n_s__j)); PyTuple_SET_ITEM(__pyx_k_tuple_102, 6, ((PyObject *)__pyx_n_s__j)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__j)); __Pyx_INCREF(((PyObject *)__pyx_n_s__k)); PyTuple_SET_ITEM(__pyx_k_tuple_102, 7, ((PyObject *)__pyx_n_s__k)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__k)); __Pyx_INCREF(((PyObject *)__pyx_n_s__l)); PyTuple_SET_ITEM(__pyx_k_tuple_102, 8, ((PyObject *)__pyx_n_s__l)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__l)); __Pyx_INCREF(((PyObject *)__pyx_n_s__s0)); PyTuple_SET_ITEM(__pyx_k_tuple_102, 9, ((PyObject *)__pyx_n_s__s0)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__s0)); __Pyx_INCREF(((PyObject *)__pyx_n_s__s1)); PyTuple_SET_ITEM(__pyx_k_tuple_102, 10, ((PyObject *)__pyx_n_s__s1)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__s1)); __Pyx_INCREF(((PyObject *)__pyx_n_s__ds2)); PyTuple_SET_ITEM(__pyx_k_tuple_102, 11, ((PyObject *)__pyx_n_s__ds2)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__ds2)); __Pyx_INCREF(((PyObject *)__pyx_n_s__ds3)); PyTuple_SET_ITEM(__pyx_k_tuple_102, 12, ((PyObject *)__pyx_n_s__ds3)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__ds3)); __Pyx_INCREF(((PyObject *)__pyx_n_s__index)); PyTuple_SET_ITEM(__pyx_k_tuple_102, 13, ((PyObject *)__pyx_n_s__index)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__index)); __Pyx_INCREF(((PyObject *)__pyx_n_s__m)); PyTuple_SET_ITEM(__pyx_k_tuple_102, 14, ((PyObject *)__pyx_n_s__m)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__m)); __Pyx_INCREF(((PyObject *)__pyx_n_s__ss0)); PyTuple_SET_ITEM(__pyx_k_tuple_102, 15, ((PyObject *)__pyx_n_s__ss0)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__ss0)); __Pyx_INCREF(((PyObject *)__pyx_n_s__ss1)); PyTuple_SET_ITEM(__pyx_k_tuple_102, 16, ((PyObject *)__pyx_n_s__ss1)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__ss1)); __Pyx_INCREF(((PyObject *)__pyx_n_s__v0)); PyTuple_SET_ITEM(__pyx_k_tuple_102, 17, ((PyObject *)__pyx_n_s__v0)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__v0)); __Pyx_INCREF(((PyObject *)__pyx_n_s__v1)); PyTuple_SET_ITEM(__pyx_k_tuple_102, 18, ((PyObject *)__pyx_n_s__v1)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__v1)); __Pyx_INCREF(((PyObject *)__pyx_n_s__l0)); PyTuple_SET_ITEM(__pyx_k_tuple_102, 19, ((PyObject *)__pyx_n_s__l0)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__l0)); __Pyx_INCREF(((PyObject *)__pyx_n_s__pmask_shape)); PyTuple_SET_ITEM(__pyx_k_tuple_102, 20, ((PyObject *)__pyx_n_s__pmask_shape)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__pmask_shape)); __Pyx_INCREF(((PyObject *)__pyx_n_s__pmask)); PyTuple_SET_ITEM(__pyx_k_tuple_102, 21, ((PyObject *)__pyx_n_s__pmask)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__pmask)); __Pyx_INCREF(((PyObject *)__pyx_n_s__strides)); PyTuple_SET_ITEM(__pyx_k_tuple_102, 22, ((PyObject *)__pyx_n_s__strides)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__strides)); __Pyx_INCREF(((PyObject *)__pyx_n_s__union)); PyTuple_SET_ITEM(__pyx_k_tuple_102, 23, ((PyObject *)__pyx_n_s__union)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__union)); __Pyx_INCREF(((PyObject *)__pyx_n_s__c)); PyTuple_SET_ITEM(__pyx_k_tuple_102, 24, ((PyObject *)__pyx_n_s__c)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__c)); __Pyx_INCREF(((PyObject *)__pyx_n_s__v2)); PyTuple_SET_ITEM(__pyx_k_tuple_102, 25, ((PyObject *)__pyx_n_s__v2)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__v2)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_102)); __pyx_k_codeobj_103 = (PyObject*)__Pyx_PyCode_New(1, 0, 26, 0, 0, __pyx_empty_bytes, 
__pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_102, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_90, __pyx_n_s__EC2d, 907, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_103)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 907; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/algorithms/statistics/intvol.pyx":1007 * * * def Lips1d(np.ndarray[np.float_t, ndim=2] coords, # <<<<<<<<<<<<<< * np.ndarray[np.intp_t, ndim=1] mask): * """ Estimate intrinsic volumes for 1D region in `mask` given `coords` */ __pyx_k_tuple_104 = PyTuple_New(17); if (unlikely(!__pyx_k_tuple_104)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1007; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_104); __Pyx_INCREF(((PyObject *)__pyx_n_s__coords)); PyTuple_SET_ITEM(__pyx_k_tuple_104, 0, ((PyObject *)__pyx_n_s__coords)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__coords)); __Pyx_INCREF(((PyObject *)__pyx_n_s__mask)); PyTuple_SET_ITEM(__pyx_k_tuple_104, 1, ((PyObject *)__pyx_n_s__mask)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__mask)); __Pyx_INCREF(((PyObject *)__pyx_n_s__i)); PyTuple_SET_ITEM(__pyx_k_tuple_104, 2, ((PyObject *)__pyx_n_s__i)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__i)); __Pyx_INCREF(((PyObject *)__pyx_n_s__l)); PyTuple_SET_ITEM(__pyx_k_tuple_104, 3, ((PyObject *)__pyx_n_s__l)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__l)); __Pyx_INCREF(((PyObject *)__pyx_n_s__r)); PyTuple_SET_ITEM(__pyx_k_tuple_104, 4, ((PyObject *)__pyx_n_s__r)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__r)); __Pyx_INCREF(((PyObject *)__pyx_n_s__s)); PyTuple_SET_ITEM(__pyx_k_tuple_104, 5, ((PyObject *)__pyx_n_s__s)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__s)); __Pyx_INCREF(((PyObject *)__pyx_n_s__rr)); PyTuple_SET_ITEM(__pyx_k_tuple_104, 6, ((PyObject *)__pyx_n_s__rr)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__rr)); __Pyx_INCREF(((PyObject *)__pyx_n_s__ss)); PyTuple_SET_ITEM(__pyx_k_tuple_104, 7, ((PyObject *)__pyx_n_s__ss)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__ss)); __Pyx_INCREF(((PyObject *)__pyx_n_s__mr)); PyTuple_SET_ITEM(__pyx_k_tuple_104, 8, ((PyObject *)__pyx_n_s__mr)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__mr)); __Pyx_INCREF(((PyObject *)__pyx_n_s__ms)); PyTuple_SET_ITEM(__pyx_k_tuple_104, 9, ((PyObject *)__pyx_n_s__ms)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__ms)); __Pyx_INCREF(((PyObject *)__pyx_n_s__s0)); PyTuple_SET_ITEM(__pyx_k_tuple_104, 10, ((PyObject *)__pyx_n_s__s0)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__s0)); __Pyx_INCREF(((PyObject *)__pyx_n_s__index)); PyTuple_SET_ITEM(__pyx_k_tuple_104, 11, ((PyObject *)__pyx_n_s__index)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__index)); __Pyx_INCREF(((PyObject *)__pyx_n_s__m)); PyTuple_SET_ITEM(__pyx_k_tuple_104, 12, ((PyObject *)__pyx_n_s__m)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__m)); __Pyx_INCREF(((PyObject *)__pyx_n_s__l0)); PyTuple_SET_ITEM(__pyx_k_tuple_104, 13, ((PyObject *)__pyx_n_s__l0)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__l0)); __Pyx_INCREF(((PyObject *)__pyx_n_s__l1)); PyTuple_SET_ITEM(__pyx_k_tuple_104, 14, ((PyObject *)__pyx_n_s__l1)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__l1)); __Pyx_INCREF(((PyObject *)__pyx_n_s__res)); PyTuple_SET_ITEM(__pyx_k_tuple_104, 15, ((PyObject *)__pyx_n_s__res)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__res)); __Pyx_INCREF(((PyObject *)__pyx_n_s__D)); PyTuple_SET_ITEM(__pyx_k_tuple_104, 16, ((PyObject *)__pyx_n_s__D)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__D)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_104)); __pyx_k_codeobj_105 = (PyObject*)__Pyx_PyCode_New(2, 0, 17, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, 
__pyx_empty_tuple, __pyx_k_tuple_104, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_90, __pyx_n_s__Lips1d, 1007, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_105)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1007; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/algorithms/statistics/intvol.pyx":1083 * * * def EC1d(np.ndarray[np.intp_t, ndim=1] mask): # <<<<<<<<<<<<<< * """ Compute Euler characteristic for 1d `mask` * */ __pyx_k_tuple_106 = PyTuple_New(5); if (unlikely(!__pyx_k_tuple_106)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1083; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_106); __Pyx_INCREF(((PyObject *)__pyx_n_s__mask)); PyTuple_SET_ITEM(__pyx_k_tuple_106, 0, ((PyObject *)__pyx_n_s__mask)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__mask)); __Pyx_INCREF(((PyObject *)__pyx_n_s__i)); PyTuple_SET_ITEM(__pyx_k_tuple_106, 1, ((PyObject *)__pyx_n_s__i)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__i)); __Pyx_INCREF(((PyObject *)__pyx_n_s__m)); PyTuple_SET_ITEM(__pyx_k_tuple_106, 2, ((PyObject *)__pyx_n_s__m)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__m)); __Pyx_INCREF(((PyObject *)__pyx_n_s__s0)); PyTuple_SET_ITEM(__pyx_k_tuple_106, 3, ((PyObject *)__pyx_n_s__s0)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__s0)); __Pyx_INCREF(((PyObject *)__pyx_n_s__l0)); PyTuple_SET_ITEM(__pyx_k_tuple_106, 4, ((PyObject *)__pyx_n_s__l0)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__l0)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_106)); __pyx_k_codeobj_107 = (PyObject*)__Pyx_PyCode_New(1, 0, 5, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_106, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_90, __pyx_n_s__EC1d, 1083, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_107)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1083; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_InitGlobals(void) { if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __pyx_int_2 = PyInt_FromLong(2); if (unlikely(!__pyx_int_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __pyx_int_3 = PyInt_FromLong(3); if (unlikely(!__pyx_int_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __pyx_int_4 = PyInt_FromLong(4); if (unlikely(!__pyx_int_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __pyx_int_8 = PyInt_FromLong(8); if (unlikely(!__pyx_int_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __pyx_int_15 = PyInt_FromLong(15); if (unlikely(!__pyx_int_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; return 0; __pyx_L1_error:; return -1; } #if PY_MAJOR_VERSION < 3 PyMODINIT_FUNC initintvol(void); /*proto*/ PyMODINIT_FUNC initintvol(void) #else 
PyMODINIT_FUNC PyInit_intvol(void); /*proto*/ PyMODINIT_FUNC PyInit_intvol(void) #endif { PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; double __pyx_t_3; __Pyx_RefNannyDeclarations #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit_intvol(void)", 0); if ( __Pyx_check_binary_version() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #ifdef __Pyx_CyFunction_USED if (__Pyx_CyFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? */ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4(__Pyx_NAMESTR("intvol"), __pyx_methods, __Pyx_DOCSTR(__pyx_k_85), 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (!PyDict_GetItemString(modules, "nipy.algorithms.statistics.intvol")) { if (unlikely(PyDict_SetItemString(modules, "nipy.algorithms.statistics.intvol", __pyx_m) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } } #endif __pyx_b = PyImport_AddModule(__Pyx_NAMESTR(__Pyx_BUILTIN_MODULE_NAME)); if (unlikely(!__pyx_b)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #if CYTHON_COMPILING_IN_PYPY Py_INCREF(__pyx_b); #endif if (__Pyx_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; /*--- Initialize various global constants etc. 
---*/ if (unlikely(__Pyx_InitGlobals() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (__pyx_module_is_main_nipy__algorithms__statistics__intvol) { if (__Pyx_SetAttrString(__pyx_m, "__name__", __pyx_n_s____main__) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; } /*--- Builtin init code ---*/ if (unlikely(__Pyx_InitCachedBuiltins() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Constants init code ---*/ if (unlikely(__Pyx_InitCachedConstants() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Global init code ---*/ /*--- Variable export code ---*/ /*--- Function export code ---*/ /*--- Type init code ---*/ /*--- Type import code ---*/ __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "type", #if CYTHON_COMPILING_IN_PYPY sizeof(PyTypeObject), #else sizeof(PyHeapTypeObject), #endif 0); if (unlikely(!__pyx_ptype_7cpython_4type_type)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_5numpy_dtype)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 155; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 169; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 178; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 861; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Variable import code ---*/ /*--- Function import code ---*/ /*--- Execution code ---*/ /* "nipy/algorithms/statistics/intvol.pyx":12 * cimport cython * * import numpy as np # <<<<<<<<<<<<<< * cimport numpy as np * */ __pyx_t_1 = __Pyx_Import(((PyObject *)__pyx_n_s__numpy), 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__np, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/statistics/intvol.pyx":15 * cimport numpy as np * * from scipy.sparse import dok_matrix # <<<<<<<<<<<<<< * * # Array helper */ __pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)__pyx_n_s__dok_matrix)); PyList_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_n_s__dok_matrix)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__dok_matrix)); __pyx_t_2 = __Pyx_Import(((PyObject 
*)__pyx_n_s_86), ((PyObject *)__pyx_t_1), -1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __pyx_t_1 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__dok_matrix); if (__pyx_t_1 == NULL) { if (PyErr_ExceptionMatches(PyExc_AttributeError)) __Pyx_RaiseImportError(__pyx_n_s__dok_matrix); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__dok_matrix, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "nipy/algorithms/statistics/intvol.pyx":18 * * # Array helper * from nipy.utils.arrays import strides_from # <<<<<<<<<<<<<< * * # local imports */ __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 18; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(((PyObject *)__pyx_n_s__strides_from)); PyList_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_n_s__strides_from)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__strides_from)); __pyx_t_1 = __Pyx_Import(((PyObject *)__pyx_n_s_87), ((PyObject *)__pyx_t_2), -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 18; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__strides_from); if (__pyx_t_2 == NULL) { if (PyErr_ExceptionMatches(PyExc_AttributeError)) __Pyx_RaiseImportError(__pyx_n_s__strides_from); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 18; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __Pyx_GOTREF(__pyx_t_2); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__strides_from, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 18; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/statistics/intvol.pyx":21 * * # local imports * from utils import cube_with_strides_center, join_complexes # <<<<<<<<<<<<<< * * */ __pyx_t_1 = PyList_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)__pyx_n_s_8)); PyList_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_n_s_8)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s_8)); __Pyx_INCREF(((PyObject *)__pyx_n_s__join_complexes)); PyList_SET_ITEM(__pyx_t_1, 1, ((PyObject *)__pyx_n_s__join_complexes)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__join_complexes)); __pyx_t_2 = __Pyx_Import(((PyObject *)__pyx_n_s__utils), ((PyObject *)__pyx_t_1), -1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __pyx_t_1 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s_8); if (__pyx_t_1 == NULL) { if (PyErr_ExceptionMatches(PyExc_AttributeError)) __Pyx_RaiseImportError(__pyx_n_s_8); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s_8, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; 
__pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__join_complexes); if (__pyx_t_1 == NULL) { if (PyErr_ExceptionMatches(PyExc_AttributeError)) __Pyx_RaiseImportError(__pyx_n_s__join_complexes); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__join_complexes, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "nipy/algorithms/statistics/intvol.pyx":24 * * * cdef double PI = np.pi # <<<<<<<<<<<<<< * * */ __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__pi); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_3 = __pyx_PyFloat_AsDouble(__pyx_t_1); if (unlikely((__pyx_t_3 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_4nipy_10algorithms_10statistics_6intvol_PI = __pyx_t_3; /* "nipy/algorithms/statistics/intvol.pyx":340 * * * def EC3d(mask): # <<<<<<<<<<<<<< * """ Compute Euler characteristic of region within `mask` * */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_10algorithms_10statistics_6intvol_15EC3d, NULL, __pyx_n_s_91); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 340; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__EC3d, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 340; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/statistics/intvol.pyx":464 * * * def Lips3d(coords, mask): # <<<<<<<<<<<<<< * """ Estimated intrinsic volumes within masked region given coordinates * */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_10algorithms_10statistics_6intvol_17Lips3d, NULL, __pyx_n_s_91); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 464; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__Lips3d, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 464; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/statistics/intvol.pyx":699 * * * def _convert_stride3(v, stride1, stride2): # <<<<<<<<<<<<<< * """ * Take a voxel, expressed as in index in stride1 and */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_10algorithms_10statistics_6intvol_19_convert_stride3, NULL, __pyx_n_s_91); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 699; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s___convert_stride3, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 699; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/statistics/intvol.pyx":711 * * * def _convert_stride2(v, stride1, 
stride2): # <<<<<<<<<<<<<< * """ * Take a voxel, expressed as in index in stride1 and */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_10algorithms_10statistics_6intvol_21_convert_stride2, NULL, __pyx_n_s_91); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 711; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s___convert_stride2, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 711; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/statistics/intvol.pyx":721 * * * def _convert_stride1(v, stride1, stride2): # <<<<<<<<<<<<<< * """ * Take a voxel, expressed as in index in stride1 and */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_10algorithms_10statistics_6intvol_23_convert_stride1, NULL, __pyx_n_s_91); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 721; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s___convert_stride1, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 721; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/statistics/intvol.pyx":730 * * * def Lips2d(coords, mask): # <<<<<<<<<<<<<< * """ Estimate intrinsic volumes for 2d region in `mask` given `coords` * */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_10algorithms_10statistics_6intvol_25Lips2d, NULL, __pyx_n_s_91); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 730; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__Lips2d, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 730; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/statistics/intvol.pyx":907 * * * def EC2d(mask): # <<<<<<<<<<<<<< * """ Compute Euler characteristic of 2D region in `mask` * */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_10algorithms_10statistics_6intvol_27EC2d, NULL, __pyx_n_s_91); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 907; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__EC2d, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 907; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/statistics/intvol.pyx":1007 * * * def Lips1d(np.ndarray[np.float_t, ndim=2] coords, # <<<<<<<<<<<<<< * np.ndarray[np.intp_t, ndim=1] mask): * """ Estimate intrinsic volumes for 1D region in `mask` given `coords` */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_10algorithms_10statistics_6intvol_29Lips1d, NULL, __pyx_n_s_91); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1007; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__Lips1d, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1007; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/statistics/intvol.pyx":1083 * * * def EC1d(np.ndarray[np.intp_t, ndim=1] mask): # <<<<<<<<<<<<<< * """ Compute Euler characteristic for 1d `mask` * */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_10algorithms_10statistics_6intvol_31EC1d, NULL, __pyx_n_s_91); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1083; 
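/* A 1D sketch to round out the set. The declarations quoted above give explicit
   buffer types: Lips1d(np.ndarray[np.float_t, ndim=2] coords, np.ndarray[np.intp_t,
   ndim=1] mask) and EC1d(np.ndarray[np.intp_t, ndim=1] mask). The assumption that
   coords has one column per mask element is illustrative, not taken from this file:

       import numpy as np
       from nipy.algorithms.statistics.intvol import EC1d, Lips1d

       mask = np.zeros(20, dtype=np.intp)         # 1D intp mask, per the signature
       mask[5:15] = 1

       ec = EC1d(mask)                            # Euler characteristic of the 1D region
       coords = np.arange(20, dtype=np.float64)[np.newaxis, :]   # 2D float coords (assumed shape)
       vols = Lips1d(coords, mask)                # intrinsic volumes (cf. locals l0, l1)
*/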
__pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__EC1d, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1083; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/algorithms/statistics/intvol.pyx":1 * """ # <<<<<<<<<<<<<< * The estimators for the intrinsic volumes appearing in this module * were partially supported by NSF grant DMS-0405970. */ __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_1)); if (PyObject_SetAttr(__pyx_m, __pyx_n_s____test__, ((PyObject *)__pyx_t_1)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; /* "numpy.pxd":975 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); if (__pyx_m) { __Pyx_AddTraceback("init nipy.algorithms.statistics.intvol", __pyx_clineno, __pyx_lineno, __pyx_filename); Py_DECREF(__pyx_m); __pyx_m = 0; } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init nipy.algorithms.statistics.intvol"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if PY_MAJOR_VERSION < 3 return; #else return __pyx_m; #endif } /* Runtime support code */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule((char *)modname); if (!m) goto end; p = PyObject_GetAttrString(m, (char *)"RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* CYTHON_REFNANNY */ static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name) { PyObject *result; result = PyObject_GetAttr(dict, name); if (!result) { if (dict != __pyx_b) { PyErr_Clear(); result = PyObject_GetAttr(__pyx_b, name); } if (!result) { PyErr_SetObject(PyExc_NameError, name); } } return result; } static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%s() takes %s %" CYTHON_FORMAT_SSIZE_T "d positional argument%s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? 
"" : "s", num_found); } static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 
1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%s() got an unexpected keyword argument '%s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb) { #if CYTHON_COMPILING_IN_CPYTHON PyObject *tmp_type, *tmp_value, *tmp_tb; PyThreadState *tstate = PyThreadState_GET(); tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_Restore(type, value, tb); #endif } static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb) { #if CYTHON_COMPILING_IN_CPYTHON PyThreadState *tstate = PyThreadState_GET(); *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(type, value, tb); #endif } #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } #if PY_VERSION_HEX < 0x02050000 if (PyClass_Check(type)) { #else if (PyType_Check(type)) { #endif #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; #if PY_VERSION_HEX < 0x02050000 if (PyInstance_Check(type)) { type = (PyObject*) ((PyInstanceObject*)type)->in_class; Py_INCREF(type); } else { type = 0; PyErr_SetString(PyExc_TypeError, "raise: exception must be an old-style class or instance"); goto raise_error; } #else type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } #endif } __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else /* Python 3+ */ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate 
value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyEval_CallObject(type, args); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } if (cause && cause != Py_None) { PyObject *fixed_cause; if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { PyThreadState *tstate = PyThreadState_GET(); PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } } bad: Py_XDECREF(owned_instance); return; } #endif static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { if (unlikely(!type)) { PyErr_Format(PyExc_SystemError, "Missing type object"); return 0; } if (likely(PyObject_TypeCheck(obj, type))) return 1; PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", Py_TYPE(obj)->tp_name, type->tp_name); return 0; } static CYTHON_INLINE int __Pyx_IsLittleEndian(void) { unsigned int n = 1; return *(unsigned char*)(&n) != 0; } static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type) { stack[0].field = &ctx->root; stack[0].parent_offset = 0; ctx->root.type = type; ctx->root.name = "buffer dtype"; ctx->root.offset = 0; ctx->head = stack; ctx->head->field = &ctx->root; ctx->fmt_offset = 0; ctx->head->parent_offset = 0; ctx->new_packmode = '@'; ctx->enc_packmode = '@'; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->is_complex = 0; ctx->is_valid_array = 0; ctx->struct_alignment = 0; while (type->typegroup == 'S') { ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = 0; type = type->fields->type; } } static int __Pyx_BufFmt_ParseNumber(const char** ts) { int count; const char* t = *ts; if (*t < '0' || *t > '9') { return -1; } else { count = *t++ - '0'; while (*t >= '0' && *t < '9') { count *= 10; count += *t++ - '0'; } } *ts = t; return count; } static int __Pyx_BufFmt_ExpectNumber(const char **ts) { int number = __Pyx_BufFmt_ParseNumber(ts); if (number == -1) /* First char was not a digit */ PyErr_Format(PyExc_ValueError,\ "Does not understand character buffer dtype format string ('%c')", **ts); return number; } static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { PyErr_Format(PyExc_ValueError, "Unexpected format string character: '%c'", ch); } static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { switch (ch) { case 'c': return "'char'"; case 'b': return "'signed char'"; case 'B': return "'unsigned char'"; case 'h': return "'short'"; case 'H': return "'unsigned short'"; case 'i': return "'int'"; case 'I': return "'unsigned int'"; case 'l': return "'long'"; case 'L': return "'unsigned long'"; 
case 'q': return "'long long'"; case 'Q': return "'unsigned long long'"; case 'f': return (is_complex ? "'complex float'" : "'float'"); case 'd': return (is_complex ? "'complex double'" : "'double'"); case 'g': return (is_complex ? "'complex long double'" : "'long double'"); case 'T': return "a struct"; case 'O': return "Python object"; case 'P': return "a pointer"; case 's': case 'p': return "a string"; case 0: return "end"; default: return "unparseable format string"; } } static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return 2; case 'i': case 'I': case 'l': case 'L': return 4; case 'q': case 'Q': return 8; case 'f': return (is_complex ? 8 : 4); case 'd': return (is_complex ? 16 : 8); case 'g': { PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); return 0; } case 'O': case 'P': return sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { switch (ch) { case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(short); case 'i': case 'I': return sizeof(int); case 'l': case 'L': return sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(float) * (is_complex ? 2 : 1); case 'd': return sizeof(double) * (is_complex ? 2 : 1); case 'g': return sizeof(long double) * (is_complex ? 2 : 1); case 'O': case 'P': return sizeof(void*); default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } typedef struct { char c; short x; } __Pyx_st_short; typedef struct { char c; int x; } __Pyx_st_int; typedef struct { char c; long x; } __Pyx_st_long; typedef struct { char c; float x; } __Pyx_st_float; typedef struct { char c; double x; } __Pyx_st_double; typedef struct { char c; long double x; } __Pyx_st_longdouble; typedef struct { char c; void *x; } __Pyx_st_void_p; #ifdef HAVE_LONG_LONG typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_st_float) - sizeof(float); case 'd': return sizeof(__Pyx_st_double) - sizeof(double); case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } /* These are for computing the padding at the end of the struct to align on the first member of the struct. This will probably the same as above, but we don't have any guarantees. 
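   The mechanism is the same char-in-a-struct trick used for the __Pyx_st_ structs above:
   placing a value of type T next to a single char forces the compiler to pad the struct
   to T's alignment, so sizeof(struct) - sizeof(T) recovers that alignment.  Illustrative
   values only, assuming a typical LP64 compiler where double is 8-byte aligned:
   sizeof(__Pyx_pad_double) would be 16, so __Pyx_BufFmt_TypeCharToPadding('d', 0) would
   return 16 - 8 == 8, matching what __Pyx_BufFmt_TypeCharToAlignment('d', 0) computes
   from __Pyx_st_double.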
*/ typedef struct { short x; char c; } __Pyx_pad_short; typedef struct { int x; char c; } __Pyx_pad_int; typedef struct { long x; char c; } __Pyx_pad_long; typedef struct { float x; char c; } __Pyx_pad_float; typedef struct { double x; char c; } __Pyx_pad_double; typedef struct { long double x; char c; } __Pyx_pad_longdouble; typedef struct { void *x; char c; } __Pyx_pad_void_p; #ifdef HAVE_LONG_LONG typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { switch (ch) { case 'c': return 'H'; case 'b': case 'h': case 'i': case 'l': case 'q': case 's': case 'p': return 'I'; case 'B': case 'H': case 'I': case 'L': case 'Q': return 'U'; case 'f': case 'd': case 'g': return (is_complex ? 'C' : 'R'); case 'O': return 'O'; case 'P': return 'P'; default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { if (ctx->head == NULL || ctx->head->field == &ctx->root) { const char* expected; const char* quote; if (ctx->head == NULL) { expected = "end"; quote = ""; } else { expected = ctx->head->field->type->name; quote = "'"; } PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected %s%s%s but got %s", quote, expected, quote, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); } else { __Pyx_StructField* field = ctx->head->field; __Pyx_StructField* parent = (ctx->head - 1)->field; PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), parent->type->name, field->name); } } static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { char group; size_t size, offset, arraysize = 1; if (ctx->enc_type == 0) return 0; if (ctx->head->field->type->arraysize[0]) { int i, ndim = 0; if (ctx->enc_type == 's' || ctx->enc_type == 'p') { ctx->is_valid_array = ctx->head->field->type->ndim == 1; ndim = 1; if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %zu", ctx->head->field->type->arraysize[0], ctx->enc_count); return -1; } } if (!ctx->is_valid_array) { PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", ctx->head->field->type->ndim, ndim); return -1; } for (i = 0; i < ctx->head->field->type->ndim; i++) { arraysize *= ctx->head->field->type->arraysize[i]; } ctx->is_valid_array = 0; ctx->enc_count = 1; } group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); do { __Pyx_StructField* field = ctx->head->field; __Pyx_TypeInfo* type = field->type; if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { size = 
__Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); } else { size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); } if (ctx->enc_packmode == '@') { size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); size_t align_mod_offset; if (align_at == 0) return -1; align_mod_offset = ctx->fmt_offset % align_at; if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; if (ctx->struct_alignment == 0) ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, ctx->is_complex); } if (type->size != size || type->typegroup != group) { if (type->typegroup == 'C' && type->fields != NULL) { size_t parent_offset = ctx->head->parent_offset + field->offset; ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = parent_offset; continue; } if ((type->typegroup == 'H' || group == 'H') && type->size == size) { } else { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } } offset = ctx->head->parent_offset + field->offset; if (ctx->fmt_offset != offset) { PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); return -1; } ctx->fmt_offset += size; if (arraysize) ctx->fmt_offset += (arraysize - 1) * size; --ctx->enc_count; /* Consume from buffer string */ while (1) { if (field == &ctx->root) { ctx->head = NULL; if (ctx->enc_count != 0) { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } break; /* breaks both loops as ctx->enc_count == 0 */ } ctx->head->field = ++field; if (field->type == NULL) { --ctx->head; field = ctx->head->field; continue; } else if (field->type->typegroup == 'S') { size_t parent_offset = ctx->head->parent_offset + field->offset; if (field->type->fields->type == NULL) continue; /* empty struct */ field = field->type->fields; ++ctx->head; ctx->head->field = field; ctx->head->parent_offset = parent_offset; break; } else { break; } } } while (ctx->enc_count); ctx->enc_type = 0; ctx->is_complex = 0; return 0; } static CYTHON_INLINE PyObject * __pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) { const char *ts = *tsp; int i = 0, number; int ndim = ctx->head->field->type->ndim; ; ++ts; if (ctx->new_count != 1) { PyErr_SetString(PyExc_ValueError, "Cannot handle repeated arrays in format string"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; while (*ts && *ts != ')') { if (isspace(*ts)) continue; number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i]) return PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %d", ctx->head->field->type->arraysize[i], number); if (*ts != ',' && *ts != ')') return PyErr_Format(PyExc_ValueError, "Expected a comma in format string, got '%c'", *ts); if (*ts == ',') ts++; i++; } if (i != ndim) return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d", ctx->head->field->type->ndim, i); if (!*ts) { PyErr_SetString(PyExc_ValueError, "Unexpected end of format string, expected ')'"); return NULL; } ctx->is_valid_array = 1; ctx->new_count = 1; *tsp = ++ts; return Py_None; } static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { int got_Z = 0; while (1) { switch(*ts) { case 0: if (ctx->enc_type != 0 && ctx->head == NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; if 
(ctx->head != NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } return ts; case ' ': case 10: case 13: ++ts; break; case '<': if (!__Pyx_IsLittleEndian()) { PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '>': case '!': if (__Pyx_IsLittleEndian()) { PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '=': case '@': case '^': ctx->new_packmode = *ts++; break; case 'T': /* substruct */ { const char* ts_after_sub; size_t i, struct_count = ctx->new_count; size_t struct_alignment = ctx->struct_alignment; ctx->new_count = 1; ++ts; if (*ts != '{') { PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; /* Erase processed last struct element */ ctx->enc_count = 0; ctx->struct_alignment = 0; ++ts; ts_after_sub = ts; for (i = 0; i != struct_count; ++i) { ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); if (!ts_after_sub) return NULL; } ts = ts_after_sub; if (struct_alignment) ctx->struct_alignment = struct_alignment; } break; case '}': /* end of substruct; either repeat or move on */ { size_t alignment = ctx->struct_alignment; ++ts; if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; /* Erase processed last struct element */ if (alignment && ctx->fmt_offset % alignment) { ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); } } return ts; case 'x': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->fmt_offset += ctx->new_count; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->enc_packmode = ctx->new_packmode; ++ts; break; case 'Z': got_Z = 1; ++ts; if (*ts != 'f' && *ts != 'd' && *ts != 'g') { __Pyx_BufFmt_RaiseUnexpectedChar('Z'); return NULL; } /* fall through */ case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': case 'l': case 'L': case 'q': case 'Q': case 'f': case 'd': case 'g': case 'O': case 's': case 'p': if (ctx->enc_type == *ts && got_Z == ctx->is_complex && ctx->enc_packmode == ctx->new_packmode) { ctx->enc_count += ctx->new_count; } else { if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_count = ctx->new_count; ctx->enc_packmode = ctx->new_packmode; ctx->enc_type = *ts; ctx->is_complex = got_Z; } ++ts; ctx->new_count = 1; got_Z = 0; break; case ':': ++ts; while(*ts != ':') ++ts; ++ts; break; case '(': if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; break; default: { int number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; ctx->new_count = (size_t)number; } } } } static CYTHON_INLINE void __Pyx_ZeroBuffer(Py_buffer* buf) { buf->buf = NULL; buf->obj = NULL; buf->strides = __Pyx_zeros; buf->shape = __Pyx_zeros; buf->suboffsets = __Pyx_minusones; } static CYTHON_INLINE int __Pyx_GetBufferAndValidate( Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack) { if (obj == Py_None || obj == NULL) { __Pyx_ZeroBuffer(buf); return 0; } buf->buf = NULL; if (__Pyx_GetBuffer(obj, buf, flags) == -1) goto fail; if (buf->ndim != nd) { PyErr_Format(PyExc_ValueError, "Buffer has wrong number of dimensions (expected %d, got %d)", nd, buf->ndim); goto fail; } if (!cast) { __Pyx_BufFmt_Context ctx; __Pyx_BufFmt_Init(&ctx, stack, dtype); if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; } if 
((unsigned)buf->itemsize != dtype->size) { PyErr_Format(PyExc_ValueError, "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "d byte%s) does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "d byte%s)", buf->itemsize, (buf->itemsize > 1) ? "s" : "", dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? "s" : ""); goto fail; } if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones; return 0; fail:; __Pyx_ZeroBuffer(buf); return -1; } static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) { if (info->buf == NULL) return; if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL; __Pyx_ReleaseBuffer(info); } static void __Pyx_RaiseBufferFallbackError(void) { PyErr_Format(PyExc_ValueError, "Buffer acquisition failed on assignment; and then reacquiring the old buffer failed too!"); } static void __Pyx_RaiseBufferIndexError(int axis) { PyErr_Format(PyExc_IndexError, "Out of bounds on buffer access (axis %d)", axis); } static CYTHON_INLINE int __Pyx_mod_int(int a, int b) { int r = a % b; r += ((r != 0) & ((r ^ b) < 0)) * b; return r; } static int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, const char *name, int exact) { if (!type) { PyErr_Format(PyExc_SystemError, "Missing type object"); return 0; } if (none_allowed && obj == Py_None) return 1; else if (exact) { if (Py_TYPE(obj) == type) return 1; } else { if (PyObject_TypeCheck(obj, type)) return 1; } PyErr_Format(PyExc_TypeError, "Argument '%s' has incorrect type (expected %s, got %s)", name, type->tp_name, Py_TYPE(obj)->tp_name); return 0; } static CYTHON_INLINE long __Pyx_mod_long(long a, long b) { long r = a % b; r += ((r != 0) & ((r ^ b) < 0)) * b; return r; } static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); } static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more than %" CYTHON_FORMAT_SSIZE_T "d value%s to unpack", index, (index == 1) ? 
"" : "s"); } static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); } static CYTHON_INLINE int __Pyx_IterFinish(void) { #if CYTHON_COMPILING_IN_CPYTHON PyThreadState *tstate = PyThreadState_GET(); PyObject* exc_type = tstate->curexc_type; if (unlikely(exc_type)) { if (likely(exc_type == PyExc_StopIteration) || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration)) { PyObject *exc_value, *exc_tb; exc_value = tstate->curexc_value; exc_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; Py_DECREF(exc_type); Py_XDECREF(exc_value); Py_XDECREF(exc_tb); return 0; } else { return -1; } } return 0; #else if (unlikely(PyErr_Occurred())) { if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) { PyErr_Clear(); return 0; } else { return -1; } } return 0; #endif } static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected) { if (unlikely(retval)) { Py_DECREF(retval); __Pyx_RaiseTooManyValuesError(expected); return -1; } else { return __Pyx_IterFinish(); } return 0; } #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { CYTHON_UNUSED PyObject *getbuffer_cobj; #if PY_VERSION_HEX >= 0x02060000 if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); #endif if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) return __pyx_pw_5numpy_7ndarray_1__getbuffer__(obj, view, flags); #if PY_VERSION_HEX < 0x02060000 if (obj->ob_type->tp_dict && (getbuffer_cobj = PyMapping_GetItemString(obj->ob_type->tp_dict, "__pyx_getbuffer"))) { getbufferproc func; #if PY_VERSION_HEX >= 0x02070000 && !(PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION == 0) func = (getbufferproc) PyCapsule_GetPointer(getbuffer_cobj, "getbuffer(obj, view, flags)"); #else func = (getbufferproc) PyCObject_AsVoidPtr(getbuffer_cobj); #endif Py_DECREF(getbuffer_cobj); if (!func) goto fail; return func(obj, view, flags); } else { PyErr_Clear(); } #endif PyErr_Format(PyExc_TypeError, "'%100s' does not have the buffer interface", Py_TYPE(obj)->tp_name); #if PY_VERSION_HEX < 0x02060000 fail: #endif return -1; } static void __Pyx_ReleaseBuffer(Py_buffer *view) { PyObject *obj = view->obj; CYTHON_UNUSED PyObject *releasebuffer_cobj; if (!obj) return; #if PY_VERSION_HEX >= 0x02060000 if (PyObject_CheckBuffer(obj)) { PyBuffer_Release(view); return; } #endif if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) { __pyx_pw_5numpy_7ndarray_3__releasebuffer__(obj, view); return; } #if PY_VERSION_HEX < 0x02060000 if (obj->ob_type->tp_dict && (releasebuffer_cobj = PyMapping_GetItemString(obj->ob_type->tp_dict, "__pyx_releasebuffer"))) { releasebufferproc func; #if PY_VERSION_HEX >= 0x02070000 && !(PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION == 0) func = (releasebufferproc) PyCapsule_GetPointer(releasebuffer_cobj, "releasebuffer(obj, view)"); #else func = (releasebufferproc) PyCObject_AsVoidPtr(releasebuffer_cobj); #endif Py_DECREF(releasebuffer_cobj); if (!func) goto fail; func(obj, view); return; } else { PyErr_Clear(); } #endif goto nofail; #if PY_VERSION_HEX < 0x02060000 fail: #endif PyErr_WriteUnraisable(obj); nofail: Py_DECREF(obj); view->obj = NULL; } #endif /* PY_MAJOR_VERSION < 3 */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level) { PyObject *py_import = 0; PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; py_import = 
__Pyx_GetAttrString(__pyx_b, "__import__"); if (!py_import) goto bad; if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; #if PY_VERSION_HEX >= 0x02050000 { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if (strchr(__Pyx_MODULE_NAME, '.')) { /* try package relative import first */ PyObject *py_level = PyInt_FromLong(1); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; /* try absolute import on failure */ } #endif if (!module) { PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); } } #else if (level>0) { PyErr_SetString(PyExc_RuntimeError, "Relative import is not supported for Python <=2.4."); goto bad; } module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, NULL); #endif bad: Py_XDECREF(empty_list); Py_XDECREF(py_import); Py_XDECREF(empty_dict); return module; } static CYTHON_INLINE void __Pyx_RaiseImportError(PyObject *name) { #if PY_MAJOR_VERSION < 3 PyErr_Format(PyExc_ImportError, "cannot import name %.230s", PyString_AsString(name)); #else PyErr_Format(PyExc_ImportError, "cannot import name %S", name); #endif } static CYTHON_INLINE Py_intptr_t __Pyx_PyInt_from_py_Py_intptr_t(PyObject* x) { const Py_intptr_t neg_one = (Py_intptr_t)-1, const_zero = (Py_intptr_t)0; const int is_unsigned = const_zero < neg_one; if (sizeof(Py_intptr_t) == sizeof(char)) { if (is_unsigned) return (Py_intptr_t)__Pyx_PyInt_AsUnsignedChar(x); else return (Py_intptr_t)__Pyx_PyInt_AsSignedChar(x); } else if (sizeof(Py_intptr_t) == sizeof(short)) { if (is_unsigned) return (Py_intptr_t)__Pyx_PyInt_AsUnsignedShort(x); else return (Py_intptr_t)__Pyx_PyInt_AsSignedShort(x); } else if (sizeof(Py_intptr_t) == sizeof(int)) { if (is_unsigned) return (Py_intptr_t)__Pyx_PyInt_AsUnsignedInt(x); else return (Py_intptr_t)__Pyx_PyInt_AsSignedInt(x); } else if (sizeof(Py_intptr_t) == sizeof(long)) { if (is_unsigned) return (Py_intptr_t)__Pyx_PyInt_AsUnsignedLong(x); else return (Py_intptr_t)__Pyx_PyInt_AsSignedLong(x); } else if (sizeof(Py_intptr_t) == sizeof(PY_LONG_LONG)) { if (is_unsigned) return (Py_intptr_t)__Pyx_PyInt_AsUnsignedLongLong(x); else return (Py_intptr_t)__Pyx_PyInt_AsSignedLongLong(x); } else { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else Py_intptr_t val; PyObject *v = __Pyx_PyNumber_Int(x); #if PY_VERSION_HEX < 0x03000000 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (Py_intptr_t)-1; } } static CYTHON_INLINE PyObject *__Pyx_PyInt_to_py_Py_intptr_t(Py_intptr_t val) { const Py_intptr_t neg_one = (Py_intptr_t)-1, const_zero = (Py_intptr_t)0; const int is_unsigned = const_zero < neg_one; if 
((sizeof(Py_intptr_t) == sizeof(char)) || (sizeof(Py_intptr_t) == sizeof(short))) { return PyInt_FromLong((long)val); } else if ((sizeof(Py_intptr_t) == sizeof(int)) || (sizeof(Py_intptr_t) == sizeof(long))) { if (is_unsigned) return PyLong_FromUnsignedLong((unsigned long)val); else return PyInt_FromLong((long)val); } else if (sizeof(Py_intptr_t) == sizeof(PY_LONG_LONG)) { if (is_unsigned) return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG)val); else return PyLong_FromLongLong((PY_LONG_LONG)val); } else { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; return _PyLong_FromByteArray(bytes, sizeof(Py_intptr_t), little, !is_unsigned); } } #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return ::std::complex< float >(x, y); } #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return x + y*(__pyx_t_float_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { __pyx_t_float_complex z; z.real = x; z.imag = y; return z; } #endif #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex a, __pyx_t_float_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float denom = b.real * b.real + b.imag * b.imag; z.real = (a.real * b.real + a.imag * b.imag) / denom; z.imag = (a.imag * b.real - a.real * b.imag) / denom; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrtf(z.real*z.real + z.imag*z.imag); #else return hypotf(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { float denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(a, a); case 3: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(z, a); case 4: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(z, z); } 
} if (a.imag == 0) { if (a.real == 0) { return a; } r = a.real; theta = 0; } else { r = __Pyx_c_absf(a); theta = atan2f(a.imag, a.real); } lnr = logf(r); z_r = expf(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cosf(z_theta); z.imag = z_r * sinf(z_theta); return z; } #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return ::std::complex< double >(x, y); } #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return x + y*(__pyx_t_double_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { __pyx_t_double_complex z; z.real = x; z.imag = y; return z; } #endif #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex a, __pyx_t_double_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double denom = b.real * b.real + b.imag * b.imag; z.real = (a.real * b.real + a.imag * b.imag) / denom; z.imag = (a.imag * b.real - a.real * b.imag) / denom; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrt(z.real*z.real + z.imag*z.imag); #else return hypot(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { double denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(a, a); case 3: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(z, a); case 4: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } r = a.real; theta = 0; } else { r = __Pyx_c_abs(a); theta = atan2(a.imag, a.real); } lnr = log(r); z_r = exp(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cos(z_theta); z.imag = z_r * sin(z_theta); return z; } #endif #endif static CYTHON_INLINE unsigned 
char __Pyx_PyInt_AsUnsignedChar(PyObject* x) { const unsigned char neg_one = (unsigned char)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(unsigned char) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(unsigned char)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to unsigned char" : "value too large to convert to unsigned char"); } return (unsigned char)-1; } return (unsigned char)val; } return (unsigned char)__Pyx_PyInt_AsUnsignedLong(x); } static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject* x) { const unsigned short neg_one = (unsigned short)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(unsigned short) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(unsigned short)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to unsigned short" : "value too large to convert to unsigned short"); } return (unsigned short)-1; } return (unsigned short)val; } return (unsigned short)__Pyx_PyInt_AsUnsignedLong(x); } static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject* x) { const unsigned int neg_one = (unsigned int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(unsigned int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(unsigned int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to unsigned int" : "value too large to convert to unsigned int"); } return (unsigned int)-1; } return (unsigned int)val; } return (unsigned int)__Pyx_PyInt_AsUnsignedLong(x); } static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject* x) { const char neg_one = (char)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(char) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(char)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to char" : "value too large to convert to char"); } return (char)-1; } return (char)val; } return (char)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject* x) { const short neg_one = (short)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(short) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(short)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to short" : "value too large to convert to short"); } return (short)-1; } return (short)val; } return (short)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject* x) { const int neg_one = (int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? 
"can't convert negative value to int" : "value too large to convert to int"); } return (int)-1; } return (int)val; } return (int)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject* x) { const signed char neg_one = (signed char)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(signed char) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(signed char)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to signed char" : "value too large to convert to signed char"); } return (signed char)-1; } return (signed char)val; } return (signed char)__Pyx_PyInt_AsSignedLong(x); } static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject* x) { const signed short neg_one = (signed short)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(signed short) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(signed short)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to signed short" : "value too large to convert to signed short"); } return (signed short)-1; } return (signed short)val; } return (signed short)__Pyx_PyInt_AsSignedLong(x); } static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject* x) { const signed int neg_one = (signed int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(signed int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(signed int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to signed int" : "value too large to convert to signed int"); } return (signed int)-1; } return (signed int)val; } return (signed int)__Pyx_PyInt_AsSignedLong(x); } static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject* x) { const int neg_one = (int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? 
"can't convert negative value to int" : "value too large to convert to int"); } return (int)-1; } return (int)val; } return (int)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject* x) { const unsigned long neg_one = (unsigned long)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned long"); return (unsigned long)-1; } return (unsigned long)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned long"); return (unsigned long)-1; } return (unsigned long)PyLong_AsUnsignedLong(x); } else { return (unsigned long)PyLong_AsLong(x); } } else { unsigned long val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (unsigned long)-1; val = __Pyx_PyInt_AsUnsignedLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject* x) { const unsigned PY_LONG_LONG neg_one = (unsigned PY_LONG_LONG)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned PY_LONG_LONG"); return (unsigned PY_LONG_LONG)-1; } return (unsigned PY_LONG_LONG)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned PY_LONG_LONG"); return (unsigned PY_LONG_LONG)-1; } return (unsigned PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); } else { return (unsigned PY_LONG_LONG)PyLong_AsLongLong(x); } } else { unsigned PY_LONG_LONG val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (unsigned PY_LONG_LONG)-1; val = __Pyx_PyInt_AsUnsignedLongLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject* x) { const long neg_one = (long)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long)-1; } return (long)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long)-1; } return (long)PyLong_AsUnsignedLong(x); } else { return (long)PyLong_AsLong(x); } } else { long val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (long)-1; val = __Pyx_PyInt_AsLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject* x) { const PY_LONG_LONG neg_one = (PY_LONG_LONG)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to PY_LONG_LONG"); return (PY_LONG_LONG)-1; } return (PY_LONG_LONG)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { 
PyErr_SetString(PyExc_OverflowError, "can't convert negative value to PY_LONG_LONG"); return (PY_LONG_LONG)-1; } return (PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); } else { return (PY_LONG_LONG)PyLong_AsLongLong(x); } } else { PY_LONG_LONG val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (PY_LONG_LONG)-1; val = __Pyx_PyInt_AsLongLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject* x) { const signed long neg_one = (signed long)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed long"); return (signed long)-1; } return (signed long)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed long"); return (signed long)-1; } return (signed long)PyLong_AsUnsignedLong(x); } else { return (signed long)PyLong_AsLong(x); } } else { signed long val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (signed long)-1; val = __Pyx_PyInt_AsSignedLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject* x) { const signed PY_LONG_LONG neg_one = (signed PY_LONG_LONG)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed PY_LONG_LONG"); return (signed PY_LONG_LONG)-1; } return (signed PY_LONG_LONG)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed PY_LONG_LONG"); return (signed PY_LONG_LONG)-1; } return (signed PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); } else { return (signed PY_LONG_LONG)PyLong_AsLongLong(x); } } else { signed PY_LONG_LONG val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (signed PY_LONG_LONG)-1; val = __Pyx_PyInt_AsSignedLongLong(tmp); Py_DECREF(tmp); return val; } } static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); #if PY_VERSION_HEX < 0x02050000 return PyErr_Warn(NULL, message); #else return PyErr_WarnEx(NULL, message, 1); #endif } return 0; } #ifndef __PYX_HAVE_RT_ImportModule #define __PYX_HAVE_RT_ImportModule static PyObject *__Pyx_ImportModule(const char *name) { PyObject *py_name = 0; PyObject *py_module = 0; py_name = __Pyx_PyIdentifier_FromString(name); if (!py_name) goto bad; py_module = PyImport_Import(py_name); Py_DECREF(py_name); return py_module; bad: Py_XDECREF(py_name); return 0; } #endif #ifndef __PYX_HAVE_RT_ImportType #define __PYX_HAVE_RT_ImportType static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict) { PyObject *py_module = 0; PyObject *result = 0; PyObject *py_name = 0; char warning[200]; 
py_module = __Pyx_ImportModule(module_name); if (!py_module) goto bad; py_name = __Pyx_PyIdentifier_FromString(class_name); if (!py_name) goto bad; result = PyObject_GetAttr(py_module, py_name); Py_DECREF(py_name); py_name = 0; Py_DECREF(py_module); py_module = 0; if (!result) goto bad; if (!PyType_Check(result)) { PyErr_Format(PyExc_TypeError, "%s.%s is not a type object", module_name, class_name); goto bad; } if (!strict && (size_t)((PyTypeObject *)result)->tp_basicsize > size) { PyOS_snprintf(warning, sizeof(warning), "%s.%s size changed, may indicate binary incompatibility", module_name, class_name); #if PY_VERSION_HEX < 0x02050000 if (PyErr_Warn(NULL, warning) < 0) goto bad; #else if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; #endif } else if ((size_t)((PyTypeObject *)result)->tp_basicsize != size) { PyErr_Format(PyExc_ValueError, "%s.%s has the wrong size, try recompiling", module_name, class_name); goto bad; } return (PyTypeObject *)result; bad: Py_XDECREF(py_module); Py_XDECREF(result); return NULL; } #endif static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = (start + end) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, new_max*sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( 
const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, /*int argcount,*/ 0, /*int kwonlyargcount,*/ 0, /*int nlocals,*/ 0, /*int stacksize,*/ 0, /*int flags,*/ __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, /*int firstlineno,*/ __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_globals = 0; PyFrameObject *py_frame = 0; py_code = __pyx_find_code_object(c_line ? c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? c_line : py_line, py_code); } py_globals = PyModule_GetDict(__pyx_m); if (!py_globals) goto bad; py_frame = PyFrame_New( PyThreadState_GET(), /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ py_globals, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; py_frame->f_lineno = py_line; PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else /* Python 3+ has unicode identifiers */ if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; ++t; } return 0; } /* Type Conversion Functions */ static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x) { PyNumberMethods *m; const char *name = NULL; PyObject *res = NULL; #if PY_VERSION_HEX < 0x03000000 if (PyInt_Check(x) || PyLong_Check(x)) #else if (PyLong_Check(x)) #endif return Py_INCREF(x), x; m = Py_TYPE(x)->tp_as_number; #if PY_VERSION_HEX < 0x03000000 if (m && m->nb_int) { name = "int"; res = PyNumber_Int(x); } else if (m && m->nb_long) { name = "long"; res = PyNumber_Long(x); } #else if (m && m->nb_int) { name = 
"int"; res = PyNumber_Long(x); } #endif if (res) { #if PY_VERSION_HEX < 0x03000000 if (!PyInt_Check(res) && !PyLong_Check(res)) { #else if (!PyLong_Check(res)) { #endif PyErr_Format(PyExc_TypeError, "__%s__ returned non-%s (type %.200s)", name, name, Py_TYPE(res)->tp_name); Py_DECREF(res); return NULL; } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject* x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { #if PY_VERSION_HEX < 0x02050000 if (ival <= LONG_MAX) return PyInt_FromLong((long)ival); else { unsigned char *bytes = (unsigned char *) &ival; int one = 1; int little = (int)*(unsigned char*)&one; return _PyLong_FromByteArray(bytes, sizeof(size_t), little, 0); } #else return PyInt_FromSize_t(ival); #endif } static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject* x) { unsigned PY_LONG_LONG val = __Pyx_PyInt_AsUnsignedLongLong(x); if (unlikely(val == (unsigned PY_LONG_LONG)-1 && PyErr_Occurred())) { return (size_t)-1; } else if (unlikely(val != (unsigned PY_LONG_LONG)(size_t)val)) { PyErr_SetString(PyExc_OverflowError, "value too large to convert to size_t"); return (size_t)-1; } return (size_t)val; } #endif /* Py_PYTHON_H */ nipy-0.3.0/nipy/algorithms/statistics/intvol.pyx000066400000000000000000001102471210344137400220740ustar00rootroot00000000000000""" The estimators for the intrinsic volumes appearing in this module were partially supported by NSF grant DMS-0405970. Taylor, J.E. & Worsley, K.J. (2007). "Detecting sparse signal in random fields, with an application to brain mapping." Journal of the American Statistical Association, 102(479):913-928. """ cimport cython import numpy as np cimport numpy as np from scipy.sparse import dok_matrix # Array helper from nipy.utils.arrays import strides_from # local imports from utils import cube_with_strides_center, join_complexes cdef double PI = np.pi cdef extern from "math.h" nogil: double floor(double x) double sqrt(double x) double fabs(double x) double log2(double x) double acos(double x) bint isnan(double x) cpdef double mu3_tet(double D00, double D01, double D02, double D03, double D11, double D12, double D13, double D22, double D23, double D33) nogil: """ Compute the 3rd intrinsic volume of a tetrahedron. 3rd intrinsic volume (just volume in this case) of a tetrahedron with coordinates implied by dot products below. Parameters ---------- D00 : float If ``cv0`` is a 3-vector of coordinates for the first vertex, `D00` is ``cv0.dot(cv0)`` D01 : float ``cv0.dot(cv1)`` where ``cv1`` is the coordinates for the second vertex. D02 : float ``cv0.dot(cv2)`` D03 : float ``cv0.dot(cv3)`` D11 : float ``cv1.dot(cv1)`` D12 : float ``cv1.dot(cv2)`` D13 : float ``cv1.dot(cv3)`` D22 : float ``cv2.dot(cv2)`` D23 : float ``cv2.dot(cv2)`` D33 : float ``cv3.dot(cv3)`` Returns ------- mu3 : float volume of tetrahedron """ cdef double C00, C01, C02, C11, C12, C22, v2 C00 = D00 - 2*D03 + D33 C01 = D01 - D13 - D03 + D33 C02 = D02 - D23 - D03 + D33 C11 = D11 - 2*D13 + D33 C12 = D12 - D13 - D23 + D33 C22 = D22 - 2*D23 + D33 v2 = (C00 * (C11 * C22 - C12 * C12) - C01 * (C01 * C22 - C02 * C12) + C02 * (C01 * C12 - C11 * C02)) # Rounding errors near 0 cause NaNs if v2 <= 0: return 0 return sqrt(v2) / 6. 
cpdef double mu2_tet(double D00, double D01, double D02, double D03, double D11, double D12, double D13, double D22, double D23, double D33) nogil: """ Compute the 2nd intrinsic volume of tetrahedron 2nd intrinsic volume (half the surface area) of a tetrahedron with coordinates implied by dot products below. Parameters ---------- D00 : float If ``cv0`` is a 3-vector of coordinates for the first vertex, `D00` is ``cv0.dot(cv0)`` D01 : float ``cv0.dot(cv1)`` where ``cv1`` is the coordinates for the second vertex. D02 : float ``cv0.dot(cv2)`` D03 : float ``cv0.dot(cv3)`` D11 : float ``cv1.dot(cv1)`` D12 : float ``cv1.dot(cv2)`` D13 : float ``cv1.dot(cv3)`` D22 : float ``cv2.dot(cv2)`` D23 : float ``cv2.dot(cv2)`` D33 : float ``cv3.dot(cv3)`` Returns ------- mu2 : float Half tetrahedron surface area """ cdef double mu = 0 mu += mu2_tri(D00, D01, D02, D11, D12, D22) mu += mu2_tri(D00, D02, D03, D22, D23, D33) mu += mu2_tri(D11, D12, D13, D22, D23, D33) mu += mu2_tri(D00, D01, D03, D11, D13, D33) return mu * 0.5 cpdef double mu1_tet(double D00, double D01, double D02, double D03, double D11, double D12, double D13, double D22, double D23, double D33) nogil: """ Return 3rd intrinsic volume of tetrahedron Compute the 3rd intrinsic volume (sum of external angles * edge lengths) of a tetrahedron for which the input arguments represent the coordinate dot products of the vertices. Parameters ---------- D00 : float If ``cv0`` is a 3-vector of coordinates for the first vertex, `D00` is ``cv0.dot(cv0)`` D01 : float ``cv0.dot(cv1)`` where ``cv1`` is the coordinates for the second vertex. D02 : float ``cv0.dot(cv2)`` D03 : float ``cv0.dot(cv3)`` D11 : float ``cv1.dot(cv1)`` D12 : float ``cv1.dot(cv2)`` D13 : float ``cv1.dot(cv3)`` D22 : float ``cv2.dot(cv2)`` D23 : float ``cv2.dot(cv2)`` D33 : float ``cv3.dot(cv3)`` Returns ------- mu1 : float 3rd intrinsic volume of tetrahedron """ cdef double mu mu = 0 mu += _mu1_tetface(D00, D01, D11, D02, D03, D12, D13, D22, D23, D33) mu += _mu1_tetface(D00, D02, D22, D01, D03, D12, D23, D11, D13, D33) mu += _mu1_tetface(D00, D03, D33, D01, D02, D13, D23, D11, D12, D22) mu += _mu1_tetface(D11, D12, D22, D01, D13, D02, D23, D00, D03, D33) mu += _mu1_tetface(D11, D13, D33, D01, D12, D03, D23, D00, D02, D22) mu += _mu1_tetface(D22, D23, D33, D02, D12, D03, D13, D00, D01, D11) return mu cdef inline double limited_acos(double val) nogil: """ Check for -1 <= val <= 1 before returning acos(val) Avoids nan values from small rounding errors """ if val >= 1: return 0 elif val <= -1: return PI return acos(val) @cython.cdivision(True) cpdef double _mu1_tetface(double Ds0s0, double Ds0s1, double Ds1s1, double Ds0t0, double Ds0t1, double Ds1t0, double Ds1t1, double Dt0t0, double Dt0t1, double Dt1t1) nogil: cdef double A00, A01, A02, A11, A12, A22, np_len, a, acosval cdef double length, norm_proj0, norm_proj1, inner_prod_proj A00 = Ds1s1 - 2 * Ds0s1 + Ds0s0 # all norms divided by this value, leading to NaN value for output, for # values <= 0 if A00 <= 0: return 0 A11 = Dt0t0 - 2 * Ds0t0 + Ds0s0 A22 = Dt1t1 - 2 * Ds0t1 + Ds0s0 A01 = Ds1t0 - Ds0t0 - Ds0s1 + Ds0s0 A02 = Ds1t1 - Ds0t1 - Ds0s1 + Ds0s0 A12 = Dt0t1 - Ds0t0 - Ds0t1 + Ds0s0 length = sqrt(A00) norm_proj0 = A11 - A01 * A01 / A00 norm_proj1 = A22 - A02 * A02 / A00 inner_prod_proj = A12 - A01 * A02 / A00 np_len = norm_proj0 * norm_proj1 if np_len <= 0: # would otherwise lead to NaN return value return 0 # hedge for small rounding errors above 1 and below -1 acosval = limited_acos(inner_prod_proj / sqrt(np_len)) a = (PI - acosval) 
* length / (2 * PI) return a cpdef double mu2_tri(double D00, double D01, double D02, double D11, double D12, double D22) nogil: """ Compute the 2nd intrinsic volume of triangle 2nd intrinsic volume (just area in this case) of a triangle with coordinates implied by the dot products below. Parameters ---------- D00 : float If ``cv0`` is a 3-vector of coordinates for the first vertex, `D00` is ``cv0.dot(cv0)`` D01 : float ``cv0.dot(cv1)`` where ``cv1`` is the coordinates for the second vertex. D02 : float ``cv0.dot(cv2)`` D11 : float ``cv1.dot(cv1)`` D12 : float ``cv1.dot(cv2)`` D22 : float ``cv2.dot(cv2)`` Returns ------- mu2 : float area of triangle """ cdef double C00, C01, C11, L C00 = D11 - 2*D01 + D00 C01 = D12 - D01 - D02 + D00 C11 = D22 - 2*D02 + D00 L = C00 * C11 - C01 * C01 # Negative area appeared to result from floating point errors on PPC if L < 0: return 0.0 return sqrt(L) * 0.5 cpdef double mu1_tri(double D00, double D01, double D02, double D11, double D12, double D22) nogil: """ Compute the 1st intrinsic volume of triangle 1st intrinsic volume (1/2 the perimeter) of a triangle with coordinates implied by the dot products below. Parameters ---------- D00 : float If ``cv0`` is a 3-vector of coordinates for the first vertex, `D00` is ``cv0.dot(cv0)`` D01 : float ``cv0.dot(cv1)`` where ``cv1`` is the coordinates for the second vertex. D02 : float ``cv0.dot(cv2)`` D11 : float ``cv1.dot(cv1)`` D12 : float ``cv1.dot(cv2)`` D22 : float ``cv2.dot(cv2)`` Returns ------- mu1 : float 1/2 perimeter of triangle """ cdef double mu = 0 mu += mu1_edge(D00, D01, D11) mu += mu1_edge(D00, D02, D22) mu += mu1_edge(D11, D12, D22) return mu * 0.5 cpdef double mu1_edge(double D00, double D01, double D11) nogil: """ Compute the 1st intrinsic volume (length) of line segment Length of a line segment with vertex coordinates implied by dot products below. Parameters ---------- D00 : float If ``cv0`` is a 3-vector of coordinates for the line start, `D00` is ``cv0.dot(cv0)`` D01 : float ``cv0.dot(cv1)`` where ``cv1`` is the coordinates for the line end. D11 : float ``cv1.dot(cv1)`` Returns ------- mu0 : float length of line segment """ return sqrt(D00 - 2*D01 + D11) def EC3d(mask): """ Compute Euler characteristic of region within `mask` Given a 3d `mask`, compute the 0th intrinsic volume (Euler characteristic) of the masked region. The region is broken up into tetrahedra / triangles / edges / vertices, which are included based on whether all voxels in the tetrahedron / triangle / edge / vertex are in the mask or not. Parameters ---------- mask : ndarray((i,j,k), np.int) Binary mask determining whether or not a voxel is in the mask. Returns ------- mu0 : int Euler characteristic Notes ----- The array mask is assumed to be binary. At the time of writing, it is not clear how to get cython to use np.bool arrays. The 3d cubes are triangulated into 6 tetrahedra of equal volume, as described in the reference below. References ---------- Taylor, J.E. & Worsley, K.J. (2007). "Detecting sparse signal in random fields, with an application to brain mapping." Journal of the American Statistical Association, 102(479):913-928. 
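    Examples
    --------
    A small illustrative check: a filled rectangular block of voxels is
    contractible, so its Euler characteristic is 1 (the mask here uses the
    ``np.intp`` dtype to match the typed view used internally).

    >>> import numpy as np
    >>> box = np.zeros((5, 5, 5), dtype=np.intp)
    >>> box[1:4, 1:4, 1:4] = 1
    >>> EC3d(box)
    1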
""" if not set(np.unique(mask)).issubset([0,1]): raise ValueError('mask should be filled with 0/1 ' 'values, but be of type np.int') cdef: # c-level versions of the array np.ndarray[np.intp_t, ndim=3] mask_c # 'flattened' mask (1d array) np.ndarray[np.intp_t, ndim=1] fpmask # d3 and d4 are lists of triangles and tetrahedra # associated to particular voxels in the cuve np.ndarray[np.intp_t, ndim=2] d2 np.ndarray[np.intp_t, ndim=2] d3 np.ndarray[np.intp_t, ndim=2] d4 # scalars np.npy_intp i, j, k, l, s0, s1, s2, ds2, ds3, ds4, index, m, nvox np.npy_intp ss0, ss1, ss2 # strides np.npy_intp v0, v1, v2, v3 # vertices np.npy_intp l0 = 0 mask_c = mask pmask_shape = np.array(mask.shape) + 1 pmask = np.zeros(pmask_shape, np.int) pmask[:-1,:-1,:-1] = mask_c s0, s1, s2 = (pmask.shape[0], pmask.shape[1], pmask.shape[2]) fpmask = pmask.reshape(-1) cdef: np.ndarray[np.intp_t, ndim=1] strides strides = np.array(strides_from(pmask_shape, np.bool), dtype=np.intp) # First do the interior contributions. # We first figure out which vertices, edges, triangles, tetrahedra # are uniquely associated with an interior voxel union = join_complexes(*[cube_with_strides_center((0,0,1), strides), cube_with_strides_center((0,1,0), strides), cube_with_strides_center((0,1,1), strides), cube_with_strides_center((1,0,0), strides), cube_with_strides_center((1,0,1), strides), cube_with_strides_center((1,1,0), strides), cube_with_strides_center((1,1,1), strides)]) c = cube_with_strides_center((0,0,0), strides) d4 = np.array(list(c[4].difference(union[4]))) d3 = np.array(list(c[3].difference(union[3]))) d2 = np.array(list(c[2].difference(union[2]))) ds2 = d2.shape[0] ds3 = d3.shape[0] ds4 = d4.shape[0] ss0 = strides[0] ss1 = strides[1] ss2 = strides[2] nvox = mask.size for i in range(s0-1): for j in range(s1-1): for k in range(s2-1): index = i*ss0+j*ss1+k*ss2 for l in range(ds4): v0 = index + d4[l,0] m = fpmask[v0] if m: v1 = index + d4[l,1] v2 = index + d4[l,2] v3 = index + d4[l,3] m = m * fpmask[v1] * fpmask[v2] * fpmask[v3] l0 = l0 - m for l in range(ds3): v0 = index + d3[l,0] m = fpmask[v0] if m: v1 = index + d3[l,1] v2 = index + d3[l,2] m = m * fpmask[v1] * fpmask[v2] l0 = l0 + m for l in range(ds2): v0 = index + d2[l,0] m = fpmask[v0] if m: v1 = index + d2[l,1] m = m * fpmask[v1] l0 = l0 - m l0 += mask.sum() return l0 def Lips3d(coords, mask): """ Estimated intrinsic volumes within masked region given coordinates Given a 3d `mask` and coordinates `coords`, estimate the intrinsic volumes of the masked region. The region is broken up into tetrahedra / triangles / edges / vertices, which are included based on whether all voxels in the tetrahedron / triangle / edge / vertex are in the mask or not. Parameters ---------- coords : ndarray((N,i,j,k)) Coordinates for the voxels in the mask. ``N`` will often be 3 (for 3 dimensional coordinates, but can be any integer > 0 mask : ndarray((i,j,k), np.int) Binary mask determining whether or not a voxel is in the mask. Returns ------- mu : ndarray Array of intrinsic volumes [mu0, mu1, mu2, mu3], being, respectively: #. Euler characteristic #. 2 * mean caliper diameter #. 0.5 * surface area #. Volume. Notes ----- The array mask is assumed to be binary. At the time of writing, it is not clear how to get cython to use np.bool arrays. The 3d cubes are triangulated into 6 tetrahedra of equal volume, as described in the reference below. References ---------- Taylor, J.E. & Worsley, K.J. (2007). "Detecting sparse signal in random fields, with an application to brain mapping." 
Journal of the American Statistical Association, 102(479):913-928. """ if mask.shape != coords.shape[1:]: raise ValueError('shape of mask does not match coordinates') # if the data can be squeezed, we must use the lower dimensional function mask = np.squeeze(mask) if mask.ndim < 3: value = np.zeros(4) coords = coords.reshape((coords.shape[0],) + mask.shape) if mask.ndim == 2: value[:3] = Lips2d(coords, mask) elif mask.ndim == 1: value[:2] = Lips1d(coords, mask) return value if not set(np.unique(mask)).issubset([0,1]): raise ValueError('mask should be filled with 0/1 ' 'values, but be of type np.int') cdef: # c-level versions of the arrays np.ndarray[np.float_t, ndim=4] coords_c np.ndarray[np.intp_t, ndim=3] mask_c # 'flattened' coords (2d array) np.ndarray[np.float_t, ndim=2] fcoords np.ndarray[np.float_t, ndim=2] D # 'flattened' mask (1d array) np.ndarray[np.intp_t, ndim=1] fmask np.ndarray[np.intp_t, ndim=1] fpmask np.ndarray[np.intp_t, ndim=3] pmask # d3 and d4 are lists of triangles and tetrahedra # associated to particular voxels in the cube np.ndarray[np.intp_t, ndim=2] d4 np.ndarray[np.intp_t, ndim=2] m4 np.ndarray[np.intp_t, ndim=2] d3 np.ndarray[np.intp_t, ndim=2] m3 np.ndarray[np.intp_t, ndim=2] d2 np.ndarray[np.intp_t, ndim=2] m2 np.ndarray[np.intp_t, ndim=1] cvertices # scalars np.npy_intp i, j, k, l, s0, s1, s2, ds4, ds3, ds2 np.npy_intp index, pindex, m, nvox, r, s, rr, ss, mr, ms np.npy_intp ss0, ss1, ss2 # strides np.npy_intp v0, v1, v2, v3 # vertices for mask np.npy_intp w0, w1, w2, w3 # vertices for data double l0, l1, l2, l3 double res coords_c = coords mask_c = mask l0 = 0; l1 = 0; l2 = 0; l3 = 0 pmask_shape = np.array(mask.shape) + 1 pmask = np.zeros(pmask_shape, np.int) pmask[:-1,:-1,:-1] = mask_c s0, s1, s2 = (pmask.shape[0], pmask.shape[1], pmask.shape[2]) fpmask = pmask.reshape(-1) fmask = mask_c.reshape(-1) fcoords = coords_c.reshape((coords_c.shape[0], -1)) # First do the interior contributions. 
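    # (Each unit cube is cut into 6 tetrahedra; a simplex only contributes
    #  when every voxel at its vertices lies in the mask, and the mu*
    #  contributions are accumulated with alternating signs across simplex
    #  dimensions, so that l0..l3 below end up as the intrinsic volumes of
    #  the masked region.)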
# We first figure out which vertices, edges, triangles, tetrahedra # are uniquely associated with an interior voxel # The mask is copied into a larger array, hence it will have different # strides than the data cdef: np.ndarray[np.intp_t, ndim=1] strides np.ndarray[np.intp_t, ndim=1] dstrides strides = np.array(strides_from(pmask_shape, np.bool), dtype=np.intp) dstrides = np.array(strides_from(mask.shape, np.bool), dtype=np.intp) ss0, ss1, ss2 = strides[0], strides[1], strides[2] ss0d, ss1d, ss2d = dstrides[0], dstrides[1], dstrides[2] verts = [] for i in range(2): for j in range(2): for k in range(2): verts.append(ss0d * i + ss1d * j + ss2d * k) cvertices = np.array(sorted(verts), np.intp) union = join_complexes(*[cube_with_strides_center((0,0,1), strides), cube_with_strides_center((0,1,0), strides), cube_with_strides_center((0,1,1), strides), cube_with_strides_center((1,0,0), strides), cube_with_strides_center((1,0,1), strides), cube_with_strides_center((1,1,0), strides), cube_with_strides_center((1,1,1), strides)]) c = cube_with_strides_center((0,0,0), strides) m4 = np.array(list(c[4].difference(union[4]))) m3 = np.array(list(c[3].difference(union[3]))) m2 = np.array(list(c[2].difference(union[2]))) d4 = np.array([[_convert_stride3(v, strides, (4,2,1)) for v in m4[i]] for i in range(m4.shape[0])]) d4 = np.hstack([m4, d4]) ds4 = d4.shape[0] d3 = np.array([[_convert_stride3(v, strides, (4,2,1)) for v in m3[i]] for i in range(m3.shape[0])]) d3 = np.hstack([m3, d3]) ds3 = d3.shape[0] d2 = np.array([[_convert_stride3(v, strides, (4,2,1)) for v in m2[i]] for i in range(m2.shape[0])]) d2 = np.hstack([m2, d2]) ds2 = d2.shape[0] nvox = mask.size D = np.zeros((8,8)) for i in range(s0-1): for j in range(s1-1): for k in range(s2-1): pindex = i*ss0+j*ss1+k*ss2 index = i*ss0d+j*ss1d+k*ss2d for r in range(8): rr = (index+cvertices[r]) % nvox mr = fmask[rr] for s in range(r+1): res = 0 ss = (index+cvertices[s]) % nvox ms = fmask[ss] if mr * ms: for l in range(fcoords.shape[0]): res += fcoords[l,ss] * fcoords[l,rr] D[r,s] = res D[s,r] = res else: D[r,s] = 0 D[s,r] = 0 for l in range(ds4): v0 = pindex + d4[l,0] w0 = d4[l,4] m = fpmask[v0] if m: v1 = pindex + d4[l,1] v2 = pindex + d4[l,2] v3 = pindex + d4[l,3] w1 = d4[l,5] w2 = d4[l,6] w3 = d4[l,7] m = m * fpmask[v1] * fpmask[v2] * fpmask[v3] l3 = l3 + m * mu3_tet(D[w0,w0], D[w0,w1], D[w0,w2], D[w0,w3], D[w1,w1], D[w1,w2], D[w1,w3], D[w2,w2], D[w2,w3], D[w3,w3]) l2 = l2 - m * mu2_tet(D[w0,w0], D[w0,w1], D[w0,w2], D[w0,w3], D[w1,w1], D[w1,w2], D[w1,w3], D[w2,w2], D[w2,w3], D[w3,w3]) l1 = l1 + m * mu1_tet(D[w0,w0], D[w0,w1], D[w0,w2], D[w0,w3], D[w1,w1], D[w1,w2], D[w1,w3], D[w2,w2], D[w2,w3], D[w3,w3]) l0 = l0 - m for l in range(ds3): v0 = pindex + d3[l,0] w0 = d3[l,3] m = fpmask[v0] if m: v1 = pindex + d3[l,1] v2 = pindex + d3[l,2] w1 = d3[l,4] w2 = d3[l,5] m = m * fpmask[v1] * fpmask[v2] l2 = l2 + m * mu2_tri(D[w0,w0], D[w0,w1], D[w0,w2], D[w1,w1], D[w1,w2], D[w2,w2]) l1 = l1 - m * mu1_tri(D[w0,w0], D[w0,w1], D[w0,w2], D[w1,w1], D[w1,w2], D[w2,w2]) l0 = l0 + m for l in range(ds2): v0 = pindex + d2[l,0] w0 = d2[l,2] m = fpmask[v0] if m: v1 = pindex + d2[l,1] w1 = d2[l,3] m = m * fpmask[v1] l1 = l1 + m * mu1_edge(D[w0,w0], D[w0,w1], D[w1,w1]) l0 = l0 - m l0 += mask.sum() return np.array([l0, l1, l2, l3]) def _convert_stride3(v, stride1, stride2): """ Take a voxel, expressed as in index in stride1 and re-express it as an index in stride2 """ v0 = v // stride1[0] v -= v0 * stride1[0] v1 = v // stride1[1] v2 = v - v1 * stride1[1] return v0*stride2[0] + 
v1*stride2[1] + v2*stride2[2] def _convert_stride2(v, stride1, stride2): """ Take a voxel, expressed as in index in stride1 and re-express it as an index in stride2 """ v0 = v // stride1[0] v1 = v - v0 * stride1[0] return v0*stride2[0] + v1*stride2[1] def _convert_stride1(v, stride1, stride2): """ Take a voxel, expressed as in index in stride1 and re-express it as an index in stride2 """ v0 = v // stride1[0] return v0 * stride2[0] def Lips2d(coords, mask): """ Estimate intrinsic volumes for 2d region in `mask` given `coords` Given a 2d `mask` and coordinates `coords`, estimate the intrinsic volumes of the masked region. The region is broken up into triangles / edges / vertices, which are included based on whether all voxels in the triangle / edge / vertex are in the mask or not. Parameters ---------- coords : ndarray((N,i,j,k)) Coordinates for the voxels in the mask. ``N`` will often be 2 (for 2 dimensional coordinates, but can be any integer > 0 mask : ndarray((i,j), np.int) Binary mask determining whether or not a voxel is in the mask. Returns ------- mu : ndarray Array of intrinsic volumes [mu0, mu1, mu2], being, respectively: #. Euler characteristic #. 2 * mean caliper diameter #. Area. Notes ----- The array mask is assumed to be binary. At the time of writing, it is not clear how to get cython to use np.bool arrays. References ---------- Taylor, J.E. & Worsley, K.J. (2007). "Detecting sparse signal in random fields, with an application to brain mapping." Journal of the American Statistical Association, 102(479):913-928. """ if mask.shape != coords.shape[1:]: raise ValueError('shape of mask does not match coordinates') # if the data can be squeezed, we must use the lower dimensional function mask = np.squeeze(mask) if mask.ndim == 1: value = np.zeros(3) coords = coords.reshape((coords.shape[0],) + mask.shape) value[:2] = Lips1d(coords, mask) return value if not set(np.unique(mask)).issubset([0,1]): raise ValueError('mask should be filled with 0/1 ' 'values, but be of type np.int') cdef: # c-level versions of the arrays np.ndarray[np.float_t, ndim=3] coords_c np.ndarray[np.intp_t, ndim=2] mask_c # 'flattened' coords (2d array) np.ndarray[np.float_t, ndim=2] fcoords np.ndarray[np.float_t, ndim=2] D # 'flattened' mask (1d array) np.ndarray[np.intp_t, ndim=1] fmask np.ndarray[np.intp_t, ndim=1] fpmask np.ndarray[np.intp_t, ndim=2] pmask # d2 and d3 are lists of triangles associated to particular voxels in # the square np.ndarray[np.intp_t, ndim=2] d3 np.ndarray[np.intp_t, ndim=2] d2 np.ndarray[np.intp_t, ndim=1] cvertices # scalars np.npy_intp i, j, k, l, r, s, rr, ss, mr, ms, s0, s1 np.npy_intp ds2, ds3, index, m, npix np.npy_intp ss0, ss1, ss0d, ss1d # strides np.npy_intp v0, v1, v2 # vertices double l0, l1, l2 double res coords_c = coords mask_c = mask l0 = 0; l1 = 0; l2 = 0 pmask_shape = np.array(mask.shape) + 1 pmask = np.zeros(pmask_shape, np.int) pmask[:-1,:-1] = mask_c s0, s1 = pmask.shape[0], pmask.shape[1] fpmask = pmask.reshape(-1) fmask = mask_c.reshape(-1) fcoords = coords.reshape((coords.shape[0], -1)) # First do the interior contributions. 
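    # (The 4x4 array D built in the main loop below caches the dot products
    #  between the coordinate vectors at the four corners of each unit
    #  square; mu2_tri, mu1_tri and mu1_edge work directly from these dot
    #  products.)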
# We first figure out which vertices, edges, triangles, tetrahedra # are uniquely associated with an interior voxel # The mask is copied into a larger array, hence it will have different # strides than the data cdef: np.ndarray[np.intp_t, ndim=1] strides np.ndarray[np.intp_t, ndim=1] dstrides strides = np.array(strides_from(pmask_shape, np.bool), dtype=np.intp) dstrides = np.array(strides_from(mask.shape, np.bool), dtype=np.intp) ss0, ss1 = strides[0], strides[1] ss0d, ss1d = dstrides[0], dstrides[1] verts = [] for i in range(2): for j in range(2): verts.append(ss0d * i + ss1d * j) cvertices = np.array(sorted(verts), np.intp) union = join_complexes(*[cube_with_strides_center((0,1), strides), cube_with_strides_center((1,0), strides), cube_with_strides_center((1,1), strides)]) c = cube_with_strides_center((0,0), strides) m3 = np.array(list(c[3].difference(union[3]))) m2 = np.array(list(c[2].difference(union[2]))) d3 = np.array([[_convert_stride2(v, strides, (2,1)) for v in m3[i]] for i in range(m3.shape[0])]) d3 = np.hstack([m3, d3]) ds3 = d3.shape[0] d2 = np.array([[_convert_stride2(v, strides, (2,1)) for v in m2[i]] for i in range(m2.shape[0])]) d2 = np.hstack([m2, d2]) ds2 = d2.shape[0] D = np.zeros((4,4)) npix = mask.size for i in range(s0-1): for j in range(s1-1): pindex = i*ss0+j*ss1 index = i*ss0d+j*ss1d for r in range(4): rr = (index+cvertices[r]) % npix mr = fmask[rr] for s in range(r+1): res = 0 ss = (index+cvertices[s]) % npix ms = fmask[ss] if mr * ms: for l in range(fcoords.shape[0]): res += fcoords[l,ss] * fcoords[l,rr] D[r,s] = res D[s,r] = res else: D[r,s] = 0 D[s,r] = 0 for l in range(ds3): v0 = pindex + d3[l,0] w0 = d3[l,3] m = fpmask[v0] if m: v1 = pindex + d3[l,1] v2 = pindex + d3[l,2] w1 = d3[l,4] w2 = d3[l,5] m = m * fpmask[v1] * fpmask[v2] l2 = l2 + mu2_tri(D[w0,w0], D[w0,w1], D[w0,w2], D[w1,w1], D[w1,w2], D[w2,w2]) * m l1 = l1 - mu1_tri(D[w0,w0], D[w0,w1], D[w0,w2], D[w1,w1], D[w1,w2], D[w2,w2]) * m l0 = l0 + m for l in range(ds2): v0 = pindex + d2[l,0] w0 = d2[l,2] m = fpmask[v0] if m: v1 = pindex + d2[l,1] w1 = d2[l,3] m = m * fpmask[v1] l1 = l1 + m * mu1_edge(D[w0,w0], D[w0,w1], D[w1,w1]) l0 = l0 - m l0 += mask.sum() return np.array([l0,l1,l2]) def EC2d(mask): """ Compute Euler characteristic of 2D region in `mask` Given a 2d `mask`, compute the 0th intrinsic volume (Euler characteristic) of the masked region. The region is broken up into triangles / edges / vertices, which are included based on whether all voxels in the triangle / edge / vertex are in the mask or not. Parameters ---------- mask : ndarray((i,j), np.int) Binary mask determining whether or not a voxel is in the mask. Returns ------- mu0 : int Euler characteristic Notes ----- The array mask is assumed to be binary. At the time of writing, it is not clear how to get cython to use np.bool arrays. References ---------- Taylor, J.E. & Worsley, K.J. (2007). "Detecting sparse signal in random fields, with an application to brain mapping." Journal of the American Statistical Association, 102(479):913-928. 
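    Examples
    --------
    An illustrative check: a one-pixel-wide square ring has one connected
    component and one hole, so its Euler characteristic is 0 (the mask here
    uses the ``np.intp`` dtype to match the typed view used internally).

    >>> import numpy as np
    >>> ring = np.zeros((6, 6), dtype=np.intp)
    >>> ring[1:5, 1:5] = 1
    >>> ring[2:4, 2:4] = 0
    >>> EC2d(ring)
    0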
""" if not set(np.unique(mask)).issubset([0,1]): raise ValueError('mask should be filled with 0/1 ' 'values, but be of type np.int') cdef: # c-level versions of the array np.ndarray[np.intp_t, ndim=2] mask_c # 'flattened' mask (1d array) np.ndarray[np.intp_t, ndim=1] fpmask # d2 and d3 are lists of triangles and tetrahedra # associated to particular voxels in the cuve np.ndarray[np.intp_t, ndim=2] d2 np.ndarray[np.intp_t, ndim=2] d3 # scalars np.npy_intp i, j, k, l, s0, s1, ds2, ds3, index, m np.npy_intp ss0, ss1 # strides np.npy_intp v0, v1 # vertices long l0 = 0 mask_c = mask pmask_shape = np.array(mask.shape) + 1 pmask = np.zeros(pmask_shape, np.int) pmask[:-1,:-1] = mask_c s0, s1 = (pmask.shape[0], pmask.shape[1]) fpmask = pmask.reshape(-1) cdef: np.ndarray[np.intp_t, ndim=1] strides strides = np.array(strides_from(pmask_shape, np.bool), dtype=np.intp) ss0, ss1 = strides[0], strides[1] # First do the interior contributions. # We first figure out which vertices, edges, triangles, tetrahedra # are uniquely associated with an interior voxel union = join_complexes(*[cube_with_strides_center((0,1), strides), cube_with_strides_center((1,0), strides), cube_with_strides_center((1,1), strides)]) c = cube_with_strides_center((0,0), strides) d3 = np.array(list(c[3].difference(union[3]))) d2 = np.array(list(c[2].difference(union[2]))) ds2 = d2.shape[0] ds3 = d3.shape[0] for i in range(s0-1): for j in range(s1-1): index = i*ss0+j*ss1 for l in range(ds3): v0 = index + d3[l,0] m = fpmask[v0] if m and v0: v1 = index + d3[l,1] v2 = index + d3[l,2] m = m * fpmask[v1] * fpmask[v2] l0 = l0 + m for l in range(ds2): v0 = index + d2[l,0] m = fpmask[v0] if m: v1 = index + d2[l,1] m = m * fpmask[v1] l0 = l0 - m l0 += mask.sum() return l0 def Lips1d(np.ndarray[np.float_t, ndim=2] coords, np.ndarray[np.intp_t, ndim=1] mask): """ Estimate intrinsic volumes for 1D region in `mask` given `coords` Given a 1d `mask` and coordinates `coords`, estimate the intrinsic volumes of the masked region. The region is broken up into edges / vertices, which are included based on whether all voxels in the edge / vertex are in the mask or not. Parameters ---------- coords : ndarray((N,i,j,k)) Coordinates for the voxels in the mask. ``N`` will often be 1 (for 1 dimensional coordinates, but can be any integer > 0 mask : ndarray((i,), np.int) Binary mask determining whether or not a voxel is in the mask. Returns ------- mu : ndarray Array of intrinsic volumes [mu0, mu1], being, respectively: #. Euler characteristic #. Line segment length Notes ----- The array mask is assumed to be binary. At the time of writing, it is not clear how to get cython to use np.bool arrays. References ---------- Taylor, J.E. & Worsley, K.J. (2007). "Detecting sparse signal in random fields, with an application to brain mapping." Journal of the American Statistical Association, 102(479):913-928. 
""" if mask.shape[0] != coords.shape[1]: raise ValueError('shape of mask does not match coordinates') if not set(np.unique(mask)).issubset([0,1]): raise ValueError('mask should be filled with 0/1 ' 'values, but be of type np.int') cdef: np.npy_intp i, l, r, s, rr, ss, mr, ms, s0, index, m double l0, l1 double res l0 = 0; l1 = 0 s0 = mask.shape[0] D = np.zeros((2,2)) for i in range(s0): for r in range(2): rr = (i+r) % s0 mr = mask[rr] for s in range(r+1): res = 0 ss = (i+s) % s0 ms = mask[ss] if mr * ms * ((i+r) < s0) * ((i+s) < s0): for l in range(coords.shape[0]): res += coords[l,ss] * coords[l,rr] D[r,s] = res D[s,r] = res else: D[r,s] = 0 D[s,r] = 0 m = mask[i] if m: m = m * (mask[(i+1) % s0] * ((i+1) < s0)) l1 = l1 + m * mu1_edge(D[0,0], D[0,1], D[1,1]) l0 = l0 - m l0 += mask.sum() return np.array([l0,l1]) def EC1d(np.ndarray[np.intp_t, ndim=1] mask): """ Compute Euler characteristic for 1d `mask` Given a 1d mask `mask`, compute the 0th intrinsic volume (Euler characteristic) of the masked region. The region is broken up into edges / vertices, which are included based on whether all voxels in the edge / vertex are in the mask or not. Parameters ---------- mask : ndarray((i,), np.int) Binary mask determining whether or not a voxel is in the mask. Returns ------- mu0 : int Euler characteristic Notes ----- The array mask is assumed to be binary. At the time of writing, it is not clear how to get cython to use np.bool arrays. The 3d cubes are triangulated into 6 tetrahedra of equal volume, as described in the reference below. References ---------- Taylor, J.E. & Worsley, K.J. (2007). "Detecting sparse signal in random fields, with an application to brain mapping." Journal of the American Statistical Association, 102(479):913-928. """ if not set(np.unique(mask)).issubset([0,1]): raise ValueError('mask should be filled with 0/1 ' 'values, but be of type np.int') cdef: np.npy_intp i, m, s0 double l0 = 0 s0 = mask.shape[0] for i in range(s0): m = mask[i] if m: m = m * (mask[(i+1) % s0] * ((i+1) < s0)) l0 = l0 - m l0 += mask.sum() return l0 nipy-0.3.0/nipy/algorithms/statistics/mixed_effects_stat.py000066400000000000000000000304041210344137400242250ustar00rootroot00000000000000""" Module for computation of mixed effects statistics with an EM algorithm. i.e. solves problems of the form y = X beta + e1 + e2, where X and Y are known, e1 and e2 are centered with diagonal covariance. V1 = var(e1) is known, and V2 = var(e2) = lambda identity. the code estimates beta and lambda using an EM algorithm. Likelihood ratio tests can then be used to test the columns of beta. Author: Bertrand Thirion, 2012. 
>>> N, P = 15, 500 >>> V1 = np.random.randn(N, P) ** 2 >>> effects = np.random.randn(P) > 0 >>> Y = generate_data(np.ones(N), effects, .25, V1) >>> T1 = one_sample_ttest(Y, V1, n_iter=5) >>> T1 = [T1[effects == x] for x in np.unique(effects)] >>> T2 = [t_stat(Y)[effects == x] for x in np.unique(effects)] >>> assert np.array([t1.std() < t2.std() for t1, t2 in zip(T1, T2)]).all() """ import numpy as np EPS = 100 * np.finfo(float).eps def generate_data(X, beta, V2, V1): """ Generate a group of individuals from the provided parameters Parameters ---------- X: array of shape (n_samples, n_reg), the design matrix of the model beta: float or array of shape (n_reg, n_tests), the associated effects V2: float or array of shape (n_tests), group variance V1: array of shape(n_samples, n_tests), the individual variances Returns ------- Y: array of shape(n_samples, n_tests) the individual data related to the two-level normal model """ # check that the variances are positive if (V1 < 0).any(): raise ValueError('Variance should be positive') Y = np.random.randn(*V1.shape) Y *= np.sqrt(V2 + V1) if X.ndim == 1: X = X[:, np.newaxis] if np.isscalar(beta): beta = beta * np.ones((X.shape[1], V1.shape[1])) if beta.ndim == 1: beta = beta[np.newaxis] Y += np.dot(X, beta) return Y def check_arrays(Y, V1): """Check that the given data can be used for the models Parameters ---------- Y: array of shape (n_samples, n_tests) or (n_samples) the estimated effects V1: array of shape (n_samples, n_tests) or (n_samples) first-level variance """ if (V1 < 0).any(): raise ValueError("a negative variance has been provided") if np.size(Y) == Y.shape[0]: Y = Y[:, np.newaxis] if np.size(V1) == V1.shape[0]: V1 = V1[:, np.newaxis] if Y.shape != V1.shape: raise ValueError("Y and V1 do not have the same shape") return Y, V1 def t_stat(Y): """ Returns the t stat of the sample on each row of the matrix Parameters ---------- Y, array of shape (n_samples, n_tests) Returns ------- t_variates, array of shape (n_tests) """ return Y.mean(0) / Y.std(0) * np.sqrt(Y.shape[0] - 1) class MixedEffectsModel(object): """Class to handle multiple one-sample mixed effects models """ def __init__(self, X, n_iter=5, verbose=False): """ Set the effects and first-level variance, and initialize related quantities Parameters ---------- X: array of shape(n_samples, n_effects), the design matrix n_iter: int, optional, number of iterations of the EM algorithm verbose: bool, optional, verbosity mode """ self.n_iter = n_iter self.verbose = verbose self.X = X self.pinv_X = np.linalg.pinv(X) def log_like(self, Y, V1): """ Compute the log-likelihood of (Y, V1) under the model Parameters ---------- Y, array of shape (n_samples, n_tests) or (n_samples) the estimated effects V1, array of shape (n_samples, n_tests) or (n_samples) first-level variance Returns ------- logl: array of shape self.n_tests, the log-likelihood of the model """ Y, V1 = check_arrays(Y, V1) tvar = self.V2 + V1 logl = np.sum(((Y - self.Y_) ** 2) / tvar, 0) logl += np.sum(np.log(tvar), 0) logl += np.log(2 * np.pi) * Y.shape[0] logl *= (- 0.5) return logl def predict(self, Y, V1): """Return the log_likelihood of the data.See the log_like method""" return self.log_like(Y, V1) def score(self, Y, V1): """Return the log_likelihood of the data. 
See the log_like method""" return self.log_like(Y, V1) def _one_step(self, Y, V1): """Applies one step of an EM algorithm to estimate self.mean_, self.var Parameters ---------- Y, array of shape (n_samples, n_tests) or (n_samples) the estimated effects V1, array of shape (n_samples, n_tests) or (n_samples) first-level variance """ # E step prec = 1. / (self.V2 + V1) Y_ = prec * (self.V2 * Y + V1 * self.Y_) cvar = V1 * self.V2 * prec # M step self.beta_ = np.dot(self.pinv_X, Y_) self.Y_ = np.dot(self.X, self.beta_) self.V2 = np.mean((Y_ - self.Y_) ** 2, 0) + cvar.mean(0) def fit(self, Y, V1): """ Launches the EM algorithm to estimate self Parameters ---------- Y, array of shape (n_samples, n_tests) or (n_samples) the estimated effects V1, array of shape (n_samples, n_tests) or (n_samples) first-level variance Returns ------- self """ # Basic data checks if self.X.shape[0] != Y.shape[0]: raise ValueError('X and Y must have the same numbers of rows') Y, V1 = check_arrays(Y, V1) self.beta_ = np.dot(self.pinv_X, Y) self.Y_ = np.dot(self.X, self.beta_) self.V2 = np.mean((Y - self.Y_) ** 2, 0) if self.verbose: log_like_init = self.log_like(Y, V1) print('Average log-likelihood: ', log_like_init.mean()) for i in range(self.n_iter): self._one_step(Y, V1) if self.verbose: log_like_ = self.log_like(Y, V1) if (log_like_ < (log_like_init - EPS)).any(): raise ValueError('The log-likelihood cannot decrease') log_like_init = log_like_ print ('Iteration %d, average log-likelihood: %f' % ( i, log_like_.mean())) return self def two_sample_ftest(Y, V1, group, n_iter=5, verbose=False): """Returns the mixed effects t-stat for each row of the X (one sample test) This uses the Formula in Roche et al., NeuroImage 2007 Parameters ---------- Y: array of shape (n_samples, n_tests) the data V1: array of shape (n_samples, n_tests) first-level variance assocated with the data group: array of shape (n_samples) a vector of indicators yielding the samples membership n_iter: int, optional, number of iterations of the EM algorithm verbose: bool, optional, verbosity mode Returns ------- tstat: array of shape (n_tests), statistical values obtained from the likelihood ratio test """ # check that group is correct if group.size != Y.shape[0]: raise ValueError('The number of labels is not the number of samples') if (np.unique(group) != np.array([0, 1])).all(): raise ValueError('group should be composed only of zeros and ones') # create design matrices X = np.vstack((np.ones_like(group), group)).T return mfx_stat(Y, V1, X, 1, n_iter=n_iter, verbose=verbose, return_t=False, return_f=True)[0] def two_sample_ttest(Y, V1, group, n_iter=5, verbose=False): """Returns the mixed effects t-stat for each row of the X (one sample test) This uses the Formula in Roche et al., NeuroImage 2007 Parameters ---------- Y: array of shape (n_samples, n_tests) the data V1: array of shape (n_samples, n_tests) first-level variance assocated with the data group: array of shape (n_samples) a vector of indicators yielding the samples membership n_iter: int, optional, number of iterations of the EM algorithm verbose: bool, optional, verbosity mode Returns ------- tstat: array of shape (n_tests), statistical values obtained from the likelihood ratio test """ X = np.vstack((np.ones_like(group), group)).T return mfx_stat(Y, V1, X, 1, n_iter=n_iter, verbose=verbose, return_t=True)[0] def one_sample_ftest(Y, V1, n_iter=5, verbose=False): """Returns the mixed effects F-stat for each row of the X (one sample test) This uses the Formula in Roche et al., NeuroImage 
2007 Parameters ---------- Y: array of shape (n_samples, n_tests) the data V1: array of shape (n_samples, n_tests) first-level variance ssociated with the data n_iter: int, optional, number of iterations of the EM algorithm verbose: bool, optional, verbosity mode Returns ------- fstat, array of shape (n_tests), statistical values obtained from the likelihood ratio test sign, array of shape (n_tests), sign of the mean for each test (allow for post-hoc signed tests) """ return mfx_stat(Y, V1, np.ones((Y.shape[0], 1)), 0, n_iter=n_iter, verbose=verbose, return_t=False, return_f=True)[0] def one_sample_ttest(Y, V1, n_iter=5, verbose=False): """Returns the mixed effects t-stat for each row of the X (one sample test) This uses the Formula in Roche et al., NeuroImage 2007 Parameters ---------- Y: array of shape (n_samples, n_tests) the observations V1: array of shape (n_samples, n_tests) first-level variance associated with the observations n_iter: int, optional, number of iterations of the EM algorithm verbose: bool, optional, verbosity mode Returns ------- tstat: array of shape (n_tests), statistical values obtained from the likelihood ratio test """ return mfx_stat(Y, V1, np.ones((Y.shape[0], 1)), 0, n_iter=n_iter, verbose=verbose, return_t=True)[0] def mfx_stat(Y, V1, X, column, n_iter=5, return_t=True, return_f=False, return_effect=False, return_var=False, verbose=False): """Run a mixed-effects model test on the column of the design matrix Parameters ---------- Y: array of shape (n_samples, n_tests) the data V1: array of shape (n_samples, n_tests) first-level variance assocated with the data X: array of shape(n_samples, n_regressors) the design matrix of the model column: int, index of the column of X to be tested n_iter: int, optional, number of iterations of the EM algorithm return_t: bool, optional, should one return the t test (True by default) return_f: bool, optional, should one return the F test (False by default) return_effect: bool, optional, should one return the effect estimate (False by default) return_var: bool, optional, should one return the variance estimate (False by default) verbose: bool, optional, verbosity mode Returns ------- (tstat, fstat, effect, var): tuple of arrays of shape (n_tests), those required by the input return booleans """ # check that X/columns are correct column = int(column) if X.shape[0] != Y.shape[0]: raise ValueError('X.shape[0] is not the number of samples') if (column > X.shape[1]): raise ValueError('the column index is more than the number of columns') # create design matrices contrast_mask = 1 - np.eye(X.shape[1])[column] X0 = X * contrast_mask # instantiate the mixed effects models model_0 = MixedEffectsModel(X0, n_iter=n_iter, verbose=verbose).fit(Y, V1) model_1 = MixedEffectsModel(X, n_iter=n_iter, verbose=verbose).fit(Y, V1) # compute the log-likelihood ratio statistic fstat = 2 * (model_1.log_like(Y, V1) - model_0.log_like(Y, V1)) fstat = np.maximum(0, fstat) sign = np.sign(model_1.beta_[column]) output = () if return_t: output += (np.sqrt(fstat) * sign,) if return_f: output += (fstat,) if return_var: output += (model_1.V2,) if return_effect: output += (model_1.beta_[column],) return output nipy-0.3.0/nipy/algorithms/statistics/models/000077500000000000000000000000001210344137400212755ustar00rootroot00000000000000nipy-0.3.0/nipy/algorithms/statistics/models/LICENSE.txt000066400000000000000000000025531210344137400231250ustar00rootroot00000000000000Copyright (C) 2006, Jonathan E. 
Taylor Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. nipy-0.3.0/nipy/algorithms/statistics/models/TODO.txt000066400000000000000000000024751210344137400226130ustar00rootroot00000000000000TODO for scipy.stats.models =========================== In converting the bspline.so from a weave build to a C extension, we found several things that should be fixed or looked into more thoroughly. Hopefully we can dedicate some time to this effort at the Scipy Conf 2008. However, many of these items should be addressed before stats.models goes into a release of scipy. Items ----- * Run pychecker on the stats.models and fix numerous errors. There are import errors, undefined globals, undefined attrs, etc... Running the command below in stats/models produced 140+ errors.:: # Run pychecker on all python modules except __init__.py $ grind "[a-z|_][a-z]*.py" | xargs pychecker * Address the FIXME issues in the code. * Determine and cleanup the public API. Functions/classes used internally should be private (leading underscore). Public functions should be obvious and documented. Packaging should be reviewed and cleaned up. * Update documentation to scipy standards. Especially adding example sections showing how to use the public functions. * Tests! Robust tests are needed! Of the subset of tests we looked at, most only checked attribute setting, not the results of applying the function to data. * Remove code duplication. smoothers.py and bspline.py define SmoothingSpline class. nipy-0.3.0/nipy/algorithms/statistics/models/__init__.py000066400000000000000000000006011210344137400234030ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: # # models - Statistical Models # __docformat__ = 'restructuredtext' from .info import __doc__ from . import model from . import regression from . 
import glm __all__ = filter(lambda s:not s.startswith('_'),dir()) from nipy.testing import Tester test = Tester().test nipy-0.3.0/nipy/algorithms/statistics/models/family/000077500000000000000000000000001210344137400225565ustar00rootroot00000000000000nipy-0.3.0/nipy/algorithms/statistics/models/family/__init__.py000066400000000000000000000007671210344137400247010ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: ''' This module contains the one-parameter exponential families used for fitting GLMs and GAMs. These families are described in P. McCullagh and J. A. Nelder. "Generalized linear models." Monographs on Statistics and Applied Probability. Chapman & Hall, London, 1983. ''' from .family import (Gaussian, Family, Poisson, Gamma, InverseGaussian, Binomial) nipy-0.3.0/nipy/algorithms/statistics/models/family/family.py000066400000000000000000000132551210344137400244170ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import numpy as np from . import links as L from . import varfuncs as V class Family(object): """ A class to model one-parameter exponential families. INPUTS: link -- a Link instance variance -- a variance function (models means as a function of mean) """ valid = [-np.inf, np.inf] tol = 1.0e-05 links = [] def _setlink(self, link): self._link = link if hasattr(self, "links"): if link not in self.links: raise ValueError, 'invalid link for family, should be in %s' % `self.links` def _getlink(self): return self._link link = property(_getlink, _setlink) def __init__(self, link, variance): self.link = link self.variance = variance def weights(self, mu): """ Weights for IRLS step. w = 1 / (link'(mu)**2 * variance(mu)) INPUTS: mu -- mean parameter in exponential family OUTPUTS: w -- weights used in WLS step of GLM/GAM fit """ return 1. / (self.link.deriv(mu)**2 * self.variance(mu)) def deviance(self, Y, mu, scale=1.): """ Deviance of (Y,mu) pair. Deviance is usually defined as the difference DEV = (SUM_i -2 log Likelihood(Y_i,mu_i) + 2 log Likelihood(mu_i,mu_i)) / scale INPUTS: Y -- response variable mu -- mean parameter scale -- optional scale in denominator of deviance OUTPUTS: dev dev -- DEV, as described aboce """ return np.power(self.devresid(Y, mu), 2).sum() / scale def devresid(self, Y, mu): """ The deviance residuals, defined as the residuals in the deviance. Without knowing the link, they default to Pearson residuals resid_P = (Y - mu) * sqrt(weight(mu)) INPUTS: Y -- response variable mu -- mean parameter OUTPUTS: resid resid -- deviance residuals """ return (Y - mu) * np.sqrt(self.weights(mu)) def fitted(self, eta): """ Fitted values based on linear predictors eta. INPUTS: eta -- values of linear predictors, say, X beta in a generalized linear model. OUTPUTS: mu mu -- link.inverse(eta), mean parameter based on eta """ return self.link.inverse(eta) def predict(self, mu): """ Linear predictors based on given mu values. INPUTS: mu -- mean parameter of one-parameter exponential family OUTPUTS: eta eta -- link(mu), linear predictors, based on mean parameters mu """ return self.link(mu) class Poisson(Family): """ Poisson exponential family. 
INPUTS: link -- a Link instance """ links = [L.log, L.identity, L.sqrt] variance = V.mu valid = [0, np.inf] def __init__(self, link=L.log): self.variance = Poisson.variance self.link = link def devresid(self, Y, mu): """ Poisson deviance residual INPUTS: Y -- response variable mu -- mean parameter OUTPUTS: resid resid -- deviance residuals """ return np.sign(Y - mu) * np.sqrt(2 * Y * np.log(Y / mu) - 2 * (Y - mu)) class Gaussian(Family): """ Gaussian exponential family. INPUTS: link -- a Link instance """ links = [L.log, L.identity, L.inverse] variance = V.constant def __init__(self, link=L.identity): self.variance = Gaussian.variance self.link = link def devresid(self, Y, mu, scale=1.): """ Gaussian deviance residual INPUTS: Y -- response variable mu -- mean parameter scale -- optional scale in denominator (after taking sqrt) OUTPUTS: resid resid -- deviance residuals """ return (Y - mu) / np.sqrt(self.variance(mu) * scale) class Gamma(Family): """ Gamma exponential family. INPUTS: link -- a Link instance BUGS: no deviance residuals? """ links = [L.log, L.identity, L.inverse] variance = V.mu_squared def __init__(self, link=L.identity): self.variance = Gamma.variance self.link = link class Binomial(Family): """ Binomial exponential family. INPUTS: link -- a Link instance n -- number of trials for Binomial """ links = [L.logit, L.probit, L.cauchy, L.log, L.cloglog] variance = V.binary def __init__(self, link=L.logit, n=1): self.n = n self.variance = V.Binomial(n=self.n) self.link = link def devresid(self, Y, mu): """ Binomial deviance residual INPUTS: Y -- response variable mu -- mean parameter OUTPUTS: resid resid -- deviance residuals """ mu = self.link.clean(mu) return np.sign(Y - mu) * np.sqrt(-2 * (Y * np.log(mu / self.n) + (self.n - Y) * np.log(1 - mu / self.n))) class InverseGaussian(Family): """ InverseGaussian exponential family. INPUTS: link -- a Link instance n -- number of trials for Binomial """ links = [L.inverse_squared, L.inverse, L.identity, L.log] variance = V.mu_cubed def __init__(self, link=L.identity): self.n = n self.variance = InverseGaussian.variance self.link = link nipy-0.3.0/nipy/algorithms/statistics/models/family/links.py000066400000000000000000000150621210344137400242540ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import numpy as np import scipy.stats class Link(object): """ A generic link function for one-parameter exponential family, with call, inverse and deriv methods. """ def initialize(self, Y): return np.asarray(Y).mean() * np.ones(Y.shape) def __call__(self, p): return NotImplementedError def inverse(self, z): return NotImplementedError def deriv(self, p): return NotImplementedError class Logit(Link): """ The logit transform as a link function: g'(x) = 1 / (x * (1 - x)) g^(-1)(x) = exp(x)/(1 + exp(x)) """ tol = 1.0e-10 def clean(self, p): """ Clip logistic values to range (tol, 1-tol) INPUTS: p -- probabilities OUTPUTS: pclip pclip -- clipped probabilities """ return np.clip(p, Logit.tol, 1. - Logit.tol) def __call__(self, p): """ Logit transform g(p) = log(p / (1 - p)) INPUTS: p -- probabilities OUTPUTS: z z -- logit transform of p """ p = self.clean(p) return np.log(p / (1. - p)) def inverse(self, z): """ Inverse logit transform h(z) = exp(z)/(1+exp(z)) INPUTS: z -- logit transform of p OUTPUTS: p p -- probabilities """ t = np.exp(z) return t / (1. 
+ t) def deriv(self, p): """ Derivative of logit transform g(p) = 1 / (p * (1 - p)) INPUTS: p -- probabilities OUTPUTS: y y -- derivative of logit transform of p """ p = self.clean(p) return 1. / (p * (1 - p)) logit = Logit() class Power(Link): """ The power transform as a link function: g(x) = x**power """ def __init__(self, power=1.): self.power = power def __call__(self, x): """ Power transform g(x) = x**self.power INPUTS: x -- mean parameters OUTPUTS: z z -- power transform of x """ return np.power(x, self.power) def inverse(self, z): """ Inverse of power transform g(x) = x**(1/self.power) INPUTS: z -- linear predictors in GLM OUTPUTS: x x -- mean parameters """ return np.power(z, 1. / self.power) def deriv(self, x): """ Derivative of power transform g(x) = self.power * x**(self.power - 1) INPUTS: x -- mean parameters OUTPUTS: z z -- derivative of power transform of x """ return self.power * np.power(x, self.power - 1) inverse = Power(power=-1.) inverse.__doc__ = """ The inverse transform as a link function: g(x) = 1 / x """ sqrt = Power(power=0.5) sqrt.__doc__ = """ The square-root transform as a link function: g(x) = sqrt(x) """ inverse_squared = Power(power=-2.) inverse_squared.__doc__ = """ The inverse squared transform as a link function: g(x) = 1 / x**2 """ identity = Power(power=1.) identity.__doc__ = """ The identity transform as a link function: g(x) = x """ class Log(Link): """ The log transform as a link function: g(x) = log(x) """ tol = 1.0e-10 def clean(self, x): return np.clip(x, Logit.tol, np.inf) def __call__(self, x, **extra): """ Log transform g(x) = log(x) INPUTS: x -- mean parameters OUTPUTS: z z -- log(x) """ x = self.clean(x) return np.log(x) def inverse(self, z): """ Inverse of log transform g(x) = exp(x) INPUTS: z -- linear predictors in GLM OUTPUTS: x x -- exp(z) """ return np.exp(z) def deriv(self, x): """ Derivative of log transform g(x) = 1/x INPUTS: x -- mean parameters OUTPUTS: z z -- derivative of log transform of x """ x = self.clean(x) return 1. / x log = Log() class CDFLink(Logit): """ The use the CDF of a scipy.stats distribution as a link function: g(x) = dbn.ppf(x) """ def __init__(self, dbn=scipy.stats.norm): self.dbn = dbn def __call__(self, p): """ CDF link g(p) = self.dbn.pdf(p) INPUTS: p -- mean parameters OUTPUTS: z z -- derivative of CDF transform of p """ p = self.clean(p) return self.dbn.ppf(p) def inverse(self, z): """ Derivative of CDF link g(z) = self.dbn.cdf(z) INPUTS: z -- linear predictors in GLM OUTPUTS: p p -- inverse of CDF link of z """ return self.dbn.cdf(z) def deriv(self, p): """ Derivative of CDF link g(p) = 1/self.dbn.pdf(self.dbn.ppf(p)) INPUTS: x -- mean parameters OUTPUTS: z z -- derivative of CDF transform of x """ p = self.clean(p) return 1. 
/ self.dbn.pdf(self(p)) probit = CDFLink() probit.__doc__ = """ The probit (standard normal CDF) transform as a link function: g(x) = scipy.stats.norm.ppf(x) """ cauchy = CDFLink(dbn=scipy.stats.cauchy) cauchy.__doc__ = """ The Cauchy (standard Cauchy CDF) transform as a link function: g(x) = scipy.stats.cauchy.ppf(x) """ class CLogLog(Logit): """ The complementary log-log transform as a link function: g(x) = log(-log(x)) """ def __call__(self, p): """ C-Log-Log transform g(p) = log(-log(p)) INPUTS: p -- mean parameters OUTPUTS: z z -- log(-log(p)) """ p = self.clean(p) return np.log(-np.log(p)) def inverse(self, z): """ Inverse of C-Log-Log transform g(z) = exp(-exp(z)) INPUTS: z -- linear predictor scale OUTPUTS: p p -- mean parameters """ return np.exp(-np.exp(z)) def deriv(self, p): """ Derivatve of C-Log-Log transform g(p) = - 1 / (log(p) * p) INPUTS: p -- mean parameters OUTPUTS: z z -- - 1 / (log(p) * p) """ p = self.clean(p) return -1. / (np.log(p) * p) cloglog = CLogLog() nipy-0.3.0/nipy/algorithms/statistics/models/family/varfuncs.py000066400000000000000000000033351210344137400247630ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: __docformat__ = 'restructuredtext' import numpy as np class VarianceFunction(object): """ Variance function that relates the variance of a random variable to its mean. Defaults to 1. """ def __call__(self, mu): """ Default variance function INPUTS: mu -- mean parameters OUTPUTS: v v -- ones(mu.shape) """ return np.ones(mu.shape, np.float64) constant = VarianceFunction() class Power(object): """ Power variance function: V(mu) = fabs(mu)**power INPUTS: power -- exponent used in power variance function """ def __init__(self, power=1.): self.power = power def __call__(self, mu): """ Power variance function INPUTS: mu -- mean parameters OUTPUTS: v v -- fabs(mu)**self.power """ return np.power(np.fabs(mu), self.power) class Binomial(object): """ Binomial variance function p = mu / n; V(mu) = p * (1 - p) * n INPUTS: n -- number of trials in Binomial """ tol = 1.0e-10 def __init__(self, n=1): self.n = n def clean(self, p): return np.clip(p, Binomial.tol, 1 - Binomial.tol) def __call__(self, mu): """ Binomial variance function INPUTS: mu -- mean parameters OUTPUTS: v v -- mu / self.n * (1 - mu / self.n) * self.n """ p = self.clean(mu / self.n) return p * (1 - p) * self.n mu = Power() mu_squared = Power(power=2) mu_cubed = Power(power=3) binary = Binomial() nipy-0.3.0/nipy/algorithms/statistics/models/glm.py000066400000000000000000000050731210344137400224330ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ General linear models -------------------- """ import numpy as np from . import family from .regression import WLSModel class Model(WLSModel): niter = 10 def __init__(self, design, family=family.Gaussian()): self.family = family super(Model, self).__init__(design, weights=1) def __iter__(self): self.iter = 0 self.dev = np.inf return self def deviance(self, Y=None, results=None, scale=1.): """ Return (unnormalized) log-likelihood for GLM. Note that self.scale is interpreted as a variance in old_model, so we divide the residuals by its sqrt. 
""" if results is None: results = self.results if Y is None: Y = self.Y return self.family.deviance(Y, results.mu) / scale def next(self): results = self.results Y = self.Y self.weights = self.family.weights(results.mu) self.initialize(self.design) Z = results.predicted + self.family.link.deriv(results.mu) *\ (Y - results.mu) newresults = super(Model, self).fit(Z) newresults.Y = Y newresults.mu = self.family.link.inverse(newresults.predicted) self.iter += 1 return newresults def cont(self, tol=1.0e-05): """ Continue iterating, or has convergence been obtained? """ if self.iter >= Model.niter: return False curdev = self.deviance(results=self.results) if np.fabs((self.dev - curdev) / curdev) < tol: return False self.dev = curdev return True def estimate_scale(self, Y=None, results=None): """ Return Pearson\'s X^2 estimate of scale. """ if results is None: results = self.results if Y is None: Y = self.Y resid = Y - results.mu return ((np.power(resid, 2) / self.family.variance(results.mu)).sum() / results.df_resid) def fit(self, Y): self.Y = np.asarray(Y, np.float64) iter(self) self.results = super(Model, self).fit( self.family.link.initialize(Y)) self.results.mu = self.family.link.inverse(self.results.predicted) self.scale = self.results.scale = self.estimate_scale() while self.cont(): self.results = self.next() self.scale = self.results.scale = self.estimate_scale() return self.results nipy-0.3.0/nipy/algorithms/statistics/models/info.py000066400000000000000000000014121210344137400226000ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Statistical models - model `formula` - standard `regression` models - `OLSModel` (ordinary least square regression) - `WLSModel` (weighted least square regression) - `ARModel` (autoregressive model) - `glm.Model` (generalized linear models) - robust statistical models - `rlm.Model` (robust linear models using M estimators) - `robust.norms` estimates - `robust.scale` estimates (MAD, Huber's proposal 2). - `mixed` effects models - `gam` (generalized additive models) """ __docformat__ = 'restructuredtext en' depends = ['special.orthogonal', 'integrate', 'optimize', 'linalg'] postpone_import = True nipy-0.3.0/nipy/algorithms/statistics/models/model.py000066400000000000000000000343031210344137400227520ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import numpy as np from numpy.linalg import inv from scipy.stats import t as t_distribution from nibabel.onetime import setattr_on_read from ...utils.matrices import pos_recipr # Inverse t cumulative distribution inv_t_cdf = t_distribution.ppf class Model(object): """ A (predictive) statistical model. The class Model itself does nothing but lays out the methods expected of any subclass. """ def __init__(self): pass def initialize(self): """ Initialize (possibly re-initialize) a Model instance. For instance, the design matrix of a linear model may change and some things must be recomputed. """ raise NotImplementedError def fit(self): """ Fit a model to data. """ raise NotImplementedError def predict(self, design=None): """ After a model has been fit, results are (assumed to be) stored in self.results, which itself should have a predict method. 
""" # XXX method is from an earlier API and needs to be rethought self.results.predict(design) class LikelihoodModel(Model): def logL(self, theta, Y, nuisance=None): """ Log-likelihood of model. """ raise NotImplementedError def score(self, theta, Y, nuisance=None): """ Gradient of logL with respect to theta. This is the score function of the model """ raise NotImplementedError def information(self, theta, nuisance=None): """ Fisher information matrix The inverse of the expected value of ``- d^2 logL / dtheta^2.`` """ raise NotImplementedError class LikelihoodModelResults(object): ''' Class to contain results from likelihood models ''' # This is the class in which things like AIC, BIC, llf can be implemented as # methods, not computed in, say, the fit method of OLSModel def __init__(self, theta, Y, model, cov=None, dispersion=1., nuisance=None, rank=None): ''' Set up results structure Parameters ---------- theta : ndarray parameter estimates from estimated model Y : ndarray data model : ``LikelihoodModel`` instance model used to generate fit cov : None or ndarray, optional covariance of thetas dispersion : scalar, optional multiplicative factor in front of `cov` nuisance : None of ndarray parameter estimates needed to compute logL rank : None or scalar rank of the model. If rank is not None, it is used for df_model instead of the usual counting of parameters. Notes ----- The covariance of thetas is given by: dispersion * cov For (some subset of models) `dispersion` will typically be the mean square error from the estimated model (sigma^2) ''' self.theta = theta self.Y = Y self.model = model if cov is None: self.cov = self.model.information(self.theta, nuisance=self.nuisance) else: self.cov = cov self.dispersion = dispersion self.nuisance = nuisance self.df_total = Y.shape[0] self.df_model = model.df_model # put this as a parameter of LikelihoodModel self.df_resid = self.df_total - self.df_model @setattr_on_read def logL(self): """ The maximized log-likelihood """ return self.model.logL(self.theta, self.Y, nuisance=self.nuisance) @setattr_on_read def AIC(self): """ Akaike Information Criterion """ p = self.theta.shape[0] return -2 * self.logL + 2 * p @setattr_on_read def BIC(self): """ Schwarz's Bayesian Information Criterion """ n = self.Y.shape[0] p = self.theta.shape[0] return - 2 * self.logL + np.log(n) * p def t(self, column=None): """ Return the (Wald) t-statistic for a given parameter estimate. Use Tcontrast for more complicated (Wald) t-statistics. """ if column is None: column = range(self.theta.shape[0]) column = np.asarray(column) _theta = self.theta[column] _cov = self.vcov(column=column) if _cov.ndim == 2: _cov = np.diag(_cov) _t = _theta * pos_recipr(np.sqrt(_cov)) return _t def vcov(self, matrix=None, column=None, dispersion=None, other=None): """ Variance/covariance matrix of linear contrast Parameters ---------- matrix: (dim, self.theta.shape[0]) array, optional numerical contrast specification, where ``dim`` refers to the 'dimension' of the contrast i.e. 1 for t contrasts, 1 or more for F contrasts. column: int, optional alternative way of specifying contrasts (column index) dispersion: float or (n_voxels,) array, optional value(s) for the dispersion parameters other: (dim, self.theta.shape[0]) array, optional alternative contrast specification (?) 
Returns ------- cov: (dim, dim) or (n_voxels, dim, dim) array the estimated covariance matrix/matrices Returns the variance/covariance matrix of a linear contrast of the estimates of theta, multiplied by `dispersion` which will often be an estimate of `dispersion`, like, sigma^2. The covariance of interest is either specified as a (set of) column(s) or a matrix. """ if self.cov is None: raise ValueError('need covariance of parameters for computing' +\ '(unnormalized) covariances') if dispersion is None: dispersion = self.dispersion if column is not None: column = np.asarray(column) if column.shape == (): return self.cov[column, column] * dispersion else: return self.cov[column][:, column] * dispersion elif matrix is not None: if other is None: other = matrix tmp = np.dot(matrix, np.dot(self.cov, np.transpose(other))) if np.isscalar(dispersion): return tmp * dispersion else: return tmp[:, :, np.newaxis] * dispersion if matrix is None and column is None: return self.cov * dispersion def Tcontrast(self, matrix, store=('t', 'effect', 'sd'), dispersion=None): """ Compute a Tcontrast for a row vector `matrix` To get the t-statistic for a single column, use the 't' method. Parameters ---------- matrix : 1D array-like contrast matrix store : sequence, optional components of t to store in results output object. Defaults to all components ('t', 'effect', 'sd'). dispersion : None or float, optional Returns ------- res : ``TContrastResults`` object """ matrix = np.asarray(matrix) # 1D vectors assumed to be row vector if matrix.ndim == 1: matrix = matrix[None] if matrix.shape[0] != 1: raise ValueError("t contrasts should have only one row") if matrix.shape[1] != self.theta.shape[0]: raise ValueError("t contrasts should be length P=%d, " "but this is length %d" % (self.theta.shape[0], matrix.shape[1])) store = set(store) if not store.issubset(('t', 'effect', 'sd')): raise ValueError('Unexpected store request in %s' % store) st_t = st_effect = st_sd = effect = sd = None if 't' in store or 'effect' in store: effect = np.dot(matrix, self.theta) if 'effect' in store: st_effect = np.squeeze(effect) if 't' in store or 'sd' in store: sd = np.sqrt(self.vcov(matrix=matrix, dispersion=dispersion)) if 'sd' in store: st_sd = np.squeeze(sd) if 't' in store: st_t = np.squeeze(effect * pos_recipr(sd)) return TContrastResults(effect=st_effect, t=st_t, sd=st_sd, df_den=self.df_resid) def Fcontrast(self, matrix, dispersion=None, invcov=None): """ Compute an Fcontrast for a contrast matrix `matrix`. Here, `matrix` M is assumed to be non-singular. More precisely .. math:: M pX pX' M' is assumed invertible. Here, :math:`pX` is the generalized inverse of the design matrix of the model. There can be problems in non-OLS models where the rank of the covariance of the noise is not full. See the contrast module to see how to specify contrasts. In particular, the matrices from these contrasts will always be non-singular in the sense above. Parameters ---------- matrix : 1D array-like contrast matrix dispersion : None or float, optional If None, use ``self.dispersion`` invcov : None or array, optional Known inverse of variance covariance matrix. If None, calculate this matrix. 
Returns ------- f_res : ``FContrastResults`` instance with attributes F, df_den, df_num Notes ----- For F contrasts, we now specify an effect and covariance """ matrix = np.asarray(matrix) # 1D vectors assumed to be row vector if matrix.ndim == 1: matrix = matrix[None] if matrix.shape[1] != self.theta.shape[0]: raise ValueError("F contrasts should have shape[1] P=%d, " "but this has shape[1] %d" % (self.theta.shape[0], matrix.shape[1])) ctheta = np.dot(matrix, self.theta) if matrix.ndim == 1: matrix = matrix.reshape((1, matrix.shape[0])) if dispersion is None: dispersion = self.dispersion q = matrix.shape[0] if invcov is None: invcov = inv(self.vcov(matrix=matrix, dispersion=1.0)) F = np.add.reduce(np.dot(invcov, ctheta) * ctheta, 0) *\ pos_recipr((q * dispersion)) F = np.squeeze(F) return FContrastResults( effect=ctheta, covariance=self.vcov( matrix=matrix, dispersion=dispersion[np.newaxis]), F=F, df_den=self.df_resid, df_num=invcov.shape[0]) def conf_int(self, alpha=.05, cols=None, dispersion=None): ''' The confidence interval of the specified theta estimates. Parameters ---------- alpha : float, optional The `alpha` level for the confidence interval. ie., `alpha` = .05 returns a 95% confidence interval. cols : tuple, optional `cols` specifies which confidence intervals to return dispersion : None or scalar scale factor for the variance / covariance (see class docstring and ``vcov`` method docstring) Returns ------- cis : ndarray `cis` is shape ``(len(cols), 2)`` where each row contains [lower, upper] for the given entry in `cols` Examples -------- >>> from numpy.random import standard_normal as stan >>> from nipy.algorithms.statistics.models.regression import OLSModel >>> x = np.hstack((stan((30,1)),stan((30,1)),stan((30,1)))) >>> beta=np.array([3.25, 1.5, 7.0]) >>> y = np.dot(x,beta) + stan((30)) >>> model = OLSModel(x).fit(y) >>> confidence_intervals = model.conf_int(cols=(1,2)) Notes ----- Confidence intervals are two-tailed. TODO: tails : string, optional `tails` can be "two", "upper", or "lower" ''' if cols is None: lower = self.theta - inv_t_cdf(1 - alpha / 2, self.df_resid) *\ np.sqrt(np.diag(self.vcov(dispersion=dispersion))) upper = self.theta + inv_t_cdf(1 - alpha / 2, self.df_resid) *\ np.sqrt(np.diag(self.vcov(dispersion=dispersion))) else: lower, upper = [], [] for i in cols: lower.append( self.theta[i] - inv_t_cdf(1 - alpha / 2, self.df_resid) * np.sqrt(self.vcov(column=i, dispersion=dispersion))) upper.append( self.theta[i] + inv_t_cdf(1 - alpha / 2, self.df_resid) * np.sqrt(self.vcov(column=i, dispersion=dispersion))) return np.asarray(zip(lower, upper)) class TContrastResults(object): """ Results from a t contrast of coefficients in a parametric model. The class does nothing, it is a container for the results from T contrasts, and returns the T-statistics when np.asarray is called. """ def __init__(self, t, sd, effect, df_den=None): if df_den is None: df_den = np.inf self.t = t self.sd = sd self.effect = effect self.df_den = df_den def __array__(self): return np.asarray(self.t) def __str__(self): return ('' % (self.effect, self.sd, self.t, self.df_den)) class FContrastResults(object): """ Results from an F contrast of coefficients in a parametric model. The class does nothing, it is a container for the results from F contrasts, and returns the F-statistics when np.asarray is called. 
""" def __init__(self, effect, covariance, F, df_num, df_den=None): if df_den is None: df_den = np.inf self.effect = effect self.covariance = covariance self.F = F self.df_den = df_den self.df_num = df_num def __array__(self): return np.asarray(self.F) def __str__(self): return '' % \ (repr(self.F), self.df_den, self.df_num) nipy-0.3.0/nipy/algorithms/statistics/models/nlsmodel.py000066400000000000000000000077511210344137400234760ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Non-linear least squares model """ __docformat__ = 'restructuredtext' import numpy as np import scipy.linalg as spl from .model import Model class NLSModel(Model): """ Class representing a simple nonlinear least squares model. """ def __init__(self, Y, design, f, grad, theta, niter=10): """ Initialize non-linear model instance Parameters ---------- Y : ndarray the data in the NLS model design : ndarray the design matrix, X f : callable the map between the (linear parameters (in the design matrix) and the nonlinear parameters (theta)) and the predicted data. `f` accepts the design matrix and the parameters (theta) as input, and returns the predicted data at that design. grad : callable the gradient of f, this should be a function of an nxp design matrix X and qx1 vector theta that returns an nxq matrix df_i/dtheta_j where: .. math:: f_i(theta) = f(X[i], theta) is the nonlinear response function for the i-th instance in the model. theta : array parameters niter : int number of iterations """ Model.__init__(self) self.Y = Y self.design = design self.f = f self.grad = grad self.theta = theta self.niter = niter if self.design is not None and self.Y != None: if self.Y.shape[0] != self.design.shape[0]: raise ValueError('Y should be same shape as design') def _Y_changed(self): if self.design is not None: if self.Y.shape[0] != self.design.shape[0]: raise ValueError('Y should be same shape as design') def _design_changed(self): if self.Y is not None: if self.Y.shape[0] != self.design.shape[0]: raise ValueError('Y should be same shape as design') def getZ(self): """ Set Z into `self` Returns ------- None """ self._Z = self.grad(self.design, self.theta) def getomega(self): """ Set omega into `self` Returns ------- None """ self._omega = self.predict() - np.dot(self._Z, self.theta) def predict(self, design=None): """ Get predicted values for `design` or ``self.design`` Parameters ---------- design : None or array, optional design at which to predict data. If None (the default) then use the initial ``self.design`` Returns ------- y_predicted : array predicted data at given (or initial) design """ if design is None: design = self.design return self.f(design, self.theta) def SSE(self): """ Sum of squares error. 
Returns ------- sse: float sum of squared residuals """ return sum((self.Y - self.predict()) ** 2) def __iter__(self): """ Get iterator from model instance Returns ------- itor : iterator Returns ``self`` """ if self.theta is not None: self.initial = self.theta elif self.initial is not None: self.theta = self.initial else: raise ValueError('need an initial estimate for theta') self._iter = 0 self.theta = self.initial return self def next(self): """ Do an iteration of fit Returns ------- None """ if self._iter < self.niter: self.getZ() self.getomega() Zpinv = spl.pinv(self._Z) self.theta = np.dot(Zpinv, self.Y - self._omega) else: raise StopIteration self._iter += 1 nipy-0.3.0/nipy/algorithms/statistics/models/regression.py000066400000000000000000000702701210344137400240350ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This module implements some standard regression models: OLS and WLS models, as well as an AR(p) regression model. Models are specified with a design matrix and are fit using their 'fit' method. Subclasses that have more complicated covariance matrices should write over the 'whiten' method as the fit method prewhitens the response by calling 'whiten'. General reference for regression models: 'Introduction to Linear Regression Analysis', Douglas C. Montgomery, Elizabeth A. Peck, G. Geoffrey Vining. Wiley, 2006. """ __docformat__ = 'restructuredtext en' import warnings import numpy as np from scipy import stats import scipy.linalg as spl from nibabel.onetime import setattr_on_read from nipy.algorithms.utils.matrices import matrix_rank, pos_recipr from .model import LikelihoodModel, LikelihoodModelResults class OLSModel(LikelihoodModel): """ A simple ordinary least squares model. Parameters ---------- design : array-like This is your design matrix. Data are assumed to be column ordered with observations in rows. Methods ------- model.__init___(design) model.logL(b=self.beta, Y) Attributes ---------- design : ndarray This is the design, or X, matrix. wdesign : ndarray This is the whitened design matrix. `design` == `wdesign` by default for the OLSModel, though models that inherit from the OLSModel will whiten the design. calc_beta : ndarray This is the Moore-Penrose pseudoinverse of the whitened design matrix. normalized_cov_beta : ndarray ``np.dot(calc_beta, calc_beta.T)`` df_resid : scalar Degrees of freedom of the residuals. Number of observations less the rank of the design. df_model : scalar Degrees of freedome of the model. The rank of the design. Examples -------- >>> from nipy.algorithms.statistics.api import Term, Formula >>> data = np.rec.fromarrays(([1,3,4,5,2,3,4], range(1,8)), ... names=('Y', 'X')) >>> f = Formula([Term("X"), 1]) >>> dmtx = f.design(data, return_float=True) >>> model = OLSModel(dmtx) >>> results = model.fit(data['Y']) >>> results.theta array([ 0.25 , 2.14285714]) >>> results.t() array([ 0.98019606, 1.87867287]) >>> print results.Tcontrast([0,1]) #doctest: +FP_6DP >>> print results.Fcontrast(np.eye(2)) #doctest: +FP_6DP """ def __init__(self, design): """ Parameters ---------- design : array-like This is your design matrix. Data are assumed to be column ordered with observations in rows. """ super(OLSModel, self).__init__() self.initialize(design) def initialize(self, design): # PLEASE don't assume we have a constant... 
# TODO: handle case for noconstant regression self.design = design self.wdesign = self.whiten(self.design) self.calc_beta = spl.pinv(self.wdesign) self.normalized_cov_beta = np.dot(self.calc_beta, np.transpose(self.calc_beta)) self.df_total = self.wdesign.shape[0] self.df_model = matrix_rank(self.design) self.df_resid = self.df_total - self.df_model def logL(self, beta, Y, nuisance=None): r''' Returns the value of the loglikelihood function at beta. Given the whitened design matrix, the loglikelihood is evaluated at the parameter vector, beta, for the dependent variable, Y and the nuisance parameter, sigma. Parameters ---------- beta : ndarray The parameter estimates. Must be of length df_model. Y : ndarray The dependent variable nuisance : dict, optional A dict with key 'sigma', which is an optional estimate of sigma. If None, defaults to its maximum likelihood estimate (with beta fixed) as ``sum((Y - X*beta)**2) / n``, where n=Y.shape[0], X=self.design. Returns ------- loglf : float The value of the loglikelihood function. Notes ----- The log-Likelihood Function is defined as .. math:: \ell(\beta,\sigma,Y)= -\frac{n}{2}\log(2\pi\sigma^2) - \|Y-X\beta\|^2/(2\sigma^2) The parameter :math:`\sigma` above is what is sometimes referred to as a nuisance parameter. That is, the likelihood is considered as a function of :math:`\beta`, but to evaluate it, a value of :math:`\sigma` is needed. If :math:`\sigma` is not provided, then its maximum likelihood estimate: .. math:: \hat{\sigma}(\beta) = \frac{\text{SSE}(\beta)}{n} is plugged in. This likelihood is now a function of only :math:`\beta` and is technically referred to as a profile-likelihood. References ---------- .. [1] W. Green. "Econometric Analysis," 5th ed., Pearson, 2003. ''' # This is overwriting an abstract method of LikelihoodModel X = self.wdesign wY = self.whiten(Y) r = wY - np.dot(X, beta) n = self.df_total SSE = (r ** 2).sum(0) if nuisance is None: sigmasq = SSE / n else: sigmasq = nuisance['sigma'] loglf = - n / 2. * np.log(2 * np.pi * sigmasq) - SSE / (2 * sigmasq) return loglf def score(self, beta, Y, nuisance=None): ''' Gradient of the loglikelihood function at (beta, Y, nuisance). The graient of the loglikelihood function at (beta, Y, nuisance) is the score function. See :meth:`logL` for details. Parameters ---------- beta : ndarray The parameter estimates. Must be of length df_model. Y : ndarray The dependent variable. nuisance : dict, optional A dict with key 'sigma', which is an optional estimate of sigma. If None, defaults to its maximum likelihood estimate (with beta fixed) as ``sum((Y - X*beta)**2) / n``, where n=Y.shape[0], X=self.design. Returns ------- The gradient of the loglikelihood function. ''' # This is overwriting an abstract method of LikelihoodModel X = self.wdesign wY = self.whiten(Y) r = wY - np.dot(X, beta) n = self.df_total if nuisance is None: SSE = (r ** 2).sum(0) sigmasq = SSE / n else: sigmasq = nuisance['sigma'] return np.dot(X, r) / sigmasq def information(self, beta, nuisance=None): ''' Returns the information matrix at (beta, Y, nuisance). See logL for details. Parameters ---------- beta : ndarray The parameter estimates. Must be of length df_model. nuisance : dict A dict with key 'sigma', which is an estimate of sigma. If None, defaults to its maximum likelihood estimate (with beta fixed) as ``sum((Y - X*beta)**2) / n`` where n=Y.shape[0], X=self.design. 
Returns ------- info : array The information matrix, the negative of the inverse of the Hessian of the of the log-likelihood function evaluated at (theta, Y, nuisance). ''' # This is overwriting an abstract method of LikelihoodModel # The subclasses WLSModel, ARModel and GLSModel all overwrite this # method. The point of these subclasses is such that not much of # OLSModel has to be changed. X = self.design sigmasq = nuisance['sigma'] C = sigmasq * np.dot(X.T, X) return C def whiten(self, X): """ Whiten design matrix Parameters ---------- X : array design matrix Returns ------- wX : array This matrix is the matrix whose pseudoinverse is ultimately used in estimating the coefficients. For OLSModel, it is does nothing. For WLSmodel, ARmodel, it pre-applies a square root of the covariance matrix to X. """ return X @setattr_on_read def has_intercept(self): """ Check if column of 1s is in column space of design """ o = np.ones(self.design.shape[0]) obeta = np.dot(self.calc_beta, o) ohat = np.dot(self.wdesign, obeta) if np.allclose(ohat, o): return True return False @setattr_on_read def rank(self): """ Compute rank of design matrix """ return matrix_rank(self.wdesign) def fit(self, Y): """ Fit model to data `Y` Full fit of the model including estimate of covariance matrix, (whitened) residuals and scale. Parameters ---------- Y : array-like The dependent variable for the Least Squares problem. Returns ------- fit : RegressionResults """ # Other estimates of the covariance matrix for a heteroscedastic # regression model can be implemented in WLSmodel. (Weighted least # squares models assume covariance is diagonal, i.e. heteroscedastic). wY = self.whiten(Y) beta = np.dot(self.calc_beta, wY) wresid = wY - np.dot(self.wdesign, beta) dispersion = np.sum(wresid ** 2, 0) / (self.wdesign.shape[0] - self.wdesign.shape[1]) lfit = RegressionResults(beta, Y, self, wY, wresid, dispersion=dispersion, cov=self.normalized_cov_beta) return lfit class ARModel(OLSModel): """ A regression model with an AR(p) covariance structure. In terms of a LikelihoodModel, the parameters are beta, the usual regression parameters, and sigma, a scalar nuisance parameter that shows up as multiplier in front of the AR(p) covariance. The linear autoregressive process of order p--AR(p)--is defined as: TODO Examples -------- >>> from nipy.algorithms.statistics.api import Term, Formula >>> data = np.rec.fromarrays(([1,3,4,5,8,10,9], range(1,8)), ... names=('Y', 'X')) >>> f = Formula([Term("X"), 1]) >>> dmtx = f.design(data, return_float=True) >>> model = ARModel(dmtx, 2) We go through the ``model.iterative_fit`` procedure long-hand: >>> for i in range(6): ... results = model.fit(data['Y']) ... print "AR coefficients:", model.rho ... rho, sigma = yule_walker(data["Y"] - results.predicted, ... order=2, ... df=model.df_resid) ... model = ARModel(model.design, rho) #doctest: +FP_6DP ... AR coefficients: [ 0. 0.] 
AR coefficients: [-0.61530877 -1.01542645] AR coefficients: [-0.72660832 -1.06201457] AR coefficients: [-0.7220361 -1.05365352] AR coefficients: [-0.72229201 -1.05408193] AR coefficients: [-0.722278 -1.05405838] >>> results.theta #doctest: +FP_6DP array([ 1.59564228, -0.58562172]) >>> results.t() #doctest: +FP_6DP array([ 38.0890515 , -3.45429252]) >>> print results.Tcontrast([0,1]) #doctest: +FP_6DP >>> print results.Fcontrast(np.identity(2)) #doctest: +FP_6DP Reinitialize the model, and do the automated iterative fit >>> model.rho = np.array([0,0]) >>> model.iterative_fit(data['Y'], niter=3) >>> print model.rho #doctest: +FP_6DP [-0.7220361 -1.05365352] """ def __init__(self, design, rho): """ Initialize AR model instance Parameters ---------- design : ndarray 2D array with design matrix rho : int or array-like If int, gives order of model, and initializes rho to zeros. If ndarray, gives initial estimate of rho. Be careful as ``ARModel(X, 1) != ARModel(X, 1.0)``. """ if type(rho) is type(1): self.order = rho self.rho = np.zeros(self.order, np.float64) else: self.rho = np.squeeze(np.asarray(rho)) if len(self.rho.shape) not in [0, 1]: raise ValueError("AR parameters must be a scalar or a vector") if self.rho.shape == (): self.rho.shape = (1,) self.order = self.rho.shape[0] super(ARModel, self).__init__(design) def iterative_fit(self, Y, niter=3): """ Perform an iterative two-stage procedure to estimate AR(p) parameters and regression coefficients simultaneously. Parameters ---------- Y : ndarray data to which to fit model niter : optional, int the number of iterations (default 3) Returns ------- None """ for i in range(niter): self.initialize(self.design) results = self.fit(Y) self.rho, _ = yule_walker(Y - results.predicted, order=self.order, df=self.df_resid) def whiten(self, X): """ Whiten a series of columns according to AR(p) covariance structure Parameters ---------- X : array-like of shape (n_features) array to whiten Returns ------- wX : ndarray X whitened with order self.order AR """ X = np.asarray(X, np.float64) _X = X.copy() for i in range(self.order): _X[(i + 1):] = _X[(i + 1):] - self.rho[i] * X[0: - (i + 1)] return _X def yule_walker(X, order=1, method="unbiased", df=None, inv=False): """ Estimate AR(p) parameters from a sequence X using Yule-Walker equation. unbiased or maximum-likelihood estimator (mle) See, for example: http://en.wikipedia.org/wiki/Autoregressive_moving_average_model Parameters ---------- X : ndarray of shape(n) order : int, optional Order of AR process. method : str, optional Method can be "unbiased" or "mle" and this determines denominator in estimate of autocorrelation function (ACF) at lag k. If "mle", the denominator is n=X.shape[0], if "unbiased" the denominator is n-k. df : int, optional Specifies the degrees of freedom. If df is supplied, then it is assumed the X has df degrees of freedom rather than n. 
inv : bool, optional Whether to return the inverse of the R matrix (see code) Returns ------- rho : (`order`,) ndarray sigma : int standard deviation of the residuals after fit R_inv : ndarray If `inv` is True, also return the inverse of the R matrix Notes ----- See also http://en.wikipedia.org/wiki/AR_model#Calculation_of_the_AR_parameters """ method = str(method).lower() if method not in ["unbiased", "mle"]: raise ValueError("ACF estimation method must be 'unbiased or 'MLE'") X = np.asarray(X, np.float64) if X.ndim != 1: raise ValueError("Expecting a vector to estimate AR parameters") X -= X.mean(0) n = df or X.shape[0] if method == "unbiased": den = lambda k: n - k else: den = lambda k: n r = np.zeros(order + 1, np.float64) r[0] = (X ** 2).sum() / den(0) for k in range(1, order + 1): r[k] = (X[0: - k] * X[k:]).sum() / den(k) R = spl.toeplitz(r[: - 1]) rho = spl.solve(R, r[1:]) sigmasq = r[0] - (r[1:] * rho).sum() if inv == True: return rho, np.sqrt(sigmasq), spl.inv(R) return rho, np.sqrt(sigmasq) def ar_bias_corrector(design, calc_beta, order=1): """ Return bias correcting matrix for `design` and AR order `order` There is a slight bias in the rho estimates on residuals due to the correlations induced in the residuals by fitting a linear model. See [Worsley2002]_. This routine implements the bias correction described in appendix A.1 of [Worsley2002]_. Parameters ---------- design : array Design matrix calc_beta : array Moore-Penrose pseudoinverse of the (maybe) whitened design matrix. This is the matrix that, when applied to the (maybe whitened) data, produces the betas. order : int, optional Order p of AR(p) process Returns ------- invM : array Matrix to bias correct estimated covariance matrix in calculating the AR coefficients References ---------- .. [Worsley2002] K.J. Worsley, C.H. Liao, J. Aston, V. Petre, G.H. Duncan, F. Morales, A.C. Evans (2002) A General Statistical Analysis for fMRI Data. Neuroimage 15:1:15 """ R = np.eye(design.shape[0]) - np.dot(design, calc_beta) M = np.zeros((order + 1,) * 2) I = np.eye(R.shape[0]) for i in range(order + 1): Di = np.dot(R, spl.toeplitz(I[i])) for j in range(order + 1): Dj = np.dot(R, spl.toeplitz(I[j])) M[i, j] = np.diag((np.dot(Di, Dj)) / (1. + (i > 0))).sum() return spl.inv(M) def ar_bias_correct(results, order, invM=None): """ Apply bias correction in calculating AR(p) coefficients from `results` There is a slight bias in the rho estimates on residuals due to the correlations induced in the residuals by fitting a linear model. See [Worsley2002]_. This routine implements the bias correction described in appendix A.1 of [Worsley2002]_. Parameters ---------- results : ndarray or results object If ndarray, assume these are residuals, from a simple model. If a results object, with attribute ``resid``, then use these for the residuals. See Notes for more detail order : int Order ``p`` of AR(p) model invM : None or array Known bias correcting matrix for covariance. If None, calculate from ``results.model`` Returns ------- rho : array Bias-corrected AR(p) coefficients Notes ----- If `results` has attributes ``resid`` and ``scale``, then assume ``scale`` has come from a fit of a potentially customized model, and we use that for the sum of squared residuals. In this case we also need ``results.df_resid``. Otherwise we assume this is a simple Gaussian model, like OLS, and take the simple sum of squares of the residuals. References ---------- .. [Worsley2002] K.J. Worsley, C.H. Liao, J. Aston, V. Petre, G.H. Duncan, F. Morales, A.C. 
Evans (2002) A General Statistical Analysis for fMRI Data. Neuroimage 15:1:15 """ if invM is None: # We need a model from ``results`` if invM is not specified model = results.model invM = ar_bias_corrector(model.design, model.calc_beta, order) if hasattr(results, 'resid'): resid = results.resid else: resid = results in_shape = resid.shape n_features = in_shape[0] # Allows results residuals to have shapes other than 2D. This allows us to # use this routine for image data as well as more standard 2D model data resid = resid.reshape((n_features, - 1)) # glm.Model fit methods fill in a ``scale`` estimate. For simpler # models, there is no scale estimate written into the results. # However, the same calculation resolves (with Gaussian family) # to ``np.sum(resid**2) / results.df_resid``. # See ``estimate_scale`` from glm.Model if hasattr(results, 'scale'): sum_sq = results.scale.reshape(resid.shape[1:]) * results.df_resid else: # No scale in results sum_sq = np.sum(resid ** 2, axis=0) cov = np.zeros((order + 1,) + sum_sq.shape) cov[0] = sum_sq for i in range(1, order + 1): cov[i] = np.sum(resid[i:] * resid[0:- i], axis=0) # cov is shape (order + 1, V) where V = np.product(in_shape[1:]) cov = np.dot(invM, cov) output = cov[1:] * pos_recipr(cov[0]) return np.squeeze(output.reshape((order,) + in_shape[1:])) class AREstimator(object): """ A class to estimate AR(p) coefficients from residuals """ def __init__(self, model, p=1): """ Bias-correcting AR estimation class Parameters ---------- model : ``OSLModel`` instance A models.regression.OLSmodel instance, where `model` has attribute ``design`` p : int, optional Order of AR(p) noise """ self.p = p self.invM = ar_bias_corrector(model.design, model.calc_beta, p) def __call__(self, results): """ Calculate AR(p) coefficients from `results`.``residuals`` Parameters ---------- results : Results instance A models.model.LikelihoodModelResults instance Returns ------- ar_p : array AR(p) coefficients """ return ar_bias_correct(results, self.p, self.invM) class WLSModel(OLSModel): """ A regression model with diagonal but non-identity covariance structure. The weights are presumed to be (proportional to the) inverse of the variance of the observations. Examples -------- >>> from nipy.algorithms.statistics.api import Term, Formula >>> data = np.rec.fromarrays(([1,3,4,5,2,3,4], range(1,8)), ... 
names=('Y', 'X')) >>> f = Formula([Term("X"), 1]) >>> dmtx = f.design(data, return_float=True) >>> model = WLSModel(dmtx, weights=range(1,8)) >>> results = model.fit(data['Y']) >>> results.theta array([ 0.0952381 , 2.91666667]) >>> results.t() array([ 0.35684428, 2.0652652 ]) >>> print results.Tcontrast([0,1]) #doctest: +FP_6DP >>> print results.Fcontrast(np.identity(2)) #doctest: +FP_6DP """ def __init__(self, design, weights=1): weights = np.array(weights) if weights.shape == (): # scalar self.weights = weights else: design_rows = design.shape[0] if not(weights.shape[0] == design_rows and weights.size == design_rows): raise ValueError( 'Weights must be scalar or same length as design') self.weights = weights.reshape(design_rows) super(WLSModel, self).__init__(design) def whiten(self, X): """ Whitener for WLS model, multiplies by sqrt(self.weights) """ X = np.asarray(X, np.float64) if X.ndim == 1: return X * np.sqrt(self.weights) elif X.ndim == 2: c = np.sqrt(self.weights) v = np.zeros(X.shape, np.float64) for i in range(X.shape[1]): v[:, i] = X[:, i] * c return v class RegressionResults(LikelihoodModelResults): """ This class summarizes the fit of a linear regression model. It handles the output of contrasts, estimates of covariance, etc. """ def __init__(self, theta, Y, model, wY, wresid, cov=None, dispersion=1., nuisance=None): """See LikelihoodModelResults constructor. The only difference is that the whitened Y and residual values are stored for a regression model. """ LikelihoodModelResults.__init__(self, theta, Y, model, cov, dispersion, nuisance) self.wY = wY self.wresid = wresid @setattr_on_read def resid(self): """ Residuals from the fit. """ return self.Y - self.predicted @setattr_on_read def norm_resid(self): """ Residuals, normalized to have unit length. Notes ----- Is this supposed to return "stanardized residuals," residuals standardized to have mean zero and approximately unit variance? d_i = e_i / sqrt(MS_E) Where MS_E = SSE / (n - k) See: Montgomery and Peck 3.2.1 p. 68 Davidson and MacKinnon 15.2 p 662 """ return self.resid * pos_recipr(np.sqrt(self.dispersion)) @setattr_on_read def predicted(self): """ Return linear predictor values from a design matrix. """ beta = self.theta # the LikelihoodModelResults has parameters named 'theta' X = self.model.design return np.dot(X, beta) @setattr_on_read def R2_adj(self): """Return the R^2 value for each row of the response Y. Notes ----- Changed to the textbook definition of R^2. See: Davidson and MacKinnon p 74 """ if not self.model.has_intercept: warnings.warn("model does not have intercept term, " +\ "SST inappropriate") d = 1. - self.R2 d *= ((self.df_total - 1.) / self.df_resid) return 1 - d @setattr_on_read def R2(self): """ Return the adjusted R^2 value for each row of the response Y. Notes ----- Changed to the textbook definition of R^2. See: Davidson and MacKinnon p 74 """ d = self.SSE / self.SST return 1 - d @setattr_on_read def SST(self): """Total sum of squares. If not from an OLS model this is "pseudo"-SST. """ if not self.model.has_intercept: warnings.warn("model does not have intercept term, " +\ "SST inappropriate") return ((self.wY - self.wY.mean(0)) ** 2).sum(0) @setattr_on_read def SSE(self): """Error sum of squares. If not from an OLS model this is "pseudo"-SSE. 
""" return (self.wresid ** 2).sum(0) @setattr_on_read def SSR(self): """ Regression sum of squares """ return self.SST - self.SSE @setattr_on_read def MSR(self): """ Mean square (regression)""" return self.SSR / (self.df_model - 1) @setattr_on_read def MSE(self): """ Mean square (error) """ return self.SSE / self.df_resid @setattr_on_read def MST(self): """ Mean square (total) """ return self.SST / (self.df_total - 1) @setattr_on_read def F_overall(self): """ Overall goodness of fit F test, comparing model to a model with just an intercept. If not an OLS model this is a pseudo-F. """ F = self.MSR / self.MSE Fp = stats.f.sf(F, self.df_model - 1, self.df_resid) return {'F': F, 'p_value': Fp, 'df_num': self.df_model-1, 'df_den': self.df_resid} class GLSModel(OLSModel): """Generalized least squares model with a general covariance structure """ def __init__(self, design, sigma): self.cholsigmainv = spl.linalg.cholesky(spl.linalg.pinv(sigma)).T super(GLSModel, self).__init__(design) def whiten(self, Y): return np.dot(self.cholsigmainv, Y) def isestimable(C, D): """ True if (Q, P) contrast `C` is estimable for (N, P) design `D` From an Q x P contrast matrix `C` and an N x P design matrix `D`, checks if the contrast `C` is estimable by looking at the rank of ``vstack([C,D])`` and verifying it is the same as the rank of `D`. Parameters ---------- C : (Q, P) array-like contrast matrix. If `C` has is 1 dimensional assume shape (1, P) D: (N, P) array-like design matrix Returns ------- tf : bool True if the contrast `C` is estimable on design `D` Examples -------- >>> D = np.array([[1, 1, 1, 0, 0, 0], ... [0, 0, 0, 1, 1, 1], ... [1, 1, 1, 1, 1, 1]]).T >>> isestimable([1, 0, 0], D) False >>> isestimable([1, -1, 0], D) True """ C = np.asarray(C) D = np.asarray(D) if C.ndim == 1: C = C[None, :] if C.shape[1] != D.shape[1]: raise ValueError('Contrast should have %d columns' % D.shape[1]) new = np.vstack([C, D]) if matrix_rank(new) != matrix_rank(D): return False return True nipy-0.3.0/nipy/algorithms/statistics/models/setup.py000066400000000000000000000011171210344137400230070ustar00rootroot00000000000000 def configuration(parent_package='', top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('models', parent_package, top_path) config.add_subpackage('family') config.add_subpackage('tests') config.add_data_files('tests/*.bin') # config.add_extension('_hbspline', # sources=['src/bspline_ext.c', # 'src/bspline_impl.c'], # ) return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) nipy-0.3.0/nipy/algorithms/statistics/models/setupscons.py000066400000000000000000000011011210344137400240460ustar00rootroot00000000000000 def configuration(parent_package='', top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('models', parent_package, top_path) config.add_subpackage('family') config.add_subpackage('robust') config.add_data_dir('tests') config.add_extension('_hbspline', sources=['src/bspline_ext.c', 'src/bspline_impl.c'], ) return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) 
nipy-0.3.0/nipy/algorithms/statistics/models/tests/000077500000000000000000000000001210344137400224375ustar00rootroot00000000000000nipy-0.3.0/nipy/algorithms/statistics/models/tests/__init__.py000066400000000000000000000000001210344137400245360ustar00rootroot00000000000000nipy-0.3.0/nipy/algorithms/statistics/models/tests/exampledata.py000066400000000000000000000004761210344137400253050ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import os import numpy as np filename = os.path.join(os.path.dirname(os.path.abspath(__file__)), "test_data.bin") data = np.fromfile(filename, " 1: df[n] = c.shape[0] SS[n] = np.dot(cbeta, np.dot(np.linalg.pinv(cov_cbeta), cbeta)) MS[n] = SS[n] / df[n] F[n] = MS[n] / sigmasq else: df[n] = 1 SS[n] = (cbeta**2).sum() / cov_cbeta MS[n] = SS[n] / df[n] F[n] = MS[n] / sigmasq p[n] = scipy.stats.f.sf(F[n], df[n], df_resid) routput = \ """ Output of R: ----------- > anova(lm(Days~Duration*Weight, X)) Analysis of Variance Table Response: Days Df Sum Sq Mean Sq F value Pr(>F) Duration 1 209.07 209.07 7.2147 0.009587 ** Weight 2 760.43 380.22 13.1210 2.269e-05 *** Duration:Weight 2 109.03 54.52 1.8813 0.162240 Residuals 54 1564.80 28.98 --- """ def test_Ragreement(): # This code would fit the two-way ANOVA model in R # X = read.table('http://www-stat.stanford.edu/~jtaylo/courses/stats191/data/kidney.table', header=T) # names(X) # X$Duration = factor(X$Duration) # X$Weight = factor(X$Weight) # lm(Days~Duration*Weight, X) # A = anova(lm(Days~Duration*Weight, X)) # rA = rpy.r('A') rA = {'Df': [1, 2, 2, 54], 'F value': [7.2147239263803673, 13.120973926380339, 1.8813266871165633, np.nan], 'Mean Sq': [209.06666666666663, 380.21666666666584, 54.51666666666663, 28.977777777777778], 'Pr(>F)': [0.0095871255601553771, 2.2687781292164585e-05, 0.16224035152442268, np.nan], 'Sum Sq': [209.06666666666663, 760.43333333333169, 109.03333333333326, 1564.8]} # rn = rpy.r('rownames(A)') rn= ['Duration', 'Weight', 'Duration:Weight', 'Residuals'] pairs = [(rn.index('Duration'), 'Duration'), (rn.index('Weight'), 'Weight'), (rn.index('Duration:Weight'), 'Interaction')] for i, j in pairs: assert_almost_equal(F[j], rA['F value'][i]) assert_almost_equal(p[j], rA['Pr(>F)'][i]) assert_almost_equal(MS[j], rA['Mean Sq'][i]) assert_almost_equal(df[j], rA['Df'][i]) assert_almost_equal(SS[j], rA['Sum Sq'][i]) def test_scipy_stats(): # Using scipy.stats.models X, cons = twoway.design(D, contrasts=contrasts) Y = D['Days'] m = OLSModel(X) f = m.fit(Y) F_m = {} df_m = {} p_m = {} for n, c in cons.items(): r = f.Fcontrast(c) F_m[n] = r.F df_m[n] = r.df_num p_m[n] = scipy.stats.f.sf(F_m[n], df_m[n], r.df_den) assert_almost_equal(F[n], F_m[n]) assert_almost_equal(df[n], df_m[n]) assert_almost_equal(p[n], p_m[n]) nipy-0.3.0/nipy/algorithms/statistics/models/tests/test_ar.py000066400000000000000000000014621210344137400244550ustar00rootroot00000000000000 from numpy.testing import assert_array_equal from .exampledata import x, y from .. 
import regression # FIXME: This test does not test any values # TODO: spend an hour or so to create a test like test_ols.py # with R's output, the script and the data used for the script # # Although, it should be said that this, in R # x = as.matrix(read_table('x.csv')) # y = as.matrix(read_table('y.csv')) # res = arima(y, xreg=x, order=c(2,0,0)) # # gives an error ``system is computationally singular`` def test_armodel(): for i in range(1,4): model = regression.ARModel(x, i) for i in range(20): results = model.fit(y) rho, sigma = regression.yule_walker(y - results.predicted) model = regression.ARModel(model.design, rho) print "AR coefficients:", model.rho nipy-0.3.0/nipy/algorithms/statistics/models/tests/test_data.bin000066400000000000000000000354201210344137400251050ustar00rootroot00000000000000R3o@jsXSag7{ᘿz̰?Z?+T tLd hJ?e +p@ҍRk]?PE#PF@?r҉KX?n=DKZ5CA?dC7@V.jp@Tǩ @:r!0p@jT7`?$~]W?]f[?Va@M m;?dC7@V.jp0@TǩP@Kp@ _EEJS@&!!b?!V?{cP_:?s'Y?u%G ~:?"S@![x~B@ʔ'_l@DRp@;f(pq u?v=?T?Ў?SN]Y? Q1;Bll#?dC7 @V.jpP@Tǩ@sC[shp@kЭll0'r?(w9?>;?@PV?6Bc9C2Dbu%?E$@f.)Y@JبE@喩1iAp@*rKcySi?47akq?O?Y[q?;S? I,?@G??f,>2@zfGt@eIP@QDp@ dJXnΣ?ͽ^1`?u??PFvS?3#@@n>?E4@f.)y@JبE@5KS?Co@k>?"S8@![x~@ʔ'_@&eHp@=YӽΒ??ޑ$?J T?6t> H4-< ??B[ΥZ:@-s=@m@au!@?,#8p@F-Xʩ0?@{3?h?3V?HMqAxӴ+"??oض(a<@Q#,@6zS@u,p@fFe2?$]hX$?+JR?x.?&n@Ʒ %??Uh>@tT@O\u@bz :p@0Vx?o?*/!?ſKDT? U?Em}c€#??dC7@@V.jp@Tǩ@o&p@uOW9j*?f?rт?/PR?AEaB@zfGΔ@eIP@ 7p@=֊`?kŃ?2GA?8yQ?G\ElPZ"??'KBC@X܆.@M@+JBeHp@܃?oxԢ?ʞ:?B a?_e< jZqƬRZ@V?v?f' N ??B[ΥZJ@-s=@m@au!ALRN8p@Z b*?\J@?`~/PU?O =s?ٙB5^K@zISh@AEF՜Kp@ Fq'WN=?EyTؔ?ČwFbgCW?xNWD`.ҏ=??oض(aL@Q#,@6zSAݛ .p@MQɶ{.]?|OR?4#,Rl,1W?jݗ-;`Z?+j0eM@U@A^-p@ QǿbcE-&՘?/O}q,']^ŞV?T\ám=b ?UhN@tT@O\u A'2;p@Yڠ\ȔD#?h5=k k&?#V? lPCY[?2*lO@yOۮ@#MuLAd'+p@WxrBJ!? R@zfGδ@eIPAZ ֣@q~|`p@꾤(zs>`??w}?!F?{i7 k*6?܅:R@K@wkAzBؔ@Z?5p@;ၒcžn6|`?@8j.r?F%:B?lz@*C$(2?'KBS@X܆.@MA{N@sN5Gp@<2!j?x*KT(w?#ܽ8?J==?fusB4&?rZ| S@!Ej@v8*AqL_*@RعDp@'z"zE]F?)?7wy?ccF?jWe@F*-$?ET@f.)@JبE A&|X@1Jp@wEySH3 jEDh?a+?~\!B?$contXC[?5@V@1'A@*ga+'A?q@PW@`&@چ(A#ߖr@Ln!Q2Fp@ h`5#p{PS?{r{t?NKHD?˫>iCBt C"?˾+W@[bdA@~/e*AH@Qb۠Fp@jDF8Tlf.H?"^#?l_u?ƄI?@$`F-L?"SX@![x~@ʔ'_,AX#H @R:p@\ZY* Cn?nL,?xO?Qy`I? IOEPd8??a|X@BlE@-A@T"p@<7ИLRZ?IzV?$??~E0?Zd;WY@,:@| ƹS/A$r|w@%%p@bs?]3Uo0Ybjx?to6?->I ͗6>?;Y@8@4*0AS@M%T">p@ tLd? N5doLR?Lb̂?NJBG?c2J'4־?B[ΥZZ@-s=@m@au!1AX@`(p@LM#㣙G@>Pa7/k:?.)G2u?$[@f H@FvĹ&5Aw0)@^=p@r\lm='q?LxC)?@?Y CP}d\?oض(a\@Q#,@6zS6AA}s@VAp@ =hȊT?rc?T5HU,?XiGŠ@2xvv>?pq\@?tW@˫)7A/pܹA%Lp@N܂p)?@BwJnˆ?/喿&?1&0eٰw??+j0e]@U@8A''AS6?p@+M*K؆,U?DѼ@{ @7AdnunT6{#&??P6 ]@!@aws:A|ɧAO{F1p@_uf?|B#y?47Ρ?D}1D11a??Uh^@tT@O\u;A"'A _A6#07p@S~\q{7`8tzx»?Cu*7MT`6^?tYLl^@r`@@f*@i#M?RT?pf'*f 8\I(#?2*l_@yO@#MuL>A2[ Ap@O/ g˖!c?ps?^q,4eCr7O*)?}͍_@-T@ WO?Ae.?آ*? gx`@0Z@ϠtAA\%J@[_*p@jcB=fB%fän c?H銿A{^0a+}h(>2?`@Jƍ{@RY׭RFBAR%`h@N0p@'/=pFgOL/#q?0(* 5ø[|A:;+0#?U[r`@yh?4@#W6CAԜ?}@w&p@Jo=>r^GuOF?e2m*B+\z?'XQ;a@O7@u0CAx@o-p@삟SPj*?,ns(#^>1֕?70|a@Z@ȩDA~L4@$p@ˎ0gSٜW䫽`ADŽ`Q*bҜ>zE)'?EGra@.{@396EA@tp@*O mIM$ }V-iV|/Έ? :͍,?VIa@7QZ;@ r)FA$D@]8Up@ p)Kғ 5K9Y65oU3\:'?f,>b@zfG@eIPGAZ ֣@3;+l5p@T@'|_tj'4Meb=E3ڥ <3hw-?6vb@y0Bc@VHA1Je3@Qݣ]3p@=K)mP^JYbpOJ8H(`\_MgNܔF3?܅:b@K@wkIAzB@KT5p@B»x='ٱRRZ?ePv%6 jgu5)a4?Clc@RH2a@aJA&@c63p@G!t=#Z 6?<<6hMxp 0I8܇q)2"pZ)?'KBc@X܆.@MKA{N@/7f[7p@FyF#=;@7p@Uh'/@(eS@T? J\;/<|98䱲Q?6&řI#?Ed@f.)@JبEPA&|X@S7?p@ow)@~̈A:$?Zƨ=D߀?%?cqmd@j]U@@(PPAPI-@{uE%p@]l^MMa[z?|*l8D>L,0P?\ ?Ͻd@Р@Z˅AQAV/m7K@)p@](KkSV9`u |R+-?5?Xɩ-?@ .?,ge@@\7F>,RA@#p@FK}^ Nk^t 0Ԝol_ni;d@ o$? 
Qz#?S"^FIe@|KQ@-nRAۣw@f cp@Qo([fXu I^B' Υq*A&K:e?.O98?1%e@l~K@pE˸SAϰ|X?@f#p@}tڵҫ`4inps ٱ{;MrELD I"(Y<?ACe@jf,@/(7TAaq&M@ȕV#p@!eUq;\΀:5"?աD B?`Lf@C@׭UA%w@@ 9/p@P I x)հ;WH_8^^H(%?Ʈ&rB?pZf@@@e@X#gVA_ AP4p@Fk9沁iR8`T0E?fVFgA QЗdt ?>t@?5@f@1'A@*ga+WA-%?סQ??ڏag@/ /#@)ELWA ʇAغu@p@,Aõxܰ?Pk>PJ+0?8=7?q@Pg@`&@چXA#ߖrA_SMDp@BdD vʟ vm+?XC FZ8PV_a)'4?Di<?%1 g@|s0[@6~YAT<8= AX9=p@dX!걿^} i?P˪/uƄƶTÎ8?[ƥ<?˾+g@[bdA@~/eZAH AQ5p@¯ێ׾t2:?ʇ%?;'m!W86?CCI<?pΈh@$\Y@[&?[A1??O-0p@vL9? _?P SU"7?)D A2?"Sh@![x~@ʔ'_\Ao1@33p@^L2+ Fve?iR_)?m?<,SDm1?\ ;?Bsh@QDU`@aW]A- Y@=a p@,Ӊf32xBPYen&RjRg#,4? kb@?a|h@BlE@]Ai?r@ӕ p@@Z7`׃oe:SÖ[J)?@? \i@H~@f[^A }G@m.Y)p@ T\=0mx٬2 uWꍿh!~FX!R|cZk/?l_r2?Zd;Wi@,:@| ƹS_AG@G%&p@T+]nx0e{j0)΋d->GV\4 6?T6.7?R,i@ K)|x@VGn_`An74@ߺ p@738k7ǽjY  \b?? MkK??P6 m@!@awsjA˱@ b/5p@ o='?"q&Rֲo`YZu c?P??EB['n@lMoj@YojAclm'@G9p@dy=OXP? .6_PaЉמ+]Y ` d^?XRX??Uhn@tT@O\ukAsAb.p@cv= ?ͫ+Տp;q5VCU[?\^??Aen@h ja@&lAr{AYԇFS(p@Nk<=M aL?H^нx۳)?ӛ\{aC`*8Ul[?`ՕTZ??tYLln@r`@@f*lA#Am?p@9llh @n?V0n񉛥? P\zS :Q+X?J9\??K+o@t\@(ǑOmAvABmp@mw5?0GDlr~)?J&<`" Y?d;j[??2*lo@yO@#MuLnA4j A2q.G&p@ C$< ?bIė2??f4] iX?"c_??ףp= o@8ōZ@+iká oACD Anipy-0.3.0/nipy/algorithms/statistics/models/tests/test_estimable.py000066400000000000000000000026621210344137400260230ustar00rootroot00000000000000""" Testing ``isestimable`` in regression module """ import numpy as np from ..regression import isestimable from numpy.testing import (assert_almost_equal, assert_array_equal) from nose.tools import (assert_true, assert_false, assert_raises, assert_equal, assert_not_equal) def test_estimable(): rng = np.random.RandomState(20120713) N, P = (40, 10) X = rng.normal(size=(N, P)) C = rng.normal(size=(1, P)) assert_true(isestimable(C, X)) assert_true(isestimable(np.eye(P), X)) for row in np.eye(P): assert_true(isestimable(row, X)) X = np.ones((40, 2)) assert_true(isestimable([1, 1], X)) assert_false(isestimable([1, 0], X)) assert_false(isestimable([0, 1], X)) assert_false(isestimable(np.eye(2), X)) halfX = rng.normal(size=(N, 5)) X = np.hstack([halfX, halfX]) assert_false(isestimable(np.hstack([np.eye(5), np.zeros((5, 5))]), X)) assert_false(isestimable(np.hstack([np.zeros((5, 5)), np.eye(5)]), X)) assert_true(isestimable(np.hstack([np.eye(5), np.eye(5)]), X)) # Test array-like for design XL = X.tolist() assert_true(isestimable(np.hstack([np.eye(5), np.eye(5)]), XL)) # Test ValueError for incorrect number of columns X = rng.normal(size=(N, 5)) for n in range(1, 4): assert_raises(ValueError, isestimable, np.ones((n,)), X) assert_raises(ValueError, isestimable, np.eye(4), X) nipy-0.3.0/nipy/algorithms/statistics/models/tests/test_glm.py000066400000000000000000000021351210344137400246300ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Test functions for models.GLM """ import numpy as np from .. 
import family from ..glm import Model as GLM from nose.tools import assert_equal, assert_true, assert_false VARS = {} def setup(): rng = np.random.RandomState(20110928) VARS['X'] = rng.standard_normal((40,10)) Y = rng.standard_normal((40,)) VARS['Y'] = np.greater(Y, 0) def test_Logistic(): X = VARS['X'] Y = VARS['Y'] cmodel = GLM(design=X, family=family.Binomial()) results = cmodel.fit(Y) assert_equal(results.df_resid, 30) def test_cont(): # Test continue function works as expected X = VARS['X'] Y = VARS['Y'] cmodel = GLM(design=X, family=family.Binomial()) cmodel.fit(Y) assert_true(cmodel.cont(0)) assert_false(cmodel.cont(np.inf)) def test_Logisticdegenerate(): X = VARS['X'].copy() X[:,0] = X[:,1] + X[:,2] Y = VARS['Y'] cmodel = GLM(design=X, family=family.Binomial()) results = cmodel.fit(Y) assert_equal(results.df_resid, 31) nipy-0.3.0/nipy/algorithms/statistics/models/tests/test_model.py000066400000000000000000000104501210344137400251500ustar00rootroot00000000000000""" Testing models module """ import numpy as np # In fact we're testing methods defined in model from ..regression import OLSModel from nose.tools import assert_true, assert_equal, assert_raises from nose import SkipTest from numpy.testing import (assert_array_almost_equal, assert_array_equal) N = 10 X = np.c_[np.linspace(-1,1,N), np.ones((N,))] Y = np.r_[range(5), range(1,6)] MODEL = OLSModel(X) RESULTS = MODEL.fit(Y) """ R script :: X = cbind(0:9 * 2/9 -1, 1) Y = as.matrix(c(0:4, 1:5)) results = lm(Y ~ X-1) print(results) print(summary(results)) gives:: Call: lm(formula = Y ~ X - 1) Coefficients: X1 X2 1.773 2.500 Residuals: Min 1Q Median 3Q Max -1.6970 -0.6667 0.0000 0.6667 1.6970 Coefficients: Estimate Std. Error t value Pr(>|t|) X1 1.7727 0.5455 3.250 0.0117 * X2 2.5000 0.3482 7.181 9.42e-05 *** --- Residual standard error: 1.101 on 8 degrees of freedom Multiple R-squared: 0.8859, Adjusted R-squared: 0.8574 F-statistic: 31.06 on 2 and 8 DF, p-value: 0.0001694 """ def test_model(): # Check basics about the model fit # Check we fit the mean assert_array_almost_equal(RESULTS.theta[1], np.mean(Y)) # Check we get the same as R assert_array_almost_equal(RESULTS.theta, [1.773, 2.5], 3) try: percentile = np.percentile except AttributeError: # Numpy <=1.4.1 does not have percentile function raise SkipTest('Numpy does not have percentile function') pcts = percentile(RESULTS.resid, [0,25,50,75,100]) assert_array_almost_equal(pcts, [-1.6970, -0.6667, 0, 0.6667, 1.6970], 4) def test_t_contrast(): # Test indivudual t against R assert_array_almost_equal(RESULTS.t(0), 3.25) assert_array_almost_equal(RESULTS.t(1), 7.181, 3) # And contrast assert_array_almost_equal(RESULTS.Tcontrast([1,0]).t, 3.25) assert_array_almost_equal(RESULTS.Tcontrast([0,1]).t, 7.181, 3) # Input matrix checked for size assert_raises(ValueError, RESULTS.Tcontrast, [1]) assert_raises(ValueError, RESULTS.Tcontrast, [1, 0, 0]) # And shape assert_raises(ValueError, RESULTS.Tcontrast, np.array([1, 0])[:,None]) def test_t_output(): # Check we get required outputs exp_t = RESULTS.t(0) exp_effect = RESULTS.theta[0] exp_sd = exp_effect / exp_t res = RESULTS.Tcontrast([1,0]) assert_array_almost_equal(res.t, exp_t) assert_array_almost_equal(res.effect, exp_effect) assert_array_almost_equal(res.sd, exp_sd) res = RESULTS.Tcontrast([1,0], store=('effect',)) assert_equal(res.t, None) assert_array_almost_equal(res.effect, exp_effect) assert_equal(res.sd, None) res = RESULTS.Tcontrast([1,0], store=('t',)) assert_array_almost_equal(res.t, exp_t) assert_equal(res.effect, None) 
assert_equal(res.sd, None) res = RESULTS.Tcontrast([1,0], store=('sd',)) assert_equal(res.t, None) assert_equal(res.effect, None) assert_array_almost_equal(res.sd, exp_sd) res = RESULTS.Tcontrast([1,0], store=('effect', 'sd')) assert_equal(res.t, None) assert_array_almost_equal(res.effect, exp_effect) assert_array_almost_equal(res.sd, exp_sd) def test_f_output(): # Test f_output res = RESULTS.Fcontrast([1,0]) exp_f = RESULTS.t(0) ** 2 assert_array_almost_equal(exp_f, res.F) # Test arrays work as well as lists res = RESULTS.Fcontrast(np.array([1,0])) assert_array_almost_equal(exp_f, res.F) # Test with matrix against R res = RESULTS.Fcontrast(np.eye(2)) assert_array_almost_equal(31.06, res.F, 2) # Input matrix checked for size assert_raises(ValueError, RESULTS.Fcontrast, [1]) assert_raises(ValueError, RESULTS.Fcontrast, [1, 0, 0]) # And shape assert_raises(ValueError, RESULTS.Fcontrast, np.array([1, 0])[:,None]) def test_f_output_new_api(): res = RESULTS.Fcontrast([1, 0]) assert_array_almost_equal(res.effect, RESULTS.theta[0]) assert_array_almost_equal(res.covariance, RESULTS.vcov()[0][0]) def test_conf_int(): lower_, upper_ = RESULTS.conf_int() assert_true((lower_ < upper_).all()) assert_true((lower_ > upper_ - 10).all()) lower_, upper_ = RESULTS.conf_int(cols=[1]).T assert_true(lower_ < upper_) assert_true(lower_ > upper_ - 10) nipy-0.3.0/nipy/algorithms/statistics/models/tests/test_olsR.py000066400000000000000000001567031210344137400250030ustar00rootroot00000000000000 import numpy as np from ..regression import OLSModel import nipy.testing as niptest import scipy.stats from .exampledata import x, y Rscript = ''' d = read.table('data.csv', header=T, sep=' ') y.lm = lm(Y ~ X1 + X2 + X3 + X4 + X5 + X6 + X7 + X8 + X9 + X10 + X11 + X12 + X13 + X14, data=d) print(summary(y.lm)) y.lm2 = lm(Y ~ X1 + X2 + X3 + X4 + X5 + X6 + X7 + X8 + X9 + X10 + X11 + X12 + X13 + X14 - 1, data=d) print(summary(y.lm2)) SSE = sum(resid(y.lm)^2) SST = sum((d$Y - mean(d$Y))^2) SSR = SST - SSE print(data.frame(SSE, SST, SSR)) MSE = SSE / y.lm$df.resid MST = SST / (length(d$Y) - 1) MSR = SSR / (length(d$Y) - y.lm$df.resid - 1) print(data.frame(MSE, MST, MSR)) print(AIC(y.lm)) print(AIC(y.lm2)) ''' # lines about "Signif. codes" were deleted due to a character encoding issue Rresults = \ """ These are the results from fitting the model in R, i.e. running the commands Rscript in R A few things to note, X8 is a column of 1s, so by not including a '-1' in the formula, X8 gets thrown out of the model, with its coefficients being the "(Intercept)" term. An alternative is to use "-1" in the formula, but then R gives nonsensical F, R2 and adjusted R2 values. This means that R2, R2a and F cannot fully be trusted in R. In OLSModel, we have checked whether a column of 1s is in the column space, in which case the F, R2, and R2a are seneible. > source('test.R') [1] "Without using '-1'" [1] "------------------" Call: lm(formula = Y ~ X1 + X2 + X3 + X4 + X5 + X6 + X7 + X8 + X9 + X10 + X11 + X12 + X13 + X14, data = d) Residuals: Min 1Q Median 3Q Max -2.125783 -0.567850 0.004305 0.532145 2.372263 Coefficients: (1 not defined because of singularities) Estimate Std. 
Error t value Pr(>|t|) (Intercept) 2.603e+02 8.226e-01 316.463 < 2e-16 *** X1 1.439e-02 2.649e-02 0.543 0.5881 X2 -6.975e+00 1.022e+01 -0.683 0.4963 X3 4.410e+01 5.740e+00 7.682 6.42e-12 *** X4 3.864e+00 5.770e+00 0.670 0.5044 X5 2.458e+02 4.594e+02 0.535 0.5937 X6 9.789e+02 3.851e+02 2.542 0.0124 * X7 1.339e+03 8.418e+02 1.591 0.1145 X8 NA NA NA NA X9 -1.955e-02 1.539e-02 -1.270 0.2066 X10 7.042e-05 2.173e-04 0.324 0.7465 X11 -3.743e-08 6.770e-07 -0.055 0.9560 X12 3.060e-06 2.094e-06 1.461 0.1469 X13 1.440e-06 1.992e-06 0.723 0.4711 X14 -1.044e-05 7.215e-06 -1.448 0.1505 --- Residual standard error: 0.8019 on 112 degrees of freedom Multiple R-squared: 0.5737,Adjusted R-squared: 0.5242 F-statistic: 11.59 on 13 and 112 DF, p-value: 1.818e-15 [1] "Using '-1'" [1] "------------------" Call: lm(formula = Y ~ X1 + X2 + X3 + X4 + X5 + X6 + X7 + X8 + X9 + X10 + X11 + X12 + X13 + X14 - 1, data = d) Residuals: Min 1Q Median 3Q Max -2.125783 -0.567850 0.004305 0.532145 2.372263 Coefficients: Estimate Std. Error t value Pr(>|t|) X1 1.439e-02 2.649e-02 0.543 0.5881 X2 -6.975e+00 1.022e+01 -0.683 0.4963 X3 4.410e+01 5.740e+00 7.682 6.42e-12 *** X4 3.864e+00 5.770e+00 0.670 0.5044 X5 2.458e+02 4.594e+02 0.535 0.5937 X6 9.789e+02 3.851e+02 2.542 0.0124 * X7 1.339e+03 8.418e+02 1.591 0.1145 X8 2.603e+02 8.226e-01 316.463 < 2e-16 *** X9 -1.955e-02 1.539e-02 -1.270 0.2066 X10 7.042e-05 2.173e-04 0.324 0.7465 X11 -3.743e-08 6.770e-07 -0.055 0.9560 X12 3.060e-06 2.094e-06 1.461 0.1469 X13 1.440e-06 1.992e-06 0.723 0.4711 X14 -1.044e-05 7.215e-06 -1.448 0.1505 --- Residual standard error: 0.8019 on 112 degrees of freedom Multiple R-squared: 1,Adjusted R-squared: 1 F-statistic: 9.399e+05 on 14 and 112 DF, p-value: < 2.2e-16 SSE SST SSR 1 72.02328 168.9401 96.91685 MSE MST MSR 1 0.643065 1.351521 7.455142 [1] "AIC" [1] 317.1017 [1] "BIC" [1] 359.6459 """ def test_results(): m = OLSModel(x) r = m.fit(y) # results hand compared with R's printout yield niptest.assert_equal, '%0.4f' % r.R2, '0.5737' yield niptest.assert_equal, '%0.4f' % r.R2_adj, '0.5242' f = r.F_overall yield niptest.assert_equal, '%0.2f' % f['F'], '11.59' yield niptest.assert_equal, f['df_num'], 13 yield niptest.assert_equal, f['df_den'], 112 yield niptest.assert_equal, '%0.3e' % f['p_value'], '1.818e-15' # test Fcontrast, the 8th column of m.design is all 1s # let's construct a contrast matrix that tests everything # but column 8 is zero M = np.identity(14) M = np.array([M[i] for i in [0,1,2,3,4,5,6,8,9,10,11,12,13]]) Fc = r.Fcontrast(M) yield niptest.assert_almost_equal, Fc.F, f['F'], 6 yield niptest.assert_almost_equal, Fc.df_num, f['df_num'], 6 yield niptest.assert_almost_equal, Fc.df_den, f['df_den'], 6 thetas = [] sds = [] ts = [] ps = [] # the model has an intercept yield niptest.assert_true, r.model.has_intercept # design matrix has full rank yield niptest.assert_equal, r.model.rank, 14 # design matrix has full rank yield niptest.assert_equal, r.df_model, 14 yield niptest.assert_equal, r.df_total, 126 yield niptest.assert_equal, r.df_resid, 112 # entries with '*****' are not tested as they were a different format resultstr = \ ''' X1 1.439e-02 2.649e-02 0.543 0.5881 X2 -6.975e+00 1.022e+01 -0.683 0.4963 X3 4.410e+01 5.740e+00 7.682 ****** X4 3.864e+00 5.770e+00 0.670 0.5044 X5 2.458e+02 4.594e+02 0.535 0.5937 X6 9.789e+02 3.851e+02 2.542 0.0124 X7 1.339e+03 8.418e+02 1.591 0.1145 X8 2.603e+02 8.226e-01 316.463 ****** X9 -1.955e-02 1.539e-02 -1.270 0.2066 X10 7.042e-05 2.173e-04 0.324 0.7465 X11 -3.743e-08 6.770e-07 -0.055 0.9560 X12 
3.060e-06 2.094e-06 1.461 0.1469 X13 1.440e-06 1.992e-06 0.723 0.4711 X14 -1.044e-05 7.215e-06 -1.448 0.1505 X1 1.439e-02 2.649e-02 0.543 0.5881 X2 -6.975e+00 1.022e+01 -0.683 0.4963 X3 4.410e+01 5.740e+00 7.682 ****** X4 3.864e+00 5.770e+00 0.670 0.5044 X5 2.458e+02 4.594e+02 0.535 0.5937 X6 9.789e+02 3.851e+02 2.542 0.0124 X7 1.339e+03 8.418e+02 1.591 0.1145 X8 2.603e+02 8.226e-01 316.463 ****** X9 -1.955e-02 1.539e-02 -1.270 0.2066 X10 7.042e-05 2.173e-04 0.324 0.7465 X11 -3.743e-08 6.770e-07 -0.055 0.9560 X12 3.060e-06 2.094e-06 1.461 0.1469 X13 1.440e-06 1.992e-06 0.723 0.4711 X14 -1.044e-05 7.215e-06 -1.448 0.1505 ''' for row in resultstr.strip().split('\n'): row = row.strip() _, th, sd, t, p = row.split() thetas.append(th) sds.append(sd) ts.append(t) ps.append(p) for th, thstr in zip(r.theta, thetas): yield niptest.assert_equal, '%0.3e' % th, thstr for sd, sdstr in zip([np.sqrt(r.vcov(column=i)) for i in range(14)], sds): yield niptest.assert_equal, '%0.3e' % sd, sdstr for t, tstr in zip([r.t(column=i) for i in range(14)], ts): yield niptest.assert_equal, '%0.3f' % t, tstr for i, t in enumerate([r.t(column=i) for i in range(14)]): m = np.zeros((14,)) m[i] = 1. tv = r.Tcontrast(m) e = r.theta[i] sd = np.sqrt(r.vcov(column=i)) yield niptest.assert_almost_equal, tv.t, t, 6 yield niptest.assert_almost_equal, tv.sd, sd, 6 yield niptest.assert_almost_equal, tv.effect, e, 6 for p, pstr in zip([2*scipy.stats.t.sf(np.fabs(r.t(column=i)), r.df_resid) for i in range(14)], ps): if pstr.find('*') < 0: yield niptest.assert_equal, '%0.4f' % p, pstr yield niptest.assert_equal, "%0.5f" % r.SSE, "72.02328" yield niptest.assert_equal, "%0.4f" % r.SST, "168.9401" yield niptest.assert_equal, "%0.5f" % r.SSR, "96.91685" yield niptest.assert_equal, "%0.6f" % r.MSE, "0.643065" yield niptest.assert_equal, "%0.6f" % r.MST, "1.351521" yield niptest.assert_equal, "%0.6f" % r.MSR, "7.455142" yield niptest.assert_equal, "%0.4f" % np.sqrt(r.MSE), "0.8019" # the difference here comes from the fact that # we've treated sigma as a nuisance parameter, # so our AIC is the AIC of the profiled log-likelihood... 
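# (In more detail, and assuming the maximized likelihood values themselves
# agree: R's AIC()/BIC() for an lm fit count the error variance sigma^2 as
# one additional estimated parameter, while the profiled log-likelihood used
# here does not, so the two conventions differ by exactly one parameter's
# penalty term:
#     AIC_R = AIC_profiled + 2         # AIC penalty is 2 per parameter
#     BIC_R = BIC_profiled + log(n)    # BIC penalty is log(n); n = 126 here
# which is why the comparisons below add 2 and np.log(126) respectively.)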
yield niptest.assert_equal, '%0.4f'% (r.AIC + 2,), '317.1017' yield niptest.assert_equal, '%0.4f'% (r.BIC + np.log(126),), '359.6459' # this is the file "data.csv" referred to in Rscript above Rdata = ''' Y X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 2.558020266818153345e+02 -4.423009200784273898e-02 -6.615177603161188392e-03 -2.429792163411158279e-02 4.236447886547620167e-02 1.618533936246031348e-03 -8.683269025079367589e-04 -8.181821468255191711e-04 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.573856564029982792e+02 -1.247753847628743987e-02 8.132393396825286086e-03 -4.413603363412710312e-02 3.174380286547619917e-02 1.507591026246031356e-03 -8.321096135079367661e-04 -5.268108768253958792e-04 1.000000000000000000e+00 2.027260000000000062e+00 4.109783107600000207e+00 8.331598902713176713e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.590080857852332201e+02 -3.265906165554512651e-03 1.963457496825285822e-03 -1.398771363412710383e-02 3.088127086547619998e-02 1.672285950246031301e-03 -8.927174265079367271e-04 -4.244701868253958994e-04 1.000000000000000000e+00 4.054520000000000124e+00 1.643913243040000083e+01 6.665279122170541370e+01 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.607408786477914759e+02 -8.017150588157394330e-04 2.213062996825285525e-03 1.398740365872893493e-03 1.085352386547620146e-02 1.533498042246031435e-03 -7.043727325079367782e-04 -4.042463468253959091e-04 1.000000000000000000e+00 6.081780000000000186e+00 3.698804796840000364e+01 2.249531703732557730e+02 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.611418084786566283e+02 -1.861685769802005528e-04 1.047713639682528591e-02 1.167152736587289547e-02 1.489745686547620102e-02 1.548124779246031315e-03 -5.563730125079367241e-04 -1.481969968253959513e-04 1.000000000000000000e+00 8.109040000000000248e+00 6.575652972160000331e+01 5.332223297736433096e+02 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.625281634787599501e+02 -4.117603177916723598e-05 9.983357396825286167e-03 2.268076636587289252e-02 3.341529466547620009e-02 1.378939226246031274e-03 -5.824833125079368051e-04 -1.637155968253958946e-04 1.000000000000000000e+00 1.013630000000000031e+01 1.027445776899999998e+02 1.041449862839147045e+03 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.600881821274363688e+02 -8.724125662125817594e-06 2.118458339682528432e-02 -3.638986341271063796e-04 7.819901865476201752e-03 1.343526296246031447e-03 -4.266495825079367706e-04 -3.036430682539588335e-05 1.000000000000000000e+00 1.216356000000000037e+01 1.479521918736000146e+02 1.799625362986046184e+03 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.604916986023719687e+02 -1.779095604735100062e-06 2.110365339682528443e-02 -1.333419963412710470e-02 3.556263356547620380e-02 1.176156066246031480e-03 -2.915726925079367704e-04 -1.372058068253959344e-04 1.000000000000000000e+00 1.419082000000000043e+01 2.013793722724000190e+02 2.857738423630619764e+03 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.631421465595319091e+02 -3.505829544571274576e-07 3.057060839682528355e-02 2.450720636587289808e-02 2.371273386547620085e-02 1.109560806246031196e-03 -4.451344925079367475e-04 -4.868320682539588849e-05 1.000000000000000000e+00 
1.621808000000000050e+01 2.630261188864000133e+02 4.265778638189146477e+03 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.607404475404462687e+02 -6.698859808659203534e-08 4.212096239682527887e-02 4.201216436587289910e-02 1.535293186547620134e-02 1.200805636246031222e-03 -4.756955025079367830e-04 4.163935317460414412e-05 1.000000000000000000e+00 1.824533999999999878e+01 3.328924317155999688e+02 6.073735600077903655e+03 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.602563139919928403e+02 -1.244731173797263160e-08 3.868433239682528280e-02 3.198940136587289512e-02 1.951312986547620171e-02 1.210561816246031458e-03 -5.037184525079367245e-04 1.853174317460412092e-05 1.000000000000000000e+00 2.027260000000000062e+01 4.109783107599999994e+02 8.331598902713176358e+03 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.597932184819765098e+02 -2.254732652415686417e-09 3.464322639682528016e-02 2.498494136587289804e-02 6.040923865476201249e-03 1.251570966246031346e-03 -3.408492325079367884e-04 -2.053166825395852726e-06 1.000000000000000000e+00 2.229986000000000246e+01 4.972837560196001050e+02 1.108935813951124146e+04 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.600692996257253071e+02 -3.990740854251533582e-10 3.209237439682528781e-02 1.811942636587289546e-02 2.605920586547620307e-02 1.177732906246031254e-03 -5.077881225079367488e-04 5.365363174604119087e-06 1.000000000000000000e+00 2.432712000000000074e+01 5.918087674944000582e+02 1.439700290388836947e+04 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.605557611538409901e+02 -6.912161668563663771e-11 4.299601339682528056e-02 2.895994436587289583e-02 1.417107986547620074e-02 1.265060666246031361e-03 -7.339628625079367124e-04 1.238756831746040893e-04 1.000000000000000000e+00 2.635437999999999903e+01 6.945533451843999728e+02 1.830452278926084546e+04 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.595077391981066626e+02 -1.172812338719269821e-11 3.317149439682529066e-02 1.328090936587289494e-02 1.022893186547620126e-02 1.374031606246031408e-03 -5.220871725079368267e-04 1.413575031746041374e-04 1.000000000000000000e+00 2.838164000000000087e+01 8.055174890896000761e+02 2.286190738904495811e+04 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.587979640652715148e+02 -1.964186707357858839e-12 2.405623739682528558e-02 -1.810522634127103431e-03 1.576445486547620178e-02 1.135956976246031312e-03 -5.014120825079368057e-04 1.611867531746041847e-04 1.000000000000000000e+00 3.040890000000000271e+01 9.247011992100001407e+02 2.811914629665697794e+04 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.596659555937277446e+02 -3.223083090335421760e-13 3.234481339682528100e-02 2.004408536587289763e-02 2.356408786547620204e-02 1.221481986246031413e-03 -6.670757425079366920e-04 1.487958231746040706e-04 1.000000000000000000e+00 3.243616000000000099e+01 1.052104475545600053e+03 3.412622910551317182e+04 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.584320401508220471e+02 -9.003308688382024074e-14 3.619885939682528087e-02 2.789771365872894399e-03 9.189109865476198513e-03 1.135373276246031326e-03 -4.355060825079367357e-04 1.002332231746041503e-04 1.000000000000000000e+00 3.446341999999999928e+01 1.187727318096400040e+03 4.093314540902982844e+04 -0.000000000000000000e+00 -0.000000000000000000e+00 
-0.000000000000000000e+00 2.580819809866689525e+02 -3.906123070653587881e-14 3.660551639682528557e-02 -1.860463412710344766e-05 2.714363586547620388e-02 1.120834376246031315e-03 -4.501944025079367639e-04 1.202024331746040682e-04 1.000000000000000000e+00 3.649067999999999756e+01 1.331569726862399875e+03 4.858988480062322924e+04 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.594975650647641601e+02 1.601430181974213516e-14 3.905011839682528962e-02 9.654908365872898190e-03 1.281982286547620267e-02 1.076811816246031270e-03 -6.519448025079367355e-04 1.400206731746040907e-04 1.000000000000000000e+00 3.851794000000000295e+01 1.483631701843600240e+03 5.714643687370968837e+04 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.605247214249999956e+02 9.795389708948228080e-02 3.677422139682529068e-02 2.608958736587289190e-02 2.185457486547620273e-02 1.235064666246031345e-03 -6.071577725079368385e-04 1.763112331746040417e-04 1.000000000000000000e+00 4.054520000000000124e+01 1.643913243039999998e+03 6.665279122170541086e+04 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.615678287570015073e+02 7.636684347997682032e+00 2.837993739682528535e-02 3.336949636587289297e-02 2.712176086547619935e-02 1.121492386246031227e-03 -3.887845825079367800e-04 9.757465317460415049e-05 1.000000000000000000e+00 4.257245999999999952e+01 1.812414350451600058e+03 7.715893743802672543e+04 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.606581922590170848e+02 1.688917484910420086e+01 3.424000439682528540e-02 5.953364365872893665e-03 1.839351286547620187e-02 1.118185646246031353e-03 -3.785339525079367985e-04 2.395393531746040213e-04 1.000000000000000000e+00 4.459972000000000492e+01 1.989135024078400420e+03 8.871486511608993169e+04 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.585156749757550756e+02 1.131722623416632167e+01 3.749442739682529169e-02 -1.501305634127106381e-03 1.711901486547620296e-02 1.368664136246031289e-03 -5.395318625079368116e-04 1.879513531746040403e-04 1.000000000000000000e+00 4.662698000000000320e+01 2.174075263920400175e+03 1.013705638493112347e+05 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.589431610190735000e+02 1.345714208625528263e+00 3.218309039682527850e-02 -7.129233634127103703e-03 2.217183586547620197e-02 1.429032466246031368e-03 -5.373530925079368203e-04 1.592906031746042046e-04 1.000000000000000000e+00 4.865424000000000149e+01 2.367235069977600233e+03 1.151760232311069558e+05 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.588859099636547398e+02 -3.786197907636791982e+00 2.637535539682528754e-02 -1.390411634127106111e-03 1.310852586547620047e-02 1.517677216246031326e-03 -5.291699825079366776e-04 1.052765531746040640e-04 1.000000000000000000e+00 5.068149999999999977e+01 2.568614442250000138e+03 1.301812328548933729e+05 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.584379032107140688e+02 -4.100675927724760861e+00 2.384725139682528430e-02 -1.080336163412710590e-02 -4.173090134523799177e-03 1.358116916246031227e-03 -4.800622625079367331e-04 5.590095317460413646e-05 1.000000000000000000e+00 5.270875999999999806e+01 2.778213380737599891e+03 1.464361823140867637e+05 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.595410206851418025e+02 -2.630373115496683400e+00 1.004822839682528376e-02 9.314062365872892435e-03 
-9.878861345237952007e-04 1.325770276246031245e-03 -4.428060525079367620e-04 -2.427069682539584328e-05 1.000000000000000000e+00 5.473602000000000345e+01 2.996031885440400401e+03 1.639908612021034642e+05 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.607257898907158165e+02 -1.286200190109046071e+00 2.464792639682528499e-02 2.035648336587289609e-02 -6.855731345237967012e-04 1.419879466246031343e-03 -6.113658025079368383e-04 1.115435631746041455e-04 1.000000000000000000e+00 5.676328000000000173e+01 3.222069956358400304e+03 1.828952591123596649e+05 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.588783224743828555e+02 -5.223127938147428262e-01 2.786826139682528278e-02 1.117468365872894415e-03 -1.241363713452380002e-02 1.415631896246031260e-03 -4.147048725079367825e-04 -1.723451682539593396e-05 1.000000000000000000e+00 5.879054000000000002e+01 3.456327593491600055e+03 2.031993656382716435e+05 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.588356428472260973e+02 -1.842356108573543483e-01 2.425059939682528559e-02 -4.276288634127104610e-03 -1.091986813452380106e-02 1.392750786246031280e-03 -4.490394525079367555e-04 -1.003586682539589405e-05 1.000000000000000000e+00 6.081780000000000541e+01 3.698804796840000563e+03 2.249531703732558235e+05 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.597484695395635299e+02 -5.807770807625862314e-02 1.325085839682528521e-02 -3.310795634127106785e-03 2.611598386547619999e-02 1.344393666246031368e-03 -5.894356525079367040e-04 -4.194197682539594491e-05 1.000000000000000000e+00 6.284506000000000370e+01 3.949501566403600464e+03 2.482066629107282788e+05 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 2.586971711680070598e+02 -1.669108953593786623e-02 1.520021739682528641e-02 -6.521448634127104127e-03 1.323596186547620207e-02 1.018124536246031329e-03 -5.651434125079368188e-04 -1.186629568253958888e-04 1.000000000000000000e+00 6.487232000000000198e+01 4.208417902182400212e+03 2.730098328441053745e+05 6.637850845511725772e-01 -0.000000000000000000e+00 -0.000000000000000000e+00 2.578038305276642745e+02 -4.438179736810902651e-03 1.418104939682528556e-02 -1.458225563412710556e-02 2.076608686547620070e-02 7.166574462460313308e-04 -6.010164225079367385e-04 -2.031235568253959454e-04 1.000000000000000000e+00 6.689958000000000027e+01 4.475553804176400263e+03 2.994126697668033885e+05 2.437840493460591773e+01 -0.000000000000000000e+00 -0.000000000000000000e+00 2.575900012845362994e+02 -1.104415351769467155e-03 1.171448539682528461e-02 -6.411356341271060022e-04 2.179420786547620059e-02 7.711998362460313790e-04 -5.958785525079367436e-04 -1.778974268253958766e-04 1.000000000000000000e+00 6.892683999999999855e+01 4.750909272385600161e+03 3.274651632722386275e+05 1.195928942034693989e+02 -0.000000000000000000e+00 -0.000000000000000000e+00 2.592522649854503811e+02 -2.595046810460775255e-04 5.653468396825284473e-03 -3.306909634127105230e-03 3.415740386547620050e-02 7.991702162460313699e-04 -5.105784425079367903e-04 -2.023469768253959109e-04 1.000000000000000000e+00 7.095409999999999684e+01 5.034484306809999907e+03 3.572173029538273695e+05 3.362968463074205374e+02 -0.000000000000000000e+00 -0.000000000000000000e+00 2.579003985729477790e+02 -5.799523371039054791e-05 4.075954396825285861e-03 -5.813851634127106816e-03 3.851734186547620120e-02 8.126851062460313437e-04 -4.455600825079367448e-04 -3.203095468253959032e-04 
1.000000000000000000e+00 7.298135999999999513e+01 5.326278907449599501e+03 3.887190784049858339e+05 7.244798546627382620e+02 -0.000000000000000000e+00 -0.000000000000000000e+00 2.577110562270163996e+02 -1.240153176296573148e-05 1.982903996825284912e-03 -5.751847634127105896e-03 1.817295686547620165e-02 6.980794162460313449e-04 -3.607846825079367298e-04 -3.361090868253959027e-04 1.000000000000000000e+00 7.500862000000000762e+01 5.626293074304400761e+03 4.220204792191306478e+05 1.334131512685706639e+03 -0.000000000000000000e+00 -0.000000000000000000e+00 2.593279674352351662e+02 -2.549813093163372416e-06 2.012354196825284422e-03 -3.176191634127106811e-03 2.634695186547620152e-02 5.562481362460312394e-04 -4.909143225079367614e-04 -2.835488168253958450e-04 1.000000000000000000e+00 7.703588000000000591e+01 5.934526807374400960e+03 4.571714949896775070e+05 2.215241413792596632e+03 -0.000000000000000000e+00 -0.000000000000000000e+00 2.604506571831263386e+02 -5.061830223553558920e-07 3.248753396825284495e-03 5.653695365872894729e-03 3.363641326547620047e-02 4.461581362460312686e-04 -5.631164925079367844e-04 -1.737951468253959427e-04 1.000000000000000000e+00 7.906314000000000419e+01 6.250980106659601006e+03 4.942221153100429801e+05 3.417799151399689890e+03 -0.000000000000000000e+00 -0.000000000000000000e+00 2.602953723174513812e+02 -9.736609629218024716e-08 6.825325968252849568e-04 1.423937136587289515e-02 3.023103586547620097e-02 7.006392762460313377e-04 -5.004090925079366942e-04 -1.539339168253958537e-04 1.000000000000000000e+00 8.109040000000000248e+01 6.575652972159999990e+03 5.332223297736432869e+05 4.991794318923266474e+03 -0.000000000000000000e+00 -0.000000000000000000e+00 2.606369817776421769e+02 -1.820092841812642467e-08 -1.136286590317471534e-02 3.619031336587289621e-02 1.424289986547620096e-02 5.533487362460313193e-04 -4.338583525079367596e-04 -1.890155468253958962e-04 1.000000000000000000e+00 8.311766000000000076e+01 6.908545403875599732e+03 5.742221279738948215e+05 6.987216509779604166e+03 -0.000000000000000000e+00 -0.000000000000000000e+00 2.593342616024719973e+02 -3.315296284192901071e-09 -5.857725263174714918e-03 2.357598136587289728e-02 1.897169486547620187e-02 7.518108062460313089e-04 -5.384554125079367383e-04 -1.363035768253958785e-04 1.000000000000000000e+00 8.514491999999999905e+01 7.249657401806400230e+03 6.172714995042138034e+05 9.454055317384982118e+03 -0.000000000000000000e+00 -0.000000000000000000e+00 2.586453520651357962e+02 -5.897348231645986935e-10 1.111030896825284872e-03 2.246285136587289344e-02 2.219625186547620130e-02 6.593569362460313795e-04 -4.778790125079367536e-04 -7.630101682539586726e-05 1.000000000000000000e+00 8.717217999999999734e+01 7.598988965952399667e+03 6.624204339580164524e+05 1.244230033515567993e+04 -0.000000000000000000e+00 -0.000000000000000000e+00 2.590023951682784400e+02 -1.026285419063397840e-10 1.848365996825284893e-03 1.420209336587289345e-02 2.652135286547620263e-02 9.330586362460312937e-04 -5.569034125079367487e-04 -8.223069682539586433e-05 1.000000000000000000e+00 8.919944000000000983e+01 7.956540096313601680e+03 7.097189209287194535e+05 1.600194115650800268e+04 -0.000000000000000000e+00 -0.000000000000000000e+00 2.602716614134758402e+02 -1.749597348817579870e-11 -4.677688603174715194e-03 1.815530536587289800e-02 5.745579865476198311e-03 6.605902962460313572e-04 -5.903785325079367440e-04 -1.106166468253958835e-04 1.000000000000000000e+00 9.122670000000000812e+01 8.322310792890000812e+03 7.592169500097383279e+05 2.018296737485818085e+04 
-0.000000000000000000e+00 -0.000000000000000000e+00 2.604482429819940421e+02 -2.915444713749153634e-12 9.829689682528536254e-05 1.461135536587289396e-02 1.032855886547619922e-02 6.060708362460314087e-04 -5.028199025079367092e-04 9.170133174604125012e-06 1.000000000000000000e+00 9.325396000000000640e+01 8.696301055681600701e+03 8.109645107944898773e+05 2.503536858362251587e+04 -0.000000000000000000e+00 -0.000000000000000000e+00 2.603872844034092395e+02 -4.662988791519401875e-13 -8.091808403174714781e-03 2.668391636587289645e-02 9.499642865476200237e-03 6.190488562460314068e-04 -5.573827825079367406e-04 -1.419941268253958845e-04 1.000000000000000000e+00 9.528122000000000469e+01 9.078510884688401347e+03 8.650115928763902048e+05 3.060913437621728735e+04 -0.000000000000000000e+00 -0.000000000000000000e+00 2.604142717232071504e+02 -8.815554369269722195e-14 7.379531968252847629e-04 1.966617536587289550e-02 5.218423865476204404e-03 7.821939762460313177e-04 -6.720836925079368140e-04 -1.368856682539584639e-05 1.000000000000000000e+00 9.730848000000000297e+01 9.468940279910400932e+03 9.214081858488556463e+05 3.695425434605876944e+04 -0.000000000000000000e+00 -0.000000000000000000e+00 2.596725946468405937e+02 -4.790854546882667301e-14 3.729469396825285318e-03 1.677155036587289760e-02 9.729758654761985759e-04 7.744619962460313600e-04 -6.579227325079367063e-04 3.219561317460413550e-05 1.000000000000000000e+00 9.933574000000000126e+01 9.867589241347599454e+03 9.802042793053025380e+05 4.412071808656324720e+04 -0.000000000000000000e+00 -0.000000000000000000e+00 2.581306961166553151e+02 -1.980567423292065087e-14 1.638672296825285576e-03 -6.475722634127104721e-03 1.390103865476201295e-03 4.816735362460312572e-04 -6.694806825079367436e-04 -9.350514682539593728e-05 1.000000000000000000e+00 1.013629999999999995e+02 1.027445776900000055e+04 1.041449862839146983e+06 5.215851519114699477e+04 -0.000000000000000000e+00 -0.000000000000000000e+00 2.583217628919668982e+02 -1.405585884156201381e-14 7.728531396825284727e-03 -3.856817634127103489e-03 5.960830865476204887e-03 3.423149362460312529e-04 -7.660289725079367888e-04 2.281447317460411506e-05 1.000000000000000000e+00 1.033902599999999978e+02 1.068954586286759877e+04 1.105194926043805433e+06 6.111763525322629721e+04 -0.000000000000000000e+00 -0.000000000000000000e+00 2.598821128165189407e+02 -1.691532273721650273e-14 2.477927296825284398e-03 1.116856365872893886e-03 9.179691865476201362e-03 7.097850162460313164e-04 -8.175605915079367601e-04 -5.294306825395908231e-06 1.000000000000000000e+00 1.054175199999999961e+02 1.111285352295039957e+04 1.171489458512694109e+06 7.104806786621743231e+04 -0.000000000000000000e+00 -0.000000000000000000e+00 2.585404205373588979e+02 5.840602392497974451e-15 -3.963158031747146190e-04 7.451014365872893341e-03 3.865376865476201351e-03 5.380693362460314128e-04 -7.396422825079367394e-04 -2.474268682539594241e-05 1.000000000000000000e+00 1.074447800000000086e+02 1.154438074924840112e+04 1.240383449839229695e+06 8.199980262353675789e+04 -0.000000000000000000e+00 -0.000000000000000000e+00 2.574534264776349914e+02 1.712000128500727594e-14 -8.327767103174715108e-03 6.492053658728944021e-04 -4.315605134523795017e-03 4.314180362460313858e-04 -5.235343725079368016e-04 -1.426233668253959388e-04 1.000000000000000000e+00 1.094720400000000069e+02 1.198412754176160161e+04 1.311926889616827713e+06 9.402282911860039167e+04 -0.000000000000000000e+00 -0.000000000000000000e+00 2.587782193488699249e+02 1.886219388254293476e-14 4.761096396825285557e-03 
-7.202196341271061009e-04 -2.113392134523800481e-03 4.052769362460314270e-04 -7.262424025079366931e-04 -9.712075682539588351e-05 1.000000000000000000e+00 1.114993000000000052e+02 1.243209390049000103e+04 1.386169767438904848e+06 1.071671369448246987e+05 -0.000000000000000000e+00 -0.000000000000000000e+00 2.598656445159390387e+02 2.347227720962027251e-14 -4.165797203174715496e-03 1.295209736587289717e-02 -1.783551213452379963e-02 4.884648362460312747e-04 -5.813059725079367619e-04 -7.004130682539588988e-05 1.000000000000000000e+00 1.135265600000000035e+02 1.288827982543360122e+04 1.463162072898877319e+06 1.214827156956259423e+05 -0.000000000000000000e+00 -0.000000000000000000e+00 2.600989598110621728e+02 7.813072328225483567e-15 1.221070796825285756e-03 1.337387336587289588e-02 -1.252786513452380096e-02 2.161711362460314121e-04 -5.074466025079367101e-04 2.142214317460411615e-05 1.000000000000000000e+00 1.155538200000000018e+02 1.335268531659240034e+04 1.542953795590161346e+06 1.370195549644204148e+05 -0.000000000000000000e+00 -0.000000000000000000e+00 2.608026132195976174e+02 -8.925257391752444914e-15 1.228668539682528649e-02 1.208959736587289502e-02 -2.235864113452379343e-02 1.684635362460312931e-04 -2.464530425079367254e-04 1.124107331746041069e-04 1.000000000000000000e+00 1.175810800000000000e+02 1.382531037396640022e+04 1.625594925106173148e+06 1.538276443446243939e+05 -0.000000000000000000e+00 -0.000000000000000000e+00 2.599810088414655525e+02 -1.025966746681070654e-14 2.181112039682528425e-02 -1.205161763412710557e-02 -1.086435413452380150e-02 -5.987476375396861422e-05 -3.407551025079368036e-04 1.726038431746041530e-04 1.000000000000000000e+00 1.196083399999999983e+02 1.430615499755559904e+04 1.711135451040329412e+06 1.719569734296541719e+05 -0.000000000000000000e+00 -0.000000000000000000e+00 2.590798220474434288e+02 -1.906044947566650386e-14 1.003784239682528612e-02 6.137143365872895634e-03 3.477642546547619895e-02 -2.676582637539685807e-04 -2.744146425079367797e-04 7.012074317460411776e-05 1.000000000000000000e+00 1.216356000000000108e+02 1.479521918736000225e+04 1.799625362986046588e+06 1.914575318129261141e+05 -0.000000000000000000e+00 -0.000000000000000000e+00 2.594494534850605305e+02 -2.578066919736734499e-14 -2.027138103174714809e-03 -6.372505634127105523e-03 2.919624086547620290e-02 -3.534829637539685723e-04 -3.414351725079367138e-04 -9.094636825395874605e-06 1.000000000000000000e+00 1.236628600000000091e+02 1.529250294337960258e+04 1.891114650536739733e+06 2.123793090878562944e+05 -0.000000000000000000e+00 -0.000000000000000000e+00 2.580147282408462956e+02 -1.888533906750968316e-14 -1.798189060317471888e-02 8.892993658728941264e-04 1.529699586547620185e-02 -1.785335637539686715e-04 -3.668640225079367609e-04 -1.523243868253959478e-04 1.000000000000000000e+00 1.256901200000000074e+02 1.579800626561440185e+04 1.985653303285826230e+06 2.347722948478611070e+05 -0.000000000000000000e+00 -0.000000000000000000e+00 2.573028837927551535e+02 -1.384973992480027394e-14 -2.226030160317471474e-02 -1.401617563412710550e-02 9.232429865476204922e-03 -2.170017637539685754e-04 -6.020543625079367335e-04 -1.919957668253958593e-04 1.000000000000000000e+00 1.277173800000000057e+02 1.631172915406440188e+04 2.083291310826721834e+06 2.586864786863568006e+05 -0.000000000000000000e+00 -0.000000000000000000e+00 2.578661686497533765e+02 6.466134453156046314e-15 -2.225478460317471471e-02 -4.346986634127105592e-03 4.281016865476203193e-03 7.098093624603144143e-05 -4.939255525079367390e-04 
-1.331850268253959284e-04 1.000000000000000000e+00 1.297446400000000040e+02 1.683367160872960085e+04 2.184078662752842996e+06 0.000000000000000000e+00 5.310280676409380618e+00 -0.000000000000000000e+00 2.591778558577004219e+02 3.881029512404210243e-14 -3.350587260317471061e-02 3.708508365872893731e-03 3.303729865476202898e-03 4.290136246031276898e-06 -4.810798125079367789e-04 -1.990675968253958908e-04 1.000000000000000000e+00 1.317719000000000165e+02 1.736383362961000603e+04 2.288065348657606635e+06 0.000000000000000000e+00 5.366368736595970290e+01 -0.000000000000000000e+00 2.586265557848932417e+02 2.496704000974017557e-14 -4.055766460317471178e-02 2.324536365872896526e-03 -1.314141813452379681e-02 -2.111011637539687423e-04 -4.601720925079367608e-04 -2.897881768253959302e-04 1.000000000000000000e+00 1.337991600000000005e+02 1.790221521670560105e+04 2.395301358134427108e+06 0.000000000000000000e+00 1.950272394768473418e+02 -0.000000000000000000e+00 2.590581804587237684e+02 1.020916719346166513e-14 -3.380893360317471785e-02 4.358221365872893410e-03 -1.662428913452379531e-02 -3.211422637539687076e-04 -4.006317125079367453e-04 -1.464107968253959514e-04 1.000000000000000000e+00 1.358264200000000130e+02 1.844881637001640411e+04 2.505836680776723661e+06 0.000000000000000000e+00 4.793905304253556778e+02 -0.000000000000000000e+00 2.583827214705520987e+02 2.027411219651766781e-14 -3.782695560317471395e-02 1.149229936587289197e-02 -1.630400713452379718e-02 -2.047094637539685711e-04 -2.136010125079367472e-04 -1.059907068253958815e-04 1.000000000000000000e+00 1.378536799999999971e+02 1.900363708954240064e+04 2.619721306177909020e+06 0.000000000000000000e+00 9.567431536277551913e+02 -0.000000000000000000e+00 2.588504729398947006e+02 -2.959667608385212738e-16 -2.737255860317471326e-02 2.306047836587289679e-02 -1.175693013452380059e-02 -1.525203637539687424e-04 -2.631168025079367104e-04 -9.378550682539587170e-05 1.000000000000000000e+00 1.398809400000000096e+02 1.956667737528360158e+04 2.737005223931403365e+06 0.000000000000000000e+00 1.677074702500338617e+03 -0.000000000000000000e+00 2.575437003556809259e+02 -1.510831198685233849e-14 -2.817193160317471579e-02 -8.620721634127109789e-03 -1.014567713452380060e-02 -2.024390637539686885e-04 2.442606749206328864e-05 -1.798543568253958532e-04 1.000000000000000000e+00 1.419081999999999937e+02 2.013793722723999963e+04 2.857738423630618956e+06 0.000000000000000000e+00 2.690374770459364299e+03 -0.000000000000000000e+00 2.571050917428615890e+02 -1.222166334909334567e-14 -3.965530660317471978e-02 -7.093271634127106678e-03 -2.676973013452380035e-02 -2.402326637539686175e-04 -1.294388825079367941e-04 -2.178491468253959109e-04 1.000000000000000000e+00 1.439354600000000062e+02 2.071741664541160208e+04 2.981970894868975971e+06 0.000000000000000000e+00 4.046632950921140036e+03 -0.000000000000000000e+00 2.578799640847943806e+02 -2.669000963219823434e-14 -4.578354560317471345e-02 -1.935690153412710640e-02 -1.530625134523795616e-03 -3.285852637539686972e-04 -2.997716825079367771e-04 -1.772051168253958690e-04 1.000000000000000000e+00 1.459627199999999903e+02 2.130511562979839800e+04 3.109752627239886671e+06 0.000000000000000000e+00 5.795838837301906096e+03 -0.000000000000000000e+00 2.593543512047501167e+02 -1.557425937872241460e-14 -5.462931060317471887e-02 -1.786486341271049938e-04 -2.675493513452380234e-02 -3.041632637539686251e-04 -2.994083325079367969e-04 -2.266904168253959084e-04 1.000000000000000000e+00 1.479899800000000027e+02 2.190103418040040197e+04 
3.241133610336771701e+06 0.000000000000000000e+00 7.987982023017991196e+03 -0.000000000000000000e+00 2.592103613515139955e+02 1.025550116606464834e-14 -5.747345460317471177e-02 -2.301652634127106245e-03 -3.055690313452380513e-02 -1.852517637539686981e-04 -7.782878250793675776e-05 -2.941239768253959370e-04 1.000000000000000000e+00 1.500172400000000152e+02 2.250517229721760305e+04 3.376163833753045183e+06 0.000000000000000000e+00 1.067305210148565311e+04 -0.000000000000000000e+00 2.593191728453962241e+02 7.314866478049465998e-15 -4.823187060317471464e-02 1.890068236587289646e-02 -4.777992713452379470e-02 -3.452388637539688387e-04 -1.024134925079367604e-04 -3.109670268253958468e-04 1.000000000000000000e+00 1.520444999999999993e+02 2.311752998025000124e+04 3.514893287082121242e+06 0.000000000000000000e+00 1.390103866612112324e+04 -0.000000000000000000e+00 2.592008769899240974e+02 2.221644269997001270e-14 -4.118386960317471646e-02 1.733267436587289378e-02 -4.355931913452379400e-02 -3.705732637539686618e-04 -2.771284925079367436e-04 -1.953945868253958865e-04 1.000000000000000000e+00 1.540717600000000118e+02 2.373810722949760384e+04 3.657371959917420056e+06 0.000000000000000000e+00 1.772193131034077305e+04 -0.000000000000000000e+00 2.594888566963954304e+02 3.423980053733376596e-14 -3.876614060317470911e-02 1.016017036587289757e-02 -5.628503713452380486e-02 -3.304482637539686487e-04 -1.241367425079367053e-04 -9.316598682539590105e-05 1.000000000000000000e+00 1.560990199999999959e+02 2.436690404496039991e+04 3.803649841852353886e+06 0.000000000000000000e+00 2.218571962756076755e+04 -0.000000000000000000e+00 2.592471779187787320e+02 2.090355192478126067e+00 -4.206244260317471007e-02 1.105673136587289468e-02 -4.754148013452379196e-02 -2.150553637539685901e-04 -3.158812625079367815e-04 -1.838400068253959359e-04 1.000000000000000000e+00 1.581262800000000084e+02 2.500392042663840402e+04 3.953776922480343841e+06 0.000000000000000000e+00 2.734239321119751912e+04 -0.000000000000000000e+00 2.595144644726888146e+02 1.390900631135700216e+01 -4.419308660317471105e-02 2.374663636587289600e-02 -5.757486113452379983e-02 -3.322781637539685886e-04 3.979992749206327091e-05 -1.636741968253958715e-04 1.000000000000000000e+00 1.601535399999999925e+02 2.564915637453159798e+04 4.107803191394800786e+06 0.000000000000000000e+00 3.324194165466715640e+04 -0.000000000000000000e+00 2.594934264342520578e+02 1.557696618507103814e+01 -4.037264960317471507e-02 1.567967136587289367e-02 -6.731542113452379517e-02 -3.889040637539684965e-04 6.342409749206321789e-05 -1.471519668253958805e-04 1.000000000000000000e+00 1.621808000000000050e+02 2.630261188863999996e+04 4.265778638189146295e+06 0.000000000000000000e+00 3.993435455138613179e+04 -0.000000000000000000e+00 2.599376394425571561e+02 6.004799507075502696e+00 -2.857072660317471618e-02 1.227729936587289294e-02 -4.839276813452379755e-02 -4.437891637539687073e-04 9.347311749206322923e-05 -8.371388682539590391e-05 1.000000000000000000e+00 1.642080600000000175e+02 2.696428696896360634e+04 4.427753252456794493e+06 0.000000000000000000e+00 4.746962149477063213e+04 -0.000000000000000000e+00 2.583130545235978275e+02 -1.994345614804481137e+00 -3.650895860317471264e-02 6.498904365872894273e-03 -2.158240113452379594e-02 -4.707137637539686968e-04 5.781790749206320440e-05 -1.285526168253958669e-04 1.000000000000000000e+00 1.662353200000000015e+02 2.763418161550239893e+04 4.593777023791158572e+06 0.000000000000000000e+00 5.589773207823683333e+04 -0.000000000000000000e+00 2.576154366178921009e+02 
-4.354781600224979066e+00 -3.754501060317471522e-02 -1.127231463412710355e-02 -2.067503813452380157e-02 -4.761822637539686598e-04 1.106139774920631693e-04 -2.297192168253959360e-04 1.000000000000000000e+00 1.682625800000000140e+02 2.831229582825640318e+04 4.763899941785659641e+06 0.000000000000000000e+00 6.526867589520123147e+04 -0.000000000000000000e+00 2.582447671201464914e+02 -3.421137938612250018e+00 -3.709204260317471025e-02 -2.815319033412710253e-02 -3.209472813452379780e-02 -4.201502637539685295e-04 1.587400974920632384e-04 -1.486038868253958603e-04 1.000000000000000000e+00 1.702898399999999981e+02 2.899862960722560092e+04 4.938171996033710428e+06 0.000000000000000000e+00 7.563244253907985694e+04 -0.000000000000000000e+00 2.578366809073544346e+02 -1.884871573445409121e+00 -4.559719660317471113e-02 -2.012774773412710425e-02 -4.258769413452380415e-02 -5.238649637539687445e-04 1.121453374920631770e-04 -3.780857468253959143e-04 1.000000000000000000e+00 1.723171000000000106e+02 2.969318295241000305e+04 5.116643176128730178e+06 0.000000000000000000e+00 8.703902160328927857e+04 -0.000000000000000000e+00 2.575530122743335824e+02 -8.333182129281194728e-01 -5.434145660317471482e-02 -3.934316634127105194e-03 -2.802218613452379936e-02 -6.544949637539688135e-04 -4.547183250793677273e-05 -4.325602468253959159e-04 1.000000000000000000e+00 1.743443599999999947e+02 3.039595586380959867e+04 5.299363471664131619e+06 0.000000000000000000e+00 9.953840268124543945e+04 -0.000000000000000000e+00 2.582481297574381642e+02 -3.138703752643597356e-01 -7.287733960317471782e-02 -5.080906634127104610e-03 -3.453186913452380158e-02 -4.803973637539688153e-04 1.780781974920633099e-04 -5.289674068253958326e-04 1.000000000000000000e+00 1.763716200000000072e+02 3.110694834142440232e+04 5.486382872233334929e+06 0.000000000000000000e+00 1.131805753663649812e+05 -0.000000000000000000e+00 2.585080012650139452e+02 -1.043136344467513188e-01 -6.379200960317471525e-02 -1.374258063412710576e-02 -2.450723213452379867e-02 -4.271225637539686863e-04 1.437427974920632524e-04 -5.507417468253958956e-04 1.000000000000000000e+00 1.783988800000000197e+02 3.182616038525440672e+04 5.677751367429755628e+06 0.000000000000000000e+00 1.280155292520640214e+05 -0.000000000000000000e+00 2.589810122439655515e+02 -3.132084140102636693e-02 -7.903580860317471757e-02 -1.139652463412710488e-02 -3.978782313452379482e-02 -7.604801637539687284e-04 7.115520749206329099e-05 -5.629854968253959323e-04 1.000000000000000000e+00 1.804261400000000037e+02 3.255359199529960097e+04 5.873518946846805513e+06 0.000000000000000000e+00 1.440932539317586052e+05 -0.000000000000000000e+00 2.592973588137387537e+02 -8.642004611292256402e-03 -7.117489360317472147e-02 6.563063658728933436e-04 -1.220494713452379559e-02 -1.040174863753968570e-03 1.255475674920631573e-04 -4.912412868253959080e-04 1.000000000000000000e+00 1.824534000000000162e+02 3.328924317156000325e+04 6.073735600077906623e+06 0.000000000000000000e+00 1.614637389988654468e+05 -0.000000000000000000e+00 2.597658000548336190e+02 -2.219631667718219379e-03 -7.383044660317471253e-02 1.410036136587289324e-02 7.094414865476204868e-03 -1.154148363753968592e-03 1.615656974920631457e-04 -4.779029468253959113e-04 1.000000000000000000e+00 1.844806600000000003e+02 3.403311391403560265e+04 6.278451316716470756e+06 0.000000000000000000e+00 1.801769740468003438e+05 -0.000000000000000000e+00 2.600563664053747743e+02 -5.360794304548768905e-04 -6.586775660317471803e-02 1.655973336587289457e-02 -3.370324134523795812e-03 
-1.024209463753968704e-03 2.467476974920631986e-04 -3.545917268253958969e-04 1.000000000000000000e+00 1.865079200000000128e+02 3.478520422272640280e+04 6.487716086355919018e+06 0.000000000000000000e+00 2.002829486689801270e+05 -0.000000000000000000e+00 2.602901128078428314e+02 -1.227230843113079188e-04 -7.049731160317471157e-02 2.987536736587289438e-02 -2.844490013452380395e-02 -1.023074663753968574e-03 3.075099974920633130e-04 -4.335420468253959139e-04 1.000000000000000000e+00 1.885351799999999969e+02 3.554551409763239644e+04 6.701579898589661345e+06 0.000000000000000000e+00 2.218316524588204629e+05 -0.000000000000000000e+00 2.598264999690118202e+02 -2.680547757517946417e-05 -6.999111960317472292e-02 3.268389136587289412e-02 -1.014415313452379785e-02 -1.264280463753968825e-03 3.754828974920633141e-04 -4.390807868253959116e-04 1.000000000000000000e+00 1.905624400000000094e+02 3.631404353875360539e+04 6.920092743011121638e+06 0.000000000000000000e+00 2.448730750097382988e+05 -0.000000000000000000e+00 2.593596969358279694e+02 -5.616675076130314912e-06 -7.061410360317471602e-02 1.946565236587289444e-02 1.298353186547620067e-02 -1.411778063753968502e-03 3.464828974920631513e-04 -4.315975268253958975e-04 1.000000000000000000e+00 1.925896999999999935e+02 3.709079254609000054e+04 7.143304609213708900e+06 0.000000000000000000e+00 0.000000000000000000e+00 2.050658692729931676e-01 2.590111897581169842e+02 -1.134042666258773269e-06 -6.430232360317471307e-02 9.387877365872897284e-03 1.989402986547620170e-02 -1.206621563753968590e-03 3.619273974920631963e-04 -2.785400868253959123e-04 1.000000000000000000e+00 1.946169600000000059e+02 3.787576111964160373e+04 7.371265486790845171e+06 0.000000000000000000e+00 0.000000000000000000e+00 1.792219728288165825e+01 2.592187248340080146e+02 -2.214888922805985211e-07 -6.659902160317471287e-02 2.656261365872894520e-03 3.570156865476202535e-03 -1.165767363753968631e-03 2.659288974920633259e-04 -4.230125468253958989e-04 1.000000000000000000e+00 1.966442200000000184e+02 3.866894925940840767e+04 7.604025365335945040e+06 0.000000000000000000e+00 0.000000000000000000e+00 1.001701567040806395e+02 2.568569463550700789e+02 -4.198101968002987330e-08 -7.323089060317471144e-02 -1.627500873412710686e-02 -1.577161013452380023e-02 -1.102308463753968581e-03 3.161176974920632205e-04 -5.168182368253959342e-04 1.000000000000000000e+00 1.986714800000000025e+02 3.947035696539039782e+04 7.841634234442420304e+06 0.000000000000000000e+00 0.000000000000000000e+00 2.969385375491461332e+02 2.567240790713361207e+02 -7.743329643075380204e-09 -6.897513560317471148e-02 -1.611915993412710302e-02 -2.616736134523796331e-03 -1.200253663753968539e-03 1.929508974920633073e-04 -5.036689468253959488e-04 1.000000000000000000e+00 2.006987400000000150e+02 4.027998423758760327e+04 8.084142083703693934e+06 0.000000000000000000e+00 0.000000000000000000e+00 6.582169332343630686e+02 2.586193994029661667e+02 -1.393367178829911838e-09 -5.634666760317472156e-02 -1.460659013412710441e-02 -7.355770134523799408e-03 -1.106632863753968712e-03 2.397183974920633369e-04 -2.815068668253959110e-04 1.000000000000000000e+00 2.027259999999999991e+02 4.109783107600000221e+04 8.331598902713175863e+06 0.000000000000000000e+00 0.000000000000000000e+00 1.233994937175999667e+03 2.583841010140561707e+02 -2.451015754665610884e-10 -5.365690960317472114e-02 -3.176442634127106535e-03 -1.357681813452379926e-02 -1.359759063753968565e-03 3.363015974920633170e-04 -3.537066968253959058e-04 1.000000000000000000e+00 2.047532600000000116e+02 
4.192389748062760191e+04 8.584054681064289063e+06 0.000000000000000000e+00 0.000000000000000000e+00 2.074262142790351845e+03 2.580570629821268653e+02 -4.223306201495954454e-11 -6.084147260317471217e-02 -3.061121634127104973e-03 -7.098403134523798008e-03 -1.362605363753968592e-03 3.012735974920632180e-04 -3.800203668253959157e-04 1.000000000000000000e+00 2.067805199999999957e+02 4.275818345147039508e+04 8.841559408350443467e+06 0.000000000000000000e+00 0.000000000000000000e+00 3.229008143493673742e+03 2.579470443876787158e+02 -7.156816105084053148e-12 -5.542562460317471129e-02 -7.138105634127103749e-03 -8.081440134523797114e-03 -1.391156263753968482e-03 4.135093974920632997e-04 -3.021409368253959084e-04 1.000000000000000000e+00 2.088077800000000082e+02 4.360068898852840357e+04 9.104163074165061116e+06 0.000000000000000000e+00 0.000000000000000000e+00 4.748222532702277931e+03 2.599391914289993224e+02 -1.207054311834787104e-12 -3.929887060317471814e-02 -1.462635634127105316e-03 8.798824865476201351e-03 -1.426038263753968451e-03 4.433595974920632532e-04 -2.323506468253958533e-04 1.000000000000000000e+00 2.108350399999999922e+02 4.445141409180159826e+04 9.371915668101552874e+06 0.000000000000000000e+00 0.000000000000000000e+00 6.681894903832399905e+03 2.598000299780277942e+02 -2.147932142301532625e-13 -3.044666260317471437e-02 3.492261365872894457e-03 1.358364186547620159e-02 -1.401727663753968636e-03 6.015499974920632828e-04 -3.316270368253958971e-04 1.000000000000000000e+00 2.128623000000000047e+02 4.531035876129000098e+04 9.644867179753340781e+06 0.000000000000000000e+00 0.000000000000000000e+00 9.080014850300372927e+03 2.586574230707965967e+02 -4.005466736646009179e-14 -2.476531660317471406e-02 -5.519612634127105122e-03 -4.220371134523795420e-03 -1.356848963753968466e-03 6.767285974920632342e-04 -1.464826568253959098e-04 1.000000000000000000e+00 2.148895600000000172e+02 4.617752299699360447e+04 9.923067598713837564e+06 0.000000000000000000e+00 0.000000000000000000e+00 1.199257196552245478e+04 2.580585287471001266e+02 5.576182320169238469e-15 -2.481630060317471451e-02 -2.066747903412710641e-02 -8.855922134523797062e-03 -1.513199763753968717e-03 7.608736974920631479e-04 -2.132055668253959218e-04 1.000000000000000000e+00 2.169168200000000013e+02 4.705290679891240143e+04 1.020656691457645781e+07 0.000000000000000000e+00 0.000000000000000000e+00 1.546955584291486957e+04 2.596834585519351322e+02 2.568277743391322110e-14 -1.720004460317471617e-02 -3.208105634127104283e-03 6.712917865476203394e-03 -1.568501263753968675e-03 8.467745974920633202e-04 -4.144829682539588698e-05 1.000000000000000000e+00 2.189440800000000138e+02 4.793651016704640642e+04 1.049541511693462171e+07 0.000000000000000000e+00 0.000000000000000000e+00 1.956095607589399515e+04 2.602373452688122484e+02 2.864205519224055026e-14 -6.253708943174715595e-03 -9.945376634127107290e-03 4.145976865476200257e-03 -1.333169263753968686e-03 9.531898974920633176e-04 3.677603317460409731e-05 1.000000000000000000e+00 2.209713399999999979e+02 4.882833310139559762e+04 1.078966219538174197e+07 0.000000000000000000e+00 0.000000000000000000e+00 2.431676225787599833e+04 2.597039792615581746e+02 4.243141342386595795e-14 1.595174939682528562e-02 -1.055324663412710549e-02 -1.035979134523801193e-03 -1.308523563753968659e-03 9.431188974920631492e-04 1.944633531746040077e-04 1.000000000000000000e+00 2.229986000000000104e+02 4.972837560196000413e+04 1.108935813951123878e+07 0.000000000000000000e+00 0.000000000000000000e+00 2.978696398227728423e+04 
2.587581297465923740e+02 5.218249358888024564e-14 1.167652339682528559e-02 -3.154518813412710704e-02 -1.016064713452379670e-02 -1.509701063753968696e-03 1.229371797492063208e-03 3.136779317460414856e-05 1.000000000000000000e+00 2.250258599999999944e+02 5.063663766873959685e+04 1.139455293891652301e+07 0.000000000000000000e+00 0.000000000000000000e+00 3.602155084251398512e+04 2.603926301344870922e+02 3.008952168432361289e-14 5.463445239682528098e-02 1.774224736587289714e-02 -2.389310713452379165e-02 -1.121983163753968726e-03 1.694905097492063056e-03 6.866294531746040862e-04 1.000000000000000000e+00 2.270531200000000069e+02 5.155311930173440487e+04 1.170529658319101855e+07 0.000000000000000000e+00 0.000000000000000000e+00 4.307051243200255703e+04 2.602516580326667963e+02 -6.752827934503707979e-15 4.810599839682529189e-02 -1.496451783412710429e-02 -1.839609713452379502e-02 -1.091134563753968535e-03 2.077571597492063049e-03 6.492220531746040147e-04 1.000000000000000000e+00 2.290803800000000194e+02 5.247782050094440638e+04 1.202163906192813627e+07 0.000000000000000000e+00 0.000000000000000000e+00 5.098383834415919409e+04 2.592875295662722124e+02 -5.123726758011432889e-14 5.266199139682528618e-02 -3.588364363412710478e-02 -2.446927813452379197e-02 -1.311484563753968768e-03 2.101589897492062934e-03 4.946715531746040178e-04 1.000000000000000000e+00 2.311076400000000035e+02 5.341074126636960136e+04 1.234363036472129077e+07 0.000000000000000000e+00 0.000000000000000000e+00 5.981151817240004311e+04 2.592308820611373790e+02 -4.564269168653986185e-14 6.118641339682527602e-02 -3.072892273412710512e-02 -5.481309213452380258e-02 -1.255843163753968695e-03 2.399166697492063124e-03 8.114795531746040380e-04 1.000000000000000000e+00 2.331349000000000160e+02 5.435188159801000438e+04 1.267132048116390407e+07 0.000000000000000000e+00 0.000000000000000000e+00 6.960354151014162926e+04 2.595619725021097111e+02 -7.003595564454634878e-15 7.061374139682527473e-02 -2.235936743412710384e-02 -9.913570134523798372e-03 -1.268399263753968668e-03 2.261032397492063195e-03 8.009784531746040040e-04 1.000000000000000000e+00 2.351621600000000001e+02 5.530124149586560088e+04 1.300475940084938519e+07 0.000000000000000000e+00 0.000000000000000000e+00 8.040989795079996111e+04 2.590598186666346123e+02 6.860929725324630356e-16 7.201945939682527498e-02 -3.425773313412710380e-02 -2.546723413452380014e-02 -1.075733563753968579e-03 2.168366397492063266e-03 7.489506531746041021e-04 1.000000000000000000e+00 2.371894200000000126e+02 5.625882095993640542e+04 1.334399711337115988e+07 0.000000000000000000e+00 0.000000000000000000e+00 9.228057708779163659e+04 2.587420925533573950e+02 1.294219066619279127e-14 7.647773339682528704e-02 -3.709144563412710566e-02 -2.783978113452380276e-02 -1.551370363753968619e-03 2.241157097492063165e-03 8.262895531746041215e-04 1.000000000000000000e+00 2.392166799999999967e+02 5.722461999022239615e+04 1.368908360832263529e+07 0.000000000000000000e+00 0.000000000000000000e+00 1.052655685145325697e+05 2.593739234782133281e+02 2.489479921874677366e-14 8.438259339682528670e-02 -4.554863663412710151e-02 -2.931516913452379691e-02 -1.568884063753968675e-03 2.324322897492063066e-03 1.015189053174604152e-03 1.000000000000000000e+00 2.412439400000000091e+02 5.819863858672360220e+04 1.404006887529723532e+07 0.000000000000000000e+00 0.000000000000000000e+00 1.194148618244394165e+05 2.595694737699126904e+02 1.555171091878242343e-14 1.248369423968252873e-01 -2.113519634127106195e-03 -3.012072513452379585e-02 
-1.548244163753968554e-03 1.854907397492062959e-03 1.469042453174604010e-03 1.000000000000000000e+00 2.432712000000000216e+02 5.918087674944000901e+04 1.439700290388837270e+07 0.000000000000000000e+00 0.000000000000000000e+00 1.347784466109283094e+05 2.589335042145449961e+02 2.528064545633945617e-14 1.386098423968253057e-01 -1.554300763412710430e-02 -4.327043134523797518e-03 -1.380193763753968534e-03 1.668424897492063255e-03 1.876337353174604202e-03 1.000000000000000000e+00 2.452984600000000057e+02 6.017133447837160202e+04 1.475993568368945830e+07 0.000000000000000000e+00 0.000000000000000000e+00 1.514063124674152350e+05 2.585203309350814038e+02 1.708358532944420453e-14 1.239822423968252735e-01 -5.095475363412710346e-02 2.847714586547619997e-02 -1.985254663753968554e-03 1.673778397492063034e-03 1.607149453174604170e-03 1.000000000000000000e+00 2.473257200000000182e+02 6.117001177351841034e+04 1.512891720429391786e+07 0.000000000000000000e+00 0.000000000000000000e+00 1.693484489873169805e+05 2.577332151445505701e+02 -1.306066815792015049e-14 1.361771723968253078e-01 -4.787148863412710176e-02 4.220229221547620174e-02 -1.763331063753968610e-03 1.475171397492063084e-03 1.748681753174604041e-03 1.000000000000000000e+00 2.493529800000000023e+02 6.217690863488039759e+04 1.550399745529516041e+07 0.000000000000000000e+00 0.000000000000000000e+00 1.886548457640492998e+05 2.560893166165346315e+02 -2.617243662451785409e-14 1.327212623968253014e-01 -5.556048863412710315e-02 3.156961486547620044e-02 -1.981807663753968815e-03 1.528626197492063011e-03 1.673368953174604080e-03 1.000000000000000000e+00 2.513802400000000148e+02 6.319202506245760742e+04 1.588522642628660984e+07 0.000000000000000000e+00 0.000000000000000000e+00 2.093754923910290236e+05 2.583923782750806595e+02 -3.599677893875522221e-14 1.378797223968253050e-01 -4.097437163412710748e-02 4.345609296547620071e-02 -1.820374863753968561e-03 1.497760797492063162e-03 1.892659753174604376e-03 1.000000000000000000e+00 2.534074999999999989e+02 6.421536105624999618e+04 1.627265410686167143e+07 0.000000000000000000e+00 0.000000000000000000e+00 2.315603784616718476e+05 ''' nipy-0.3.0/nipy/algorithms/statistics/models/tests/test_regression.py000066400000000000000000000077211210344137400262370ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Test functions for models.regression """ import numpy as np import scipy.linalg as spl from ..regression import (OLSModel, ARModel, yule_walker, AREstimator, ar_bias_corrector, ar_bias_correct) from nose.tools import assert_equal, assert_true from numpy.testing import assert_array_almost_equal, assert_array_equal RNG = np.random.RandomState(20110902) X = RNG.standard_normal((40,10)) Y = RNG.standard_normal((40,)) def test_OLS(): model = OLSModel(design=X) results = model.fit(Y) assert_equal(results.df_resid, 30) def test_AR(): model = ARModel(design=X, rho=0.4) results = model.fit(Y) assert_equal(results.df_resid, 30) def test_OLS_degenerate(): Xd = X.copy() Xd[:,0] = Xd[:,1] + Xd[:,2] model = OLSModel(design=Xd) results = model.fit(Y) assert_equal(results.df_resid, 31) def test_AR_degenerate(): Xd = X.copy() Xd[:,0] = Xd[:,1] + Xd[:,2] model = ARModel(design=Xd, rho=0.9) results = model.fit(Y) assert_equal(results.df_resid, 31) def test_yule_walker_R(): # Test YW implementation against R results Y = np.array([1,3,4,5,8,9,10]) N = len(Y) X = np.ones((N, 2)) X[:,0] = np.arange(1,8) pX = spl.pinv(X) betas = np.dot(pX, Y) Yhat = 
Y - np.dot(X, betas) # R results obtained from: # >>> np.savetxt('yhat.csv', Yhat) # > yhat = read.table('yhat.csv') # > ar.yw(yhat$V1, aic=FALSE, order.max=2) def r_fudge(sigma, order): # Reverse fudge in ar.R calculation labeled as splus compatibility fix return sigma **2 * N / (N-order-1) rhos, sd = yule_walker(Yhat, 1, 'mle') assert_array_almost_equal(rhos, [-0.3004], 4) assert_array_almost_equal(r_fudge(sd, 1), 0.2534, 4) rhos, sd = yule_walker(Yhat, 2, 'mle') assert_array_almost_equal(rhos, [-0.5113, -0.7021], 4) assert_array_almost_equal(r_fudge(sd, 2), 0.1606, 4) rhos, sd = yule_walker(Yhat, 3, 'mle') assert_array_almost_equal(rhos, [-0.6737, -0.8204, -0.2313], 4) assert_array_almost_equal(r_fudge(sd, 3), 0.2027, 4) def test_ar_estimator(): # More or less a smoke test rng = np.random.RandomState(20110903) N = 100 Y = rng.normal(size=(N,1)) * 10 + 100 X = np.c_[np.linspace(-1,1,N), np.ones((N,))] my_model = OLSModel(X) results = my_model.fit(Y) are = AREstimator(my_model,2) rhos = are(results) assert_equal(rhos.shape, (2,)) assert_true(np.all(np.abs(rhos <= 1))) rhos2 = ar_bias_correct(results, 2) assert_array_almost_equal(rhos, rhos2, 8) invM = ar_bias_corrector(my_model.design, my_model.calc_beta, 2) rhos3 = ar_bias_correct(results, 2, invM) assert_array_almost_equal(rhos2, rhos3) # Check orders 1 and 3 rhos = ar_bias_correct(results, 1) assert_equal(rhos.shape, ()) assert_true(abs(rhos) <= 1) rhos = ar_bias_correct(results, 3) assert_equal(rhos.shape, (3,)) assert_true(np.all(np.abs(rhos) <= 1)) # Make a 2D Y and try that Y = rng.normal(size=(N,12)) * 10 + 100 results = my_model.fit(Y) rhos = are(results) assert_equal(rhos.shape, (2,12)) assert_true(np.all(np.abs(rhos <= 1))) rhos2 = ar_bias_correct(results, 2) assert_array_almost_equal(rhos, rhos2, 8) rhos3 = ar_bias_correct(results, 2, invM) assert_array_almost_equal(rhos2, rhos3) # Passing in a simple array rhos4 = ar_bias_correct(results.resid, 2, invM) assert_array_almost_equal(rhos3, rhos4) # Check orders 1 and 3 rhos = ar_bias_correct(results, 1) assert_equal(rhos.shape, (12,)) assert_true(np.all(np.abs(rhos) <= 1)) rhos = ar_bias_correct(results, 3) assert_equal(rhos.shape, (3,12)) assert_true(np.all(np.abs(rhos) <= 1)) # Try reshaping to 3D results.resid = results.resid.reshape((N,3,4)) rhos = ar_bias_correct(results, 2) assert_array_almost_equal(rhos, rhos2.reshape((2,3,4))) nipy-0.3.0/nipy/algorithms/statistics/models/tests/test_utils.py000066400000000000000000000014021210344137400252050ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Test functions for models.utils """ import numpy as np from .. 
import utils from nose.tools import (assert_equal, assert_true, assert_raises) from numpy.testing import (assert_array_equal, assert_array_almost_equal) def test_StepFunction(): x = np.arange(20) y = np.arange(20) f = utils.StepFunction(x, y) assert_array_almost_equal(f( np.array([[3.2,4.5],[24,-3.1]]) ), [[ 3, 4], [19, 0]]) def test_StepFunctionBadShape(): x = np.arange(20) y = np.arange(21) assert_raises(ValueError, utils.StepFunction, x, y) x = np.zeros((2, 2)) y = np.zeros((2, 2)) assert_raises(ValueError, utils.StepFunction, x, y) nipy-0.3.0/nipy/algorithms/statistics/models/utils.py000066400000000000000000000046571210344137400230230ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: ''' General matrix and other utilities for statistics ''' import numpy as np import scipy.interpolate __docformat__ = 'restructuredtext' def mad(a, c=0.6745, axis=0): """ Median Absolute Deviation: median(abs(a - median(a))) / c """ _shape = a.shape a.shape = np.product(a.shape, axis=0) m = np.median(np.fabs(a - np.median(a))) / c a.shape = _shape return m class StepFunction(object): """ A basic step function: values at the ends are handled in the simplest way possible: everything to the left of x[0] is set to ival; everything to the right of x[-1] is set to y[-1]. Examples -------- >>> x = np.arange(20) >>> y = np.arange(20) >>> f = StepFunction(x, y) >>> >>> print f(3.2) 3.0 >>> print f([[3.2,4.5],[24,-3.1]]) [[ 3. 4.] [ 19. 0.]] """ def __init__(self, x, y, ival=0., sorted=False): _x = np.asarray(x) _y = np.asarray(y) if _x.shape != _y.shape: raise ValueError( 'in StepFunction: x and y do not have the same shape') if len(_x.shape) != 1: raise ValueError('in StepFunction: x and y must be 1-dimensional') self.x = np.hstack([[- np.inf], _x]) self.y = np.hstack([[ival], _y]) if not sorted: asort = np.argsort(self.x) self.x = np.take(self.x, asort, 0) self.y = np.take(self.y, asort, 0) self.n = self.x.shape[0] def __call__(self, time): tind = np.searchsorted(self.x, time) - 1 return self.y[tind] def ECDF(values): """ Return the ECDF of an array as a step function. """ x = np.array(values, copy=True) x.sort() x.shape = np.product(x.shape, axis=0) n = x.shape[0] y = (np.arange(n) + 1.) / n return StepFunction(x, y) def monotone_fn_inverter(fn, x, vectorized=True, **keywords): """ Given a monotone function x (no checking is done to verify monotonicity) and a set of x values, return an linearly interpolated approximation to its inverse from its values on x. """ if vectorized: y = fn(x, **keywords) else: y = [] for _x in x: y.append(fn(_x, **keywords)) y = np.array(y) a = np.argsort(y) return scipy.interpolate.interp1d(y[a], x[a]) nipy-0.3.0/nipy/algorithms/statistics/onesample.py000066400000000000000000000101311210344137400223430ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Utilities for one sample t-tests """ __docformat__ = 'restructuredtext' import numpy as np from ..utils.matrices import pos_recipr def estimate_mean(Y, sd): """ Estimate the mean of a sample given information about the standard deviations of each entry. Parameters ---------- Y : ndarray Data for which mean is to be estimated. Should have shape[0] == number of subjects. sd : ndarray Standard deviation (subject specific) of the data for which the mean is to be estimated. Should have shape[0] == number of subjects. 
Returns ------- value : dict This dictionary has keys ['effect', 'scale', 't', 'resid', 'sd'] """ nsubject = Y.shape[0] squeeze = False if Y.ndim == 1: Y = Y.reshape(Y.shape[0], 1) squeeze = True _stretch = lambda x: np.multiply.outer(np.ones(nsubject), x) W = pos_recipr(sd**2) if W.shape in [(), (1,)]: W = np.ones(Y.shape) * W W.shape = Y.shape # Compute the mean using the optimal weights effect = (Y * W).sum(0) / W.sum(0) resid = (Y - _stretch(effect)) * np.sqrt(W) scale = np.add.reduce(np.power(resid, 2), 0) / (nsubject - 1) var_total = scale * pos_recipr(W.sum(0)) value = {} value['resid'] = resid value['effect'] = effect value['sd'] = np.sqrt(var_total) value['t'] = value['effect'] * pos_recipr(value['sd']) value['scale'] = np.sqrt(scale) if squeeze: for key in value.keys(): value[key] = np.squeeze(value[key]) return value def estimate_varatio(Y, sd, df=None, niter=10): """ Estimate variance fixed/random effects variance ratio In a one-sample random effects problem, estimate the ratio between the fixed effects variance and the random effects variance. Parameters ---------- Y : np.ndarray Data for which mean is to be estimated. Should have shape[0] == number of subjects. sd : array Standard deviation (subject specific) of the data for which the mean is to be estimated. Should have shape[0] == number of subjects. df : int or None, optional If supplied, these are used as weights when deriving the fixed effects variance. Should have length == number of subjects. niter : int, optional Number of EM iterations to perform (default 10) Returns ------- value : dict This dictionary has keys ['fixed', 'ratio', 'random'], where 'fixed' is the fixed effects variance implied by the input parameter 'sd'; 'random' is the random effects variance and 'ratio' is the estimated ratio of variances: 'random'/'fixed'. """ nsubject = Y.shape[0] squeeze = False if Y.ndim == 1: Y = Y.reshape(Y.shape[0], 1) squeeze = True _stretch = lambda x: np.multiply.outer(np.ones(nsubject), x) W = pos_recipr(sd**2) if W.shape in [(), (1,)]: W = np.ones(Y.shape) * W W.shape = Y.shape S = 1. / W R = Y - np.multiply.outer(np.ones(Y.shape[0]), Y.mean(0)) sigma2 = np.squeeze((R**2).sum(0)) / (nsubject - 1) Sreduction = 0.99 minS = S.min(0) * Sreduction Sm = S - _stretch(minS) for _ in range(niter): Sms = Sm + _stretch(sigma2) W = pos_recipr(Sms) Winv = pos_recipr(W.sum(0)) mu = Winv * (W*Y).sum(0) R = W * (Y - _stretch(mu)) ptrS = 1 + (Sm * W).sum(0) - (Sm * W**2).sum(0) * Winv sigma2 = np.squeeze((sigma2 * ptrS + (sigma2**2) * (R**2).sum(0)) / nsubject) sigma2 = sigma2 - minS if df is None: df = np.ones(nsubject) df.shape = (1, nsubject) _Sshape = S.shape S.shape = (S.shape[0], np.product(S.shape[1:])) value = {} value['fixed'] = (np.dot(df, S) / df.sum()).reshape(_Sshape[1:]) value['ratio'] = np.nan_to_num(sigma2 / value['fixed']) value['random'] = sigma2 if squeeze: for key in value.keys(): value[key] = np.squeeze(value[key]) return value nipy-0.3.0/nipy/algorithms/statistics/quantile.c000066400000000000000000000117541210344137400220100ustar00rootroot00000000000000#include "quantile.h" #include #include #ifdef INFINITY #define POSINF INFINITY #else #define POSINF HUGE_VAL #endif #define UNSIGNED_FLOOR(a) ( (int)(a) ) #define UNSIGNED_CEIL(a) ( ( (int)(a)-a )!=0.0 ? 
(int)(a+1) : (int)(a) ) #define SWAP(a, b) {tmp=(a); (a)=(b); (b)=tmp;} /* Declaration of static functions */ static double _pth_element(double* x, npy_intp p, npy_intp stride, npy_intp size); static void _pth_interval(double* am, double* aM, double* x, npy_intp p, npy_intp stride, npy_intp size); /* Quantile. Given a sample x, this function computes a value q so that the number of sample values that are greater or equal to q is smaller or equal to (1-r) * sample size. */ double quantile(double* data, npy_intp size, npy_intp stride, double r, int interp) { double m, pp; npy_intp p; if ((r<0) || (r>1)){ fprintf(stderr, "Ratio must be in [0,1], returning zero"); return 0.0; } if (size == 1) return data[0]; /* Find the smallest index p so that p >= r * size */ if (!interp) { pp = r * size; p = UNSIGNED_CEIL(pp); if (p == size) return POSINF; m = _pth_element(data, p, stride, size); } else { double wm, wM; pp = r * (size-1); p = UNSIGNED_FLOOR(pp); wM = pp - (double)p; wm = 1.0 - wM; if (wM <= 0) m = _pth_element(data, p, stride, size); else { double am, aM; _pth_interval(&am, &aM, data, p, stride, size); m = wm*am + wM*aM; } } return m; } /*** STATIC FUNCTIONS ***/ /* BEWARE: the input array x gets modified! */ /* Pick up the sample value a so that: (p+1) sample values are <= a AND the remaining sample values are >= a */ static double _pth_element(double* x, npy_intp p, npy_intp stride, npy_intp n) { double a, tmp; double *bufl, *bufr; npy_intp i, j, il, jr, stop1, stop2; int same_extremities; stop1 = 0; il = 0; jr = n-1; while (stop1 == 0) { same_extremities = 0; bufl = x + stride*il; bufr = x + stride*jr; if (*bufl > *bufr) SWAP(*bufl, *bufr) else if (*bufl == *bufr) same_extremities = 1; a = *bufl; if (il == jr) return a; bufl += stride; i = il + 1; j = jr; stop2 = 0; while (stop2 == 0) { while (*bufl < a) { i ++; bufl += stride; } while (*bufr > a) { j --; bufr -= stride; } if (j <= i) stop2 = 1; else { SWAP(*bufl, *bufr) j --; bufr -= stride; i ++; bufl += stride; } /* Avoids infinite loops in samples with redundant values. This situation can only occur with i == j */ if ((same_extremities) && (j==jr)) { j --; bufr -= stride; SWAP(x[il*stride], *bufr) stop2 = 1; } } /* At this point, we know that il <= j <= i; moreover: if k <= j, x(j) <= a and if k > j, x(j) >= a if k < i, x(i) <= a and if k >= i, x(i) >= a We hence have: (j+1) values <= a and the remaining (n-j-1) >= a i values <= a and the remaining (n-i) >= a */ if (j > p) jr = j; else if (j < p) il = i; else /* j == p */ stop1 = 1; } return a; } /* BEWARE: the input array x gets modified! 
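   Given 0 <= p < n-1, this helper finds the p-th and (p+1)-th smallest
   sample values (returned through am <= aM); quantile() interpolates
   linearly between the two when called with interp != 0.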
*/ static void _pth_interval(double* am, double* aM, double* x, npy_intp p, npy_intp stride, npy_intp n) { double a, tmp; double *bufl, *bufr; npy_intp i, j, il, jr, stop1, stop2, stop3; npy_intp pp = p+1; int same_extremities = 0; *am = 0.0; *aM = 0.0; stop1 = 0; stop2 = 0; il = 0; jr = n-1; while ((stop1 == 0) || (stop2 == 0)) { same_extremities = 0; bufl = x + stride*il; bufr = x + stride*jr; if (*bufl > *bufr) SWAP(*bufl, *bufr) else if (*bufl == *bufr) same_extremities = 1; a = *bufl; if (il == jr) { *am=a; *aM=a; return; } bufl += stride; i = il + 1; j = jr; stop3 = 0; while (stop3 == 0) { while (*bufl < a) { i ++; bufl += stride; } while (*bufr > a) { j --; bufr -= stride; } if (j <= i) stop3 = 1; else { SWAP(*bufl, *bufr) j --; bufr -= stride; i ++; bufl += stride; } /* Avoids infinite loops in samples with redundant values */ if ((same_extremities) && (j==jr)) { j --; bufr -= stride; SWAP(x[il*stride], *bufr) stop3 = 1; } } /* At this point, we know that there are (j+1) datapoints <=a including a itself, and another (n-j-1) datapoints >=a */ if (j > pp) jr = j; else if (j < p) il = i; /* Case: found percentile at p */ else if (j == p) { il = i; *am = a; stop1 = 1; } /* Case: found percentile at (p+1), ie j==(p+1) */ else { jr = j; *aM = a; stop2 = 1; } } return; } nipy-0.3.0/nipy/algorithms/statistics/quantile.h000066400000000000000000000004231210344137400220040ustar00rootroot00000000000000#ifndef QUANTILE #define QUANTILE #ifdef __cplusplus extern "C" { #endif #include #include extern double quantile(double* data, npy_intp size, npy_intp stride, double r, int interp); #ifdef __cplusplus } #endif #endif nipy-0.3.0/nipy/algorithms/statistics/rft.py000066400000000000000000000563321210344137400211700ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Random field theory routines The theoretical results for the EC densities appearing in this module were partially supported by NSF grant DMS-0405970. Taylor, J.E. & Worsley, K.J. (2007). "Detecting sparse cone alternatives for Gaussian random fields, with an application to fMRI". Annals of Statistics, submitted. Taylor, J.E. & Worsley, K.J. (2007). "Random fields of multivariate test statistics, with applications to shape analysis." Annals of Statistics, accepted. """ import numpy as np from numpy.linalg import pinv from scipy import stats from scipy.misc import factorial from scipy.special import gamma, gammaln, beta, hermitenorm def binomial(n, k): """ Binomial coefficient n! c = --------- (n-k)! k! Parameters ---------- n : float n of (n, k) k : float k of (n, k) Returns ------- c : float Examples -------- First 3 values of 4 th row of Pascal triangle >>> [binomial(4, k) for k in range(3)] [1.0, 4.0, 6.0] """ if n <= k or n == 0: return 0. elif k == 0: return 1. return 1./(beta(n-k+1, k+1)*(n+1)) def Q(dim, dfd=np.inf): """ Q polynomial If `dfd` == inf (the default), then Q(dim) is the (dim-1)-st Hermite polynomial: .. math:: H_j(x) = (-1)^j * e^{x^2/2} * (d^j/dx^j e^{-x^2/2}) If `dfd` != inf, then it is the polynomial Q defined in [Worsley1994]_ Parameters ---------- dim : int dimension of polynomial dfd : scalar Returns ------- q_poly : np.poly1d instance References ---------- .. [Worsley1994] Worsley, K.J. (1994). 'Local maxima and the expected Euler characteristic of excursion sets of \chi^2, F and t fields.' Advances in Applied Probability, 26:13-42. 
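    Examples
    --------
    With the default ``dfd`` of inf, ``Q(dim)`` evaluates as the Hermite
    polynomial of degree ``dim - 1`` (compared here by value rather than by
    coefficient representation):

    >>> import numpy as np
    >>> from scipy.special import hermitenorm
    >>> from nipy.algorithms.statistics.rft import Q
    >>> x = np.arange(-9, 10)
    >>> np.allclose(Q(3)(x), hermitenorm(2)(x))
    True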
""" m = dfd j = dim if j <= 0: raise ValueError('Q defined only for dim > 0') poly = hermitenorm(j-1) poly = np.poly1d(np.around(poly.c)) if np.isfinite(m): for l in range((j-1)//2+1): f = np.exp(gammaln((m+1)/2.) - gammaln((m+2-j+2*l)/2.) - 0.5*(j-1-2*l)*(np.log(m/2.))) poly.c[2*l] *= f return np.poly1d(poly.c) class ECquasi(np.poly1d): """ Polynomials with premultiplier A subclass of poly1d consisting of polynomials with a premultiplier of the form: (1 + x^2/m)^-exponent where m is a non-negative float (possibly infinity, in which case the function is a polynomial) and exponent is a non-negative multiple of 1/2. These arise often in the EC densities. Examples -------- >>> import numpy >>> from nipy.algorithms.statistics.rft import ECquasi >>> x = numpy.linspace(0,1,101) >>> a = ECquasi([3,4,5]) >>> a ECquasi(array([3, 4, 5]), m=inf, exponent=0.000000) >>> a(3) == 3*3**2 + 4*3 + 5 True >>> b = ECquasi(a.coeffs, m=30, exponent=4) >>> numpy.allclose(b(x), a(x) * numpy.power(1+x**2/30, -4)) True """ def __init__(self, c_or_r, r=0, exponent=None, m=None): np.poly1d.__init__(self, c_or_r, r=r, variable='x') if exponent is None and not hasattr(self, 'exponent'): self.exponent = 0 elif not hasattr(self, 'exponent'): self.exponent = exponent if m is None and not hasattr(self, 'm'): self.m = np.inf elif not hasattr(self, 'm'): self.m = m if not np.isfinite(self.m): self.exponent = 0. def denom_poly(self): """ Base of the premultiplier: (1+x^2/m). Examples -------- >>> import numpy >>> b = ECquasi([3,4,20], m=30, exponent=4) >>> d = b.denom_poly() >>> d poly1d([ 0.03333333, 0. , 1. ]) >>> numpy.allclose(d.c, [1./b.m,0,1]) True """ return np.poly1d([1./self.m, 0, 1]) def change_exponent(self, _pow): """ Change exponent Multiply top and bottom by an integer multiple of the self.denom_poly. Examples -------- >>> import numpy >>> b = ECquasi([3,4,20], m=30, exponent=4) >>> x = numpy.linspace(0,1,101) >>> c = b.change_exponent(3) >>> c ECquasi(array([ 1.11111111e-04, 1.48148148e-04, 1.07407407e-02, 1.33333333e-02, 3.66666667e-01, 4.00000000e-01, 5.00000000e+00, 4.00000000e+00, 2.00000000e+01]), m=30.000000, exponent=7.000000) >>> numpy.allclose(c(x), b(x)) True """ if np.isfinite(self.m): _denom_poly = self.denom_poly() if int(_pow) != _pow or _pow < 0: raise ValueError, 'expecting a non-negative integer' p = _denom_poly**int(_pow) exponent = self.exponent + _pow coeffs = np.polymul(self, p).coeffs return ECquasi(coeffs, exponent=exponent, m=self.m) else: return ECquasi(self.coeffs, exponent=self.exponent, m=self.m) def __setattr__(self, key, val): if key == 'exponent': if 2*float(val) % 1 == 0: self.__dict__[key] = float(val) else: raise ValueError, 'expecting multiple of a half, got %f' % val elif key == 'm': if float(val) > 0 or val == np.inf: self.__dict__[key] = val else: raise ValueError, 'expecting positive float or inf' else: np.poly1d.__setattr__(self, key, val) def compatible(self, other): """ Check compatibility of degrees of freedom Check whether the degrees of freedom of two instances are equal so that they can be multiplied together. Examples -------- >>> import numpy >>> b = ECquasi([3,4,20], m=30, exponent=4) >>> x = numpy.linspace(0,1,101) >>> c = b.change_exponent(3) >>> b.compatible(c) True >>> d = ECquasi([3,4,20]) >>> b.compatible(d) False >>> """ if self.m != other.m: #raise ValueError, 'quasi polynomials are not compatible, m disagrees' return False return True def __add__(self, other): """ Add two compatible ECquasi instances together. 
Examples -------- >>> b = ECquasi([3,4,20], m=30, exponent=4) >>> c = ECquasi([1], m=30, exponent=4) >>> b+c #doctest: +IGNORE_DTYPE ECquasi(array([ 3, 4, 21]), m=30.000000, exponent=4.000000) >>> d = ECquasi([1], m=30, exponent=3) >>> b+d ECquasi(array([ 3.03333333, 4. , 21. ]), m=30.000000, exponent=4.000000) """ if self.compatible(other): if np.isfinite(self.m): M = max(self.exponent, other.exponent) q1 = self.change_exponent(M-self.exponent) q2 = other.change_exponent(M-other.exponent) p = np.poly1d.__add__(q1, q2) return ECquasi(p.coeffs, exponent=M, m=self.m) else: p = np.poly1d.__add__(self, other) return ECquasi(p.coeffs, exponent=0, m=self.m) def __mul__(self, other): """ Multiply two compatible ECquasi instances together. Examples -------- >>> b=ECquasi([3,4,20], m=30, exponent=4) >>> c=ECquasi([1,2], m=30, exponent=4.5) >>> b*c ECquasi(array([ 3, 10, 28, 40]), m=30.000000, exponent=8.500000) """ if np.isscalar(other): return ECquasi(self.coeffs * other, m=self.m, exponent=self.exponent) elif self.compatible(other): p = np.poly1d.__mul__(self, other) return ECquasi(p.coeffs, exponent=self.exponent+other.exponent, m=self.m) def __call__(self, val): """Evaluate the ECquasi instance. Examples -------- >>> import numpy >>> x = numpy.linspace(0,1,101) >>> a = ECquasi([3,4,5]) >>> a ECquasi(array([3, 4, 5]), m=inf, exponent=0.000000) >>> a(3) == 3*3**2 + 4*3 + 5 True >>> b = ECquasi(a.coeffs, m=30, exponent=4) >>> numpy.allclose(b(x), a(x) * numpy.power(1+x**2/30, -4)) True """ n = np.poly1d.__call__(self, val) _p = self.denom_poly()(val) return n / np.power(_p, self.exponent) def __div__(self, other): raise NotImplementedError def __eq__(self, other): return (np.poly1d.__eq__(self, other) and self.m == other.m and self.exponent == other.exponent) def __ne__(self, other): return not self.__eq__(other) def __pow__(self, _pow): """ Power of a ECquasi instance. Examples -------- >>> b = ECquasi([3,4,5],m=10, exponent=3) >>> b**2 ECquasi(array([ 9, 24, 46, 40, 25]), m=10.000000, exponent=6.000000) """ p = np.poly1d.__pow__(self, int(_pow)) q = ECquasi(p, m=self.m, exponent=_pow*self.exponent) return q def __sub__(self, other): """ Subtract `other` from `self` Parameters ---------- other : ECquasi instance Returns ------- subbed : ECquasi Examples -------- >>> b = ECquasi([3,4,20], m=30, exponent=4) >>> c = ECquasi([1,2], m=30, exponent=4) >>> print b-c #doctest: +IGNORE_DTYPE ECquasi(array([ 3, 3, 18]), m=30.000000, exponent=4.000000) """ return self + (other * -1) def __repr__(self): if not np.isfinite(self.m): m = repr(self.m) else: m = '%f' % self.m return "ECquasi(%s, m=%s, exponent=%f)" % ( repr(self.coeffs), m, self.exponent) __str__ = __repr__ __rsub__ = __sub__ __rmul__ = __mul__ __rdiv__ = __div__ def deriv(self, m=1): """ Evaluate derivative of ECquasi Parameters ---------- m : int, optional Examples -------- >>> a = ECquasi([3,4,5]) >>> a.deriv(m=2) #doctest: +IGNORE_DTYPE ECquasi(array([6]), m=inf, exponent=0.000000) >>> b = ECquasi([3,4,5], m=10, exponent=3) >>> b.deriv() ECquasi(array([-1.2, -2. , 3. , 4. 
]), m=10.000000, exponent=4.000000) """ if m == 1: if np.isfinite(self.m): q1 = ECquasi(np.poly1d.deriv(self, m=1), m=self.m, exponent=self.exponent) q2 = ECquasi(np.poly1d.__mul__(self, self.denom_poly().deriv(m=1)), m = self.m, exponent=self.exponent+1) return q1 - self.exponent * q2 else: return ECquasi(np.poly1d.deriv(self, m=1), m=np.inf, exponent=0) else: d = self.deriv(m=1) return d.deriv(m=m-1) class fnsum(object): def __init__(self, *items): self.items = list(items) def __call__(self, x): v = 0 for q in self.items: v += q(x) return v class IntrinsicVolumes(object): """ Compute intrinsic volumes of products of sets A simple class that exists only to compute the intrinsic volumes of products of sets (that themselves have intrinsic volumes, of course). """ def __init__(self, mu=[1]): if isinstance(mu, IntrinsicVolumes): mu = mu.mu self.mu = np.asarray(mu, np.float64) self.order = self.mu.shape[0]-1 def __str__(self): return str(self.mu) def __mul__(self, other): if not isinstance(other, IntrinsicVolumes): raise ValueError('expecting an IntrinsicVolumes instance') order = self.order + other.order + 1 mu = np.zeros(order) for i in range(order): for j in range(i+1): try: mu[i] += self.mu[j] * other.mu[i-j] except: pass return self.__class__(mu) class ECcone(IntrinsicVolumes): """ EC approximation to supremum distribution of var==1 Gaussian process A class that takes the intrinsic volumes of a set and gives the EC approximation to the supremum distribution of a unit variance Gaussian process with these intrinsic volumes. This is the basic building block of all of the EC densities. If product is not None, then this product (an instance of IntrinsicVolumes) will effectively be prepended to the search region in any call, but it will also affect the (quasi-)polynomial part of the EC density. For instance, Hotelling's T^2 random field has a sphere as product, as does Roy's maximum root. """ def __init__(self, mu=[1], dfd=np.inf, search=[1], product=[1]): self.dfd = dfd IntrinsicVolumes.__init__(self, mu=mu) self.product = IntrinsicVolumes(product) self.search = IntrinsicVolumes(search) def __call__(self, x, search=None): """ Get expected EC for a search region Default is self.search which itself defaults to [1] giving the survival function. """ x = np.asarray(x, np.float64) if search is None: search = self.search else: search = IntrinsicVolumes(search) search *= self.product if np.isfinite(self.dfd): q_even = ECquasi([0], m=self.dfd, exponent=0) q_odd = ECquasi([0], m=self.dfd, exponent=0.5) else: q_even = np.poly1d([0]) q_odd = np.poly1d([0]) for k in range(search.mu.shape[0]): q = self.quasi(k) c = float(search.mu[k]) * np.power(2*np.pi, -(k+1)/2.) if np.isfinite(self.dfd): q_even += q[0] * c q_odd += q[1] * c else: q_even += q * c _rho = q_even(x) + q_odd(x) if np.isfinite(self.dfd): _rho *= np.power(1 + x**2/self.dfd, -(self.dfd-1)/2.) else: _rho *= np.exp(-x**2/2.) if search.mu[0] * self.mu[0] != 0.: # tail probability is not "quasi-polynomial" if not np.isfinite(self.dfd): P = stats.norm.sf else: P = lambda x: stats.t.sf(x, self.dfd) _rho += P(x) * search.mu[0] * self.mu[0] return _rho def pvalue(self, x, search=None): return self(x, search=search) def integ(self, m=None, k=None): raise NotImplementedError # this could be done with stats.t, # at least m=1 def density(self, x, dim): """ The EC density in dimension `dim`. """ return self(x, search=[0]*dim+[1]) def _quasi_polynomials(self, dim): """ list of quasi-polynomials for EC density calculation. 
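        The k-th entry is ``Q(k + dim, dfd)`` weighted by
        ``mu[k] / (2*pi)**(k/2)`` and stored as an ``ECquasi`` with exponent
        ``k/2``; ``quasi`` sums these entries, splitting them into integer-
        and half-integer-exponent parts.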
""" c = self.mu / np.power(2*np.pi, np.arange(self.order+1.)/2.) quasi_polynomials = [] for k in range(c.shape[0]): if k+dim > 0: _q = ECquasi(Q(k+dim, dfd=self.dfd), m=self.dfd, exponent=k/2.) _q *= float(c[k]) quasi_polynomials.append(_q) return quasi_polynomials def quasi(self, dim): """ (Quasi-)polynomial parts of EC density in dimension `dim` - ignoring a factor of (2\pi)^{-(dim+1)/2} in front. """ q_even = ECquasi([0], m=self.dfd, exponent=0) q_odd = ECquasi([0], m=self.dfd, exponent=0.5) quasi_polynomials = self._quasi_polynomials(dim) for k in range(len(quasi_polynomials)): _q = quasi_polynomials[k] if _q.exponent % 1 == 0: q_even += _q else: q_odd += _q if not np.isfinite(self.dfd): q_even += q_odd return np.poly1d(q_even.coeffs) else: return (q_even, q_odd) Gaussian = ECcone def mu_sphere(n, j, r=1): """ `j`th curvature for `n` dimensional sphere radius `r` Return mu_j(S_r(R^n)), the j-th Lipschitz Killing curvature of the sphere of radius r in R^n. From Chapter 6 of Adler & Taylor, 'Random Fields and Geometry'. 2006. """ if j < n: if n-1 == j: return 2 * np.power(np.pi, n/2.) * np.power(r, n-1) / gamma(n/2.) if (n-1-j)%2 == 0: return 2 * binomial(n-1, j) * mu_sphere(n,n-1) * np.power(r, j) / mu_sphere(n-j,n-j-1) else: return 0 else: return 0 def mu_ball(n, j, r=1): """ `j`th curvature of `n`-dimensional ball radius `r` Return mu_j(B_n(r)), the j-th Lipschitz Killing curvature of the ball of radius r in R^n. """ if j <= n: if n == j: return np.power(np.pi, n/2.) * np.power(r, n) / gamma(n/2. + 1.) else: return binomial(n, j) * np.power(r, j) * mu_ball(n,n) / mu_ball(n-j,n-j) else: return 0 def spherical_search(n, r=1): """ A spherical search region of radius r. """ return IntrinsicVolumes([mu_sphere(n,j,r=r) for j in range(n)]) def ball_search(n, r=1): """ A ball-shaped search region of radius r. """ return IntrinsicVolumes([mu_ball(n,j,r=r) for j in range(n+1)]) def volume2ball(vol, d=3): """ Approximate volume with ball Approximate intrinsic volumes of a set with a given volume by those of a ball with a given dimension and equal volume. """ if d > 0: r = np.power(vol * 1. / mu_ball(d, d), 1./d) return ball_search(d, r=r) else: return IntrinsicVolumes([1]) class ChiSquared(ECcone): """ EC densities for a Chi-Squared(n) random field. """ def __init__(self, dfn, dfd=np.inf, search=[1]): self.dfn = dfn ECcone.__init__(self, mu=spherical_search(self.dfn), search=search, dfd=dfd) def __call__(self, x, search=None): return ECcone.__call__(self, np.sqrt(x), search=search) class TStat(ECcone): """ EC densities for a t random field. """ def __init__(self, dfd=np.inf, search=[1]): ECcone.__init__(self, mu=[1], dfd=dfd, search=search) class FStat(ECcone): """ EC densities for a F random field. """ def __init__(self, dfn, dfd=np.inf, search=[1]): self.dfn = dfn ECcone.__init__(self, mu=spherical_search(self.dfn), search=search, dfd=dfd) def __call__(self, x, search=None): return ECcone.__call__(self, np.sqrt(x * self.dfn), search=search) class Roy(ECcone): """ Roy's maximum root Maximize an F_{dfd,dfn} statistic over a sphere of dimension k. """ def __init__(self, dfn=1, dfd=np.inf, k=1, search=[1]): product = spherical_search(k) self.k = k self.dfn = dfn ECcone.__init__(self, mu=spherical_search(self.dfn), search=search, dfd=dfd, product=product) def __call__(self, x, search=None): return ECcone.__call__(self, np.sqrt(x * self.dfn), search=search) class MultilinearForm(ECcone): """ Maximize a multivariate Gaussian form Maximized over spheres of dimension dims. See: Kuri, S. & Takemura, A. 
(2001). 'Tail probabilities of the maxima of multilinear forms and their applications.' Ann. Statist. 29(2): 328-371. """ def __init__(self, *dims, **keywords): product = IntrinsicVolumes([1]) if keywords.has_key('search'): search = keywords['search'] else: search = [1] for d in dims: product *= spherical_search(d) product.mu /= 2.**(len(dims)-1) ECcone.__init__(self, search=search, product=product) class Hotelling(ECcone): """ Hotelling's T^2 Maximize an F_{1,dfd}=T_dfd^2 statistic over a sphere of dimension `k`. """ def __init__(self, dfd=np.inf, k=1, search=[1]): product = spherical_search(k) self.k = k ECcone.__init__(self, mu=[1], search=search, dfd=dfd, product=product) def __call__(self, x, search=None): return ECcone.__call__(self, np.sqrt(x), search=search) class OneSidedF(ECcone): """ EC densities for one-sided F statistic See: Worsley, K.J. & Taylor, J.E. (2005). 'Detecting fMRI activation allowing for unknown latency of the hemodynamic response.' Neuroimage, 29,649-654. """ def __init__(self, dfn, dfd=np.inf, search=[1]): self.dfn = dfn self.regions = [spherical_search(dfn), spherical_search(dfn-1)] ECcone.__init__(self, mu=spherical_search(self.dfn), search=search, dfd=dfd) def __call__(self, x, search=None): IntrinsicVolumes.__init__(self, self.regions[0]) d1 = ECcone.__call__(self, np.sqrt(x * self.dfn), search=search) IntrinsicVolumes.__init__(self, self.regions[1]) d2 = ECcone.__call__(self, np.sqrt(x * (self.dfn-1)), search=search) self.mu = self.regions[0].mu return (d1 - d2) * 0.5 class ChiBarSquared(ChiSquared): def _getmu(self): x = np.linspace(0, 2 * self.dfn, 100) sf = 0. g = Gaussian() for i in range(1, self.dfn+1): sf += binomial(self.dfn, i) * stats.chi.sf(x, i) / np.power(2., self.dfn) d = np.array([g.density(np.sqrt(x), j) for j in range(self.dfn)]) c = np.dot(pinv(d.T), sf) sf += 1. / np.power(2, self.dfn) self.mu = IntrinsicVolumes(c) def __init__(self, dfn=1, search=[1]): ChiSquared.__init__(self, dfn=dfn, search=search) self._getmu() def __call__(self, x, dim=0, search=[1]): if search is None: search = self.stat else: search = IntrinsicVolumes(search) * self.stat return FStat.__call__(self, x, dim=dim, search=search) def scale_space(region, interval, kappa=1.): """ scale space intrinsic volumes of region x interval See: Siegmund, D.O and Worsley, K.J. (1995). 'Testing for a signal with unknown location and scale in a stationary Gaussian random field.' Annals of Statistics, 23:608-639. and Taylor, J.E. & Worsley, K.J. (2005). 'Random fields of multivariate test statistics, with applications to shape analysis and fMRI.' (available on http://www.math.mcgill.ca/keith """ w1, w2 = interval region = IntrinsicVolumes(region) D = region.order out = np.zeros((D+2,)) out[0] = region.mu[0] for i in range(1, D+2): if i < D+1: out[i] = (1./w1 + 1./w2) * region.mu[i] * 0.5 for j in range(int(np.floor((D-i+1)/2.)+1)): denom = (i + 2*j - 1.) # w^-i/i when i=0 # according to Keith Worsley the 2005 paper has a typo if denom == 0: f = np.log(w2/w1) else: f = (w1**(-i-2*j+1) - w2**(-i-2*j+1)) / denom f *= kappa**((1-2*j)/2.) 
* (-1)**j * factorial(int(denom)) f /= (1 - 2*j) * (4*np.pi)**j * factorial(j) * factorial(i-1) out[i] += region.mu[int(denom)] * f return IntrinsicVolumes(out) nipy-0.3.0/nipy/algorithms/statistics/setup.py000066400000000000000000000017341210344137400215310ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import numpy as np import os def configuration(parent_package='', top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('statistics', parent_package, top_path) config.add_subpackage('models') config.add_subpackage('formula') config.add_subpackage('bench') config.add_subpackage('tests') config.add_include_dirs(config.name.replace('.', os.sep)) config.add_extension('intvol', 'intvol.pyx', include_dirs=[np.get_include()]) config.add_extension('histogram', 'histogram.pyx', include_dirs=[np.get_include()]) config.add_extension('_quantile', sources=['_quantile.pyx', 'quantile.c']) return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) nipy-0.3.0/nipy/algorithms/statistics/tests/000077500000000000000000000000001210344137400211545ustar00rootroot00000000000000nipy-0.3.0/nipy/algorithms/statistics/tests/__init__.py000066400000000000000000000003421210344137400232640ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import nipy.algorithms.statistics.tests.test_rft import nipy.algorithms.statistics.tests.test_intrinsic_volumes nipy-0.3.0/nipy/algorithms/statistics/tests/test_empirical_pvalue.py000066400000000000000000000041741210344137400261140ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Test the empirical null estimator. """ import warnings import numpy as np from nose.tools import assert_true from ..empirical_pvalue import ( NormalEmpiricalNull, smoothed_histogram_from_samples, fdr, fdr_threshold, gaussian_fdr_threshold, gaussian_fdr) def setup(): # Suppress warnings during tests to reduce noise warnings.simplefilter("ignore") def teardown(): # Clear list of warning filters warnings.resetwarnings() def test_efdr(): # generate the data n = 100000 x = np.random.randn(n) x[:3000] += 3 # make the tests efdr = NormalEmpiricalNull(x) np.testing.assert_array_less(efdr.fdr(3.0), 0.2) np.testing.assert_array_less(-efdr.threshold(alpha=0.05), -2.8) np.testing.assert_array_less(-efdr.uncorrected_threshold(alpha=0.001), -2.5) def test_smooth_histo(): n = 100 x = np.random.randn(n) h, c = smoothed_histogram_from_samples(x, normalized=True) thh = 1. 
/ np.sqrt(2 * np.pi) hm = h.max() assert_true(np.absolute(hm - thh) < 0.15) def test_fdr_pos(): # test with some significant values np.random.seed([1]) x = np.random.rand(100) x[:10] *= (.05 / 10) q = fdr(x) assert_true((q[:10] < .05).all()) pc = fdr_threshold(x) assert_true((pc > .0025) & (pc < .1)) def test_fdr_neg(): # test without some significant values np.random.seed([1]) x = np.random.rand(100) * .8 + .2 q =fdr(x) assert_true((q > .05).all()) pc = fdr_threshold(x) assert_true(pc == .05 / 100) def test_gaussian_fdr(): # Test that fdr works on Gaussian data np.random.seed([2]) x = np.random.randn(100) * 2 fdr = gaussian_fdr(x) assert_true(fdr.min() < .05) assert_true(fdr.max() > .99) def test_gaussian_fdr_threshold(): np.random.seed([2]) x = np.random.randn(100) * 2 ac = gaussian_fdr_threshold(x) assert_true(ac > 2.0) assert_true(ac < 4.0) assert_true(ac > gaussian_fdr_threshold(x, alpha=.1)) if __name__ == "__main__": import nose nose.run(argv=['', __file__]) nipy-0.3.0/nipy/algorithms/statistics/tests/test_histogram.py000066400000000000000000000005431210344137400245640ustar00rootroot00000000000000from ..histogram import histogram import numpy as np from numpy.testing import assert_array_equal def test_histogram(): x = np.array([0, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4], dtype='uintp') h = histogram(x) assert_array_equal(h, [1, 2, 3, 4, 5]) nipy-0.3.0/nipy/algorithms/statistics/tests/test_intrinsic_volumes.py000066400000000000000000000277471210344137400263620ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import numpy as np import numpy.linalg as npl from .. import intvol, utils from nose.tools import assert_equal, assert_raises from numpy.testing import assert_array_equal, assert_almost_equal def symnormal(p=10): M = np.random.standard_normal((p,p)) return (M + M.T) / np.sqrt(2) def randorth(p=10): """ A random orthogonal matrix. """ A = symnormal(p) return npl.eig(A)[1] def box(shape, edges): data = np.zeros(shape) sl = [] for i in range(len(shape)): sl.append(slice(edges[i][0], edges[i][1],1)) data[sl] = 1 return data.astype(np.int) def randombox(shape): """ Generate a random box, returning the box and the edge lengths """ edges = [np.random.random_integers(0, shape[j], size=(2,)) for j in range(len(shape))] for j in range(len(shape)): edges[j].sort() if edges[j][0] == edges[j][1]: edges[j][0] = 0; edges[j][1] = shape[j]/2+1 return edges, box(shape, edges) def elsym(edgelen, order=1): """ Elementary symmetric polynomial of a given order """ l = len(edgelen) if order == 0: return 1 r = 0 for v in utils.combinations(range(l), order): r += np.product([edgelen[vv] for vv in v]) return r def nonintersecting_boxes(shape): """ The Lips's are supposed to be additive, so disjoint things should be additive. But, if they ALMOST intersect, different things get added to the triangulation. >>> b1 = np.zeros(40, np.int) >>> b1[:11] = 1 >>> b2 = np.zeros(40, np.int) >>> b2[11:] = 1 >>> (b1*b2).sum() 0 >>> c = np.indices((40,)).astype(np.float) >>> intvol.Lips1d(c, b1) array([ 1., 10.]) >>> intvol.Lips1d(c, b2) array([ 1., 28.]) >>> intvol.Lips1d(c, b1+b2) array([ 1., 39.]) The function creates two boxes such that the 'dilated' box1 does not intersect with box2. Additivity works in this case. 
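    Returns ``box1, box2, edge1, edge2`` (the two binary boxes together with
    their edge coordinate lists), so that tests can compare measured
    intrinsic volumes against the elementary symmetric polynomials of the
    edge lengths (see ``elsym`` above).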
""" while True: edge1, box1 = randombox(shape) edge2, box2 = randombox(shape) diledge1 = [[max(ed[0]-1, 0), min(ed[1]+1, sh)] for ed, sh in zip(edge1, box1.shape)] dilbox1 = box(box1.shape, diledge1) if set(np.unique(dilbox1 + box2)).issubset([0,1]): break return box1, box2, edge1, edge2 def pts2dots(d, a, b, c): """ Convert point coordinates to dot products """ D00 = np.dot(d, d) D01 = np.dot(d, a) D02 = np.dot(d, b) D03 = np.dot(d, c) D11 = np.dot(a, a) D12 = np.dot(a, b) D13 = np.dot(a, c) D22 = np.dot(b, b) D23 = np.dot(b, c) D33 = np.dot(c, c) return D00, D01, D02, D03, D11, D12, D13, D22, D23, D33 def pts2mu3_tet(d, a, b, c): """ Accept point coordinates for calling mu3tet """ return intvol.mu3_tet(*pts2dots(d, a, b, c)) def wiki_tet_vol(d, a, b, c): # Wikipedia formula for generalized tetrahedron volume d, a, b, c = [np.array(e) for e in d, a, b, c] cp = np.cross((b-d),(c-d)) v2t6 = np.dot((a-d), cp) return np.sqrt(v2t6) / 6. def test_mu3tet(): assert_equal(intvol.mu3_tet(0,0,0,0,1,0,0,1,0,1), 1./6) assert_equal(intvol.mu3_tet(0,0,0,0,0,0,0,0,0,0), 0) d = [2,2,2] a = [3,2,2] b = [2,3,2] c = [2,2,3] assert_equal(pts2mu3_tet(d, a, b, c), 1./6) assert_equal(wiki_tet_vol(d, a, b, c), 1./6) # This used to generate nan values assert_equal(intvol.mu3_tet(0,0,0,0,1,0,0,-1,0,1), 0) def test_mu2tri(): assert_equal(intvol.mu2_tri(0,0,0,1,0,1), 1./2) def test_mu1tri(): assert_equal(intvol.mu1_tri(0,0,0,1,0,1), 1+np.sqrt(2)/2) def test_mu2tet(): assert_equal(intvol.mu2_tet(0,0,0,0,1,0,0,1,0,1), (3./2 + np.sqrt(3./4))/2) def pts2mu1_tet(d, a, b, c): """ Accept point coordinates for calling mu1_tet """ return intvol.mu1_tet(*pts2dots(d, a, b, c)) def test_mu1_tet(): res1 = pts2mu1_tet([2,2,2],[3,2,2],[2,3,2],[2,2,3]) res2 = pts2mu1_tet([0,0,0],[1,0,0],[0,1,0],[0,0,1]) assert_equal(res1, res2) assert_equal(intvol.mu1_tet(0,0,0,0,0,0,0,0,0,0), 0) # This used to generate nan values assert_equal(intvol.mu1_tet(0,0,0,0,1,0,0,-1,0,1), 0) def test__mu1_tetface(): # Test for out of range acos value sequences. 
I'm ashamed to say I found # these sequences accidentally in a failing test with random numbers _mu1_tetface = intvol._mu1_tetface assert_almost_equal(_mu1_tetface(1, 0, 0, 10, 10, 0, 0, 20, 20, 40), 0) assert_almost_equal(_mu1_tetface(36, 0, 0, 18, 48, 0, 0, 1, 30, 63), 3) def test_ec(): for i in range(1, 4): _, box1 = randombox((40,)*i) f = {3:intvol.EC3d, 2:intvol.EC2d, 1:intvol.EC1d}[i] yield assert_almost_equal, f(box1), 1 def test_ec_disjoint(): for i in range(1, 4): e = {3:intvol.EC3d, 2:intvol.EC2d, 1:intvol.EC1d}[i] box1, box2, _, _ = nonintersecting_boxes((40,)*i) assert_almost_equal(e(box1 + box2), e(box1) + e(box2)) def test_lips_wrapping(): # Test that shapes touching the edge do not combine by wrapping b1 = np.zeros(40, np.int) b1[:11] = 1 b2 = np.zeros(40, np.int) b2[11:] = 1 # lines are disjoint assert_equal((b1*b2).sum(), 0) c = np.indices(b1.shape).astype(np.float) assert_array_equal(intvol.Lips1d(c, b1), (1, 10)) assert_array_equal(intvol.Lips1d(c, b2), (1, 28)) assert_array_equal(intvol.Lips1d(c, b1+b2), (1, 39.0)) # 2D b1 = b1[:,None] b2 = b2[:,None] # boxes are disjoint assert_equal((b1*b2).sum(), 0) c = np.indices(b1.shape).astype(np.float) assert_array_equal(intvol.Lips2d(c, b1), (1, 10, 0)) assert_array_equal(intvol.Lips2d(c, b2), (1, 28, 0)) assert_array_equal(intvol.Lips2d(c, b1+b2), (1, 39.0, 0)) # 3D b1 = b1[:,:,None] b2 = b2[:,:,None] assert_equal(b1.shape, (40,1,1)) # boxes are disjoint assert_equal((b1*b2).sum(), 0) c = np.indices(b1.shape).astype(np.float) assert_array_equal(intvol.Lips3d(c, b1), (1, 10, 0, 0)) assert_array_equal(intvol.Lips3d(c, b2), (1, 28, 0, 0)) assert_array_equal(intvol.Lips3d(c, b1+b2), (1, 39.0, 0, 0)) # Shapes which are squeezable should still return sensible answers # Test simple ones line / box / volume funcer = {1: (intvol.Lips1d, intvol.EC1d), 2: (intvol.Lips2d, intvol.EC2d), 3: (intvol.Lips3d, intvol.EC3d)} for box_shape, exp_ivs in [[(10,),(1,9)], [(10,1),(1,9,0)], [(1,10),(1,9,0)], [(10,1,1), (1,9,0,0)], [(1, 10, 1), (1,9,0,0)], [(1, 1, 10), (1,9,0,0)]]: nd = len(box_shape) lips_func, ec_func = funcer[nd] c = np.indices(box_shape).astype(np.float) b = np.ones(box_shape, dtype=np.int) assert_array_equal(lips_func(c, b), exp_ivs) assert_equal(ec_func(b), exp_ivs[0]) def test_lips1_disjoint(): phi = intvol.Lips1d box1, box2, edge1, edge2 = nonintersecting_boxes((30,)) c = np.indices((30,)).astype(np.float) # Test N dimensional coordinates (N=10) d = np.random.standard_normal((10,)+(30,)) # Test rotation causes no change in volumes U = randorth(p=6)[:1] e = np.dot(U.T, c.reshape((c.shape[0], np.product(c.shape[1:])))) e.shape = (e.shape[0],) + c.shape[1:] assert_almost_equal(phi(c, box1 + box2), phi(c, box1) + phi(c, box2)) assert_almost_equal(phi(d, box1 + box2), phi(d, box1) + phi(d, box2)) assert_almost_equal(phi(e, box1 + box2), phi(e, box1) + phi(e, box2)) assert_almost_equal(phi(e, box1 + box2), phi(c, box1 + box2)) assert_almost_equal(phi(e, box1 + box2), (np.array( [elsym([e[1]-e[0]-1 for e in edge1], i) for i in range(2)]) + np.array( [elsym([e[1]-e[0]-1 for e in edge2], i) for i in range(2)]))) assert_raises(ValueError, phi, c[...,None], box1) def test_lips2_disjoint(): phi = intvol.Lips2d box1, box2, edge1, edge2 = nonintersecting_boxes((40,40)) c = np.indices((40,40)).astype(np.float) # Test N dimensional coordinates (N=10) d = np.random.standard_normal((10,40,40)) # Test rotation causes no change in volumes U = randorth(p=6)[0:2] e = np.dot(U.T, c.reshape((c.shape[0], np.product(c.shape[1:])))) e.shape = 
(e.shape[0],) + c.shape[1:] assert_almost_equal(phi(c, box1 + box2), phi(c, box1) + phi(c, box2)) assert_almost_equal(phi(d, box1 + box2), phi(d, box1) + phi(d, box2)) assert_almost_equal(phi(e, box1 + box2), phi(e, box1) + phi(e, box2)) assert_almost_equal(phi(e, box1 + box2), phi(c, box1 + box2)) assert_almost_equal(phi(e, box1 + box2), np.array([elsym([e[1]-e[0]-1 for e in edge1], i) for i in range(3)]) + np.array([elsym([e[1]-e[0]-1 for e in edge2], i) for i in range(3)]) ) assert_raises(ValueError, phi, c[...,None], box1) assert_raises(ValueError, phi, c[:,:,1], box1) def test_lips3_disjoint(): phi = intvol.Lips3d box1, box2, edge1, edge2 = nonintersecting_boxes((40,)*3) c = np.indices((40,)*3).astype(np.float) # Test N dimensional coordinates (N=10) d = np.random.standard_normal((10,40,40,40)) # Test rotation causes no change in volumes U = randorth(p=6)[0:3] e = np.dot(U.T, c.reshape((c.shape[0], np.product(c.shape[1:])))) e.shape = (e.shape[0],) + c.shape[1:] assert_almost_equal(phi(c, box1 + box2), phi(c, box1) + phi(c, box2)) assert_almost_equal(phi(d, box1 + box2), phi(d, box1) + phi(d, box2)) assert_almost_equal(phi(e, box1 + box2), phi(e, box1) + phi(e, box2)) assert_almost_equal(phi(e, box1 + box2), phi(c, box1 + box2)) assert_almost_equal( phi(e, box1 + box2), (np.array([elsym([e[1]-e[0]-1 for e in edge1], i) for i in range(4)]) + np.array([elsym([e[1]-e[0]-1 for e in edge2], i) for i in range(4)]))) assert_raises(ValueError, phi, c[...,None], box1) assert_raises(ValueError, phi, c[:,:,:,1], box1) def test_lips3_nans(): # These boxes caused nans in the Lips3 disjoint box tests phi = intvol.Lips3d box1 = np.zeros((40,40,40), dtype=np.int) box2 = box1.copy() box1[23:30,22:32,9:13] = 1 box2[7:22,0,8:17] = 1 c = np.indices(box1.shape).astype(np.float) assert_array_equal(np.isnan(phi(c, box2)), False) U = randorth(p=6)[0:3] e = np.dot(U.T, c.reshape((c.shape[0], np.product(c.shape[1:])))) e.shape = (e.shape[0],) + c.shape[1:] assert_array_equal(np.isnan(phi(e, box1 + box2)), False) def test_slices(): # Slices have EC 1... 
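    # (A single voxel, a line segment and a filled rectangle embedded in 3D
    # are all contractible, so each has Euler characteristic 1; the Lips3d
    # values are their lower-dimensional intrinsic volumes.)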
e = intvol.EC3d p = intvol.Lips3d m = np.zeros((40,)*3, np.int) D = np.indices(m.shape).astype(np.float) m[10,10,10] = 1 yield assert_almost_equal, e(m), 1 yield assert_almost_equal, p(D,m), [1,0,0,0] m = np.zeros((40,)*3, np.int) m[10,10:14,10] = 1 yield assert_almost_equal, e(m), 1 yield assert_almost_equal, p(D,m), [1,3,0,0] m = np.zeros((40,)*3, np.int) m[10,10:14,9:15] = 1 yield assert_almost_equal, e(m), 1 yield assert_almost_equal, p(D,m), [1,8,15,0] def test_ec_wrapping(): # Test wrapping for EC1 calculation assert_equal(intvol.EC1d(np.ones((6,), dtype=np.int)), 1) box1 = np.array([1, 1, 0, 1, 1, 1], dtype=np.int) assert_equal(intvol.EC1d(box1), 2) # 2D box1 = np.zeros((3,6), dtype=np.int) box1[1] = 1 assert_equal(intvol.EC2d(box1), 1) box1[1, 3] = 0 assert_equal(intvol.EC2d(box1), 2) # 3D box1 = np.zeros((3,6,3), dtype=np.int) box1[1, :, 1] = 1 assert_equal(intvol.EC3d(box1), 1) box1[1, 3, 1] = 0 assert_equal(intvol.EC3d(box1), 2) nipy-0.3.0/nipy/algorithms/statistics/tests/test_mixed_effects.py000066400000000000000000000101211210344137400253650ustar00rootroot00000000000000""" Testing the glm module """ import numpy as np from numpy.testing import assert_almost_equal from nose.tools import assert_true import numpy.random as nr from ..mixed_effects_stat import ( one_sample_ttest, one_sample_ftest, two_sample_ttest, two_sample_ftest, generate_data, t_stat, mfx_stat) def test_mfx(): """ Test the generic mixed-effects model""" n_samples, n_tests = 20, 100 np.random.seed(1) # generate some data V1 = np.random.rand(n_samples, n_tests) Y = generate_data(np.ones((n_samples, 1)), 0, 1, V1) X = np.random.randn(20, 3) # compute the test statistics t1, = mfx_stat(Y, V1, X, 1,return_t=True, return_f=False, return_effect=False, return_var=False) assert_true(t1.shape == (n_tests,)) assert_true(t1.mean() < 5 / np.sqrt(n_tests)) assert_true((t1.var() < 2) and (t1.var() > .5)) t2, = mfx_stat(Y, V1, X * np.random.rand(3), 1) assert_almost_equal(t1, t2) f, = mfx_stat(Y, V1, X, 1, return_t=False, return_f=True) assert_almost_equal(t1 ** 2, f) v2, = mfx_stat(Y, V1, X, 1, return_t=False, return_var=True) assert_true((v2 > 0).all()) fx, = mfx_stat(Y, V1, X, 1, return_t=False, return_effect=True) assert_true(fx.shape == (n_tests,)) def test_t_test(): """ test that the t test run """ n_samples, n_tests = 15, 100 data = nr.randn(n_samples, n_tests) t = t_stat(data) assert_true(t.shape == (n_tests,)) assert_true( np.abs(t.mean() < 5 / np.sqrt(n_tests))) assert_true(t.var() < 2) assert_true( t.var() > .5) def test_two_sample_ttest(): """ test that the mfx ttest indeed runs """ n_samples, n_tests = 15, 4 np.random.seed(1) # generate some data vardata = np.random.rand(n_samples, n_tests) data = generate_data(np.ones(n_samples), 0, 1, vardata) # compute the test statistics u = np.concatenate((np.ones(5), np.zeros(10))) t2 = two_sample_ttest(data, vardata, u, n_iter=5) assert t2.shape == (n_tests,) assert np.abs(t2.mean() < 5 / np.sqrt(n_tests)) assert t2.var() < 2 assert t2.var() > .5 # try verbose mode t3 = two_sample_ttest(data, vardata, u, n_iter=5, verbose=1) assert_almost_equal(t2, t3) def test_two_sample_ftest(): """ test that the mfx ttest indeed runs """ n_samples, n_tests = 15, 4 np.random.seed(1) # generate some data vardata = np.random.rand(n_samples, n_tests) data = generate_data(np.ones((n_samples, 1)), 0, 1, vardata) # compute the test statistics u = np.concatenate((np.ones(5), np.zeros(10))) t2 = two_sample_ftest(data, vardata, u, n_iter=5) assert t2.shape == (n_tests,) assert np.abs(t2.mean() < 5 
/ np.sqrt(n_tests)) assert t2.var() < 2 assert t2.var() > .5 # try verbose mode t3 = two_sample_ftest(data, vardata, u, n_iter=5, verbose=1) assert_almost_equal(t2, t3) def test_mfx_ttest(): """ test that the mfx ttest indeed runs """ n_samples, n_tests = 15, 100 np.random.seed(1) # generate some data vardata = np.random.rand(n_samples, n_tests) data = generate_data(np.ones((n_samples, 1)), 0, 1, vardata) # compute the test statistics t2 = one_sample_ttest(data, vardata, n_iter=5) assert t2.shape == (n_tests,) assert np.abs(t2.mean() < 5 / np.sqrt(n_tests)) assert t2.var() < 2 assert t2.var() > .5 # try verbose mode t3 = one_sample_ttest(data, vardata, n_iter=5, verbose=1) assert_almost_equal(t2, t3) def test_mfx_ftest(): """ test that the mfx ftest indeed runs """ n_samples, n_tests = 15, 100 np.random.seed(1) # generate some data vardata = np.random.rand(n_samples, n_tests) data = generate_data(np.ones((n_samples, 1)), 0, 1, vardata) # compute the test statistics f = one_sample_ftest(data, vardata, n_iter=5) assert f.shape == (n_tests,) assert (np.abs(f.mean() - 1) < 1) assert f.var() < 10 assert f.var() > .2 if __name__ == "__main__": import nose nose.run(argv=['', __file__]) nipy-0.3.0/nipy/algorithms/statistics/tests/test_onesample.py000066400000000000000000000025161210344137400245540ustar00rootroot00000000000000 import numpy as np from nipy.algorithms.statistics import onesample from scipy.stats import norm from nipy.testing import assert_almost_equal def test_estimate_varatio(p=1.0e-04, sigma2=1): # This is a random test, but is design to fail only rarely.... ntrial = 300 n = 10 random = np.zeros(10) rsd = np.zeros(n) sd = np.multiply.outer( np.linspace(0,1,40), np.ones(ntrial) ) + np.ones((40,ntrial)) for i in range(n): Y = np.random.standard_normal((40,ntrial)) * np.sqrt((sd**2 + sigma2)) results = onesample.estimate_varatio(Y, sd) results = onesample.estimate_varatio(Y, sd) random[i] = results['random'].mean() rsd[i] = results['random'].std() # Compute the mean just to be sure it works W = 1. 
/ (sd**2 + results['random']) mu = onesample.estimate_mean(Y, np.sqrt(sd**2 + results['random']))['effect'] yield assert_almost_equal, mu, (W*Y).sum(0) / W.sum(0) rsd = np.sqrt((rsd**2).mean() / ntrial) T = np.fabs((random.mean() - sigma2) / (rsd / np.sqrt(n))) # should fail one in every 1/p trials at least for sigma > 0, # small values of sigma seem to have some bias if T > norm.ppf(1-p/2): raise ValueError('large T value, but algorithm works, ' 'could be a statistical failure') nipy-0.3.0/nipy/algorithms/statistics/tests/test_quantile.py000066400000000000000000000026001210344137400244050ustar00rootroot00000000000000#!/usr/bin/env python import numpy as np from numpy import median as np_median from scipy.stats import scoreatpercentile as sp_percentile from .._quantile import _quantile, _median from numpy.testing import (assert_array_equal, assert_array_almost_equal) NUMERIC_TYPES = sum([np.sctypes[t] for t in ('int', 'uint', 'float', 'complex')], []) def another_percentile(arr, pct, axis): # numpy.percentile not available until after numpy 1.4.1 return np.apply_along_axis(sp_percentile, axis, arr.astype(float), pct) def test_median(): for dtype in NUMERIC_TYPES: for shape in ((10,), (10, 11), (10, 11, 12)): X = (100 * (np.random.random(shape) - .5)).astype(dtype) for a in range(X.ndim): assert_array_equal(_median(X, axis=a).squeeze(), np_median(X.astype(np.float64), axis=a)) def test_quantile(): for dtype in NUMERIC_TYPES: for shape in ((10,), (10, 11), (10, 11, 12)): X = (100 * (np.random.random(shape) - .5)).astype(dtype) for a in range(X.ndim): assert_array_almost_equal( _quantile(X, .75, axis=a, interp=True).squeeze(), another_percentile(X, 75, axis=a)) if __name__ == "__main__": import nose nose.run(argv=['', __file__]) nipy-0.3.0/nipy/algorithms/statistics/tests/test_rft.py000066400000000000000000000363211210344137400233650ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import numpy as np from scipy.special import gammaln, hermitenorm import scipy.stats from scipy.misc import factorial from .. import rft from nose.tools import assert_raises from numpy.testing import assert_almost_equal, dec #def rho(x, dim, df=np.inf): # """ # EC densities for T and Gaussian (df=inf) random fields. # """ # # m = df # # if dim > 0: # x = np.asarray(x, np.float64) #--jarrod: shouldn't Q be rft.Q?? # q = Q(dim, dfd=df)(x) # # if np.isfinite(m): # q *= np.power(1 + x**2/m, -(m-1)/2.) # else: # q *= np.exp(-x**2/2) # # return q * np.power(2*np.pi, -(dim+1)/2.) # else: # if np.isfinite(m): # return scipy.stats.t.sf(x, df) # else: # return scipy.stats.norm.sf(x) def test_Q(): assert_raises(ValueError, rft.Q, -1) assert_raises(ValueError, rft.Q, 0) x = np.arange(-9, 10) for dim in range(1, 4): res = rft.Q(dim) assert_almost_equal(res(x), hermitenorm(dim - 1)(x)) def K(dim=4, dfn=7, dfd=np.inf): """ Determine the polynomial K in: Worsley, K.J. (1994). 'Local maxima and the expected Euler characteristic of excursion sets of \chi^2, F and t fields.' Advances in Applied Probability, 26:13-42. If dfd=inf, return the limiting polynomial. 
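    This is the polynomial part only; the helper ``F`` below multiplies
    ``K(dim, dfn, dfd)(x)`` by the appropriate F (or chi-squared, when dfd is
    infinite) factor to form the full EC density.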
""" def lbinom(n, j): return gammaln(n+1) - gammaln(j+1) - gammaln(n-j+1) m = dfd n = dfn D = dim k = np.arange(D) coef = 0 for j in range(int(np.floor((D-1)/2.)+1)): if np.isfinite(m): t = (gammaln((m+n-D)/2.+j) - gammaln(j+1) - gammaln((m+n-D)/2.)) t += lbinom(m-1, k-j) - k * np.log(m) else: _t = np.power(2., -j) / (factorial(k-j) * factorial(j)) t = np.log(_t) t[np.isinf(_t)] = -np.inf t += lbinom(n-1, D-1-j-k) coef += (-1)**(D-1) * factorial(D-1) * np.exp(t) * np.power(-1.*n, k) return np.poly1d(coef[::-1]) def F(x, dim, dfd=np.inf, dfn=1): """ EC densities for F and Chi^2 (dfd=inf) random fields. """ m = float(dfd) n = float(dfn) D = float(dim) if dim > 0: x = np.asarray(x, np.float64) k = K(dim=dim, dfd=dfd, dfn=dfn)(x) if np.isfinite(m): f = x*n/m t = -np.log(1 + f) * (m+n-2.) / 2. t += np.log(f) * (n-D) / 2. t += gammaln((m+n-D)/2.) - gammaln(m/2.) else: f = x*n t = np.log(f/2.) * (n-D) / 2. - f/2. t -= np.log(2*np.pi) * D / 2. + np.log(2) * (D-2)/2. + gammaln(n/2.) k *= np.exp(t) return k else: if np.isfinite(m): return scipy.stats.f.sf(x, dfn, dfd) else: return scipy.stats.chi.sf(x, dfn) def polyF(dim, dfd=np.inf, dfn=1): """ Return the polynomial part of the EC density when evaluating the polynomial on the sqrt(F) scale (or sqrt(chi^2)=chi scale). The polynomial is such that, if dfd=inf, the F EC density in is just:: polyF(dim,dfn=dfn)(sqrt(dfn*x)) * exp(-dfn*x/2) * (2\pi)^{-(dim+1)/2} """ n = float(dfn) m = float(dfd) D = float(dim) p = K(dim=D, dfd=m, dfn=n) c = p.c # Take care of the powers of n (i.e. we want polynomial K evaluated # at */n). for i in range(p.order+1): c[i] /= np.power(n, p.order-i) # Now, turn it into a polynomial of x when evaluated at x**2 C = np.zeros((2*c.shape[0]-1,)) for i in range(c.shape[0]): C[2*i] = c[i] # Multiply by the factor x^(dfn-dim) in front (see Theorem 4.6 of # Worsley (1994), cited above. if dim > dfn: # divide by x^(dim-dfn) C = C[0:(C.shape[0] - (dim-dfn))] else: # multiply by x^(dim-dfn) C = np.hstack([C, np.zeros((dfn-dim,))]) # Fix up constant in front if np.isfinite(m): C *= np.exp(gammaln((m+n-D)/2.) - gammaln(m/2.)) * np.power(m, -(n-D)/2.) else: C *= np.power(2, -(n-D)/2.) C /= np.power(2, (dim-2)/2.) * np.exp(gammaln(n/2.)) C *= np.sqrt(2*np.pi) return np.poly1d(C) def F_alternative(x, dim, dfd=np.inf, dfn=1): """ Another way to compute F EC density as a product of a polynomial and a power of (1+x^2/m). """ n = float(dfn) m = float(dfd) x = np.asarray(x, np.float64) p = polyF(dim=dim, dfd=dfd, dfn=dfn) v = p(np.sqrt(n*x)) if np.isfinite(m): v *= np.power(1 + n*x/m, -(m+n-2.) / 2.) else: v *= np.exp(-n*x/2) v *= np.power(2*np.pi, -(dim+1)/2.) return v def test_polynomial1(): # Polynomial part of Gaussian densities are Hermite polynomials. for dim in range(1,10): q = rft.Gaussian().quasi(dim) h = hermitenorm(dim-1) yield assert_almost_equal, q.c, h.c def test_polynomial2(): # EC density of chi^2(1) is 2 * EC density of Gaussian so polynomial part is # a factor of 2 as well. for dim in range(1,10): q = rft.ChiSquared(dfn=1).quasi(dim) h = hermitenorm(dim-1) yield assert_almost_equal, q.c, 2*h.c @dec.slow def test_polynomial3(): # EC density of F with infinite dfd is the same as chi^2 -- # polynomials should be the same. for dim in range(10): for dfn in range(5,10): q1 = rft.FStat(dfn=dfn, dfd=np.inf).quasi(dim) q2 = rft.ChiSquared(dfn=dfn).quasi(dim) yield assert_almost_equal, q1.c, q2.c @dec.slow def test_chi1(): # EC density of F with infinite dfd is the same as chi^2 -- EC should be the # same. 
x = np.linspace(0.1,10,100) for dim in range(10): for dfn in range(5,10): c = rft.ChiSquared(dfn=dfn) f = rft.FStat(dfn=dfn, dfd=np.inf) chi1 = c.density(dfn*x, dim) chi2 = f.density(x, dim) yield assert_almost_equal, chi1, chi2 def test_chi2(): # Quasi-polynomial part of the chi^2 EC density should # be the limiting polyF. for dim in range(1,10): for dfn in range(5,10): c = rft.ChiSquared(dfn=dfn) p1 = c.quasi(dim=dim) p2 = polyF(dim=dim, dfn=dfn) yield assert_almost_equal, p1.c, p2.c def test_chi3(): # EC density of chi^2(1) is 2 * EC density of Gaussian squared so EC # densities factor of 2 as well. x = np.linspace(0.1,10,100) for dim in range(10): g = rft.Gaussian() c = rft.ChiSquared(dfn=1) ec1 = g.density(np.sqrt(x), dim) ec2 = c.density(x, dim) yield assert_almost_equal, 2*ec1, ec2 def test_T1(): # O-dim EC density should be tail probality. x = np.linspace(0.1,10,100) for dfd in [40,50]: t = rft.TStat(dfd=dfd) yield assert_almost_equal, t(x), scipy.stats.t.sf(x, dfd) t = rft.TStat(dfd=np.inf) yield assert_almost_equal, t(x), scipy.stats.norm.sf(x) def test_search(): # Test that the search region works. search = rft.IntrinsicVolumes([3,4,5]) x = np.linspace(0.1,10,100) stat = rft.Gaussian(search=search) v1 = stat(x) v2 = ((5*x + 4*np.sqrt(2*np.pi)) * np.exp(-x**2/2.) / np.power(2*np.pi, 1.5) + 3 * scipy.stats.norm.sf(x)) assert_almost_equal(v1, v2) @dec.slow def test_search1(): # Test that the search region works. # XXX - we are not testing anything search = rft.IntrinsicVolumes([3,4,5]) x = np.linspace(0.1,10,100) stats = [rft.Gaussian()] for dfn in range(5,10): for dfd in [40,50,np.inf]: stats.append(rft.FStat(dfn=dfn, dfd=dfd)) stats.append(rft.TStat(dfd=dfd)) stats.append(rft.ChiSquared(dfn=dfn)) for dim in range(7): for stat in stats: # XXX - v1 appears to be unused v1 = stat(x, search=search) v2 = 0 for i in range(search.mu.shape[0]): v2 += stat.density(x, i) * search.mu[i] @dec.slow def test_search2(): # Test that the search region works. search = rft.IntrinsicVolumes([3,4,5]) x = np.linspace(0.1,10,100) stats = [rft.Gaussian(search=search)] ostats = [rft.Gaussian()] for dfn in range(5,10): for dfd in [40,50,np.inf]: stats.append(rft.FStat(dfn=dfn, dfd=dfd, search=search)) ostats.append(rft.FStat(dfn=dfn, dfd=dfd)) stats.append(rft.TStat(dfd=dfd, search=search)) ostats.append(rft.TStat(dfd=dfd)) stats.append(rft.ChiSquared(dfn=dfn, search=search)) ostats.append(rft.ChiSquared(dfn=dfn)) for i in range(len(stats)): stat = stats[i] ostat = ostats[i] v1 = stat(x) v2 = 0 for j in range(search.mu.shape[0]): v2 += ostat.density(x, j) * search.mu[j] assert_almost_equal(v1, v2) def test_search3(): # In the Gaussian case, test that search and product give same results. 
search = rft.IntrinsicVolumes([3,4,5,7]) g1 = rft.Gaussian(search=search) g2 = rft.Gaussian(product=search) x = np.linspace(0.1,10,100) y1 = g1(x) y2 = g2(x) assert_almost_equal(y1, y2) def test_search4(): # Test that the search/product work well together search = rft.IntrinsicVolumes([3,4,5]) product = rft.IntrinsicVolumes([1,2]) x = np.linspace(0.1,10,100) g1 = rft.Gaussian() g2 = rft.Gaussian(product=product) y = g2(x, search=search) z = g1(x, search=search*product) assert_almost_equal(y, z) def test_search5(): # Test that the search/product work well together search = rft.IntrinsicVolumes([3,4,5]) product = rft.IntrinsicVolumes([1,2]) prodsearch = product * search x = np.linspace(0,5,101) g1 = rft.Gaussian() g2 = rft.Gaussian(product=product) z = 0 for i in range(prodsearch.mu.shape[0]): z += g1.density(x, i) * prodsearch.mu[i] y = g2(x, search=search) assert_almost_equal(y, z) @dec.slow def test_T2(): # T**2 is an F with dfn=1 x = np.linspace(0,5,101) for dfd in [40,50,np.inf]: t = rft.TStat(dfd=dfd) f = rft.FStat(dfd=dfd, dfn=1) for dim in range(7): y = 2*t.density(x, dim) z = f.density(x**2, dim) yield assert_almost_equal, y, z @dec.slow def test_hotelling1(): # Asymptotically, Hotelling is the same as F which is the same as chi^2. x = np.linspace(0.1,10,100) for dim in range(7): for dfn in range(5,10): h = rft.Hotelling(k=dfn).density(x*dfn, dim) f = rft.FStat(dfn=dfn).density(x, dim) yield assert_almost_equal, h, f @dec.slow def test_hotelling4(): # Hotelling T^2 should just be like taking product with sphere. x = np.linspace(0.1,10,100) for dim in range(7): search = rft.IntrinsicVolumes([0]*(dim) + [1]) for k in range(5, 10): p = rft.spherical_search(k) for dfd in [np.inf,40,50]: f = rft.FStat(dfd=dfd, dfn=1)(x, search=p*search) t = 2*rft.TStat(dfd=dfd)(np.sqrt(x), search=p*search) h2 = 2*rft.Hotelling(k=k, dfd=dfd).density(x, dim) h = 2*rft.Hotelling(k=k, dfd=dfd)(x, search=search) yield assert_almost_equal, h, t yield assert_almost_equal, h, f yield assert_almost_equal, h, h2 search = rft.IntrinsicVolumes([3,4,5]) for k in range(5, 10): p = rft.spherical_search(k) for dfd in [np.inf,40,50]: f = rft.FStat(dfd=dfd, dfn=1)(x, search=p*search) h = 2*rft.Hotelling(k=k, dfd=dfd)(x, search=search) h2 = 0 for i in range(search.mu.shape[0]): h2 += 2*rft.Hotelling(k=k, dfd=dfd).density(x, i) * search.mu[i] yield assert_almost_equal, h, f yield assert_almost_equal, h, h2 def test_hotelling2(): # Marginally, Hotelling's T^2(k) with m degrees of freedom # in the denominator satisfies # (m-k+1)/(mk) T^2 \sim F_{k,m-k+1}. x = np.linspace(0.1,10,100) for dfn in range(6, 10): h = rft.Hotelling(k=dfn)(x) chi = rft.ChiSquared(dfn=dfn)(x) assert_almost_equal(h, chi) chi2 = scipy.stats.chi2.sf(x, dfn) yield assert_almost_equal, h, chi2 # XXX - p appears to be unused p = rft.spherical_search(dfn) for dfd in [40,50]: fac = (dfd-dfn+1.)/(dfd*dfn) h = rft.Hotelling(dfd=dfd,k=dfn)(x) f = scipy.stats.f.sf(x*fac, dfn, dfd-dfn+1) f2 = rft.FStat(dfd=dfd-dfn+1,dfn=dfn)(x*fac) yield assert_almost_equal, f2, f yield assert_almost_equal, h, f @dec.slow def test_roy1(): # EC densities of Roy with dfn=1 should be twice EC densities of Hotelling # T^2's. 
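    # (Roy with dfn=1 uses mu=spherical_search(1), the 0-sphere: its only
    # nonzero intrinsic volume is its EC of 2, while Hotelling uses mu=[1];
    # hence the factor of 2.)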
x = np.linspace(0.1,10,100) for dfd in [40,50,np.inf]: for k in [1,4,6]: for dim in range(7): h = 2*rft.Hotelling(dfd=dfd,k=k).density(x, dim) r = rft.Roy(dfd=dfd,k=k,dfn=1).density(x, dim) yield assert_almost_equal, h, r @dec.slow def test_onesidedF(): # EC densities of one sided F should be a difference of # F EC densities x = np.linspace(0.1,10,100) for dfd in [40,50,np.inf]: for dfn in range(2,10): for dim in range(7): f1 = rft.FStat(dfd=dfd,dfn=dfn).density(x, dim) f2 = rft.FStat(dfd=dfd,dfn=dfn-1).density(x, dim) onesided = rft.OneSidedF(dfd=dfd,dfn=dfn).density(x, dim) yield assert_almost_equal, onesided, 0.5*(f1-f2) @dec.slow def test_multivariate_forms(): # MVform with one sphere is sqrt(chi^2), two spheres is sqrt(Roy) with # infinite degrees of freedom. x = np.linspace(0.1,10,100) for k1 in range(5,10): m = rft.MultilinearForm(k1) c = rft.ChiSquared(k1) for dim in range(7): mx = m.density(x, dim) cx = c.density(x**2, dim) yield assert_almost_equal, mx, cx for k2 in range(5,10): m = rft.MultilinearForm(k1,k2) r = rft.Roy(k=k1, dfn=k2, dfd=np.inf) for dim in range(7): mx = 2*m.density(x, dim) rx = r.density(x**2/k2, dim) yield assert_almost_equal, mx, rx def test_scale(): # Smoke test? a = rft.IntrinsicVolumes([2,3,4]) b = rft.scale_space(a, [3,4], kappa=0.5) def test_F1(): x = np.linspace(0.1,10,100) for dim in range(1,10): for dfn in range(5,10): for dfd in [40,50,np.inf]: f1 = F(x, dim, dfn=dfn, dfd=dfd) f2 = F_alternative(x, dim, dfn=dfn, dfd=dfd) yield assert_almost_equal, f1, f2 @dec.slow def test_F2(): x = np.linspace(0.1,10,100) for dim in range(3,7): for dfn in range(5,10): for dfd in [40,50,np.inf]: f1 = rft.FStat(dfn=dfn, dfd=dfd).density(x, dim) f2 = F_alternative(x, dim, dfn=dfn, dfd=dfd) yield assert_almost_equal, f1, f2 @dec.slow def test_F3(): x = np.linspace(0.1,10,100) for dim in range(3,7): for dfn in range(5,10): for dfd in [40,50,np.inf]: f1 = rft.FStat(dfn=dfn, dfd=dfd).density(x, dim) f2 = F(x, dim, dfn=dfn, dfd=dfd) yield assert_almost_equal, f1, f2 nipy-0.3.0/nipy/algorithms/statistics/tests/test_utils.py000066400000000000000000000026131210344137400237270ustar00rootroot00000000000000#!/usr/bin/env python import numpy as np from scipy.stats import norm from ..utils import multiple_mahalanobis, z_score, multiple_fast_inv from nose.tools import assert_true from numpy.testing import assert_almost_equal, assert_array_almost_equal def test_z_score(): p = np.random.rand(10) z = z_score(p) assert_array_almost_equal(norm.sf(z), p) def test_mahalanobis(): x = np.random.rand(100) / 100 A = np.random.rand(100, 100) / 100 A = np.dot(A.transpose(), A) + np.eye(100) mah = np.dot(x, np.dot(np.linalg.inv(A), x)) assert_almost_equal(mah, multiple_mahalanobis(x, A), decimal=1) def test_mahalanobis2(): x = np.random.randn(100, 3) Aa = np.zeros([100, 100, 3]) for i in range(3): A = np.random.randn(120, 100) A = np.dot(A.T, A) Aa[:, :, i] = A i = np.random.randint(3) mah = np.dot(x[:, i], np.dot(np.linalg.inv(Aa[:, :, i]), x[:, i])) f_mah = (multiple_mahalanobis(x, Aa))[i] assert_true(np.allclose(mah, f_mah)) def test_multiple_fast_inv(): shape = (10, 20, 20) X = np.random.randn(shape[0], shape[1], shape[2]) X_inv_ref = np.zeros(shape) for i in range(shape[0]): X[i] = np.dot(X[i], X[i].T) X_inv_ref[i] = np.linalg.inv(X[i]) X_inv = multiple_fast_inv(X) assert_almost_equal(X_inv_ref, X_inv) if __name__ == "__main__": import nose nose.run(argv=['', __file__]) 
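# --------------------------------------------------------------------------
# A minimal standalone sketch of the two identities exercised by the tests
# above, assuming nipy is installed so the statistics utilities are
# importable from nipy.algorithms.statistics.utils (the module defined
# below in this archive).  z_score(p) is the inverse survival function of
# the standard normal, and multiple_mahalanobis with a single stacked
# covariance reduces to the usual quadratic form x' A^{-1} x.
import numpy as np
from scipy.stats import norm
from nipy.algorithms.statistics.utils import z_score, multiple_mahalanobis

p = np.array([0.5, 0.05, 0.001])
assert np.allclose(norm.sf(z_score(p)), p)     # sf undoes isf

x = np.random.randn(4)
A = np.eye(4) + 0.1 * np.ones((4, 4))          # symmetric positive definite
explicit = x.dot(np.linalg.inv(A)).dot(x)      # x' A^{-1} x
assert np.allclose(multiple_mahalanobis(x, A), explicit)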
nipy-0.3.0/nipy/algorithms/statistics/utils.py000066400000000000000000000326151210344137400215330ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import numpy as np from scipy.stats import norm TINY = 1e-15 def z_score(pvalue): """ Return the z-score corresponding to a given p-value. """ pvalue = np.minimum(np.maximum(pvalue, TINY), 1. - TINY) z = norm.isf(pvalue) return z def multiple_fast_inv(a): """Compute the inverse of a set of arrays. Parameters ---------- a: array_like of shape (n_samples, n_dim, n_dim) Set of square matrices to be inverted. A is changed in place. Returns ------- a: ndarray yielding the inverse of the inputs Raises ------ LinAlgError : If `a` is singular. ValueError : If `a` is not square, or not 2-dimensional. Notes ----- This function is borrowed from scipy.linalg.inv, but with some customizations for speed-up. """ if a.shape[1] != a.shape[2]: raise ValueError('a must have shape(n_samples, n_dim, n_dim)') from scipy.linalg import calc_lwork from scipy.linalg.lapack import get_lapack_funcs a1, n = a[0], a.shape[0] getrf, getri = get_lapack_funcs(('getrf', 'getri'), (a1,)) for i in range(n): if (getrf.module_name[:7] == 'clapack' and getri.module_name[:7] != 'clapack'): # ATLAS 3.2.1 has getrf but not getri. lu, piv, info = getrf(np.transpose(a[i]), rowmajor=0, overwrite_a=True) a[i] = np.transpose(lu) else: a[i], piv, info = getrf(a[i], overwrite_a=True) if info == 0: if getri.module_name[:7] == 'flapack': lwork = calc_lwork.getri(getri.prefix, a1.shape[0]) lwork = lwork[1] # XXX: the following line fixes curious SEGFAULT when # benchmarking 500x500 matrix inverse. This seems to # be a bug in LAPACK ?getri routine because if lwork is # minimal (when using lwork[0] instead of lwork[1]) then # all tests pass. Further investigation is required if # more such SEGFAULTs occur. 
lwork = int(1.01 * lwork) a[i], _ = getri(a[i], piv, lwork=lwork, overwrite_lu=1) else: # clapack a[i], _ = getri(a[i], piv, overwrite_lu=1) else: raise ValueError('Matrix LU decomposition failed') return a def multiple_mahalanobis(effect, covariance): """Returns the squared Mahalanobis distance for a given set of samples Parameters ---------- effect: array of shape (n_features, n_samples), Each column represents a vector to be evaluated covariance: array of shape (n_features, n_features, n_samples), Corresponding covariance models stacked along the last axis Returns ------- sqd: array of shape (n_samples,) the squared distances (one per sample) """ # check size if effect.ndim == 1: effect = effect[:, np.newaxis] if covariance.ndim == 2: covariance = covariance[:, :, np.newaxis] if effect.shape[0] != covariance.shape[0]: raise ValueError('Inconsistent shape for effect and covariance') if covariance.shape[0] != covariance.shape[1]: raise ValueError('Inconsistent shape for covariance') # transpose and make contiguous for the sake of speed Xt, Kt = np.ascontiguousarray(effect.T), np.ascontiguousarray(covariance.T) # compute the inverse of the covariances Kt = multiple_fast_inv(Kt) # derive the squared Mahalanobis distances sqd = np.sum(np.sum(Xt[:, :, np.newaxis] * Xt[:, np.newaxis] * Kt, 1), 1) return sqd # Taken from python doc site, exists in python2.6 def combinations(iterable, r): # combinations('ABCD', 2) --> AB AC AD BC BD CD # combinations(range(4), 3) --> 012 013 023 123 pool = tuple(iterable) n = len(pool) if r > n: return indices = range(r) yield tuple(pool[i] for i in indices) while True: for i in reversed(range(r)): if indices[i] != i + n - r: break else: return indices[i] += 1 for j in range(i+1, r): indices[j] = indices[j-1] + 1 yield tuple(pool[i] for i in indices) def complex(maximal=[(0, 3, 2, 7), (0, 6, 2, 7), (0, 7, 5, 4), (0, 7, 5, 1), (0, 7, 4, 6), (0, 3, 1, 7)]): """ Faces from simplices Take a list of maximal simplices (by default a triangulation of a cube into 6 tetrahedra) and compute all faces Parameters ---------- maximal : sequence of sequences, optional Default is triangulation of cube into tetrahedra Returns ------- faces : dict """ faces = {} l = [len(list(x)) for x in maximal] for i in range(np.max(l)): faces[i+1] = set([]) for simplex in maximal: simplex = list(simplex) simplex.sort() for k in range(1,len(simplex)+1): for v in combinations(simplex, k): if len(v) == 1: v = v[0] faces[k].add(v) return faces def cube_with_strides_center(center=[0,0,0], strides=[4, 2, 1]): """ Cube in an array of voxels with a given center and strides. This triangulates a cube with vertices [center[i], center[i]+1]. The dimension of the cube is determined by len(center) which should agree with len(strides). The allowable dimensions are [1,2,3]. Parameters ---------- center : (d,) sequence of int, optional Default is [0, 0, 0] strides : (d,) sequence of int, optional Default is [4, 2, 1]. These are the strides given by ``np.ones((2,2,2), np.bool).strides`` Returns ------- complex : dict A dictionary with integer keys representing a simplicial complex. The vertices of the simplicial complex are the indices of the corners of the cube in a 'flattened' array with specified strides.
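        For example, in two dimensions, with ``center=[0, 0]`` and
        ``strides=[2, 1]`` (a flattened 2x2 array), the maximal simplices
        are the triangles (0, 2, 3) and (0, 1, 3); the returned complex
        then contains 4 vertices, 5 edges and 2 triangles, giving Euler
        characteristic 4 - 5 + 2 = 1, as expected for a filled square.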
""" d = len(center) if not 0 < d <= 3: raise ValueError('dimensionality must be 0 < d <= 3') if len(strides) != d: raise ValueError('center and strides must have the same length') if d == 3: maximal = [(0, 3, 2, 7), (0, 6, 2, 7), (0, 7, 5, 4), (0, 7, 5, 1), (0, 7, 4, 6), (0, 3, 1, 7)] vertices = [] for k in range(2): for j in range(2): for i in range(2): vertices.append((center[0]+i)*strides[0] + (center[1]+j)*strides[1] + (center[2]+k)*strides[2]) elif d == 2: maximal = [(0,1,3), (0,2,3)] vertices = [] for j in range(2): for i in range(2): vertices.append((center[0]+i)*strides[0] + (center[1]+j)*strides[1]) elif d == 1: maximal = [(0,1)] vertices = [center[0],center[0]+strides[0]] mm = [] for m in maximal: nm = [vertices[j] for j in m] mm.append(nm) maximal = [tuple([vertices[j] for j in m]) for m in maximal] return complex(maximal) def join_complexes(*complexes): """ Join a sequence of simplicial complexes. Returns the union of all the particular faces. """ faces = {} nmax = np.array([len(c) for c in complexes]).max() for i in range(nmax): faces[i+1] = set([]) for c in complexes: for i in range(nmax): if c.has_key(i+1): faces[i+1] = faces[i+1].union(c[i+1]) return faces def decompose3d(shape, dim=4): """ Return all (dim-1)-dimensional simplices in a triangulation of a cube of a given shape. The vertices in the triangulation are indices in a 'flattened' array of the specified shape. """ # First do the interior contributions. # We first figure out which vertices, edges, triangles, tetrahedra # are uniquely associated with an interior voxel unique = {} strides = np.empty(shape, np.bool).strides union = join_complexes(*[cube_with_strides_center((0,0,-1), strides), cube_with_strides_center((0,-1,0), strides), cube_with_strides_center((0,-1,-1), strides), cube_with_strides_center((-1,0,0), strides), cube_with_strides_center((-1,0,-1), strides), cube_with_strides_center((-1,-1,0), strides), cube_with_strides_center((-1,-1,-1), strides)]) c = cube_with_strides_center((0,0,0), strides) for i in range(4): unique[i+1] = c[i+1].difference(union[i+1]) if unique.has_key(dim) and dim > 1: d = unique[dim] for i in range(shape[0]-1): for j in range(shape[1]-1): for k in range(shape[2]-1): index = i*strides[0]+j*strides[1]+k*strides[2] for l in d: yield [index+ii for ii in l] # There are now contributions from three two-dimensional faces for _strides, _shape in zip([(strides[0], strides[1]), (strides[0], strides[2]), (strides[1], strides[2])], [(shape[0], shape[1]), (shape[0], shape[2]), (shape[1], shape[2])]): unique = {} union = join_complexes(*[cube_with_strides_center((0,-1), _strides), cube_with_strides_center((-1,0), _strides), cube_with_strides_center((-1,-1), _strides)]) c = cube_with_strides_center((0,0), _strides) for i in range(3): unique[i+1] = c[i+1].difference(union[i+1]) if unique.has_key(dim) and dim > 1: d = unique[dim] for i in range(_shape[0]-1): for j in range(_shape[1]-1): index = i*_strides[0]+j*_strides[1] for l in d: yield [index+ii for ii in l] # Finally the one-dimensional faces for _stride, _shape in zip(strides, shape): unique = {} union = cube_with_strides_center((-1,), [_stride]) c = cube_with_strides_center((0,), [_stride]) for i in range(2): unique[i+1] = c[i+1].difference(union[i+1]) if unique.has_key(dim) and dim > 1: d = unique[dim] for i in range(_shape-1): index = i*_stride for l in d: yield [index+ii for ii in l] if dim == 1: for i in range(np.product(shape)): yield i def decompose2d(shape, dim=3): """ Return all (dim-1)-dimensional simplices in a triangulation of a 
square of a given shape. The vertices in the triangulation are indices in a 'flattened' array of the specified shape. """ # First do the interior contributions. # We first figure out which vertices, edges, triangles # are uniquely associated with an interior pixel unique = {} strides = np.empty(shape, np.bool).strides union = join_complexes(*[cube_with_strides_center((0,-1), strides), cube_with_strides_center((-1,0), strides), cube_with_strides_center((-1,-1), strides)]) c = cube_with_strides_center((0,0), strides) for i in range(3): unique[i+1] = c[i+1].difference(union[i+1]) if unique.has_key(dim) and dim > 1: d = unique[dim] for i in range(shape[0]-1): for j in range(shape[1]-1): index = i*strides[0]+j*strides[1] for l in d: yield [index+ii for ii in l] # Now, the one-dimensional faces for _stride, _shape in zip(strides, shape): unique = {} union = cube_with_strides_center((-1,), [_stride]) c = cube_with_strides_center((0,), [_stride]) for i in range(2): unique[i+1] = c[i+1].difference(union[i+1]) if unique.has_key(dim) and dim > 1: d = unique[dim] for i in range(_shape-1): index = i*_stride for l in d: yield [index+ii for ii in l] if dim == 1: for i in range(np.product(shape)): yield i def test_EC3(shape): ts = 0 fs = 0 es = 0 vs = 0 ec = 0 for t in decompose3d(shape, dim=4): ec -= 1; ts += 1 for f in decompose3d(shape, dim=3): ec += 1; fs += 1 for e in decompose3d(shape, dim=2): ec -= 1; es += 1 for v in decompose3d(shape, dim=1): ec += 1; vs += 1 return ts, fs, es, vs, ec # Tell nose testing framework not to run this as a test test_EC3.__test__ = False def test_EC2(shape): fs = 0 es = 0 vs = 0 ec = 0 for f in decompose2d(shape, dim=3): ec += 1; fs += 1 for e in decompose2d(shape, dim=2): ec -= 1; es += 1 for v in decompose2d(shape, dim=1): ec += 1; vs += 1 return fs, es, vs, ec # Tell nose testing framework not to run this as a test test_EC2.__test__ = False nipy-0.3.0/nipy/algorithms/tests/000077500000000000000000000000001210344137400167625ustar00rootroot00000000000000nipy-0.3.0/nipy/algorithms/tests/__init__.py000066400000000000000000000000001210344137400210610ustar00rootroot00000000000000nipy-0.3.0/nipy/algorithms/tests/test_kernel_smooth.py000066400000000000000000000117331210344137400232510ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Test for smoothing with kernels """ import numpy as np from numpy.random import random_integers as randint from ... 
import load_image from ..kernel_smooth import LinearFilter, sigma2fwhm, fwhm2sigma from ...externals.transforms3d.taitbryan import euler2mat from ...core.api import Image, compose, AffineTransform, drop_io_dim from nose.tools import (assert_true, assert_false, assert_equal, assert_raises) from numpy.testing import assert_array_equal, assert_array_almost_equal from ...testing import (anatfile, funcfile) def test_anat_smooth(): anat = load_image(anatfile) smoother = LinearFilter(anat.coordmap, anat.shape) sanat = smoother.smooth(anat) assert_equal(sanat.shape, anat.shape) assert_equal(sanat.coordmap, anat.coordmap) assert_false(np.allclose(sanat.get_data(), anat.get_data())) def test_funny_coordmap(): # 5x4 affine should also work, and give same answer as 4x4 func = load_image(funcfile) cmap = func.coordmap # Give the affine a rotation aff = np.eye(5) aff[:3,:3] = euler2mat(0.3, 0.2, 0.1) cmap_rot = AffineTransform(cmap.function_range, cmap.function_range, aff) func_rot = Image(func.get_data(), compose(cmap_rot, cmap)) func1 = func_rot[...,1] # 5x4 affine smoother = LinearFilter(func1.coordmap, func1.shape) sfunc1 = smoother.smooth(func1) # OK # And same as for 4x4 affine cmap3d = drop_io_dim(cmap, 't') func3d = Image(func1.get_data(), cmap3d) smoother = LinearFilter(func3d.coordmap, func3d.shape) sfunc3d = smoother.smooth(func3d) assert_equal(sfunc1.shape, sfunc3d.shape) assert_array_almost_equal(sfunc1.get_data(), sfunc3d.get_data()) # And same with no rotation func_fresh = func[...,1] # 5x4 affine, no rotation smoother = LinearFilter(func_fresh.coordmap, func_fresh.shape) sfunc_fresh = smoother.smooth(func_fresh) assert_equal(sfunc1.shape, sfunc_fresh.shape) assert_array_almost_equal(sfunc1.get_data(), sfunc_fresh.get_data()) def test_func_smooth(): func = load_image(funcfile) smoother = LinearFilter(func.coordmap, func.shape) # should work, but currently broken : sfunc = smoother.smooth(func) assert_raises(NotImplementedError, smoother.smooth, func) def test_sigma_fwhm(): # ensure that fwhm2sigma and sigma2fwhm are inverses of each other fwhm = np.arange(1.0, 5.0, 0.1) sigma = np.arange(1.0, 5.0, 0.1) assert_true(np.allclose(sigma2fwhm(fwhm2sigma(fwhm)), fwhm)) assert_true(np.allclose(fwhm2sigma(sigma2fwhm(sigma)), sigma)) def test_kernel(): # Verify that convolution with a delta function gives the correct # answer. tol = 0.9999 sdtol = 1.0e-8 for x in range(6): shape = randint(30,60,(3,)) # pos of delta ii, jj, kk = randint(11,17, (3,)) # random affine coordmap (diagonal and translations) coordmap = AffineTransform.from_start_step('ijk', 'xyz', randint(5,20,(3,))*0.25, randint(5,10,(3,))*0.5) # delta function in 3D array signal = np.zeros(shape) signal[ii,jj,kk] = 1. signal = Image(signal, coordmap=coordmap) # A filter with coordmap, shape matched to image kernel = LinearFilter(coordmap, shape, fwhm=randint(50,100)/10.) # smoothed normalized 3D array ssignal = kernel.smooth(signal).get_data() ssignal[:] *= kernel.norms[kernel.normalization] # 3 points * signal.size array I = np.indices(ssignal.shape) I.shape = (kernel.coordmap.ndims[0], np.product(shape)) # location of maximum in smoothed array i, j, k = I[:, np.argmax(ssignal[:].flat)] # same place as we put it before smoothing? 
assert_equal((i,j,k), (ii,jj,kk)) # get physical points position relative to position of delta Z = kernel.coordmap(I.T) - kernel.coordmap([i,j,k]) _k = kernel(Z) _k.shape = ssignal.shape assert_true((np.corrcoef(_k[:].flat, ssignal[:].flat)[0,1] > tol)) assert_true(((_k[:] - ssignal[:]).std() < sdtol)) def _indices(i,j,k,axis): I = np.zeros((3,20)) I[0] += i I[1] += j I[2] += k I[axis] += np.arange(-10,10) return I.T vx = ssignal[i,j,(k-10):(k+10)] xformed_ijk = coordmap([i, j, k]) vvx = coordmap(_indices(i,j,k,2)) - xformed_ijk assert_true((np.corrcoef(vx, kernel(vvx))[0,1] > tol)) vy = ssignal[i,(j-10):(j+10),k] vvy = coordmap(_indices(i,j,k,1)) - xformed_ijk assert_true((np.corrcoef(vy, kernel(vvy))[0,1] > tol)) vz = ssignal[(i-10):(i+10),j,k] vvz = coordmap(_indices(i,j,k,0)) - xformed_ijk assert_true((np.corrcoef(vz, kernel(vvz))[0,1] > tol)) nipy-0.3.0/nipy/algorithms/tests/test_resample.py000066400000000000000000000231271210344137400222100ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import numpy as np from nipy.core.api import (AffineTransform, Image, ArrayCoordMap, compose) from nipy.core.reference import slices from nipy.algorithms.resample import resample, resample_img2img from nipy.io.api import load_image from nose.tools import assert_true, assert_raises from numpy.testing import assert_array_almost_equal from nipy.testing import funcfile, anatfile def test_resample_img2img(): fimg = load_image(funcfile) aimg = load_image(anatfile) resimg = resample_img2img(fimg, fimg) yield assert_true, np.allclose(resimg.get_data(), fimg.get_data()) yield assert_raises, ValueError, resample_img2img, fimg, aimg # Hackish flag for enabling of pylab plots of resamplingstest_2d_from_3d gui_review = False def test_rotate2d(): # Rotate an image in 2d on a square grid, should result in transposed image g = AffineTransform.from_params('ij', 'xy', np.diag([0.7,0.5,1])) g2 = AffineTransform.from_params('ij', 'xy', np.diag([0.5,0.7,1])) i = Image(np.ones((100,100)), g) # This sets the image data by writing into the array i.get_data()[50:55,40:55] = 3. a = np.array([[0,1,0], [1,0,0], [0,0,1]], np.float) ir = resample(i, g2, a, (100, 100)) assert_array_almost_equal(ir.get_data().T, i.get_data()) def test_rotate2d2(): # Rotate an image in 2d on a non-square grid, should result in transposed # image g = AffineTransform.from_params('ij', 'xy', np.diag([0.7,0.5,1])) g2 = AffineTransform.from_params('ij', 'xy', np.diag([0.5,0.7,1])) i = Image(np.ones((100,80)), g) # This sets the image data by writing into the array i.get_data()[50:55,40:55] = 3. a = np.array([[0,1,0], [1,0,0], [0,0,1]], np.float) ir = resample(i, g2, a, (80,100)) assert_array_almost_equal(ir.get_data().T, i.get_data()) def test_rotate2d3(): # Another way to rotate/transpose the image, similar to # test_rotate2d2 and test_rotate2d, except the world of the # output coordmap is the same as the world of the # original image. That is, the data is transposed on disk, but the # output coordinates are still 'x,'y' order, not 'y', 'x' order as # above # this functionality may or may not be used a lot. if data is to # be transposed but one wanted to keep the NIFTI order of output # coords this would do the trick g = AffineTransform.from_params('xy', 'ij', np.diag([0.5,0.7,1])) i = Image(np.ones((100,80)), g) # This sets the image data by writing into the array i.get_data()[50:55,40:55] = 3. 
a = np.identity(3) g2 = AffineTransform.from_params('xy', 'ij', np.array([[0,0.5,0], [0.7,0,0], [0,0,1]])) ir = resample(i, g2, a, (80,100)) assert_array_almost_equal(ir.get_data().T, i.get_data()) def test_rotate3d(): # Rotate / transpose a 3d image on a non-square grid g = AffineTransform.from_params('ijk', 'xyz', np.diag([0.5,0.6,0.7,1])) g2 = AffineTransform.from_params('ijk', 'xyz', np.diag([0.5,0.7,0.6,1])) shape = (100,90,80) i = Image(np.ones(shape), g) i.get_data()[50:55,40:55,30:33] = 3. a = np.array([[1,0,0,0], [0,0,1,0], [0,1,0,0], [0,0,0,1.]]) ir = resample(i, g2, a, (100,80,90)) assert_array_almost_equal(np.transpose(ir.get_data(), (0,2,1)), i.get_data()) def test_resample2d(): g = AffineTransform.from_params('ij', 'xy', np.diag([0.5,0.5,1])) i = Image(np.ones((100,90)), g) i.get_data()[50:55,40:55] = 3. # This mapping describes a mapping from the "target" physical # coordinates to the "image" physical coordinates. The 3x3 matrix # below indicates that the "target" physical coordinates are related # to the "image" physical coordinates by a shift of -4 in each # coordinate. Or, to find the "image" physical coordinates, given # the "target" physical coordinates, we add 4 to each "target # coordinate". The resulting resampled image should show the # overall image shifted -8,-8 voxels towards the origin a = np.identity(3) a[:2,-1] = 4. ir = resample(i, i.coordmap, a, (100,90)) assert_array_almost_equal(ir.get_data()[42:47,32:47], 3.) def test_resample2d1(): # Tests the same as test_resample2d, only using a callable instead of # an AffineTransform instance g = AffineTransform.from_params('ij', 'xy', np.diag([0.5,0.5,1])) i = Image(np.ones((100,90)), g) i.get_data()[50:55,40:55] = 3. a = np.identity(3) a[:2,-1] = 4. A = np.identity(2) b = np.ones(2)*4 def mapper(x): return np.dot(x, A.T) + b ir = resample(i, i.coordmap, mapper, (100,90)) assert_array_almost_equal(ir.get_data()[42:47,32:47], 3.) def test_resample2d2(): g = AffineTransform.from_params('ij', 'xy', np.diag([0.5,0.5,1])) i = Image(np.ones((100,90)), g) i.get_data()[50:55,40:55] = 3. a = np.identity(3) a[:2,-1] = 4. A = np.identity(2) b = np.ones(2)*4 ir = resample(i, i.coordmap, (A, b), (100,90)) assert_array_almost_equal(ir.get_data()[42:47,32:47], 3.) def test_resample2d3(): # Same as test_resample2d, only a different way of specifying # the transform: here it is an (A,b) pair g = AffineTransform.from_params('ij', 'xy', np.diag([0.5,0.5,1])) i = Image(np.ones((100,90)), g) i.get_data()[50:55,40:55] = 3. a = np.identity(3) a[:2,-1] = 4. ir = resample(i, i.coordmap, a, (100,90)) assert_array_almost_equal(ir.get_data()[42:47,32:47], 3.) def test_resample3d(): g = AffineTransform.from_params('ijk', 'xyz', np.diag([0.5,0.5,0.5,1])) shape = (100,90,80) i = Image(np.ones(shape), g) i.get_data()[50:55,40:55,30:33] = 3. # This mapping describes a mapping from the "target" physical # coordinates to the "image" physical coordinates. The 4x4 matrix # below indicates that the "target" physical coordinates are related # to the "image" physical coordinates by a shift of -4 in each # coordinate. Or, to find the "image" physical coordinates, given # the "target" physical coordinates, we add 4 to each "target # coordinate". The resulting resampled image should show the # overall image shifted [-6,-8,-10] voxels towards the origin a = np.identity(4) a[:3,-1] = [3,4,5] ir = resample(i, i.coordmap, a, (100,90,80)) assert_array_almost_equal(ir.get_data()[44:49,32:47,20:23], 3.) 
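# --------------------------------------------------------------------------
# A minimal sketch (plain numpy only) of the arithmetic behind the shift
# tests above: with 0.5-unit voxels, adding a translation b to the "target"
# world coordinates moves the resampled image by -b / 0.5 voxels.  For
# test_resample3d, b = [3, 4, 5], which is why the bright block placed at
# [50:55, 40:55, 30:33] is asserted to appear at [44:49, 32:47, 20:23],
# i.e. shifted by [-6, -8, -10] voxels towards the origin.
import numpy as np
voxel_size = np.array([0.5, 0.5, 0.5])
world_shift = np.array([3., 4., 5.])
voxel_shift = -world_shift / voxel_size
assert np.all(voxel_shift == np.array([-6., -8., -10.]))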
def test_nonaffine(): # resamples an image along a curve through the image. # # FIXME: use the reference.evaluate.Grid to perform this nicer # FIXME: Remove pylab references def curve(x): # function accept N by 1, returns N by 2 return (np.vstack([5*np.sin(x.T),5*np.cos(x.T)]).T + [52,47]) for names in (('xy', 'ij', 't', 'u'),('ij', 'xy', 't', 's')): in_names, out_names, tin_names, tout_names = names g = AffineTransform.from_params(in_names, out_names, np.identity(3)) img = Image(np.ones((100,90)), g) img.get_data()[50:55,40:55] = 3. tcoordmap = AffineTransform.from_start_step( tin_names, tout_names, [0], [np.pi*1.8/100]) ir = resample(img, tcoordmap, curve, (100,)) if gui_review: import pylab pylab.figure(num=3) pylab.imshow(img, interpolation='nearest') d = curve(np.linspace(0,1.8*np.pi,100)) pylab.plot(d[0], d[1]) pylab.gca().set_ylim([0,99]) pylab.gca().set_xlim([0,89]) pylab.figure(num=4) pylab.plot(ir.get_data()) def test_2d_from_3d(): # Resample a 3d image on a 2d affine grid # This example creates a coordmap that coincides with # the 10th slice of an image, and checks that # resampling agrees with the data in the 10th slice. shape = (100,90,80) g = AffineTransform.from_params('ijk', 'xyz', np.diag([0.5,0.5,0.5,1])) i = Image(np.ones(shape), g) i.get_data()[50:55,40:55,30:33] = 3. a = np.identity(4) g2 = ArrayCoordMap.from_shape(g, shape)[10] ir = resample(i, g2.coordmap, a, g2.shape) assert_array_almost_equal(ir.get_data(), i[10].get_data()) def test_slice_from_3d(): # Resample a 3d image, returning a zslice, yslice and xslice # # This example creates a coordmap that coincides with # a given z, y, or x slice of an image, and checks that # resampling agrees with the data in the given slice. shape = (100,90,80) g = AffineTransform.from_params('ijk', 'xyz', np.diag([0.5,0.5,0.5,1])) img = Image(np.ones(shape), g) img.get_data()[50:55,40:55,30:33] = 3 I = np.identity(4) zsl = slices.zslice(26, ((0,49.5), 100), ((0,44.5), 90), img.reference) ir = resample(img, zsl, I, (100, 90)) assert_array_almost_equal(ir.get_data(), img[:,:,53].get_data()) ysl = slices.yslice(22, ((0,49.5), 100), ((0,39.5), 80), img.reference) ir = resample(img, ysl, I, (100, 80)) assert_array_almost_equal(ir.get_data(), img[:,45,:].get_data()) xsl = slices.xslice(15.5, ((0,44.5), 90), ((0,39.5), 80), img.reference) ir = resample(img, xsl, I, (90, 80)) assert_array_almost_equal(ir.get_data(), img[32,:,:].get_data()) nipy-0.3.0/nipy/algorithms/utils/000077500000000000000000000000001210344137400167605ustar00rootroot00000000000000nipy-0.3.0/nipy/algorithms/utils/__init__.py000066400000000000000000000000001210344137400210570ustar00rootroot00000000000000nipy-0.3.0/nipy/algorithms/utils/fast_distance.py000066400000000000000000000020661210344137400221450ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ this module contains a function to perform fast distance computation on arrays Author : Bertrand Thirion, 2008-2011 """ import numpy as np def euclidean_distance(X, Y=None): """ Considering the rows of X (and Y=X) as vectors, compute the distance matrix between each pair of vectors Parameters ---------- X, array of shape (n1,p) Y=None, array of shape (n2,p) if Y==None, then Y=X is used instead Returns ------- ED, array fo shape(n1, n2) with all the pairwise distance """ if Y == None: Y = X if X.shape[1] != Y.shape[1]: raise ValueError("incompatible dimension for X and Y matrices") n1 = X.shape[0] n2 = Y.shape[0] NX = 
np.reshape(np.sum(X * X, 1), (n1, 1)) NY = np.reshape(np.sum(Y * Y, 1), (1, n2)) ED = np.repeat(NX, n2, 1) ED += np.repeat(NY, n1, 0) ED -= 2 * np.dot(X, Y.T) ED = np.maximum(ED, 0) ED = np.sqrt(ED) return ED nipy-0.3.0/nipy/algorithms/utils/matrices.py000066400000000000000000000111401210344137400211360ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Utilities for working with matrices """ import numpy as np import scipy.linalg as spl def matrix_rank(M, tol=None): ''' Return rank of matrix using SVD method Rank of the array is the number of SVD singular values of the array that are greater than `tol`. This version of matrix rank is very similar to the numpy.linalg version except for the use of: * scipy.linalg.svd istead of numpy.linalg.svd. * the MATLAB algorithm for default tolerance calculation ``matrix_rank`` appeared in numpy.linalg in December 2009, first available in numpy 1.5.0. Parameters ---------- M : array-like array of <=2 dimensions tol : {None, float} threshold below which SVD values are considered zero. If `tol` is None, and `S` is an array with singular values for `M`, and `eps` is the epsilon value for datatype of `S`, then `tol` set to ``S.max() * eps * max(M.shape)``. Examples -------- >>> matrix_rank(np.eye(4)) # Full rank matrix 4 >>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix >>> matrix_rank(I) 3 >>> matrix_rank(np.zeros((4,4))) # All zeros - zero rank 0 >>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0 1 >>> matrix_rank(np.zeros((4,))) 0 >>> matrix_rank([1]) # accepts array-like 1 Notes ----- We check for numerical rank deficiency by using tol=max(M.shape) * eps * S[0] (where S[0] is the maximum singular value and thus the 2-norm of the matrix). This is one tolerance threshold for rank deficiency, and the default algorithm used by MATLAB [2]. When floating point roundoff is the main concern, then "numerical rank deficiency" is a reasonable choice. In some cases you may prefer other definitions. The most useful measure of the tolerance depends on the operations you intend to use on your matrix. For example, if your data come from uncertain measurements with uncertainties greater than floating point epsilon, choosing a tolerance near that uncertainty may be preferable. The tolerance may be absolute if the uncertainties are absolute rather than relative. References ---------- .. [1] G. H. Golub and C. F. Van Loan, _Matrix Computations_. Baltimore: Johns Hopkins University Press, 1996. .. [2] http://www.mathworks.com/help/techdoc/ref/rank.html ''' M = np.asarray(M) if M.ndim > 2: raise TypeError('array should have 2 or fewer dimensions') if M.ndim < 2: return int(not np.all(M==0)) S = spl.svd(M, compute_uv=False) if tol is None: tol = S.max() * np.finfo(S.dtype).eps * max(M.shape) return np.sum(S > tol) def full_rank(X, r=None): """ Return full-rank matrix whose column span is the same as X Uses an SVD decomposition. If the rank of `X` is known it can be specified by `r` -- no check is made to ensure that this really is the rank of X. Parameters ---------- X : array-like 2D array which may not be of full rank. r : None or int Known rank of `X`. r=None results in standard matrix rank calculation. We do not check `r` is really the rank of X; it is to speed up calculations when the rank is already known. 
Returns ------- fX : array Full-rank matrix with column span matching that of `X` """ if r is None: r = matrix_rank(X) V, D, U = spl.svd(X, full_matrices=0) order = np.argsort(D) order = order[::-1] value = [] for i in range(r): value.append(V[:,order[i]]) return np.asarray(np.transpose(value)).astype(np.float64) def pos_recipr(X): """ Return element-wise reciprocal of array, setting `X`>=0 to 0 Return the reciprocal of an array, setting all entries less than or equal to 0 to 0. Therefore, it presumes that X should be positive in general. Parameters ---------- X : array-like Returns ------- rX : array array of same shape as `X`, dtype np.float, with values set to 1/X where X > 0, 0 otherwise """ X = np.asarray(X) return np.where(X<=0, 0, 1. / X) def recipr0(X): """ Return element-wise reciprocal of array, `X`==0 -> 0 Return the reciprocal of an array, setting all entries equal to 0 as 0. It does not assume that X should be positive in general. Parameters ---------- X : array-like Returns ------- rX : array """ X = np.asarray(X) return np.where(X==0, 0, 1. / X) nipy-0.3.0/nipy/algorithms/utils/pca.py000066400000000000000000000363261210344137400201070ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This module provides a class for principal components analysis (PCA). PCA is an orthonormal, linear transform (i.e., a rotation) that maps the data to a new coordinate system such that the maximal variability of the data lies on the first coordinate (or the first principal component), the second greatest variability is projected onto the second coordinate, and so on. The resulting data has unit covariance (i.e., it is decorrelated). This technique can be used to reduce the dimensionality of the data. More specifically, the data is projected onto the eigenvectors of the covariance matrix. """ import numpy as np import scipy.linalg as spl from ...core.image.image import rollimg from ...core.reference.coordinate_map import (io_axis_indices, orth_axes, drop_io_dim, AxisError) def pca(data, axis=0, mask=None, ncomp=None, standardize=True, design_keep=None, design_resid='mean', tol_ratio=0.01): """Compute the SVD PCA of an array-like thing over `axis`. Parameters ---------- data : ndarray-like (np.float) The array on which to perform PCA over axis `axis` (below) axis : int, optional The axis over which to perform PCA (axis identifying observations). Default is 0 (first) mask : ndarray-like (np.bool), optional An optional mask, should have shape given by data axes, with `axis` removed, i.e.: ``s = data.shape; s.pop(axis); msk_shape=s`` ncomp : {None, int}, optional How many component basis projections to return. If ncomp is None (the default) then the number of components is given by the calculated rank of the data, after applying `design_keep`, `design_resid` and `tol_ratio` below. We always return all the basis vectors and percent variance for each component; `ncomp` refers only to the number of basis_projections returned. standardize : bool, optional If True, standardize so each time series (after application of `design_keep` and `design_resid`) has the same standard deviation, as calculated by the ``np.std`` function. design_keep : None or ndarray, optional Data is projected onto the column span of design_keep. 
None (default) equivalent to ``np.identity(data.shape[axis])`` design_resid : str or None or ndarray, optional After projecting onto the column span of design_keep, data is projected perpendicular to the column span of this matrix. If None, we do no such second projection. If a string 'mean', then the mean of the data is removed, equivalent to passing a column vector matrix of 1s. tol_ratio : float, optional If ``XZ`` is the vector of singular values of the projection matrix from `design_keep` and `design_resid`, and S are the singular values of ``XZ``, then `tol_ratio` is the value used to calculate the effective rank of the projection of the design, as in ``rank = ((S / S.max) > tol_ratio).sum()`` Returns ------- results : dict $G$ is the number of non-trivial components found after applying `tol_ratio` to the projections of `design_keep` and `design_resid`. `results` has keys: * ``basis_vectors``: series over `axis`, shape (data.shape[axis], G) - the eigenvectors of the PCA * ``pcnt_var``: percent variance explained by component, shape (G,) * ``basis_projections``: PCA components, with components varying over axis `axis`; thus shape given by: ``s = list(data.shape); s[axis] = ncomp`` * ``axis``: axis over which PCA has been performed. Notes ----- See ``pca_image.m`` from ``fmristat`` for Keith Worsley's code on which some of this is based. See: http://en.wikipedia.org/wiki/Principal_component_analysis for some inspiration for naming - particularly 'basis_vectors' and 'basis_projections' Examples -------- >>> arr = np.random.normal(size=(17, 10, 12, 14)) >>> msk = np.all(arr > -2, axis=0) >>> res = pca(arr, mask=msk, ncomp=9) Basis vectors are columns. There is one column for each component. The number of components is the calculated rank of the data matrix after applying the various projections listed in the parameters. In this case we are only removing the mean, so the number of components is one less than the axis over which we do the PCA (here axis=0 by default). >>> res['basis_vectors'].shape (17, 16) Basis projections are arrays with components in the dimension over which we have done the PCA (axis=0 by default). Because we set `ncomp` above, we only retain `ncomp` components. >>> res['basis_projections'].shape (9, 10, 12, 14) """ data = np.asarray(data) # We roll the PCA axis to be first, for convenience if axis is None: raise ValueError('axis cannot be None') data = np.rollaxis(data, axis) if mask is not None: mask = np.asarray(mask) if not data.shape[1:] == mask.shape: raise ValueError('Mask should match dimensions of data other than ' 'the axis over which to do the PCA') if design_resid == 'mean': # equivalent to: design_resid = np.ones((data.shape[0], 1)) def project_resid(Y): return Y - Y.mean(0)[None,...] elif design_resid is None: def project_resid(Y): return Y else: # matrix passed, we hope projector = np.dot(design_resid, spl.pinv(design_resid)) def project_resid(Y): return Y - np.dot(projector, Y) if standardize: def rmse_scales_func(std_source): # modifies array in place resid = project_resid(std_source) # root mean square of the residual rmse = np.sqrt(np.square(resid).sum(axis=0) / resid.shape[0]) # positive 1/rmse return np.where(rmse<=0, 0, 1. / rmse) else: rmse_scales_func = None """ Perform the computations needed for the PCA. This stores the covariance/correlation matrix of the data in the attribute 'C'. The components are stored as the attributes 'components', for an fMRI image these are the time series explaining the most variance. 
Now, we compute projection matrices. First, data is projected onto the columnspace of design_keep, then it is projected perpendicular to column space of design_resid. """ if design_keep is None: X = np.eye(data.shape[0]) else: X = np.dot(design_keep, spl.pinv(design_keep)) XZ = project_resid(X) UX, SX, VX = spl.svd(XZ, full_matrices=0) # The matrix UX has orthonormal columns and represents the # final "column space" that the data will be projected onto. rank = (SX/SX.max() > tol_ratio).sum() UX = UX[:,:rank].T # calculate covariance matrix in full-rank column space. The returned # array is roughly: YX = dot(UX, data); C = dot(YX, YX.T), perhaps where the # data has been standarized, perhaps summed over slices C_full_rank = _get_covariance(data, UX, rmse_scales_func, mask) # find the eigenvalues D and eigenvectors Vs of the covariance # matrix D, Vs = spl.eigh(C_full_rank) # Compute basis vectors in original column space basis_vectors = np.dot(UX.T, Vs).T # sort both in descending order of eigenvalues order = np.argsort(-D) D = D[order] basis_vectors = basis_vectors[order] pcntvar = D * 100 / D.sum() """ Output the component basis_projections """ if ncomp is None: ncomp = rank subVX = basis_vectors[:ncomp] out = _get_basis_projections(data, subVX, rmse_scales_func) # Roll PCA image axis back to original position in data array if axis < 0: axis += data.ndim out = np.rollaxis(out, 0, axis+1) return {'basis_vectors': basis_vectors.T, 'pcnt_var': pcntvar, 'basis_projections': out, 'axis': axis} def _get_covariance(data, UX, rmse_scales_func, mask): # number of points in PCA dimension rank, n_pts = UX.shape C = np.zeros((rank, rank)) # nan_to_num only for floating point masks if not mask is None: nan_to_num = mask.dtype.type in (np.sctypes['float'] + np.sctypes['complex']) # loop over next dimension to save memory if data.ndim == 2: # If we have 2D data, just do the covariance all in one shot, by using # a slice that is the equivalent of the ':' slice syntax slices = [slice(None)] else: # If we have more then 2D, then we iterate over slices in the second # dimension, in order to save memory slices = [slice(i,i+1) for i in range(data.shape[1])] for i, s_slice in enumerate(slices): Y = data[:,s_slice].reshape((n_pts, -1)) # project data into required space YX = np.dot(UX, Y) if rmse_scales_func is not None: YX *= rmse_scales_func(Y) if mask is not None: # weight data with mask. Usually the weights will be 0,1 msk_slice = mask[s_slice].reshape(Y.shape[1]) if nan_to_num: # but if floats, check for NaNs too. msk_slice = np.nan_to_num(msk_slice) YX = YX * msk_slice C += np.dot(YX, YX.T) return C def _get_basis_projections(data, subVX, rmse_scales_func): ncomp = subVX.shape[0] out = np.empty((ncomp,) + data.shape[1:], np.float) for i in range(data.shape[1]): Y = data[:,i].reshape((data.shape[0], -1)) U = np.dot(subVX, Y) if rmse_scales_func is not None: U *= rmse_scales_func(Y) U.shape = (U.shape[0],) + data.shape[2:] out[:,i] = U return out def pca_image(img, axis='t', mask=None, ncomp=None, standardize=True, design_keep=None, design_resid='mean', tol_ratio=0.01): """ Compute the PCA of an image over a specified axis Parameters ---------- img : Image The image on which to perform PCA over the given `axis` axis : str or int Axis over which to perform PCA. Default is 't'. If `axis` is an integer, gives the index of the input (domain) axis of `img`. If `axis` is a str, can be an input (domain) name, or an output (range) name, that maps to an input (domain) name. 
mask : Image, optional An optional mask, should have shape == image.shape[:3] and the same coordinate map as `img` but with `axis` dropped ncomp : {None, int}, optional How many component basis projections to return. If ncomp is None (the default) then the number of components is given by the calculated rank of the data, after applying `design_keep`, `design_resid` and `tol_ratio` below. We always return all the basis vectors and percent variance for each component; `ncomp` refers only to the number of basis_projections returned. standardize : bool, optional If True, standardize so each time series (after application of `design_keep` and `design_resid`) has the same standard deviation, as calculated by the ``np.std`` function. design_keep : None or ndarray, optional Data is projected onto the column span of design_keep. None (default) equivalent to ``np.identity(data.shape[axis])`` design_resid : str or None or ndarray, optional After projecting onto the column span of design_keep, data is projected perpendicular to the column span of this matrix. If None, we do no such second projection. If a string 'mean', then the mean of the data is removed, equivalent to passing a column vector matrix of 1s. tol_ratio : float, optional If ``XZ`` is the vector of singular values of the projection matrix from `design_keep` and `design_resid`, and S are the singular values of ``XZ``, then `tol_ratio` is the value used to calculate the effective rank of the projection of the design, as in ``rank = ((S / S.max) > tol_ratio).sum()`` Returns ------- results : dict $L$ is the number of non-trivial components found after applying `tol_ratio` to the projections of `design_keep` and `design_resid`. `results` has keys: * ``basis_vectors``: series over `axis`, shape (data.shape[axis], L) - the eigenvectors of the PCA * ``pcnt_var``: percent variance explained by component, shape (L,) * ``basis_projections``: PCA components, with components varying over axis `axis`; thus shape given by: ``s = list(data.shape); s[axis] = ncomp`` * ``axis``: axis over which PCA has been performed. Examples -------- >>> from nipy.testing import funcfile >>> from nipy import load_image >>> func_img = load_image(funcfile) Time is the fourth axis >>> func_img.coordmap.function_range CoordinateSystem(coord_names=('aligned-x=L->R', 'aligned-y=P->A', 'aligned-z=I->S', 't'), name='aligned', coord_dtype=float64) >>> func_img.shape (17, 21, 3, 20) Calculate the PCA over time, by default >>> res = pca_image(func_img) >>> res['basis_projections'].coordmap.function_range CoordinateSystem(coord_names=('aligned-x=L->R', 'aligned-y=P->A', 'aligned-z=I->S', 'PCA components'), name='aligned', coord_dtype=float64) The number of components is one less than the number of time points >>> res['basis_projections'].shape (17, 21, 3, 19) """ img_klass = img.__class__ # Which axes are we operating over? 
in_ax, out_ax = io_axis_indices(img.coordmap, axis) if None in (in_ax, out_ax): raise AxisError('Cannot identify matching input output axes with "%s"' % axis) if not orth_axes(in_ax, out_ax, img.coordmap.affine): raise AxisError('Input and output axes found from "%s" not othogonal ' 'to rest of affine' % axis) # Roll the chosen axis to input position zero work_img = rollimg(img, axis) if mask is not None: if not mask.coordmap.similar_to(drop_io_dim(img.coordmap, axis)): raise ValueError("Mask should have matching coordmap to `img` " "coordmap with dropped axis %s" % axis) data = work_img.get_data() if mask is not None: mask_data = mask.get_data() else: mask_data = None # do the PCA res = pca(data, 0, mask_data, ncomp, standardize, design_keep, design_resid, tol_ratio) # Clean up images after PCA # Rename the axis we dropped, at position 0 after rollimg output_coordmap = work_img.coordmap.renamed_domain( {0: 'PCA components'}) # And the matching output axis - which has not moved position output_coordmap = output_coordmap.renamed_range( {out_ax: 'PCA components'}) output_img = img_klass(res['basis_projections'], output_coordmap) # We have to roll the axis back to the original position output_img = rollimg(output_img, 0, in_ax + 1) key = 'basis_vectors over %s' % axis res[key] = res['basis_vectors'] res['basis_projections'] = output_img # Signal the roll in results res['axis'] = in_ax return res nipy-0.3.0/nipy/algorithms/utils/setup.py000066400000000000000000000007361210344137400205000ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import numpy as np def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('utils', parent_package, top_path) config.add_subpackage('tests') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) nipy-0.3.0/nipy/algorithms/utils/tests/000077500000000000000000000000001210344137400201225ustar00rootroot00000000000000nipy-0.3.0/nipy/algorithms/utils/tests/__init__.py000066400000000000000000000000271210344137400222320ustar00rootroot00000000000000# Make tests a package nipy-0.3.0/nipy/algorithms/utils/tests/test_fast_distance.py000066400000000000000000000017171210344137400243500ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Test the fast distance estimator """ import numpy as np from numpy.testing import assert_almost_equal from ..fast_distance import euclidean_distance as ed def test_euclidean_1(): """ test that the euclidean distance is as expected """ nx, ny = (10, 12) X = np.random.randn(nx, 2) Y = np.random.randn(ny, 2) ED = ed(X, Y) ref = np.zeros((nx, ny)) for i in range(nx): ref[i] = np.sqrt(np.sum((Y - X[i])**2, 1)) assert_almost_equal(ED, ref) def test_euclidean_2(): """ test that the euclidean distance is as expected """ nx = 10 X = np.random.randn(nx, 2) ED = ed(X) ref = np.zeros((nx, nx)) for i in range(nx): ref[i] = np.sqrt(np.sum((X - X[i])**2, 1)) assert_almost_equal(ED, ref) if __name__ == "__main__": import nose nose.run(argv=['', __file__]) nipy-0.3.0/nipy/algorithms/utils/tests/test_matrices.py000066400000000000000000000054411210344137400233460ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Test functions for utils.matrices 
""" import numpy as np from ..matrices import (matrix_rank, full_rank, pos_recipr, recipr0) from nose.tools import (assert_true, assert_equal, assert_false, assert_raises) from numpy.testing import (assert_almost_equal, assert_array_almost_equal) def test_matrix_rank(): # Full rank matrix assert_equal(4, matrix_rank(np.eye(4))) I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix assert_equal(matrix_rank(I), 3) # All zeros - zero rank assert_equal(matrix_rank(np.zeros((4,4))), 0) # 1 dimension - rank 1 unless all 0 assert_equal(matrix_rank(np.ones((4,))), 1) assert_equal(matrix_rank(np.zeros((4,))), 0) # accepts array-like assert_equal(matrix_rank([1]), 1) # Make rank deficient matrix rng = np.random.RandomState(20120613) X = rng.normal(size=(40, 10)) X[:, 0] = X[:, 1] + X[:, 2] S = np.linalg.svd(X, compute_uv=False) eps = np.finfo(X.dtype).eps assert_equal(matrix_rank(X, tol=0), 10) assert_equal(matrix_rank(X, tol=S.min() - eps), 10) assert_equal(matrix_rank(X, tol=S.min() + eps), 9) def test_full_rank(): rng = np.random.RandomState(20110831) X = rng.standard_normal((40,5)) # A quick rank check assert_equal(matrix_rank(X), 5) X[:,0] = X[:,1] + X[:,2] assert_equal(matrix_rank(X), 4) Y1 = full_rank(X) assert_equal(Y1.shape, (40,4)) Y2 = full_rank(X, r=3) assert_equal(Y2.shape, (40,3)) Y3 = full_rank(X, r=4) assert_equal(Y3.shape, (40,4)) # Windows - there seems to be some randomness in the SVD result; standardize # column signs before comparison flipper = np.sign(Y1[0]) * np.sign(Y3[0]) assert_almost_equal(Y1, Y3 * flipper) def test_pos_recipr(): X = np.array([2,1,-1,0], dtype=np.int8) eX = np.array([0.5,1,0,0]) Y = pos_recipr(X) yield assert_array_almost_equal, Y, eX yield assert_equal, Y.dtype.type, np.float64 X2 = X.reshape((2,2)) Y2 = pos_recipr(X2) yield assert_array_almost_equal, Y2, eX.reshape((2,2)) # check that lists have arrived XL = [0, 1, -1] yield assert_array_almost_equal, pos_recipr(XL), [0, 1, 0] # scalars yield assert_equal, pos_recipr(-1), 0 yield assert_equal, pos_recipr(0), 0 yield assert_equal, pos_recipr(2), 0.5 def test_recipr0(): X = np.array([[2,1],[-4,0]]) Y = recipr0(X) yield assert_array_almost_equal, Y, np.array([[0.5,1],[-0.25,0]]) # check that lists have arrived XL = [0, 1, -1] yield assert_array_almost_equal, recipr0(XL), [0, 1, -1] # scalars yield assert_equal, recipr0(-1), -1 yield assert_equal, recipr0(0), 0 yield assert_equal, recipr0(2), 0.5 nipy-0.3.0/nipy/algorithms/utils/tests/test_pca.py000066400000000000000000000225651210344137400223100ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import numpy as np from ..pca import pca from nipy.io.api import load_image from nipy.testing import (assert_equal, assert_almost_equal, assert_array_almost_equal, funcfile, assert_true, assert_array_equal, assert_raises) data = {} def setup(): img = load_image(funcfile) arr = img.get_data() #arr = np.rollaxis(arr, 3) data['nimages'] = arr.shape[3] data['fmridata'] = arr frame = data['fmridata'][...,0] data['mask'] = (frame > 500).astype(np.float64) def reconstruct(time_series, images, axis=0): # Reconstruct data from remaining components n_tps = time_series.shape[0] images = np.rollaxis(images, axis) ncomps = images.shape[0] img_size = np.prod(images.shape[1:]) rarr = images.reshape((ncomps, img_size)) recond = np.dot(time_series, rarr) recond = recond.reshape((n_tps,) + images.shape[1:]) if axis < 0: axis = axis + images.ndim recond = np.rollaxis(recond, 0, axis+1) return 
recond def root_mse(arr, axis=0): return np.sqrt(np.square(arr).sum(axis=axis) / arr.shape[axis]) def pos1pca(arr, axis=0, **kwargs): ''' Return basis vectors and projections with first row positive ''' res = pca(arr, axis, **kwargs) return res2pos1(res) def res2pos1(res): # Orient basis vectors in standard direction axis = res['axis'] bvs = res['basis_vectors'] bps = res['basis_projections'] signs = np.sign(bvs[0]) res['basis_vectors'] = bvs * signs new_axes = [None] * bps.ndim n_comps = res['basis_projections'].shape[axis] new_axes[axis] = slice(0,n_comps) res['basis_projections'] = bps * signs[new_axes] return res def test_same_basis(): arr4d = data['fmridata'] shp = arr4d.shape arr2d = arr4d.reshape((np.prod(shp[:3]), shp[3])) res = pos1pca(arr2d, axis=-1) p1b_0 = res['basis_vectors'] for i in range(3): res_again = pos1pca(arr2d, axis=-1) assert_almost_equal(res_again['basis_vectors'], p1b_0) def test_2d_eq_4d(): arr4d = data['fmridata'] shp = arr4d.shape arr2d = arr4d.reshape((np.prod(shp[:3]), shp[3])) arr3d = arr4d.reshape((shp[0], -1, shp[3])) res4d = pos1pca(arr4d, axis=-1, standardize=False) res3d = pos1pca(arr3d, axis=-1, standardize=False) res2d = pos1pca(arr2d, axis=-1, standardize=False) assert_array_almost_equal(res4d['basis_vectors'], res2d['basis_vectors']) assert_array_almost_equal(res4d['basis_vectors'], res3d['basis_vectors']) def test_input_effects(): # Test effects of axis specifications ntotal = data['nimages'] - 1 # return full rank - mean PCA over last axis p = pos1pca(data['fmridata'], -1) assert_equal(p['basis_vectors'].shape, (data['nimages'], ntotal)) assert_equal(p['basis_projections'].shape, data['mask'].shape + (ntotal,)) assert_equal(p['pcnt_var'].shape, (ntotal,)) # Reconstructed data lacks only mean rarr = reconstruct(p['basis_vectors'], p['basis_projections'], -1) rarr = rarr + data['fmridata'].mean(-1)[...,None] # same effect if over axis 0, which is the default arr = data['fmridata'] arr = np.rollaxis(arr, -1) # Same basis once we've normalized the signs pr = pos1pca(arr) out_arr = np.rollaxis(pr['basis_projections'], 0, 4) assert_almost_equal(out_arr, p['basis_projections']) assert_almost_equal(p['basis_vectors'], pr['basis_vectors']) assert_almost_equal(p['pcnt_var'], pr['pcnt_var']) # Check axis None raises error assert_raises(ValueError, pca, data['fmridata'], None) def test_diagonality(): # basis_projections are diagonal, whether standarized or not p = pca(data['fmridata'], -1) # standardized assert_true(diagonal_covariance(p['basis_projections'], -1)) pns = pca(data['fmridata'], -1, standardize=False) # not assert_true(diagonal_covariance(pns['basis_projections'], -1)) def diagonal_covariance(arr, axis=0): arr = np.rollaxis(arr, axis) arr = arr.reshape(arr.shape[0], -1) aTa = np.dot(arr, arr.T) return np.allclose(aTa, np.diag(np.diag(aTa)), atol=1e-6) def test_2D(): # check that a standard 2D PCA works too M = 100 N = 20 L = M-1 # rank after mean removal data = np.random.uniform(size=(M, N)) p = pca(data) ts = p['basis_vectors'] imgs = p['basis_projections'] assert_equal(ts.shape, (M, L)) assert_equal(imgs.shape, (L, N)) rimgs = reconstruct(ts, imgs) # add back the sqrt MSE, because we standardized data_mean = data.mean(0)[None,...] demeaned = data - data_mean rmse = root_mse(demeaned, axis=0)[None,...] 
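    # (Note on the next two steps: pca() above ran with the default
    # standardize=True, which scales each de-meaned column by the
    # reciprocal of its root-mean-square residual -- see rmse_scales_func
    # in pca() -- so the basis projections are in standardized units.
    # Recovering the original data from the reconstruction therefore means
    # multiplying by rmse and then adding the mean back, which is what the
    # assertion below checks.)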
# also add back the mean assert_array_almost_equal((rimgs * rmse) + data_mean, data) # if standardize is set, or not, covariance is diagonal assert_true(diagonal_covariance(imgs)) p = pca(data, standardize=False) imgs = p['basis_projections'] assert_true(diagonal_covariance(imgs)) def test_PCAMask(): # for 2 and 4D case ntotal = data['nimages'] - 1 ncomp = 5 arr4d = data['fmridata'] mask3d = data['mask'] arr2d = arr4d.reshape((-1, data['nimages'])) mask1d = mask3d.reshape((-1)) for arr, mask in (arr4d, mask3d), (arr2d, mask1d): p = pca(arr, -1, mask, ncomp=ncomp) assert_equal(p['basis_vectors'].shape, (data['nimages'], ntotal)) assert_equal(p['basis_projections'].shape, mask.shape + (ncomp,)) assert_equal(p['pcnt_var'].shape, (ntotal,)) assert_almost_equal(p['pcnt_var'].sum(), 100.) # Any reasonable datatype for mask for dt in ([np.bool_] + np.sctypes['int'] + np.sctypes['uint'] + np.sctypes['float']): p = pca(arr4d, -1, mask3d.astype(dt), ncomp=ncomp) assert_equal(p['basis_vectors'].shape, (data['nimages'], ntotal)) assert_equal(p['basis_projections'].shape, mask3d.shape + (ncomp,)) assert_equal(p['pcnt_var'].shape, (ntotal,)) assert_almost_equal(p['pcnt_var'].sum(), 100.) # Mask data shape must match assert_raises(ValueError, pca, arr4d, -1, mask1d) def test_PCAMask_nostandardize(): ntotal = data['nimages'] - 1 ncomp = 5 p = pca(data['fmridata'], -1, data['mask'], ncomp=ncomp, standardize=False) assert_equal(p['basis_vectors'].shape, (data['nimages'], ntotal)) assert_equal(p['basis_projections'].shape, data['mask'].shape + (ncomp,)) assert_equal(p['pcnt_var'].shape, (ntotal,)) assert_almost_equal(p['pcnt_var'].sum(), 100.) def test_PCANoMask(): ntotal = data['nimages'] - 1 ncomp = 5 p = pca(data['fmridata'], -1, ncomp=ncomp) assert_equal(p['basis_vectors'].shape, (data['nimages'], ntotal)) assert_equal(p['basis_projections'].shape, data['mask'].shape + (ncomp,)) assert_equal(p['pcnt_var'].shape, (ntotal,)) assert_almost_equal(p['pcnt_var'].sum(), 100.) def test_PCANoMask_nostandardize(): ntotal = data['nimages'] - 1 ncomp = 5 p = pca(data['fmridata'], -1, ncomp=ncomp, standardize=False) assert_equal(p['basis_vectors'].shape, (data['nimages'], ntotal)) assert_equal(p['basis_projections'].shape, data['mask'].shape + (ncomp,)) assert_equal(p['pcnt_var'].shape, (ntotal,)) assert_almost_equal(p['pcnt_var'].sum(), 100.) def test_keep(): # Data is projected onto k=10 dimensional subspace # then has its mean removed. # Should still have rank 10. k = 10 ncomp = 5 ntotal = k X = np.random.standard_normal((data['nimages'], k)) p = pca(data['fmridata'], -1, ncomp=ncomp, design_keep=X) assert_equal(p['basis_vectors'].shape, (data['nimages'], ntotal)) assert_equal(p['basis_projections'].shape, data['mask'].shape + (ncomp,)) assert_equal(p['pcnt_var'].shape, (ntotal,)) assert_almost_equal(p['pcnt_var'].sum(), 100.) def test_resid(): # Data is projected onto k=10 dimensional subspace then has its mean # removed. Should still have rank 10. k = 10 ncomp = 5 ntotal = k X = np.random.standard_normal((data['nimages'], k)) p = pca(data['fmridata'], -1, ncomp=ncomp, design_resid=X) assert_equal(p['basis_vectors'].shape, (data['nimages'], ntotal)) assert_equal(p['basis_projections'].shape, data['mask'].shape + (ncomp,)) assert_equal(p['pcnt_var'].shape, (ntotal,)) assert_almost_equal(p['pcnt_var'].sum(), 100.) 
# if design_resid is None, we do not remove the mean, and we get # full rank from our data p = pca(data['fmridata'], -1, design_resid=None) rank = p['basis_vectors'].shape[1] assert_equal(rank, data['nimages']) rarr = reconstruct(p['basis_vectors'], p['basis_projections'], -1) # add back the sqrt MSE, because we standardized rmse = root_mse(data['fmridata'], axis=-1)[...,None] assert_true(np.allclose(rarr * rmse, data['fmridata'])) def test_both(): k1 = 10 k2 = 8 ncomp = 5 ntotal = k1 X1 = np.random.standard_normal((data['nimages'], k1)) X2 = np.random.standard_normal((data['nimages'], k2)) p = pca(data['fmridata'], -1, ncomp=ncomp, design_resid=X2, design_keep=X1) assert_equal(p['basis_vectors'].shape, (data['nimages'], ntotal)) assert_equal(p['basis_projections'].shape, data['mask'].shape + (ncomp,)) assert_equal(p['pcnt_var'].shape, (ntotal,)) assert_almost_equal(p['pcnt_var'].sum(), 100.) nipy-0.3.0/nipy/algorithms/utils/tests/test_pca_image.py000066400000000000000000000312671210344137400234510ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import numpy as np from nibabel.affines import from_matvec from ..pca import pca_image, pca as pca_array from ....core.api import Image, AffineTransform, CoordinateSystem as CS from ....core.reference.coordinate_map import (product as cm_product, drop_io_dim, AxisError) from ....core.image.image import rollimg from ....io.api import load_image from nose.tools import assert_raises from numpy.testing import (assert_equal, assert_almost_equal, assert_array_equal) from ....testing import funcfile from .test_pca import res2pos1 data_dict = {} def setup(): img = load_image(funcfile) # Here, I'm just doing this so I know that img.shape[0] is the number of # volumes t0_img = rollimg(img, 't') data_dict['nimages'] = t0_img.shape[0] # Below, I am just making a mask because I already have img, I know I can do # this. In principle, though, the pca function will just take another Image # as a mask img_data = t0_img.get_data() mask_cmap = drop_io_dim(img.coordmap, 't') first_frame = img_data[0] mask = Image(np.greater(first_frame, 500).astype(np.float64), mask_cmap) data_dict['fmridata'] = img data_dict['mask'] = mask # print data_dict['mask'].shape, np.sum(data_dict['mask'].get_data()) assert_equal(data_dict['mask'].shape, (17, 21, 3)) assert_almost_equal(np.sum(data_dict['mask'].get_data()), 1071.0) def _rank(p): return p['basis_vectors'].shape[1] def test_PCAMask(): nimages = data_dict['nimages'] ntotal = nimages - 1 ncomp = 5 p = pca_image(data_dict['fmridata'], 't', data_dict['mask'], ncomp=ncomp) assert_equal(_rank(p), ntotal) assert_equal(p['axis'], 3) assert_equal(p['basis_vectors over t'].shape, (nimages, ntotal)) assert_equal(p['basis_projections'].shape, data_dict['mask'].shape + (ncomp,)) assert_equal(p['pcnt_var'].shape, (ntotal,)) assert_almost_equal(p['pcnt_var'].sum(), 100.) 
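# ---------------------------------------------------------------------------
# Illustrative aside (not part of the original nipy test suite): the call
# pattern exercised by these image-level tests, end to end, using the bundled
# ``funcfile`` test image.  The ``demo_`` names are hypothetical.
import numpy as np
from nipy.io.api import load_image
from nipy.testing import funcfile
from nipy.algorithms.utils.pca import pca_image
demo_img = load_image(funcfile)
demo_res = pca_image(demo_img, 't', ncomp=3)
# 'basis_vectors over t' holds one time course per component;
# 'basis_projections' is an Image of component maps in the space of demo_img;
# 'pcnt_var' gives the percent variance explained and sums to 100.
assert demo_res['basis_projections'].shape[-1] == 3
assert np.allclose(demo_res['pcnt_var'].sum(), 100.0)
# ---------------------------------------------------------------------------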
assert_equal(p['basis_projections'].axes.coord_names, ['i','j','k','PCA components']) assert_equal(p['basis_projections'].coordmap.affine, data_dict['fmridata'].coordmap.affine) def test_mask_match(): # we can't do PCA over spatial axes if we use a spatial mask ncomp = 5 out_coords = data_dict['mask'].reference.coord_names for i, o, n in zip('ijk', out_coords, [0,1,2]): assert_raises(ValueError, pca_image, data_dict['fmridata'], i, data_dict['mask'], ncomp) assert_raises(ValueError, pca_image, data_dict['fmridata'], o, data_dict['mask'], ncomp) assert_raises(ValueError, pca_image, data_dict['fmridata'], n, data_dict['mask'], ncomp) def test_PCAMask_nostandardize(): nimages = data_dict['nimages'] ntotal = nimages - 1 ncomp = 5 p = pca_image(data_dict['fmridata'], 't', data_dict['mask'], ncomp=ncomp, standardize=False) assert_equal(_rank(p), ntotal) assert_equal(p['basis_vectors over t'].shape, (nimages, ntotal)) assert_equal(p['basis_projections'].shape, data_dict['mask'].shape + (ncomp,)) assert_equal(p['pcnt_var'].shape, (ntotal,)) assert_almost_equal(p['pcnt_var'].sum(), 100.) assert_equal(p['basis_projections'].axes.coord_names, ['i','j','k','PCA components']) assert_equal(p['basis_projections'].coordmap.affine, data_dict['fmridata'].coordmap.affine) def test_PCANoMask(): nimages = data_dict['nimages'] ntotal = nimages - 1 ncomp = 5 p = pca_image(data_dict['fmridata'], ncomp=ncomp) assert_equal(_rank(p), ntotal) assert_equal(p['basis_vectors over t'].shape, (nimages, ntotal)) assert_equal(p['basis_projections'].shape, data_dict['mask'].shape + (ncomp,)) assert_equal(p['pcnt_var'].shape, (ntotal,)) assert_almost_equal(p['pcnt_var'].sum(), 100.) assert_equal(p['basis_projections'].axes.coord_names, ['i','j','k','PCA components']) assert_equal(p['basis_projections'].coordmap.affine, data_dict['fmridata'].coordmap.affine) def test_PCANoMask_nostandardize(): nimages = data_dict['nimages'] ntotal = nimages - 1 ncomp = 5 p = pca_image(data_dict['fmridata'], ncomp=ncomp, standardize=False) assert_equal(_rank(p), ntotal) assert_equal(p['basis_vectors over t'].shape, (nimages, ntotal)) assert_equal(p['basis_projections'].shape, data_dict['mask'].shape + (ncomp,)) assert_equal(p['pcnt_var'].shape, (ntotal,)) assert_almost_equal(p['pcnt_var'].sum(), 100.) assert_equal(p['basis_projections'].axes.coord_names, ['i','j','k','PCA components']) assert_equal(p['basis_projections'].coordmap.affine, data_dict['fmridata'].coordmap.affine) def test_keep(): # Data is projected onto k=10 dimensional subspace then has its mean # removed. Should still have rank 10. k = 10 ncomp = 5 nimages = data_dict['nimages'] ntotal = k X = np.random.standard_normal((nimages, k)) p = pca_image(data_dict['fmridata'], ncomp=ncomp, design_keep=X) assert_equal(_rank(p), ntotal) assert_equal(p['basis_vectors over t'].shape, (nimages, ntotal)) assert_equal(p['basis_projections'].shape, data_dict['mask'].shape + (ncomp,)) assert_equal(p['pcnt_var'].shape, (ntotal,)) assert_almost_equal(p['pcnt_var'].sum(), 100.) assert_equal(p['basis_projections'].axes.coord_names, ['i','j','k','PCA components']) assert_equal(p['basis_projections'].coordmap.affine, data_dict['fmridata'].coordmap.affine) def test_resid(): # Data is projected onto k=10 dimensional subspace then has its mean # removed. Should still have rank 10. 
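# ---------------------------------------------------------------------------
# Illustrative aside (not part of the original nipy test suite): the rank
# claim in the comment above -- projecting onto a k-dimensional subspace and
# then removing the mean generically leaves rank k, because a random design
# almost never contains the constant vector in its column span.  A numpy-only
# sketch with hypothetical ``demo_`` names:
import numpy as np
demo_T, demo_k = 20, 10
demo_X = np.random.standard_normal((demo_T, demo_k))
demo_P = np.dot(demo_X, np.linalg.pinv(demo_X))   # projector onto col(X)
demo_data = np.dot(demo_P, np.random.standard_normal((demo_T, 50)))
demo_demeaned = demo_data - demo_data.mean(axis=0)
assert np.linalg.matrix_rank(demo_demeaned) == demo_k
# ---------------------------------------------------------------------------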
k = 10 ncomp = 5 nimages = data_dict['nimages'] ntotal = k X = np.random.standard_normal((nimages, k)) p = pca_image(data_dict['fmridata'], ncomp=ncomp, design_resid=X) assert_equal(_rank(p), ntotal) assert_equal(p['basis_vectors over t'].shape, (nimages, ntotal)) assert_equal(p['basis_projections'].shape, data_dict['mask'].shape + (ncomp,)) assert_equal(p['pcnt_var'].shape, (ntotal,)) assert_almost_equal(p['pcnt_var'].sum(), 100.) assert_equal(p['basis_projections'].axes.coord_names, ['i','j','k','PCA components']) assert_equal(p['basis_projections'].coordmap.affine, data_dict['fmridata'].coordmap.affine) def test_both(): k1 = 10 k2 = 8 ncomp = 5 nimages = data_dict['nimages'] ntotal = k1 X1 = np.random.standard_normal((nimages, k1)) X2 = np.random.standard_normal((nimages, k2)) p = pca_image(data_dict['fmridata'], ncomp=ncomp, design_resid=X2, design_keep=X1) assert_equal(_rank(p), ntotal) assert_equal(p['basis_vectors over t'].shape, (nimages, ntotal)) assert_equal(p['basis_projections'].shape, data_dict['mask'].shape + (ncomp,)) assert_equal(p['pcnt_var'].shape, (ntotal,)) assert_almost_equal(p['pcnt_var'].sum(), 100.) assert_equal(p['basis_projections'].axes.coord_names, ['i','j','k','PCA components']) assert_equal(p['basis_projections'].coordmap.affine, data_dict['fmridata'].coordmap.affine) def test_5d(): # What happened to a 5d image? We should get 4d images back img = data_dict['fmridata'] data = img.get_data() # Make a last input and output axis called 'v' vcs = CS('v') xtra_cmap = AffineTransform(vcs, vcs, np.eye(2)) cmap_5d = cm_product(img.coordmap, xtra_cmap) data_5d = data.reshape(data.shape + (1,)) fived = Image(data_5d, cmap_5d) mask = data_dict['mask'] mask_data = mask.get_data() mask_data = mask_data.reshape(mask_data.shape + (1,)) cmap_4d = cm_product(mask.coordmap, xtra_cmap) mask4d = Image(mask_data, cmap_4d) nimages = data_dict['nimages'] ntotal = nimages - 1 ncomp = 5 p = pca_image(fived, 't', mask4d, ncomp=ncomp) assert_equal(_rank(p), ntotal) assert_equal(p['basis_vectors over t'].shape, (nimages, ntotal)) assert_equal(p['basis_projections'].shape, data.shape[:3] + (ncomp, 1)) assert_equal(p['pcnt_var'].shape, (ntotal,)) assert_almost_equal(p['pcnt_var'].sum(), 100.) assert_equal(p['basis_projections'].axes.coord_names, ['i','j','k','PCA components','v']) assert_equal(p['basis_projections'].coordmap.affine, fived.coordmap.affine) # flip the PCA dimension to end data_5d = data.reshape(data.shape[:3] + (1, data.shape[3])) # Make the last axis name be 'group'. 't' is not a length 1 dimension we # are going to leave as is gcs = CS(['group']) xtra_cmap = AffineTransform(gcs, gcs, np.eye(2)) cmap_5d = cm_product(img.coordmap, xtra_cmap) fived = Image(data_5d, cmap_5d) # Give the mask a 't' dimension, but no group dimension mask = data_dict['mask'] mask_data = mask.get_data() mask_data = mask_data.reshape(mask_data.shape + (1,)) # We need to replicate the time scaling of the image cmap, hence the 2. 
in # the affine xtra_cmap = AffineTransform(CS('t'), CS('t'), np.diag([2., 1])) cmap_4d = cm_product(mask.coordmap, xtra_cmap) mask4d = Image(mask_data, cmap_4d) nimages = data_dict['nimages'] ntotal = nimages - 1 ncomp = 5 # We can now show the axis does not have to be time p = pca_image(fived, mask=mask4d, ncomp=ncomp, axis='group') assert_equal(p['basis_vectors over group'].shape, (nimages, ntotal)) assert_equal(p['basis_projections'].axes.coord_names, ['i','j','k','t','PCA components']) assert_equal(p['basis_projections'].shape, data.shape[:3] + (1, ncomp)) def img_res2pos1(res, bv_key): # Orient basis vectors in standard direction axis = res['axis'] bvs = res[bv_key] bps_img = res['basis_projections'] bps = bps_img.get_data() signs = np.sign(bvs[0]) res[bv_key] = bvs * signs new_axes = [None] * bps.ndim n_comps = bps.shape[axis] new_axes[axis] = slice(0, n_comps) res['basis_projections'] = Image(bps * signs[new_axes], bps_img.coordmap) return res def test_other_axes(): # With a diagonal affine, we can do PCA on any axis ncomp = 5 img = data_dict['fmridata'] in_coords = list(img.axes.coord_names) img_data = img.get_data() for axis_no, axis_name in enumerate('ijkt'): p = pca_image(img, axis_name, ncomp=ncomp) n = img.shape[axis_no] bv_key = 'basis_vectors over ' + axis_name assert_equal(_rank(p), n - 1) assert_equal(p[bv_key].shape, (n, n - 1)) # We get the expected data back dp = pca_array(img_data, axis_no, ncomp=ncomp) # We have to make sure the signs are the same; on Windows it seems the # signs can flip even between two runs on the same data pos_p = img_res2pos1(p, bv_key) pos_dp = res2pos1(dp) img_bps = pos_p['basis_projections'] assert_almost_equal(pos_dp['basis_vectors'], pos_p[bv_key]) assert_almost_equal(pos_dp['basis_projections'], img_bps.get_data()) # And we've replaced the expected axis exp_coords = in_coords[:] exp_coords[exp_coords.index(axis_name)] = 'PCA components' assert_equal(img_bps.axes.coord_names, exp_coords) # If the affine is not diagonal, we'll get an error aff = from_matvec(np.arange(16).reshape(4,4)) nd_cmap = AffineTransform(img.axes, img.reference, aff) nd_img = Image(img_data, nd_cmap) for axis_name in 'ijkt': assert_raises(AxisError, pca_image, nd_img, axis_name) # Only for the non-diagonal parts aff = np.array([[1, 2, 0, 0, 10], [2, 1, 0, 0, 11], [0, 0, 3, 0, 12], [0, 0, 0, 4, 13], [0, 0, 0, 0, 1]]) nd_cmap = AffineTransform(img.axes, img.reference, aff) nd_img = Image(img_data, nd_cmap) for axis_name in 'ij': assert_raises(AxisError, pca_image, nd_img, axis_name) for axis_name in 'kt': p = pca_image(img, axis_name, ncomp=ncomp) exp_coords = in_coords[:] exp_coords[exp_coords.index(axis_name)] = 'PCA components' assert_equal(p['basis_projections'].axes.coord_names, exp_coords) nipy-0.3.0/nipy/core/000077500000000000000000000000001210344137400143775ustar00rootroot00000000000000nipy-0.3.0/nipy/core/__init__.py000066400000000000000000000004201210344137400165040ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Package containing core nipy classes. 
""" __docformat__ = 'restructuredtext' from nipy.testing import Tester test = Tester().test bench = Tester().bench nipy-0.3.0/nipy/core/api.py000066400000000000000000000023721210344137400155260ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Pseudo-package for all of the core symbols from the image object and its reference system. Use this module for importing core names into your namespace. For example: >>> from nipy.core.api import Image """ # Note: The order of imports is important here. from .reference.coordinate_system import CoordinateSystem from .reference.coordinate_map import (CoordinateMap, AffineTransform, compose, drop_io_dim, append_io_dim) from .reference.array_coords import Grid, ArrayCoordMap from .reference.spaces import (vox2scanner, vox2mni, vox2talairach, scanner_space, mni_space, talairach_space) from .image.image import (Image, fromarray, is_image, subsample, slice_maker, iter_axis, rollaxis as img_rollaxis, rollimg) from .image.image_spaces import (xyz_affine, is_xyz_affable, as_xyz_image, make_xyz_image) from .image.image_list import ImageList from .utils.generators import (parcels, data_generator, write_data, slice_generator, f_generator, matrix_generator) nipy-0.3.0/nipy/core/image/000077500000000000000000000000001210344137400154615ustar00rootroot00000000000000nipy-0.3.0/nipy/core/image/__init__.py000066400000000000000000000007371210344137400176010ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ The Image class provides the interface which should be used by users at the application level. The image provides a coordinate map, and the data itself. """ __docformat__ = 'restructuredtext' # You'd usually use nipy.core.api for these from . import image from .image import Image from nipy.testing import Tester test = Tester().test bench = Tester().bench nipy-0.3.0/nipy/core/image/image.py000066400000000000000000000757651210344137400171410ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Define the Image class and functions to work with Image instances * fromarray : create an Image instance from an ndarray (deprecated in favor of using the Image constructor) * subsample : slice an Image instance (deprecated in favor of image slicing) * rollaxis : roll an image axis backwards * synchronized_order : match coordinate systems between images * iter_axis : make iterator to iterate over an image axis * is_image : test for an object obeying the Image API """ import warnings from copy import copy import numpy as np from nibabel.onetime import setattr_on_read # These imports are used in the fromarray and subsample functions only, not in # Image from ..reference.coordinate_map import (AffineTransform, CoordinateSystem, input_axis_index) from ..reference.array_coords import ArrayCoordMap class Image(object): """ The `Image` class provides the core object type used in nipy. An `Image` represents a volumetric brain image and provides means for manipulating the image data. Most functions in the image module operate on `Image` objects. Notes ----- Images can be created through the module functions. 
See nipy.io for image IO such as ``load`` and ``save`` Examples -------- Load an image from disk >>> from nipy.testing import anatfile >>> from nipy.io.api import load_image >>> img = load_image(anatfile) Make an image from an array. We need to make a meaningful coordinate map for the image. >>> arr = np.zeros((21,64,64), dtype=np.int16) >>> cmap = AffineTransform('kji', 'zxy', np.eye(4)) >>> img = Image(arr, cmap) """ _doc = {} # Dictionary to store docs for attributes that are properties. We # want these docs to conform with our documentation standard, but # they need to be passed into the property function. Defining # them separately allows us to do this without a lot of clutter # in the property line. ################################################################### # # Attributes # ################################################################### metadata = {} _doc['metadata'] = "Dictionary containing additional information." coordmap = AffineTransform(CoordinateSystem('ijk'), CoordinateSystem('xyz'), np.diag([3,5,7,1])) _doc['coordmap'] = "Affine transform mapping from axes coordinates to reference coordinates." @setattr_on_read def shape(self): return self._data.shape _doc['shape'] = "Shape of data array." @setattr_on_read def ndim(self): return len(self._data.shape) _doc['ndim'] = "Number of data dimensions." @setattr_on_read def reference(self): return self.coordmap.function_range _doc['reference'] = "Reference coordinate system." @setattr_on_read def axes(self): return self.coordmap.function_domain _doc['axes'] = "Axes of image." @setattr_on_read def affine(self): if hasattr(self.coordmap, "affine"): return self.coordmap.affine raise AttributeError('Nonlinear transform does not have an affine.') _doc['affine'] = "Affine transformation if one exists." ################################################################### # # Properties # ################################################################### def _getheader(self): # data loaded from a file may have a header warnings.warn("Please don't use ``img.header``; use" "``img.metadata['header'] instead", DeprecationWarning, stacklevel=2) hdr = self.metadata.get('header') if hdr is None: raise AttributeError('Image created from arrays ' 'may not have headers.') return hdr def _setheader(self, header): warnings.warn("Please don't use ``img.header``; use" "``img.metadata['header'] instead", DeprecationWarning, stacklevel=2) self.metadata['header'] = header _doc['header'] = \ """The file header structure for this image, if available. This interface will soon go away - you should use ``img.metadata['header'] instead. """ header = property(_getheader, _setheader, doc=_doc['header']) ################################################################### # # Constructor # ################################################################### def __init__(self, data, coordmap, metadata=None): """Create an `Image` object from array and `CoordinateMap` object. Images are often created through the ``load_image`` function in the nipy base namespace. Parameters ---------- data : array-like object that as attribute ``shape`` and returns an array from ``np.asarray(data)`` coordmap : `AffineTransform` object coordmap mapping the domain (input) voxel axes of the image to the range (reference, output) axes - usually mm in real world space metadata : dict, optional Freeform metadata for image. Most common contents is ``header`` from nifti etc loaded images. 
See Also -------- load_image : load ``Image`` from a file save_image : save ``Image`` to a file """ if metadata is None: metadata = {} else: # Shallow copy metadata = copy(metadata) ndim = len(data.shape) if not isinstance(coordmap, AffineTransform): raise ValueError('coordmap must be an AffineTransform') # self._data is an array-like object. It must have a shape attribute # (see above) and return an array from np.array(data) self._data = data self.coordmap = coordmap if coordmap.function_domain.ndim != ndim: raise ValueError('the number of axes implied by the coordmap do ' 'not match the number of axes of the data') self.metadata = metadata ################################################################### # # Methods # ################################################################### def reordered_reference(self, order=None): """ Return new Image with reordered output coordinates New Image coordmap has reordered output coordinates. This does not transpose the data. Parameters ---------- order : None, sequence, optional sequence of int (giving indices) or str (giving names) - expressing new order of coordmap output coordinates. None (the default) results in reversed ordering. Returns ------- r_img : object Image of same class as `self`, with reordered output coordinates. Examples -------- >>> cmap = AffineTransform.from_start_step( ... 'ijk', 'xyz', [1, 2, 3], [4, 5, 6], 'domain', 'range') >>> im = Image(np.empty((30,40,50)), cmap) >>> im_reordered = im.reordered_reference([2,0,1]) >>> im_reordered.shape (30, 40, 50) >>> im_reordered.coordmap AffineTransform( function_domain=CoordinateSystem(coord_names=('i', 'j', 'k'), name='domain', coord_dtype=float64), function_range=CoordinateSystem(coord_names=('z', 'x', 'y'), name='range', coord_dtype=float64), affine=array([[ 0., 0., 6., 3.], [ 4., 0., 0., 1.], [ 0., 5., 0., 2.], [ 0., 0., 0., 1.]]) ) """ if order is None: order = range(self.ndim)[::-1] elif type(order[0]) == type(''): order = [self.reference.index(s) for s in order] new_cmap = self.coordmap.reordered_range(order) return self.__class__.from_image(self, coordmap=new_cmap) def reordered_axes(self, order=None): """ Return a new Image with reordered input coordinates. This transposes the data as well. Parameters ---------- order : None, sequence, optional Sequence of int (giving indices) or str (giving names) - expressing new order of coordmap output coordinates. None (the default) results in reversed ordering. Returns ------- r_img : object Image of same class as `self`, with reordered output coordinates. Examples -------- >>> cmap = AffineTransform.from_start_step( ... 
'ijk', 'xyz', [1, 2, 3], [4, 5, 6], 'domain', 'range') >>> cmap AffineTransform( function_domain=CoordinateSystem(coord_names=('i', 'j', 'k'), name='domain', coord_dtype=float64), function_range=CoordinateSystem(coord_names=('x', 'y', 'z'), name='range', coord_dtype=float64), affine=array([[ 4., 0., 0., 1.], [ 0., 5., 0., 2.], [ 0., 0., 6., 3.], [ 0., 0., 0., 1.]]) ) >>> im = Image(np.empty((30,40,50)), cmap) >>> im_reordered = im.reordered_axes([2,0,1]) >>> im_reordered.shape (50, 30, 40) >>> im_reordered.coordmap AffineTransform( function_domain=CoordinateSystem(coord_names=('k', 'i', 'j'), name='domain', coord_dtype=float64), function_range=CoordinateSystem(coord_names=('x', 'y', 'z'), name='range', coord_dtype=float64), affine=array([[ 0., 4., 0., 1.], [ 0., 0., 5., 2.], [ 6., 0., 0., 3.], [ 0., 0., 0., 1.]]) ) """ if order is None: order = range(self.ndim)[::-1] elif type(order[0]) == type(''): order = [self.axes.index(s) for s in order] new_cmap = self.coordmap.reordered_domain(order) # Only transpose if we have to so as to avoid calling # self.get_data if order != range(self.ndim): new_data = np.transpose(self.get_data(), order) else: new_data = self._data return self.__class__.from_image(self, data=new_data, coordmap=new_cmap) def renamed_axes(self, **names_dict): """ Return a new image with input (domain) axes renamed Axes renamed according to the input dictionary. Parameters ---------- \*\*names_dict : dict with keys being old names, and values being new names Returns ------- newimg : Image An Image with the same data, having its axes renamed. Examples -------- >>> data = np.random.standard_normal((11,9,4)) >>> im = Image(data, AffineTransform.from_params('ijk', 'xyz', np.identity(4), 'domain', 'range')) >>> im_renamed = im.renamed_axes(i='slice') >>> print im_renamed.axes CoordinateSystem(coord_names=('slice', 'j', 'k'), name='domain', coord_dtype=float64) """ new_cmap = self.coordmap.renamed_domain(names_dict) return self.__class__.from_image(self, coordmap=new_cmap) def renamed_reference(self, **names_dict): """ Return new image with renamed output (range) coordinates Coordinates renamed according to the dictionary Parameters ---------- \*\*names_dict : dict with keys being old names, and values being new names Returns ------- newimg : Image An Image with the same data, having its output coordinates renamed. Examples -------- >>> data = np.random.standard_normal((11,9,4)) >>> im = Image(data, AffineTransform.from_params('ijk', 'xyz', np.identity(4), 'domain', 'range')) >>> im_renamed_reference = im.renamed_reference(x='newx', y='newy') >>> print im_renamed_reference.reference CoordinateSystem(coord_names=('newx', 'newy', 'z'), name='range', coord_dtype=float64) """ new_cmap = self.coordmap.renamed_range(names_dict) return self.__class__.from_image(self, coordmap=new_cmap) def __setitem__(self, index, value): """Setting values of an image, set values in the data array.""" warnings.warn("Please don't use ``img[x] = y``; use " "``img.get_data()[x] = y`` instead", DeprecationWarning, stacklevel=2) self._data[index] = value def __array__(self): """Return data as a numpy array.""" warnings.warn('Please use get_data instead - will be deprecated', DeprecationWarning, stacklevel=2) return self.get_data() def get_data(self): """Return data as a numpy array.""" return np.asanyarray(self._data) def __getitem__(self, slice_object): """ Slicing an image returns an Image. Parameters ---------- slice_object: int, slice or sequence of slice An object representing a numpy 'slice'. 
Returns ------- img_subsampled: Image An Image with data self.get_data()[slice_object] and an appropriately corrected CoordinateMap. Examples -------- >>> from nipy.io.api import load_image >>> from nipy.testing import funcfile >>> im = load_image(funcfile) >>> frame3 = im[:,:,:,3] >>> np.allclose(frame3.get_data(), im.get_data()[:,:,:,3]) True """ data = self.get_data()[slice_object] g = ArrayCoordMap(self.coordmap, self.shape)[slice_object] coordmap = g.coordmap if coordmap.function_domain.ndim > 0: return self.__class__.from_image(self, data=data, coordmap=coordmap) else: return data def __iter__(self): """ Images do not have default iteration This is because it's not obvious that axis 0 is the right axis to iterate over. For example, we often want to iterate over the time or volume axis, and this is more likely to be axis 3 """ raise TypeError("Images do not have default iteration; " "you can use ``iter_axis(img, axis)`` instead.") def __eq__(self, other): return (isinstance(other, self.__class__) and np.all(self.get_data() == other.get_data()) and np.all(self.affine == other.affine) and (self.axes.coord_names == other.axes.coord_names)) def __ne__(self, other): return not self.__eq__(other) def __repr__(self): options = np.get_printoptions() np.set_printoptions(precision=6, threshold=64, edgeitems=2) representation = \ 'Image(\n data=%s,\n coordmap=%s)' % ( '\n '.join(repr(self._data).split('\n')), '\n '.join(repr(self.coordmap).split('\n'))) np.set_printoptions(**options) return representation @classmethod def from_image(klass, img, data=None, coordmap=None, metadata=None): """ Classmethod makes new instance of this `klass` from instance `img` Parameters ---------- data : array-like object that as attribute ``shape`` and returns an array from ``np.asarray(data)`` coordmap : `AffineTransform` object coordmap mapping the domain (input) voxel axes of the image to the range (reference, output) axes - usually mm in real world space metadata : dict, optional Freeform metadata for image. Most common contents is ``header`` from nifti etc loaded images. Returns ------- img : `klass` instance New image with data from `data`, coordmap from `coordmap` maybe metadata from `metadata` Notes ----- Subclasses of ``Image`` with different semantics for ``__init__`` will need to override this classmethod. Examples -------- >>> from nipy import load_image >>> from nipy.core.api import Image >>> from nipy.testing import anatfile >>> aimg = load_image(anatfile) >>> arr = np.arange(24).reshape((2,3,4)) >>> img = Image.from_image(aimg, data=arr) """ if data is None: data = img._data if coordmap is None: coordmap = copy(img.coordmap) if metadata is None: metadata = copy(img.metadata) return klass(data, coordmap, metadata) class SliceMaker(object): """ This class just creates slice objects for image resampling It only has a __getitem__ method that returns its argument. XXX Wouldn't need this if there was a way XXX to do this XXX subsample(img, [::2,::3,10:1:-1]) XXX XXX Could be something like this Subsample(img)[::2,::3,10:1:-1] """ def __getitem__(self, index): return index slice_maker = SliceMaker() def subsample(img, slice_object): """ Subsample an image Please don't use this function, but use direct image slicing instead. That is, replace:: frame3 = subsample(im, slice_maker[:,:,:,3]) with:: frame3 = im[:,:,:,3] Parameters ---------- img : Image slice_object: int, slice or sequence of slice An object representing a numpy 'slice'. 
Returns ------- img_subsampled: Image An Image with data img.get_data()[slice_object] and an appropriately corrected CoordinateMap. Examples -------- >>> from nipy.io.api import load_image >>> from nipy.testing import funcfile >>> from nipy.core.api import subsample, slice_maker >>> im = load_image(funcfile) >>> frame3 = subsample(im, slice_maker[:,:,:,3]) >>> np.allclose(frame3.get_data(), im.get_data()[:,:,:,3]) True """ warnings.warn('subsample is deprecated, please use image ' 'slicing instead (e.g. img[:,:,1]', DeprecationWarning, stacklevel=2) return img.__getitem__(slice_object) def fromarray(data, innames, outnames): """Create an image from array `data`, and input/output coordinate names The mapping between the input and output coordinate names is the identity matrix. Please don't use this routine, but instead prefer:: from nipy.core.api import Image, AffineTransform img = Image(data, AffineTransform(innames, outnames, np.eye(4))) where ``4`` is ``len(innames) + 1``. Parameters ---------- data : numpy array A numpy array of three dimensions. innames : sequence a list of input axis names innames : sequence a list of output axis names Returns ------- image : An `Image` object See Also -------- load : function for loading images save : function for saving images Examples -------- >>> img = fromarray(np.zeros((2,3,4)), 'ijk', 'xyz') >>> img.coordmap AffineTransform( function_domain=CoordinateSystem(coord_names=('i', 'j', 'k'), name='', coord_dtype=float64), function_range=CoordinateSystem(coord_names=('x', 'y', 'z'), name='', coord_dtype=float64), affine=array([[ 1., 0., 0., 0.], [ 0., 1., 0., 0.], [ 0., 0., 1., 0.], [ 0., 0., 0., 1.]]) ) """ warnings.warn('fromarray is deprecated, please use the Image ' 'constructor instead', DeprecationWarning, stacklevel=2) ndim = len(data.shape) coordmap = AffineTransform.from_start_step(innames, outnames, (0.,)*ndim, (1.,)*ndim) return Image(data, coordmap) @np.deprecate_with_doc('Please use rollimg instead') def rollaxis(img, axis, inverse=False): """ Roll `axis` backwards, until it lies in the first position. It also reorders the reference coordinates by the same ordering. This is done to preserve a diagonal affine matrix if image.affine is diagonal. It also makes it possible to unambiguously specify an axis to roll along in terms of either a reference name (i.e. 'z') or an axis name (i.e. 'slice'). This function is deprecated; please use ``rollimg`` instead. Parameters ---------- img : Image Image whose axes and reference coordinates are to be reordered by rolling. axis : str or int Axis to be rolled, can be specified by name or as an integer. inverse : bool, optional If inverse is True, then axis must be an integer and the first axis is returned to the position axis. This keyword is deprecated and we'll remove it in a future version of nipy. Returns ------- newimg : Image Image with reordered axes and reference coordinates. 
Examples -------- >>> data = np.zeros((30,40,50,5)) >>> affine_transform = AffineTransform.from_params('ijkl', 'xyzt', np.diag([1,2,3,4,1])) >>> im = Image(data, affine_transform) >>> im.coordmap AffineTransform( function_domain=CoordinateSystem(coord_names=('i', 'j', 'k', 'l'), name='', coord_dtype=float64), function_range=CoordinateSystem(coord_names=('x', 'y', 'z', 't'), name='', coord_dtype=float64), affine=array([[ 1., 0., 0., 0., 0.], [ 0., 2., 0., 0., 0.], [ 0., 0., 3., 0., 0.], [ 0., 0., 0., 4., 0.], [ 0., 0., 0., 0., 1.]]) ) >>> im_t_first = rollaxis(im, 't') >>> np.diag(im_t_first.affine) array([ 4., 1., 2., 3., 1.]) >>> im_t_first.shape (5, 30, 40, 50) >>> im_t_first.coordmap AffineTransform( function_domain=CoordinateSystem(coord_names=('l', 'i', 'j', 'k'), name='', coord_dtype=float64), function_range=CoordinateSystem(coord_names=('t', 'x', 'y', 'z'), name='', coord_dtype=float64), affine=array([[ 4., 0., 0., 0., 0.], [ 0., 1., 0., 0., 0.], [ 0., 0., 2., 0., 0.], [ 0., 0., 0., 3., 0.], [ 0., 0., 0., 0., 1.]]) ) """ if inverse not in (True, False): raise ValueError('Inverse should be True or False; did you mean to ' 'use the ``rollimg` function instead?') if isinstance(axis, int) and axis < 0: axis = img.ndim + axis if inverse: if type(axis) != type(0): raise ValueError('If carrying out inverse rolling, ' 'axis must be an integer') order = range(1, img.ndim) order.insert(axis, 0) return img.reordered_axes(order).reordered_reference(order) if axis not in (range(img.axes.ndim) + list(img.axes.coord_names) + list(img.reference.coord_names)): raise ValueError('axis must be an axis number,' 'an axis name or a reference name') # Find out which index axis corresonds to in_index = out_index = -1 if type(axis) == type(''): try: in_index = img.axes.index(axis) except: pass try: out_index = img.reference.index(axis) except: pass if in_index > 0 and out_index > 0 and in_index != out_index: raise ValueError('ambiguous choice of axis -- it exists ' 'both in as an axis name and a ' 'reference name') if in_index >= 0: axis = in_index else: axis = out_index if axis == -1: axis += img.axes.ndim order = range(img.ndim) order.remove(axis) order.insert(0, axis) return img.reordered_axes(order).reordered_reference(order) def rollimg(img, axis, start=0, fix0=True): """ Roll `axis` backwards in the inputs, until it lies before `start` Parameters ---------- img : Image Image whose axes and reference coordinates are to be reordered by rollimg. axis : str or int Axis to be rolled, can be specified by name or as an integer. If an integer, axis is an input axis. If a name, can be name of input or output axis. If an output axis, we search for the closest matching input axis, and raise an AxisError if this fails. start : str or int, optional position before which to roll axis `axis`. Default to 0. Can again be an integer (input axis) or name of input or output axis. fix0 : bool, optional Whether to allow for zero scaling when searching for an input axis matching an output axis. Useful for images where time scaling is 0. Returns ------- newimg : Image Image with reordered input axes and corresponding data. 
Examples -------- >>> data = np.zeros((30,40,50,5)) >>> affine_transform = AffineTransform('ijkl', 'xyzt', np.diag([1,2,3,4,1])) >>> im = Image(data, affine_transform) >>> im.coordmap AffineTransform( function_domain=CoordinateSystem(coord_names=('i', 'j', 'k', 'l'), name='', coord_dtype=float64), function_range=CoordinateSystem(coord_names=('x', 'y', 'z', 't'), name='', coord_dtype=float64), affine=array([[ 1., 0., 0., 0., 0.], [ 0., 2., 0., 0., 0.], [ 0., 0., 3., 0., 0.], [ 0., 0., 0., 4., 0.], [ 0., 0., 0., 0., 1.]]) ) >>> im_t_first = rollimg(im, 't') >>> im_t_first.shape (5, 30, 40, 50) >>> im_t_first.coordmap AffineTransform( function_domain=CoordinateSystem(coord_names=('l', 'i', 'j', 'k'), name='', coord_dtype=float64), function_range=CoordinateSystem(coord_names=('x', 'y', 'z', 't'), name='', coord_dtype=float64), affine=array([[ 0., 1., 0., 0., 0.], [ 0., 0., 2., 0., 0.], [ 0., 0., 0., 3., 0.], [ 4., 0., 0., 0., 0.], [ 0., 0., 0., 0., 1.]]) ) """ axis = input_axis_index(img.coordmap, axis, fix0) start = input_axis_index(img.coordmap, start, fix0) order = range(img.ndim) order.remove(axis) if axis < start: start -= 1 order.insert(start, axis) return img.reordered_axes(order) def iter_axis(img, axis, asarray=False): """ Return generator to slice an image `img` over `axis` Parameters ---------- img : ``Image`` instance axis : int or str axis identifier, either name or axis number asarray : {False, True}, optional Returns ------- g : generator such that list(g) returns a list of slices over `axis`. If `asarray` is `False` the slices are images. If `asarray` is True, slices are the data from the images. Examples -------- >>> data = np.arange(24).reshape((4,3,2)) >>> img = Image(data, AffineTransform('ijk', 'xyz', np.eye(4))) >>> slices = list(iter_axis(img, 'j')) >>> len(slices) 3 >>> slices[0].shape (4, 2) >>> slices = list(iter_axis(img, 'k', asarray=True)) >>> slices[1].sum() == data[:,:,1].sum() True """ rimg = rollimg(img, axis) for i in range(rimg.shape[0]): if asarray: yield rimg[i].get_data() else: yield rimg[i] def synchronized_order(img, target_img, axes=True, reference=True): """ Reorder reference and axes of `img` to match target_img. Parameters ---------- img : Image target_img : Image axes : bool, optional If True, synchronize the order of the axes. reference : bool, optional If True, synchronize the order of the reference coordinates. Returns ------- newimg : Image An Image satisfying newimg.axes == target.axes (if axes == True), newimg.reference == target.reference (if reference == True). 
Examples -------- >>> data = np.random.standard_normal((3,4,7,5)) >>> im = Image(data, AffineTransform.from_params('ijkl', 'xyzt', np.diag([1,2,3,4,1]))) >>> im_scrambled = im.reordered_axes('iljk').reordered_reference('txyz') >>> im == im_scrambled False >>> im_unscrambled = synchronized_order(im_scrambled, im) >>> im == im_unscrambled True The images don't have to be the same shape >>> data2 = np.random.standard_normal((3,11,9,4)) >>> im2 = Image(data, AffineTransform.from_params('ijkl', 'xyzt', np.diag([1,2,3,4,1]))) >>> im_scrambled2 = im2.reordered_axes('iljk').reordered_reference('xtyz') >>> im_unscrambled2 = synchronized_order(im_scrambled2, im) >>> im_unscrambled2.coordmap == im.coordmap True or have the same coordmap >>> data3 = np.random.standard_normal((3,11,9,4)) >>> im3 = Image(data3, AffineTransform.from_params('ijkl', 'xyzt', np.diag([1,9,3,-2,1]))) >>> im_scrambled3 = im3.reordered_axes('iljk').reordered_reference('xtyz') >>> im_unscrambled3 = synchronized_order(im_scrambled3, im) >>> im_unscrambled3.axes == im.axes True >>> im_unscrambled3.reference == im.reference True >>> im_unscrambled4 = synchronized_order(im_scrambled3, im, axes=False) >>> im_unscrambled4.axes == im.axes False >>> im_unscrambled4.axes == im_scrambled3.axes True >>> im_unscrambled4.reference == im.reference True """ # Caution, we can't just use target_img.reference because other subclasses # of Image may not have all axes in the .reference attribute. target_axes = target_img.axes # = target_img.coordmap.function_domain # the below not necessarily == target_image.reference target_reference = target_img.coordmap.function_range if axes: img = img.reordered_axes(target_axes.coord_names) if reference: img = img.reordered_reference(target_reference.coord_names) return img def is_image(obj): ''' Returns true if this object obeys the Image API This allows us to test for something that is duck-typing an image. For now an array must have a 'coordmap' attribute, and a callable 'get_data' attribute. Parameters ---------- obj : object object for which to test API Returns ------- is_img : bool True if object obeys image API Examples -------- >>> from nipy.testing import anatfile >>> from nipy.io.api import load_image >>> img = load_image(anatfile) >>> is_image(img) True >>> class C(object): pass >>> c = C() >>> is_image(c) False ''' if not hasattr(obj, 'coordmap') or not hasattr(obj, 'metadata'): return False return callable(getattr(obj, 'get_data')) nipy-0.3.0/nipy/core/image/image_list.py000066400000000000000000000155311210344137400201550ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import warnings import numpy as np from .image import Image, iter_axis, is_image from ..reference.coordinate_map import (drop_io_dim, io_axis_indices, AxisError) class ImageList(object): ''' Class to contain ND image as list of (N-1)D images ''' def __init__(self, images=None): """ An implementation of a list of images. Parameters ---------- images : iterable an iterable object whose items are meant to be images; this is checked by asserting that each has a `coordmap` attribute and a ``get_data`` method. Note that Image objects are not iterable by default; use the ``from_image`` classmethod or ``iter_axis`` function to convert images to image lists - see examples below for the latter. 
Examples -------- >>> from nipy.testing import funcfile >>> from nipy.core.api import Image, ImageList, iter_axis >>> from nipy.io.api import load_image >>> funcim = load_image(funcfile) >>> iterable_img = iter_axis(funcim, 't') >>> ilist = ImageList(iterable_img) >>> sublist = ilist[2:5] Slicing an ImageList returns a new ImageList >>> isinstance(sublist, ImageList) True Indexing an ImageList returns a new Image >>> newimg = ilist[2] >>> isinstance(newimg, Image) True >>> isinstance(newimg, ImageList) False >>> np.asarray(sublist).shape (3, 17, 21, 3) >>> newimg.get_data().shape (17, 21, 3) """ if images is None: self.list = [] return images = list(images) if not all(is_image(im) for im in images): raise ValueError("Expecting each element of images to have " "the Image API") self.list = images @classmethod def from_image(klass, image, axis=None, dropout=True): """ Create an image list from an `image` by slicing over `axis` Parameters ---------- image : object object with ``coordmap`` attribute axis : str or int axis of `image` that should become the axis indexed by the image list. dropout : bool, optional When taking slices from an image, we will leave an output dimension to the coordmap that has no corresponding input dimension. If `dropout` is True, drop this output dimension. Returns ------- ilist : ``ImageList`` instance """ if axis is None: raise ValueError('Must specify image axis') # Get corresponding input, output dimension indices in_ax, out_ax = io_axis_indices(image.coordmap, axis) if in_ax is None: raise AxisError('No correspnding input dimension for %s' % axis) dropout = dropout and not out_ax is None if dropout: out_ax_name = image.reference.coord_names[out_ax] imlist = [] for img in iter_axis(image, in_ax): if dropout: cmap = drop_io_dim(img.coordmap, out_ax_name) img = Image(img.get_data(), cmap, img.metadata) imlist.append(img) return klass(imlist) def __setitem__(self, index, value): """ self.list[index] = value """ self.list[index] = value def __len__(self): """ Length of image list """ return len(self.list) def __getitem__(self, index): """ self.list[index] """ # Integer slices return elements if type(index) is type(1): return self.list[index] # List etc slicing return new instances of self.__class__ return self.__class__(images=self.list[index]) def get_list_data(self, axis=None): """Return data in ndarray with list dimension at position `axis` Parameters ---------- axis : int `axis` specifies which axis of the output will take the role of the list dimension. For example, 0 will put the list dimension in the first axis of the result. Returns ------- data : ndarray data in image list as array, with data across elements of the list concetenated at dimension `axis` of the array. 
Examples -------- >>> from nipy.testing import funcfile >>> from nipy.io.api import load_image >>> funcim = load_image(funcfile) >>> ilist = ImageList.from_image(funcim, axis='t') >>> ilist.get_list_data(axis=0).shape (20, 17, 21, 3) """ if axis is None: raise ValueError('Must specify which axis of the output will take ' 'the role of the list dimension, eg 0 will put ' 'the list dimension in the first axis of the ' 'result') img_shape = self.list[0].shape ilen = len(self.list) out_dim = len(img_shape) + 1 if axis >= out_dim or axis < -out_dim: raise ValueError('I have only %d axes position, but axis %d asked ' 'for' % (out_dim -1, axis)) # tmp_shape is the shape of the output if axis is 0 tmp_shape = (ilen,) + img_shape v = np.empty(tmp_shape) # first put the data in an array, with list dimension in the first axis for i, im in enumerate(self.list): v[i] = im.get_data() # get_data method of an image has no axis # then roll (and rock?) the axis to have axis in the right place if axis < 0: axis += out_dim res = np.rollaxis(v, 0, axis + 1) # Check we got the expected shape target_shape = img_shape[0:axis] + (ilen,) + img_shape[axis:] if target_shape != res.shape: raise ValueError('We were not expecting this shape') return res def __array__(self): """Return data in ndarray. Called through numpy.array. Examples -------- >>> from nipy.testing import funcfile >>> from nipy.io.api import load_image >>> funcim = load_image(funcfile) >>> ilist = ImageList.from_image(funcim, axis='t') >>> np.asarray(ilist).shape (20, 17, 21, 3) """ """Return data as a numpy array.""" warnings.warn('Please use get_list_data() instead - default ' 'conversion to array will be deprecated', DeprecationWarning, stacklevel=2) return self.get_list_data(axis=0) def __iter__(self): self._iter = iter(self.list) return self def next(self): return self._iter.next() nipy-0.3.0/nipy/core/image/image_spaces.py000066400000000000000000000332061210344137400204570ustar00rootroot00000000000000""" Utilities for working with Images and common neuroimaging spaces Images are very general things, and don't know anything about the kinds of spaces they refer to, via their coordinate map. There are a set of common neuroimaging spaces. When we create neuroimaging Images, we want to place them in neuroimaging spaces, and return information about common neuroimaging spaces. We do this by putting information about neuroimaging spaces in functions and variables in the ``nipy.core.reference.spaces`` module, and in this module. This keeps the specific neuroimaging spaces out of our Image object. >>> from nipy.core.api import Image, vox2mni, rollimg, xyz_affine, as_xyz_image Make a standard 4D xyzt image in MNI space. First the data and affine: >>> data = np.arange(24).reshape((1,2,3,4)) >>> affine = np.diag([2,3,4,1]).astype(float) We can add the TR (==2.0) to make the full 5x5 affine we need >>> img = Image(data, vox2mni(affine, 2.0)) >>> img.affine array([[ 2., 0., 0., 0., 0.], [ 0., 3., 0., 0., 0.], [ 0., 0., 4., 0., 0.], [ 0., 0., 0., 2., 0.], [ 0., 0., 0., 0., 1.]]) In this case the neuroimaging 'xyz_affine' is just the 4x4 from the 5x5 in the image >>> xyz_affine(img) array([[ 2., 0., 0., 0.], [ 0., 3., 0., 0.], [ 0., 0., 4., 0.], [ 0., 0., 0., 1.]]) However, if we roll time first in the image array, we can't any longer get an xyz_affine that makes sense in relationship to the voxel data: >>> img_t0 = rollimg(img, 't') >>> xyz_affine(img_t0) #doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... 
AxesError: First 3 input axes must correspond to X, Y, Z But we can fix this: >>> img_t0_affable = as_xyz_image(img_t0) >>> xyz_affine(img_t0_affable) array([[ 2., 0., 0., 0.], [ 0., 3., 0., 0.], [ 0., 0., 4., 0.], [ 0., 0., 0., 1.]]) It also works with nibabel images, which can only have xyz_affines: >>> import nibabel as nib >>> nimg = nib.Nifti1Image(data, affine) >>> xyz_affine(nimg) array([[ 2., 0., 0., 0.], [ 0., 3., 0., 0.], [ 0., 0., 4., 0.], [ 0., 0., 0., 1.]]) """ import sys import numpy as np from ...fixes.nibabel import io_orientation from ..image.image import Image from ..reference import spaces as rsp from ..reference.coordinate_map import AffineTransform def xyz_affine(img, name2xyz=None): """ Return xyz affine from image `img` if possible, or raise error Parameters ---------- img : ``Image`` instance or nibabel image It has a ``coordmap`` or method ``get_affine`` name2xyz : None or mapping Object such that ``name2xyz[ax_name]`` returns 'x', or 'y' or 'z' or raises a KeyError for a str ``ax_name``. None means use module default. Not used for nibabel `img` input. Returns ------- xyz_aff : (4,4) array voxel to X, Y, Z affine mapping Raises ------ SpaceTypeError : if `img` does not have an affine coordinate map AxesError : if not all of x, y, z recognized in `img` ``coordmap`` range AffineError : if axes dropped from the affine contribute to x, y, z coordinates Examples -------- >>> from nipy.core.api import vox2mni, Image >>> arr = np.arange(24).reshape((2,3,4,1)).astype(float) >>> img = Image(arr, vox2mni(np.diag([2,3,4,5,1]))) >>> img.coordmap AffineTransform( function_domain=CoordinateSystem(coord_names=('i', 'j', 'k', 'l'), name='voxels', coord_dtype=float64), function_range=CoordinateSystem(coord_names=('mni-x=L->R', 'mni-y=P->A', 'mni-z=I->S', 't'), name='mni', coord_dtype=float64), affine=array([[ 2., 0., 0., 0., 0.], [ 0., 3., 0., 0., 0.], [ 0., 0., 4., 0., 0.], [ 0., 0., 0., 5., 0.], [ 0., 0., 0., 0., 1.]]) ) >>> xyz_affine(img) array([[ 2., 0., 0., 0.], [ 0., 3., 0., 0.], [ 0., 0., 4., 0.], [ 0., 0., 0., 1.]]) Nibabel images always have xyz affines >>> import nibabel as nib >>> nimg = nib.Nifti1Image(arr, np.diag([2,3,4,1])) >>> xyz_affine(nimg) array([[ 2., 0., 0., 0.], [ 0., 3., 0., 0.], [ 0., 0., 4., 0.], [ 0., 0., 0., 1.]]) """ try: return img.get_affine() except AttributeError: return rsp.xyz_affine(img.coordmap, name2xyz) def is_xyz_affable(img, name2xyz=None): """ Return True if the image `img` has an xyz affine Parameters ---------- img : ``Image`` or nibabel ``SpatialImage`` If ``Image`` test ``img.coordmap``. If a nibabel image, return True name2xyz : None or mapping Object such that ``name2xyz[ax_name]`` returns 'x', or 'y' or 'z' or raises a KeyError for a str ``ax_name``. None means use module default. Not used for nibabel `img` input. 
Returns ------- tf : bool True if `img` has an xyz affine, False otherwise Examples -------- >>> from nipy.core.api import vox2mni, Image, rollimg >>> arr = np.arange(24).reshape((2,3,4,1)) >>> img = Image(arr, vox2mni(np.diag([2,3,4,5,1]))) >>> img.coordmap AffineTransform( function_domain=CoordinateSystem(coord_names=('i', 'j', 'k', 'l'), name='voxels', coord_dtype=float64), function_range=CoordinateSystem(coord_names=('mni-x=L->R', 'mni-y=P->A', 'mni-z=I->S', 't'), name='mni', coord_dtype=float64), affine=array([[ 2., 0., 0., 0., 0.], [ 0., 3., 0., 0., 0.], [ 0., 0., 4., 0., 0.], [ 0., 0., 0., 5., 0.], [ 0., 0., 0., 0., 1.]]) ) >>> is_xyz_affable(img) True >>> time0_img = rollimg(img, 't') >>> time0_img.coordmap AffineTransform( function_domain=CoordinateSystem(coord_names=('l', 'i', 'j', 'k'), name='voxels', coord_dtype=float64), function_range=CoordinateSystem(coord_names=('mni-x=L->R', 'mni-y=P->A', 'mni-z=I->S', 't'), name='mni', coord_dtype=float64), affine=array([[ 0., 2., 0., 0., 0.], [ 0., 0., 3., 0., 0.], [ 0., 0., 0., 4., 0.], [ 5., 0., 0., 0., 0.], [ 0., 0., 0., 0., 1.]]) ) >>> is_xyz_affable(time0_img) False Nibabel images always have xyz affines >>> import nibabel as nib >>> nimg = nib.Nifti1Image(arr, np.diag([2,3,4,1])) >>> is_xyz_affable(nimg) True """ try: xyz_affine(img, name2xyz) except rsp.SpaceError: return False return True def as_xyz_image(img, name2xyz=None): """ Return version of `img` that has a valid xyz affine, or raise error Parameters ---------- img : ``Image`` instance or nibabel image It has a ``coordmap`` attribute (``Image``) or a ``get_affine`` method (nibabel image object) name2xyz : None or mapping Object such that ``name2xyz[ax_name]`` returns 'x', or 'y' or 'z' or raises a KeyError for a str ``ax_name``. None means use module default. Not used for nibabel `img` input. Returns ------- reo_img : ``Image`` instance or nibabel image Returns image of same type as `img` input. If necessary, `reo_img` has its data and coordmap changed to allow it to return an xyz affine. If `img` is already xyz affable we return the input unchanged (``img is reo_img``). Raises ------ SpaceTypeError : if `img` does not have an affine coordinate map AxesError : if not all of x, y, z recognized in `img` ``coordmap`` range AffineError : if axes dropped from the affine contribute to x, y, z coordinates """ try: aff = xyz_affine(img, name2xyz) except (rsp.AxesError, rsp.AffineError): pass else: return img cmap = img.coordmap order = rsp.xyz_order(cmap.function_range, name2xyz) # Reorder reference to canonical order reo_img = img.reordered_reference(order) # Which input axes correspond? ornt = io_orientation(reo_img.coordmap.affine) current_in_order = ornt[:,0] # Set nan to inf to make np.argsort work for old numpy versions current_in_order[np.isnan(current_in_order)] = np.inf # Do we have the first three axes somewhere? 
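# ``current_in_order`` holds, for each input (voxel) axis, the index of the
# world output axis it is most closely aligned with (set to inf above where
# an input axis has no matching output).  The check below therefore asks
# whether the x, y and z outputs each have an input axis driving them, and
# the argsort that follows is the permutation putting those inputs first, in
# x, y, z order.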
if not set((0,1,2)).issubset(current_in_order): raise rsp.AxesError("One of x, y or z outputs missing a " "corresponding input axis") desired_input_order = np.argsort(current_in_order) reo_img = reo_img.reordered_axes(list(desired_input_order)) try: aff = xyz_affine(reo_img, name2xyz) except rsp.SpaceError: # Python 2.5 / 3 compatibility e = sys.exc_info()[1] raise e.__class__("Could not reorder so xyz coordinates did not " "depend on the other axis coordinates: " + str(e)) return reo_img def make_xyz_image(data, xyz_affine, world, metadata=None): """ Create 3D+ image embedded in space named in `world` Parameters ---------- data : object Object returning array from ``np.asarray(obj)``, and having ``shape`` attribute. Should have at least 3 dimensions (``len(shape) >= 3``), and these three first 3 dimensions should be spatial xyz_affine : (4, 4) array-like or tuple if (4, 4) array-like (the usual case), then an affine relating spatial dimensions in data (dimensions 0:3) to mm in XYZ space given in `world`. If a tuple, then contains two values: the (4, 4) array-like, and a sequence of scalings for the dimensions greater than 3. See examples. world : str or XYZSpace or CoordSysMaker or CoordinateSystem World 3D space to which affine refers. See ``spaces.get_world_cs()`` metadata : None or mapping, optional metadata for created image. Defaults to None, giving empty metadata. Returns ------- img : Image image containing `data`, with coordmap constructed from `affine` and `world`, and with default voxel input coordinates. If the data has more than 3 dimensions, and you didn't specify the added zooms with a tuple `xyz_affine` parameter, the coordmap affine gets filled out with extra ones on the diagonal to give an (N+1, N+1) affine, with ``N = len(data.shape)`` Examples -------- >>> data = np.arange(24).reshape((2, 3, 4)) >>> aff = np.diag([4, 5, 6, 1]) >>> img = make_xyz_image(data, aff, 'mni') >>> img Image( data=array([[[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11]], [[12, 13, 14, 15], [16, 17, 18, 19], [20, 21, 22, 23]]]), coordmap=AffineTransform( function_domain=CoordinateSystem(coord_names=('i', 'j', 'k'), name='voxels', coord_dtype=float64), function_range=CoordinateSystem(coord_names=('mni-x=L->R', 'mni-y=P->A', 'mni-z=I->S'), name='mni', coord_dtype=float64), affine=array([[ 4., 0., 0., 0.], [ 0., 5., 0., 0.], [ 0., 0., 6., 0.], [ 0., 0., 0., 1.]]) )) Now make data 4D; we just add 1. 
to the diagonal for the new dimension >>> data4 = data[..., None] >>> img = make_xyz_image(data4, aff, 'mni') >>> img.coordmap AffineTransform( function_domain=CoordinateSystem(coord_names=('i', 'j', 'k', 'l'), name='voxels', coord_dtype=float64), function_range=CoordinateSystem(coord_names=('mni-x=L->R', 'mni-y=P->A', 'mni-z=I->S', 't'), name='mni', coord_dtype=float64), affine=array([[ 4., 0., 0., 0., 0.], [ 0., 5., 0., 0., 0.], [ 0., 0., 6., 0., 0.], [ 0., 0., 0., 1., 0.], [ 0., 0., 0., 0., 1.]]) ) We can pass in a scalar or tuple to specify scaling for the extra dimension >>> img = make_xyz_image(data4, (aff, 2.0), 'mni') >>> img.coordmap.affine array([[ 4., 0., 0., 0., 0.], [ 0., 5., 0., 0., 0.], [ 0., 0., 6., 0., 0.], [ 0., 0., 0., 2., 0.], [ 0., 0., 0., 0., 1.]]) >>> data5 = data4[..., None] >>> img = make_xyz_image(data5, (aff, (2.0, 3.0)), 'mni') >>> img.coordmap.affine array([[ 4., 0., 0., 0., 0., 0.], [ 0., 5., 0., 0., 0., 0.], [ 0., 0., 6., 0., 0., 0.], [ 0., 0., 0., 2., 0., 0.], [ 0., 0., 0., 0., 3., 0.], [ 0., 0., 0., 0., 0., 1.]]) """ N = len(data.shape) if N < 3: raise ValueError('Need data with at least 3 dimensions') if type(xyz_affine) is tuple: xyz_affine, added_zooms = xyz_affine # Could be scalar added zooms try: len(added_zooms) except TypeError: added_zooms = (added_zooms,) if len(added_zooms) != (N - 3): raise ValueError('Wrong number of added zooms') else: added_zooms = (1,) * (N - 3) xyz_affine = np.asarray(xyz_affine) if not xyz_affine.shape == (4, 4): raise ValueError("Expecting 4 x 4 affine") # Make coordinate map world_cm = rsp.get_world_cs(world, N) voxel_cm = rsp.voxel_csm(N) if N > 3: affine = np.diag((1., 1, 1) + added_zooms + (1,)) affine[:3, :3] = xyz_affine[:3, :3] affine[:3, -1] = xyz_affine[:3, 3] else: affine = xyz_affine cmap = AffineTransform(voxel_cm, world_cm, affine) return Image(data, cmap, metadata) nipy-0.3.0/nipy/core/image/roi.py000066400000000000000000000011241210344137400166220ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Site formerly occupied by region of interest (ROI) module We removed this module because it needed some work and was in a place obvious enough to be confusing. The last commit where you can find the full module ready for more work is ``0831bf1`` You might want to have a look at :mod:`nipy.labs.spatial_models`. Please see :mod:``nipy.labs.spatial_models.discrete_domain`, :mod:``nipy.labs.spatial_models.mroi` and the example in ``examples/labs/need_data/demo_roi.py``. 
""" nipy-0.3.0/nipy/core/image/setup.py000066400000000000000000000007131210344137400171740ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('image', parent_package, top_path) config.add_subpackage('tests') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) nipy-0.3.0/nipy/core/image/tests/000077500000000000000000000000001210344137400166235ustar00rootroot00000000000000nipy-0.3.0/nipy/core/image/tests/__init__.py000066400000000000000000000000001210344137400207220ustar00rootroot00000000000000nipy-0.3.0/nipy/core/image/tests/test_image.py000066400000000000000000000422631210344137400213250ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import warnings import numpy as np import nibabel as nib from .. import image from ..image import Image, iter_axis, is_image, rollimg from ...api import parcels, data_generator, write_data from ...reference.coordinate_system import CoordinateSystem from ...reference.coordinate_map import AffineTransform, AxisError from nose.tools import (assert_true, assert_false, assert_equal, assert_not_equal, assert_raises) from numpy.testing import (assert_array_almost_equal, assert_almost_equal, assert_array_equal) def setup(): # Suppress warnings during tests to reduce noise warnings.simplefilter("ignore") def teardown(): # Clear list of warning filters warnings.resetwarnings() _data = np.arange(24).reshape((4,3,2)) gimg = Image(_data, AffineTransform('ijk', 'xyz', np.eye(4))) def test_init(): data = gimg.get_data() new = Image(data, gimg.coordmap) assert_array_almost_equal(gimg.get_data(), new.get_data()) assert_equal(new.coordmap, gimg.coordmap) assert_raises(TypeError, Image) assert_raises(TypeError, Image, data) def test_maxmin_values(): y = gimg.get_data() assert_equal(y.shape, tuple(gimg.shape)) assert_equal(y.max(), 23) assert_equal(y.min(), 0.0) def test_slice_plane(): x = gimg[1] assert_equal(x.shape, gimg.shape[1:]) def test_slice_block(): x = gimg[1:3] assert_equal(x.shape, (2,) + tuple(gimg.shape[1:])) def test_slice_step(): s = slice(0,4,2) x = gimg[s] assert_equal(x.shape, (2,) + tuple(gimg.shape[1:])) x = gimg[0:4:2] assert_equal(x.shape, (2,) + tuple(gimg.shape[1:])) def test_slice_type(): s = slice(0, gimg.shape[0]) x = gimg[s] assert_equal(x.shape, gimg.shape) x = gimg[0:] assert_equal(x.shape, gimg.shape) def test_slice_steps(): dim0, dim1, dim2 = gimg.shape slice_z = slice(0, dim0, 2) slice_y = slice(0, dim1, 2) slice_x = slice(0, dim2, 2) x = gimg[slice_z, slice_y, slice_x] newshape = tuple(np.floor((np.array(gimg.shape) - 1)/2) + 1) assert_equal(x.shape, newshape) x = gimg[0:dim0:2,0:dim1:2,0:dim2:2] assert_equal(x.shape, newshape) def test_get_data(): # get_data always returns an array x = gimg.get_data() assert_true(isinstance(x, np.ndarray)) assert_equal(x.shape, gimg.shape) assert_equal(x.ndim, gimg.ndim) def test_generator(): # User iter_axis to return slices gen = iter_axis(gimg, axis=0) for img_slice in gen: assert_equal(img_slice.shape, (3,2)) def test_iter(): for img_slice in image.iter_axis(gimg, 0): assert_equal(img_slice.shape, (3,2)) tmp = np.zeros(gimg.shape) write_data(tmp, enumerate(iter_axis(gimg, 0, asarray=True))) assert_array_almost_equal(tmp, 
gimg.get_data()) tmp = np.zeros(gimg.shape) g = iter_axis(gimg, 0, asarray=True) write_data(tmp, enumerate(g)) assert_array_almost_equal(tmp, gimg.get_data()) def test_parcels1(): parcelmap = gimg.get_data().astype(np.int32) test = np.zeros(parcelmap.shape) v = 0 for i, d in data_generator(test, parcels(parcelmap)): v += d.shape[0] assert_equal(v, np.product(test.shape)) def test_parcels3(): rho = gimg[0] parcelmap = rho.get_data().astype(np.int32) labels = np.unique(parcelmap) test = np.zeros(rho.shape) v = 0 for i, d in data_generator(test, parcels(parcelmap, labels=labels)): v += d.shape[0] yield assert_equal, v, np.product(test.shape) def test_slicing_returns_image(): data = np.ones((2,3,4)) img = Image(data, AffineTransform('kji', 'zyx', np.eye(4))) assert_true(isinstance(img, Image)) assert_equal(img.ndim, 3) # 2D slice img2D = img[:,:,0] assert_true(isinstance(img2D, Image)) assert_equal(img2D.ndim, 2) # 1D slice img1D = img[:,0,0] assert_true(isinstance(img1D, Image)) assert_equal(img1D.ndim, 1) class ArrayLikeObj(object): """The data attr in Image is an array-like object. Test the array-like interface that we'll expect to support.""" def __init__(self): self._data = np.ones((2,3,4)) @property def shape(self): return self._data.shape def __array__(self): return self._data def test_ArrayLikeObj(): obj = ArrayLikeObj() # create simple coordmap xform = np.eye(4) coordmap = AffineTransform.from_params('xyz', 'ijk', xform) # create image form array-like object and coordmap img = image.Image(obj, coordmap) assert_equal(img.ndim, 3) assert_equal(img.shape, (2,3,4)) assert_array_almost_equal(img.get_data(), 1) # Test that the array stays with the image, so we can assign the array # in-place, at least in this case img.get_data()[:] = 4 assert_array_equal(img.get_data(), 4) def test_defaults_ND(): for arr_shape, in_names, out_names in ( ((2,3), 'kj', 'yz'), ((2,3,4), 'ijk', 'zyx'), ((2,3,4,5), 'hijk', 'zyxt')): data = np.ones(arr_shape) ndim = len(arr_shape) img = Image(data, AffineTransform(in_names, out_names, np.eye(ndim+1))) assert_true(isinstance(img._data, np.ndarray)) assert_equal(img.ndim, len(arr_shape)) assert_equal(img.shape, arr_shape) assert_equal(img.affine.shape, (img.ndim+1, img.ndim+1)) assert_true(img.affine.diagonal().all()) # img.header deprecated, when removed, test will raise Error assert_raises(AttributeError, getattr, img, 'header') def test_header(): # Property header interface deprecated arr = np.arange(24).reshape((2,3,4)) coordmap = AffineTransform.from_params('xyz', 'ijk', np.eye(4)) header = nib.Nifti1Header() img = Image(arr, coordmap, metadata={'header': header}) assert_equal(img.metadata['header'], header) # This interface deprecated assert_equal(img.header, header) hdr2 = nib.Nifti1Header() hdr2['descrip'] = 'from fullness of heart' assert_not_equal(img.header, hdr2) img.header = hdr2 assert_equal(img.header, hdr2) def test_from_image(): # from_image classmethod copies arr = np.arange(24).reshape((2,3,4)) coordmap = AffineTransform.from_params('xyz', 'ijk', np.eye(4)) img = Image(arr, coordmap, metadata={'field': 'value'}) img2 = Image.from_image(img) assert_array_equal(img.get_data(), img2.get_data()) assert_equal(img.coordmap, img2.coordmap) assert_equal(img.metadata, img2.metadata) assert_false(img.metadata is img2.metadata) # optional inputs - data arr2 = arr + 10 new = Image.from_image(img, arr2) assert_array_almost_equal(arr2, new.get_data()) assert_equal(new.coordmap, coordmap) new = Image.from_image(img, data=arr2) assert_array_almost_equal(arr2, 
new.get_data()) assert_equal(new.coordmap, coordmap) # optional inputs - coordmap coordmap2 = AffineTransform.from_params('pqr', 'ijk', np.eye(4)) new = Image.from_image(img, arr2, coordmap2) assert_array_almost_equal(arr2, new.get_data()) assert_equal(new.coordmap, coordmap2) new = Image.from_image(img, coordmap=coordmap2) assert_array_almost_equal(arr, new.get_data()) assert_equal(new.coordmap, coordmap2) # Optional inputs - metadata assert_equal(new.metadata, img.metadata) another_meta = {'interesting': 'information'} new = Image.from_image(img, arr2, coordmap2, another_meta) assert_array_almost_equal(arr2, new.get_data()) assert_equal(another_meta, new.metadata) def test_synchronized_order(): data = np.random.standard_normal((3,4,7,5)) im = Image(data, AffineTransform.from_params('ijkl', 'xyzt', np.diag([1,2,3,4,1]))) im_scrambled = im.reordered_axes('iljk').reordered_reference('xtyz') im_unscrambled = image.synchronized_order(im_scrambled, im) assert_equal(im_unscrambled.coordmap, im.coordmap) assert_almost_equal(im_unscrambled.get_data(), im.get_data()) assert_equal(im_unscrambled, im) assert_true(im_unscrambled == im) assert_false(im_unscrambled != im) # the images don't have to be the same shape data2 = np.random.standard_normal((3,11,9,4)) im2 = Image(data2, AffineTransform.from_params('ijkl', 'xyzt', np.diag([1,2,3,4,1]))) im_scrambled2 = im2.reordered_axes('iljk').reordered_reference('xtyz') im_unscrambled2 = image.synchronized_order(im_scrambled2, im) assert_equal(im_unscrambled2.coordmap, im.coordmap) # or the same coordmap data3 = np.random.standard_normal((3,11,9,4)) im3 = Image(data3, AffineTransform.from_params('ijkl', 'xyzt', np.diag([1,9,3,-2,1]))) im_scrambled3 = im3.reordered_axes('iljk').reordered_reference('xtyz') im_unscrambled3 = image.synchronized_order(im_scrambled3, im) assert_equal(im_unscrambled3.axes, im.axes) assert_equal(im_unscrambled3.reference, im.reference) def test_iter_axis(): # axis iteration helper function. 
This function also tests rollimg, # because iter_axis uses rollimg iter_axis = image.iter_axis data = np.arange(24).reshape((4,3,2)) img = Image(data, AffineTransform('ijk', 'xyz', np.eye(4))) for ax_id, ax_no in (('i',0), ('j',1), ('k',2), ('x',0), ('y',1), ('z',2), (0,0), (1,1), (2,2), (-1,2)): slices = list(iter_axis(img, ax_id)) expected_shape = list(data.shape) g_len = expected_shape.pop(ax_no) assert_equal(len(slices), g_len) for s in slices: assert_equal(list(s.shape), expected_shape) # test asarray slicer = [slice(None) for i in range(data.ndim)] for i, s in enumerate(iter_axis(img, ax_id, asarray=True)): slicer[ax_no] = i assert_array_equal(s, data[slicer]) def test_rollaxis(): data = np.random.standard_normal((3,4,7,5)) im = Image(data, AffineTransform.from_params('ijkl', 'xyzt', np.diag([1,2,3,4,1]))) # for the inverse we must specify an integer assert_raises(ValueError, image.rollaxis, im, 'i', True) # Check that rollaxis preserves diagonal affines, as claimed assert_almost_equal(image.rollaxis(im, 1).affine, np.diag([2,1,3,4,1])) assert_almost_equal(image.rollaxis(im, 2).affine, np.diag([3,1,2,4,1])) assert_almost_equal(image.rollaxis(im, 3).affine, np.diag([4,1,2,3,1])) # Check that ambiguous axes raise an exception # 'l' appears both as an axis and a reference coord name # and in different places im_amb = Image(data, AffineTransform.from_params('ijkl', 'xylt', np.diag([1,2,3,4,1]))) assert_raises(ValueError, image.rollaxis, im_amb, 'l') # But if it's unambiguous, then # 'l' can appear both as an axis and a reference coord name im_unamb = Image(data, AffineTransform.from_params('ijkl', 'xyzl', np.diag([1,2,3,4,1]))) im_rolled = image.rollaxis(im_unamb, 'l') assert_almost_equal(im_rolled.get_data(), im_unamb.get_data().transpose([3,0,1,2])) for i, o, n in zip('ijkl', 'xyzt', range(4)): im_i = image.rollaxis(im, i) im_o = image.rollaxis(im, o) im_n = image.rollaxis(im, n) assert_almost_equal(im_i.get_data(), im_o.get_data()) assert_almost_equal(im_i.affine, im_o.affine) assert_almost_equal(im_n.get_data(), im_o.get_data()) for _im in [im_n, im_o, im_i]: im_n_inv = image.rollaxis(_im, n, inverse=True) assert_almost_equal(im_n_inv.affine, im.affine) assert_almost_equal(im_n_inv.get_data(), im.get_data()) def test_is_image(): # Test is_image check arr = np.arange(24).reshape((2,3,4)) cmap = AffineTransform(CoordinateSystem('ijk'), CoordinateSystem('xyz'), np.eye(4)) img = Image(arr, cmap) assert_true(is_image(img)) assert_false(is_image(object())) class C(object): def get_data(self): pass c = C() assert_false(is_image(c)) c.coordmap = None assert_false(is_image(c)) c.metadata = None assert_true(is_image(c)) def test_rollimg(): AT = AffineTransform data = np.random.standard_normal((3,4,7,5)) aff = np.diag([1,2,3,4,1]) im = Image(data, AT('ijkl', 'xyzt', aff)) # No attempt to preserve the diagonal im1 = rollimg(im, 1) assert_equal(im1.coordmap, rollimg(im, 'j').coordmap) assert_equal(im1.coordmap, rollimg(im, 'y').coordmap) assert_equal(im1.coordmap, rollimg(im, -3).coordmap) assert_equal(im1.coordmap, AT('jikl', 'xyzt', aff[:, (1, 0, 2, 3, 4)])) assert_array_equal(im1.get_data(), np.rollaxis(data, 1)) im2 = rollimg(im, 2) assert_equal(im2.coordmap, rollimg(im, 'k').coordmap) assert_equal(im2.coordmap, rollimg(im, 'z').coordmap) assert_equal(im2.coordmap, rollimg(im, -2).coordmap) assert_equal(im2.coordmap, AT('kijl', 'xyzt', aff[:, (2, 0, 1, 3, 4)])) assert_array_equal(im2.get_data(), np.rollaxis(data, 2)) im3 = rollimg(im, 3) assert_equal(im3.coordmap, rollimg(im, 'l').coordmap) 
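    # Descriptive note: 'l' (an input name), 't' (an output name) and -1 (a
    # negative index) all refer to the same axis here, so each gives the same
    # coordmap as rollimg(im, 3)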
assert_equal(im3.coordmap, rollimg(im, 't').coordmap) assert_equal(im3.coordmap, rollimg(im, -1).coordmap) assert_equal(im3.coordmap, AT('lijk', 'xyzt', aff[:, (3, 0, 1, 2, 4)])) assert_array_equal(im3.get_data(), np.rollaxis(data, 3)) # We can roll to before a specified axis im31 = rollimg(im, 3, 1) assert_equal(im31.coordmap, rollimg(im, 'l', 'j').coordmap) assert_equal(im31.coordmap, rollimg(im, 't', 'y').coordmap) assert_equal(im31.coordmap, rollimg(im, 't', 'j').coordmap) assert_equal(im31.coordmap, rollimg(im, 'l', 'y').coordmap) assert_equal(im31.coordmap, rollimg(im, -1, 'y').coordmap) assert_equal(im31.coordmap, rollimg(im, -1, -3).coordmap) assert_equal(im31.coordmap, AT('iljk', 'xyzt', aff[:, (0, 3, 1, 2, 4)])) assert_array_equal(im31.get_data(), np.rollaxis(data, 3, 1)) # Check that ambiguous axes raise an exception; 'l' appears both as an axis # and a reference coord name and in different places im_amb = Image(data, AT('ijkl', 'xylt', np.diag([1,2,3,4,1]))) assert_raises(AxisError, rollimg, im_amb, 'l') # But if it's unambiguous, then 'l' can appear both as an axis and a # reference coord name im_unamb = Image(data, AT('ijkl', 'xyzl', np.diag([1,2,3,4,1]))) im_rolled = rollimg(im_unamb, 'l') assert_array_equal(im_rolled.get_data(), im_unamb.get_data().transpose([3,0,1,2])) # Zero row / col means we can't find an axis mapping, when fix0 is false aff_z = np.diag([1, 2, 3, 0, 1]) im_z = Image(data, AT('ijkl', 'xyzt', aff_z)) assert_raises(AxisError, rollimg, im_z, 't', fix0=False) # But we can work it out if we turn on our zero detector assert_equal(rollimg(im_z, 't', fix0=True).coordmap, AT('lijk', 'xyzt', aff_z[:, (3, 0, 1, 2, 4)])) # That's the default assert_equal(rollimg(im_z, 't').coordmap, AT('lijk', 'xyzt', aff_z[:, (3, 0, 1, 2, 4)])) # Non square is OK aff_r = np.array([[1, 0, 0, 10], [0, 2, 0, 11], [0, 0, 2, 12], [0, 0, 0, 13], [0, 0, 0, 1]]) im_r = Image(data[:,:,:,0], AT('ijk', 'xyzt', aff_r)) assert_equal(rollimg(im_r, 'k').coordmap, AT('kij', 'xyzt', aff_r[:, (2, 0, 1, 3)])) # Unless you're tring to get at the dropped input dimension of course assert_raises(AxisError, rollimg, im_r, 't') # Another check for integers, input names, output names, reversing for i, o, n in zip('ijkl', 'xyzt', range(4)): im_i = rollimg(im, i) im_o = rollimg(im, o) im_n = rollimg(im, n) assert_array_equal(im_i.get_data(), im_o.get_data()) assert_array_equal(im_i.affine, im_o.affine) assert_array_equal(im_n.get_data(), im_o.get_data()) for _im in [im_n, im_o, im_i]: # We're rollimg back. 
We want to roll the new axis 0 back to where # it started, which was position n im_n_inv = rollimg(_im, 0, n + 1) assert_array_equal(im_n_inv.affine, im.affine) assert_array_equal(im_n_inv.get_data(), im.get_data()) def test_rollimg_rollaxis(): # Check that rollimg and rollaxis do the same AT = AffineTransform data = np.random.standard_normal((3,4,7,5)) aff = np.diag([1,2,3,4,1]) img = Image(data, AT('ijkl', 'xyzt', aff)) for axis in range(4) + range(-3, -1): rdata = np.rollaxis(data, axis) rimg = rollimg(img, axis) assert_array_equal(rdata, rimg.get_data()) for start in range(4) + range(-3, -1): rdata = np.rollaxis(data, axis, start) rimg = rollimg(img, axis, start) assert_array_equal(rdata, rimg.get_data()) def test_rollaxis_inverse(): # Test deprecated image rollaxis with inverse AT = AffineTransform data = np.random.standard_normal((3,4,7,5)) aff = np.diag([1,2,3,4,1]) img = Image(data, AT('ijkl', 'xyzt', aff)) for axis in range(4) + range(-3, -1): rimg = image.rollaxis(img, axis) rdata = np.rollaxis(data, axis) assert_array_equal(rdata, rimg.get_data()) rrimg = image.rollaxis(rimg, axis, inverse=True) assert_array_equal(data, rrimg.get_data()) nipy-0.3.0/nipy/core/image/tests/test_image_list.py000066400000000000000000000111071210344137400223510ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import numpy as np from ..image_list import ImageList, iter_axis from ..image import Image from ....io.api import load_image from ....core.reference.coordinate_map import (AxisError, CoordinateSystem, AffineTransform) from ....testing import (funcfile, assert_true, assert_equal, assert_raises, assert_almost_equal) FIMG = load_image(funcfile) def test_il_init(): images = list(iter_axis(FIMG, 't')) imglst = ImageList(images) assert_equal(len(imglst), 20) element = imglst[1] assert_equal(element.shape, (17, 21, 3)) assert_equal(element.coordmap, FIMG[...,1].coordmap) # Test bad construction bad_images = images + [np.zeros((17, 21, 3))] assert_raises(ValueError, ImageList, bad_images) a = np.arange(10) assert_raises(ValueError, ImageList, a) # Test empty ImageList emplst = ImageList() assert_equal(len(emplst.list), 0) def test_il_from_image(): exp_shape = (17, 21, 3, 20) assert_equal(FIMG.shape, exp_shape) # from_image construction imglst = ImageList.from_image(FIMG, axis=-1) # Test axis must be specified assert_raises(ValueError, ImageList.from_image, FIMG) assert_raises(ValueError, ImageList.from_image, FIMG, None) # check all the axes for i in range(4): order = range(4) order.remove(i) order.insert(0,i) img_re_i = FIMG.reordered_reference(order).reordered_axes(order) imglst_i = ImageList.from_image(FIMG, axis=i) assert_equal(imglst_i.list[0].shape, img_re_i.shape[1:]) # check the affine as well assert_almost_equal(imglst_i.list[0].affine, img_re_i.affine[1:,1:]) # length of image list should match number of frames assert_equal(len(imglst), FIMG.shape[3]) # check the affine A = np.identity(4) A[:3,:3] = FIMG.affine[:3,:3] A[:3,-1] = FIMG.affine[:3,-1] assert_almost_equal(imglst.list[0].affine, A) # Check other ways of naming axis assert_equal(len(ImageList.from_image(FIMG, axis='t')), 20) # Input and output axis names work new_cmap = AffineTransform(CoordinateSystem('ijkl'), FIMG.coordmap.function_range, FIMG.coordmap.affine) fimg2 = Image(FIMG.get_data(), new_cmap) assert_equal(len(ImageList.from_image(fimg2, axis='t')), 20) assert_equal(len(ImageList.from_image(fimg2, axis='l')), 20) 
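    # Descriptive note: an axis specification must name an input ('l') or an
    # output ('t') axis; a name found in neither coordinate system, like 'q'
    # below, raises AxisError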
assert_raises(AxisError, ImageList.from_image, FIMG, 'q') # Check non-dropping case ndlist = ImageList.from_image(FIMG, axis='t', dropout=False) element = ndlist[1] assert_equal(element.coordmap, FIMG[...,1].coordmap) def test_il_slicing_dicing(): imglst = ImageList.from_image(FIMG, -1) # Slicing an ImageList should return an ImageList sublist = imglst[2:5] assert_true(isinstance(sublist, ImageList)) # Except when we're indexing one element assert_true(isinstance(imglst[0], Image)) # Verify array interface # test __array__ assert_true(isinstance(sublist.get_list_data(axis=0), np.ndarray)) # Test __setitem__ sublist[2] = sublist[0] assert_equal(sublist[0].get_data().mean(), sublist[2].get_data().mean()) # Test iterator for x in sublist: assert_true(isinstance(x, Image)) assert_equal(x.shape, FIMG.shape[:3]) # Test image_list.get_list_data(axis = an_axis) funcim = load_image(funcfile) ilist = ImageList.from_image(funcim, axis='t') # make sure that we pass an axis assert_raises(ValueError, ImageList.get_list_data, ilist, None) assert_raises(ValueError, ImageList.get_list_data, ilist) # make sure that axis that don't exist makes the function fail assert_raises(ValueError, ImageList.get_list_data, ilist, 4) assert_raises(ValueError, ImageList.get_list_data, ilist, -5) # make sure that axis is put in the right place in the result array # image of ilist have dimension (17,21,3), lenght(ilist) = 20. data = ilist.get_list_data(axis=0) assert_equal(data.shape, (20, 17, 21, 3)) data = ilist.get_list_data(axis=1) assert_equal(data.shape, (17, 20, 21, 3)) data = ilist.get_list_data(axis=2) assert_equal(data.shape, (17, 21, 20, 3)) data = ilist.get_list_data(axis=3) assert_equal(data.shape, (17, 21, 3, 20)) data = ilist.get_list_data(axis=-1) assert_equal(data.shape, (17, 21, 3, 20)) data = ilist.get_list_data(axis=-2) assert_equal(data.shape, (17, 21, 20, 3)) nipy-0.3.0/nipy/core/image/tests/test_image_spaces.py000066400000000000000000000154021210344137400226560ustar00rootroot00000000000000""" Testing Image spaces """ import numpy as np import nibabel as nib from nibabel.affines import from_matvec from ..image import Image, rollimg from ..image_spaces import (is_xyz_affable, as_xyz_image, xyz_affine, make_xyz_image) from ...reference.coordinate_system import CoordinateSystem as CS from ...reference.coordinate_map import AffineTransform from ...reference.spaces import (vox2mni, vox2talairach, voxel_csm, mni_space, talairach_space, AffineError, AxesError, XYZSpace, SpaceError) from numpy.testing import (assert_array_almost_equal, assert_array_equal) from nose.tools import assert_true, assert_false, assert_equal, assert_raises def test_image_xyz_affine(): # Test getting the image xyz affines arr = np.arange(24).reshape((2,3,4)) aff = np.diag([2,3,4,1]) img = Image(arr, vox2mni(aff)) assert_true(is_xyz_affable(img)) assert_array_equal(xyz_affine(img), aff) arr4 = np.arange(24).reshape((1,2,3,4)) img4 = Image(arr4, vox2mni(np.diag([2,3,4,5,1]))) assert_true(is_xyz_affable(img4)) img4_r = img4.reordered_axes([3,2,0,1]) assert_false(is_xyz_affable(img4_r)) assert_raises(AxesError, xyz_affine, img4_r) nimg = nib.Nifti1Image(arr, aff) assert_true(is_xyz_affable(nimg)) assert_array_equal(xyz_affine(nimg), aff) # Any dimensions not spatial, AxesError d_cs = CS('ijk', 'voxels') r_cs = CS(('mni-x=L->R', 'mni-y=P->A', 'mni-q'), 'mni') cmap = AffineTransform(d_cs,r_cs, aff) img = Image(arr, cmap) assert_raises(AxesError, xyz_affine, img) # Can pass in own validator my_valtor = dict(blind='x', leading='y', ditch='z') 
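    # Descriptive note: the custom validator maps the non-standard output
    # names used below onto 'x', 'y' and 'z', so xyz_affine can still identify
    # the spatial axes where the default space checks would otherwise raise
    # AxesError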
r_cs = CS(('blind', 'leading', 'ditch'), 'fall') cmap = AffineTransform(d_cs, r_cs, aff) img = Image(arr, cmap) assert_raises(AxesError, xyz_affine, img) assert_array_equal(xyz_affine(img, my_valtor), aff) def test_image_as_xyz_image(): # Test getting xyz affable version of the image arr = np.arange(24).reshape((1,2,3,4)) aff = np.diag([2,3,4,5,1]) img = Image(arr, vox2mni(aff)) img_r = as_xyz_image(img) assert_true(img is img_r) # Reorder, reverse reordering, test != and == for order in ((3, 0, 1, 2), (0, 3, 1, 2)): img_ro_out = img.reordered_reference(order) img_ro_in = img.reordered_axes(order) img_ro_both = img_ro_out.reordered_axes(order) for tmap in (img_ro_out, img_ro_in, img_ro_both): assert_false(is_xyz_affable(tmap)) img_r = as_xyz_image(tmap) assert_false(tmap is img_r) assert_equal(img, img_r) assert_array_equal(img.get_data(), img_r.get_data()) img_t0 = rollimg(img, 't') assert_false(is_xyz_affable(img_t0)) img_t0_r = as_xyz_image(img_t0) assert_false(img_t0 is img_t0_r) assert_array_equal(img.get_data(), img_t0_r.get_data()) assert_equal(img.coordmap, img_t0_r.coordmap) # Test against nibabel image nimg = nib.Nifti1Image(arr, np.diag([2,3,4,1])) nimg_r = as_xyz_image(nimg) assert_true(nimg is nimg_r) # It's sometimes impossible to make an xyz affable image # If the xyz coordinates depend on the time coordinate aff = np.array([[2, 0, 0, 2, 20], [0, 3, 0, 0, 21], [0, 0, 4, 0, 22], [0, 0, 0, 5, 23], [0, 0, 0, 0, 1]]) img = Image(arr, vox2mni(aff)) assert_raises(AffineError, as_xyz_image, img) # If any dimensions not spatial, AxesError arr = np.arange(24).reshape((2,3,4)) aff = np.diag([2,3,4,1]) d_cs = CS('ijk', 'voxels') r_cs = CS(('mni-x=L->R', 'mni-y=P->A', 'mni-q'), 'mni') cmap = AffineTransform(d_cs, r_cs, aff) img = Image(arr, cmap) assert_raises(AxesError, as_xyz_image, img) # Can pass in own validator my_valtor = dict(blind='x', leading='y', ditch='z') r_cs = CS(('blind', 'leading', 'ditch'), 'fall') cmap = AffineTransform(d_cs, r_cs, aff) img = Image(arr, cmap) assert_raises(AxesError, as_xyz_image, img) assert_true(as_xyz_image(img, my_valtor) is img) def test_image_xyza_slices(): # Jonathan found some nastiness where xyz present in output but there was # not corresponding axis for x in the input arr = np.arange(24).reshape((1,2,3,4)) aff = np.diag([2,3,4,5,1]) img = Image(arr, vox2mni(aff)) img0 = img[0] # slice in X # The result does not have an input axis corresponding to x, and should # raise an error assert_raises(AxesError, as_xyz_image, img0) img0r = img0.reordered_reference([1,0,2,3]).reordered_axes([2,0,1]) assert_raises(AxesError, as_xyz_image, img0r) def test_make_xyz_image(): # Standard neuro image creator arr = np.arange(24).reshape((1,2,3,4)) aff = np.diag([2,3,4,1]) img = make_xyz_image(arr, aff, 'mni') assert_equal(img.coordmap, vox2mni(aff, 1.0)) assert_array_equal(img.get_data(), arr) img = make_xyz_image(arr, aff, 'talairach') assert_equal(img.coordmap, vox2talairach(aff, 1.0)) assert_array_equal(img.get_data(), arr) img = make_xyz_image(arr, aff, talairach_space) assert_equal(img.coordmap, vox2talairach(aff, 1.0)) # Unknown space as string raises SpaceError assert_raises(SpaceError, make_xyz_image, arr, aff, 'unlikely space name') funky_space = XYZSpace('hija') img = make_xyz_image(arr, aff, funky_space) csm = funky_space.to_coordsys_maker('t') in_cs = CS('ijkl', 'voxels') exp_cmap = AffineTransform(in_cs, csm(4), np.diag([2, 3, 4, 1, 1])) assert_equal(img.coordmap, exp_cmap) # Affine must be 4, 4 bad_aff = np.diag([2,3,4,2,1]) 
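    # Descriptive note: only a (4, 4) spatial affine is accepted; zooms for
    # non-spatial dimensions go in as the second element of an
    # (affine, zooms) tuple, as the cases below check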
assert_raises(ValueError, make_xyz_image, arr, bad_aff, 'mni') # Can pass added zooms img = make_xyz_image(arr, (aff, (2.,)), 'mni') assert_equal(img.coordmap, vox2mni(aff, 2.0)) # Also as scalar img = make_xyz_image(arr, (aff, 2.), 'mni') assert_equal(img.coordmap, vox2mni(aff, 2.0)) # Must match length of needed zooms arr5 = arr[...,None] assert_raises(ValueError, make_xyz_image, arr5, (aff, 2.), 'mni') img = make_xyz_image(arr5, (aff, (2., 3.)), 'mni') assert_equal(img.coordmap, vox2mni(aff, (2.0, 3.0))) # Always xyz affable after creation assert_array_equal(xyz_affine(img), aff) assert_true(is_xyz_affable(img)) # Need at least 3 dimensions in data assert_raises(ValueError, make_xyz_image, np.zeros((2,3)), aff, 'mni') # Check affines don't round / floor floating point aff = np.diag([2.1, 3, 4, 1]) img = make_xyz_image(np.zeros((2, 3, 4)), aff, 'scanner') assert_array_equal(img.coordmap.affine, aff) img = make_xyz_image(np.zeros((2, 3, 4, 5)), aff, 'scanner') assert_array_equal(img.coordmap.affine, np.diag([2.1, 3, 4, 1, 1])) nipy-0.3.0/nipy/core/image/tests/test_rollimg.py000066400000000000000000000204201210344137400216770ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This test basically just plays around with image.rollimg. It has three examples * image_reduce: this takes an Image having, say, an axis 't' and returns another Image having reduced over 't' * need_specific_axis_reduce: this takes an Image and a specific axis name, like 't' and produces an Image reduced over 't'. raises an exception if Image has no axis 't' * image_call: this takes an Image having, say, an axis 't' and does something along this axis -- like fits a regression model? and outputs a new Image with the 't' axis replaced by 'new' * image_modify_copy: this takes an Image and an axis specification, such as 'x+LR', 'l', or 2, modifies a copy of the data by iterating over this axis, and returns an Image with the same axes Notes ----- In these loaded Images, 't' is both an axis name and a world coordinate name so it is not ambiguous to say 't' axis. It is slightly ambiguous to say 'x+LR' axis if the axisnames are ['slice', 'frequency', 'phase'] but image.rollimg identifies 'x+LR' == 'slice' == 0. """ import numpy as np from ..image import (Image, rollimg, synchronized_order) from ...reference.coordinate_map import (AffineTransform as AT, drop_io_dim, AxisError) from ...reference.coordinate_system import CoordinateSystem as CS from ...reference.spaces import mni_csm from ...image.image_spaces import xyz_affine from nose.tools import (assert_raises, assert_equal) from numpy.testing import assert_almost_equal, assert_array_equal MNI3 = mni_csm(3) MNI4 = mni_csm(4) def image_reduce(img, reduce_op, axis='t'): """ Take an Image, perform some reduce operation on it, over a specified axis, and return a new Image. For the sake of testing things out, we will assume that the operation reduces over the first axis only. 
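    For example, an illustrative call such as
    ``image_reduce(img, lambda x: x.sum(0), 't')`` returns an image summed
    over the 't' axis (see ``test_reduce`` below for a concrete use).
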
Parameters ---------- image : Image reduce_op : callable An operation that reduces over the first axis, such as lambda x: x.sum(0) axis : str or int Specification of axis of Image Returns ------- newim : Image, missing axis """ img = rollimg(img, axis) axis_name = img.axes.coord_names[0] output_axes = list(img.axes.coord_names) output_axes.remove(axis_name) newdata = reduce_op(img.get_data()) return Image(newdata, drop_io_dim(img.coordmap, axis)) def need_specific_axis_reduce(img, reduce_op): """ Take an Image, perform some reduce operation on it, over the axis named 'specific', and return a new Image. For the sake of testing things out, we will assume that the operation reduces over the first axis only. Parameters ---------- img : Image reduce_op : callable An operation that reduces over the first axis, such as lambda x: x.sum(0) Returns ------- newim : Image, missing axis """ return image_reduce(img, reduce_op, 'specific') def image_call(img, function, inaxis='t', outaxis='new'): """ Take an Image, perform some operation on it, over a specified axis, and return a new Image. For the sake of testing things out, we will assume that the operation can only operate on the first axis of the array. Parameters ---------- img : Image function : callable An operation that does something over the first axis, such as lambda x: x[::2] inaxis : str or int Specification of axis of Image outaxis : str Name of new axis in new Image Returns ------- newim : Image with axis `inaxis` replaced with `outaxis` """ rolled_img = rollimg(img, inaxis) inaxis = rolled_img.axes.coord_names[0] # now it's a string newdata = function(rolled_img.get_data()) new_coordmap = rolled_img.coordmap.renamed_domain({inaxis: outaxis}) new_image = Image(newdata, new_coordmap) # we have to roll the axis back axis_index = img.axes.index(inaxis) + 1 return rollimg(new_image, 0, axis_index) def image_modify(img, modify, axis='y+PA'): """ Take an Image, perform some operation on it, over a specified axis, and return a new Image. For this operation, we are allowed to iterate over spatial axes. For the sake of testing things out, we will assume that the operation modify can only operate by iterating over the first axis of an array. Parameters ---------- img : Image modify : callable An operation that modifies an array. Something like:: def f(x): x[:] = x.mean() axis : str or int Specification of axis of Image Returns ------- newim : Image with a modified copy of img._data. 
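    For example, with ``def meanmodify(d): d[:] = d.mean()`` as used in
    ``test_modify`` below, ``image_modify(img, meanmodify, 'y+PA')`` replaces
    each slice along the 'y' axis of a copy of the data with its mean, and
    returns an image in the same axis order as `img`.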
""" rolled_img = rollimg(img, axis) data = rolled_img.get_data().copy() for d in data: modify(d) import copy new_image = Image(data, copy.copy(rolled_img.coordmap)) # Now, we have to put the data back to same order as img return synchronized_order(new_image, img) def test_reduce(): shape = (3, 5, 7, 9) x = np.random.standard_normal(shape) im = Image(x, AT(CS('ijkq'), MNI4, np.diag([3, 4, 5, 6, 1]))) newim = image_reduce(im, lambda x: x.sum(0), 'q') assert_array_equal(xyz_affine(im), xyz_affine(newim)) assert_equal(newim.axes.coord_names, tuple('ijk')) assert_equal(newim.shape, (3, 5, 7)) assert_almost_equal(newim.get_data(), x.sum(3)) im_nd = Image(x, AT(CS('ijkq'), MNI4, np.array( [[0, 1, 2, 0, 10], [3, 4, 5, 0, 11], [6, 7, 8, 0, 12], [0, 0, 0, 9, 13], [0, 0, 0, 0, 1]]))) for i, o, n in zip('ijk', MNI3.coord_names, range(3)): for axis_id in (i, o, n): # Non-diagonal reduce raise an error assert_raises(AxisError, image_reduce, im_nd, lambda x: x.sum(0), axis_id) # Diagonal reduces are OK newim = image_reduce(im, lambda x: x.sum(0), axis_id) def test_specific_reduce(): shape = (3, 5, 7, 9) x = np.random.standard_normal(shape) im = Image(x, AT(CS('ijkq'), MNI4, np.diag([3, 4, 5, 6, 1]))) # we have to rename the axis before we can call the function # need_specific_axis_reduce on it assert_raises(AxisError, need_specific_axis_reduce, im, lambda x: x.sum(0)) im = im.renamed_axes(q='specific') newim = need_specific_axis_reduce(im, lambda x: x.sum(0)) assert_array_equal(xyz_affine(im), xyz_affine(newim)) assert_equal(newim.axes.coord_names, tuple('ijk')) assert_equal(newim.shape, (3, 5, 7)) assert_almost_equal(newim.get_data(), x.sum(3)) def test_call(): shape = (3, 5, 7, 12) x = np.random.standard_normal(shape) affine = np.eye(5) affine[:3, :3] = np.random.standard_normal((3, 3)) affine[:4, 4] = np.random.standard_normal((4,)) im = Image(x, AT(CS('ijkq'), MNI4, affine)) newim = image_call(im, lambda x: x[::2], 'q', 'out') assert_array_equal(xyz_affine(im), xyz_affine(newim)) assert_equal(newim.axes.coord_names, tuple('ijk') + ('out',)) assert_equal(newim.shape, (3, 5, 7, 6)) assert_almost_equal(newim.get_data(), x[:,:,:,::2]) def test_modify(): shape = (3, 5, 7, 12) x = np.random.standard_normal(shape) affine = np.eye(5) affine[:3, :3] = np.random.standard_normal((3, 3)) affine[:4, 4] = np.random.standard_normal((4,)) im = Image(x, AT(CS('ijkq'), MNI4, affine)) def nullmodify(d): pass def meanmodify(d): d[:] = d.mean() for i, o, n in zip('ijkq', MNI3.coord_names + ('q',), range(4)): for a in i, o, n: nullim = image_modify(im, nullmodify, a) meanim = image_modify(im, meanmodify, a) assert_array_equal(nullim.get_data(), im.get_data()) assert_array_equal(xyz_affine(im), xyz_affine(nullim)) assert_equal(nullim.axes, im.axes) # yield assert_equal, nullim, im assert_array_equal(xyz_affine(im), xyz_affine(meanim)) assert_equal(meanim.axes, im.axes) # Make sure that meanmodify works as expected d = im.get_data() d = np.rollaxis(d, n) meand = meanim.get_data() meand = np.rollaxis(meand, n) for i in range(d.shape[0]): assert_almost_equal(meand[i], d[i].mean()) nipy-0.3.0/nipy/core/reference/000077500000000000000000000000001210344137400163355ustar00rootroot00000000000000nipy-0.3.0/nipy/core/reference/__init__.py000066400000000000000000000036071210344137400204540ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Frame of reference/coordinates package. 
Mathematical model ================== The idea of a chart :lm:`\\phi` : I{U} S{sub} I{M} S{->} B{R}^k on a "manifold" I{M}. For a chart both input (I{M}) and output coordinates (B{R}^k) must be defined and a map relating the two coordinate systems. Description =========== The modules in this package contains classes which define the space in which an image exists and also functions for manipulating and traversing this space. The basic class which defines an image space is a CoordinateMap (coordinate_map.py). A CoordinateMap consists of an input CoordinateSystem (coordinate_system.py), an output CoordinateSystem, and a mapping which converts points in the input space to points in the output space. A `CoordinateSystem` consists of a set of ordered `Coordinate` objects. Each Coordinate has a name and a builtin dtype. The typical use of a `CoordinateMap` is to define how voxels in an `Image` (core.image.__init__.py) object's raw data map into real space. `Image` traversal is general done in terms of the underlying coordinate_map, and a number of iterators are provided to traverse points in the coordinate_map (iterators.py). Access to available iterators is done through the CoordinateMap interface, rather than accessing the iterator classes directly. The other common image access method is to take slices through the coordinate_map. In slices.py functions are presented which will return a `CoordinateMap` representing a single slice through a larger coordinate_map. """ __docformat__ = 'restructuredtext' from . import coordinate_system from . import coordinate_map from . import slices __all__ = ["coordinate_system", "coordinate_map", "slices"] from nipy.testing import Tester test = Tester().test bench = Tester().bench nipy-0.3.0/nipy/core/reference/array_coords.py000066400000000000000000000313271210344137400214040ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Some CoordinateMaps have a domain that are 'array' coordinates, hence the function of the CoordinateMap can be evaluated at these 'array' points. This module tries to make these operations easier by defining a class ArrayCoordMap that is essentially a CoordinateMap and a shape. This class has two properties: values, transposed_values the CoordinateMap at np.indices(shape). The class Grid is meant to take a CoordinateMap and an np.mgrid-like notation to create an ArrayCoordMap. """ import numpy as np from .coordinate_map import CoordinateMap, AffineTransform, compose from .coordinate_map import product as cmap_product from .coordinate_map import shifted_range_origin from .coordinate_system import CoordinateSystem class ArrayCoordMap(object): """ Class combining coordinate map and array shape When the function_domain of a CoordinateMap can be thought of as 'array' coordinates, i.e. an 'input_shape' makes sense. We can than evaluate the CoordinateMap at np.indices(input_shape) """ def __init__(self, coordmap, shape): """ Parameters ---------- coordmap : ``CoordinateMap`` A CoordinateMap with function_domain that are 'array' coordinates. shape : sequence of int The size of the (implied) underlying array. 
Examples -------- >>> aff = np.diag([0.6,1.1,2.3,1]) >>> aff[:3,3] = (0.1, 0.2, 0.3) >>> cmap = AffineTransform.from_params('ijk', 'xyz', aff) >>> cmap.ndims # number of (input, output) dimensions (3, 3) >>> acmap = ArrayCoordMap(cmap, (1, 2, 3)) Real world values at each array coordinate, one row per array coordinate (6 in this case), one column for each output dimension (3 in this case) >>> acmap.values array([[ 0.1, 0.2, 0.3], [ 0.1, 0.2, 2.6], [ 0.1, 0.2, 4.9], [ 0.1, 1.3, 0.3], [ 0.1, 1.3, 2.6], [ 0.1, 1.3, 4.9]]) Same values, but arranged in np.indices / np.mgrid format, first axis is for number of output coordinates (3 in our case), the rest are for the input shape (1, 2, 3) >>> acmap.transposed_values.shape (3, 1, 2, 3) >>> acmap.transposed_values array([[[[ 0.1, 0.1, 0.1], [ 0.1, 0.1, 0.1]]], [[[ 0.2, 0.2, 0.2], [ 1.3, 1.3, 1.3]]], [[[ 0.3, 2.6, 4.9], [ 0.3, 2.6, 4.9]]]]) """ self.coordmap = coordmap self.shape = tuple(shape) def _evaluate(self, transpose=False): """ If the coordmap has a shape (so that it can be thought of as a map from voxels to some output space), return the range of the coordmap, i.e. the value at all the voxels. Parameters ---------- coordmap : `CoordinateMap` transpose : bool, optional If False (the default), the result is a 2-dimensional ndarray with shape[1] == coordmap.ndims[1]. That is, the result is a list of output values. Otherwise, the shape is (coordmap.ndims[1],) + coordmap.shape. Returns ------- values : array Values of self.coordmap evaluated at np.indices(self.shape). """ indices = np.indices(self.shape).astype( self.coordmap.function_domain.coord_dtype) tmp_shape = indices.shape # reshape indices to be a sequence of coordinates indices.shape = (self.coordmap.ndims[0], np.product(self.shape)) # evaluate using coordinate map mapping _range = self.coordmap(indices.T) if transpose: # reconstruct np.indices format for output _range = _range.T _range.shape = (_range.shape[0],) + tmp_shape[1:] return _range def _getvalues(self): return self._evaluate(transpose=False) values = property(_getvalues, doc='Get values of ArrayCoordMap in a ' '2-dimensional array of shape ' '(product(self.shape), self.coordmap.ndims[1]))') def _getindices_values(self): return self._evaluate(transpose=True) transposed_values = property(_getindices_values, doc='Get values of ArrayCoordMap in an array of shape (self.coordmap.ndims[1],) + self.shape)') def __getitem__(self, slicers): """ Return a slice through the coordmap. Parameters ---------- slicers : int or tuple int, or sequence of any combination of integers, slices. The sequence can also contain one Ellipsis. """ # slicers might just be just one thing, so convert to tuple if type(slicers) != type(()): slicers = (slicers,) # raise error for anything other than slice, int, Ellipsis have_ellipsis = False # check for >1 Ellipsis for i in slicers: if isinstance(i, np.ndarray): raise ValueError('Sorry, we do not support ' 'ndarrays (fancy indexing)') if i == Ellipsis: if have_ellipsis: raise ValueError( "only one Ellipsis (...) allowed in slice") have_ellipsis = True continue try: int(i) except TypeError: if hasattr(i, 'start'): # probably slice continue raise ValueError('Expecting int, slice or Ellipsis') # allow slicing of form [...,1] if have_ellipsis: # convert ellipsis to series of slice(None) objects. 
For # example, if the coordmap is length 3, we convert (...,1) # to (slice(None), slice(None), 1) - equivalent to [:,:,1] ellipsis_start = list(slicers).index(Ellipsis) inds_after_ellipsis = slicers[(ellipsis_start+1):] # the ellipsis continues until any remaining slice specification n_ellipses = len(self.shape) - ellipsis_start - len(inds_after_ellipsis) slicers = (slicers[:ellipsis_start] + n_ellipses * (slice(None),) + inds_after_ellipsis) return _slice(self.coordmap, self.shape, *slicers) @staticmethod def from_shape(coordmap, shape): """ Create an evaluator assuming that coordmap.function_domain are 'array' coordinates. """ slices = tuple([slice(0,s,1) for s in shape]) return Grid(coordmap)[slices] def __repr__(self): return "ArrayCoordMap(\n coordmap=" + \ '\n '.join(repr(self.coordmap).split('\n')) + ',\n shape=%s' % repr(self.shape) + '\n)' def _slice(coordmap, shape, *slices): """ Slice a 'voxel' CoordinateMap's function_domain with slices. A 'voxel' CoordinateMap is interpreted as a coordmap having a shape. """ if len(slices) < coordmap.ndims[0]: slices = (list(slices) + [slice(None,None,None)] * (coordmap.ndims[0] - len(slices))) ranges = [np.arange(s) for s in shape] cmaps = [] keep_in_output = [] dtype = coordmap.function_domain.coord_dtype newshape = [] for i, __slice in enumerate(slices): ranges[i] = ranges[i][__slice] try: start = ranges[i][0] except IndexError: try: start = int(ranges[i]) except TypeError: raise ValueError('empty slice for dimension %d, ' 'coordinate %s' % (i, coordmap.function_domain.coord_names[i])) if ranges[i].shape == (): step = 0 start = int(ranges[i]) l = 1 elif ranges[i].shape[0] > 1: start = ranges[i][0] step = ranges[i][1] - ranges[i][0] l = ranges[i].shape[0] keep_in_output.append(i) else: start = ranges[i][0] step = 0. l = 1 keep_in_output.append(i) if step > 1: name = coordmap.function_domain.coord_names[i] + '-slice' else: name = coordmap.function_domain.coord_names[i] cmaps.append(AffineTransform( CoordinateSystem([name], coord_dtype=dtype), CoordinateSystem([coordmap.function_domain.coord_names[i]]), np.array([[step, start],[0,1]], dtype=dtype))) if i in keep_in_output: newshape.append(l) slice_cmap = cmap_product(*cmaps) # Identify the origin in the range of cmap # with the origin in the domain of coordmap slice_cmap = shifted_range_origin(slice_cmap, np.zeros(slice_cmap.ndims[1]), coordmap.function_domain.name) # Reduce the size of the matrix innames = slice_cmap.function_domain.coord_names inmat = [] function_domain = CoordinateSystem( [innames[i] for i in keep_in_output], 'input-slice', coordmap.function_domain.coord_dtype) A = np.zeros((coordmap.ndims[0]+1, len(keep_in_output)+1)) for j, i in enumerate(keep_in_output): A[:,j] = slice_cmap.affine[:,i] A[:,-1] = slice_cmap.affine[:,-1] A = A.astype(function_domain.coord_dtype) slice_cmap = AffineTransform(function_domain, coordmap.function_domain, A) return ArrayCoordMap(compose(coordmap, slice_cmap), tuple(newshape)) class Grid(object): """ Simple class to construct AffineTransform instances with slice notation like np.ogrid/np.mgrid. >>> c = CoordinateSystem('xy', 'input') >>> g = Grid(c) >>> points = g[-1:1:21j,-2:4:31j] >>> points.coordmap.affine array([[ 0.1, 0. , -1. ], [ 0. , 0.2, -2. ], [ 0. , 0. , 1. 
]]) >>> print points.coordmap.function_domain CoordinateSystem(coord_names=('i0', 'i1'), name='product', coord_dtype=float64) >>> print points.coordmap.function_range CoordinateSystem(coord_names=('x', 'y'), name='input', coord_dtype=float64) >>> points.shape (21, 31) >>> print points.transposed_values.shape (2, 21, 31) >>> print points.values.shape (651, 2) """ def __init__(self, coords): """ Initialize Grid object Parameters ---------- coords: ``CoordinateMap`` or ``CoordinateSystem`` A coordinate map to be 'sliced' into. If coords is a CoordinateSystem, then an AffineTransform instance is created with coords with identity transformation. """ if isinstance(coords, CoordinateSystem): coordmap = AffineTransform.identity(coords.coord_names, coords.name) elif not (isinstance(coords, CoordinateMap) or isinstance(coords, AffineTransform)): raise ValueError('expecting either a CoordinateMap, CoordinateSystem or AffineTransform for Grid') else: coordmap = coords self.coordmap = coordmap def __getitem__(self, index): """ Create an AffineTransform coordinate map with into self.coords with slices created as in np.mgrid/np.ogrid. """ dtype = self.coordmap.function_domain.coord_dtype results = [a.ravel().astype(dtype) for a in np.ogrid[index]] if len(results) != len(self.coordmap.function_domain.coord_names): raise ValueError('the number of slice objects must match ' 'the number of input dimensions') cmaps = [] for i, result in enumerate(results): if result.shape[0] > 1: step = result[1] - result[0] else: step = 0 start = result[0] cmaps.append(AffineTransform( CoordinateSystem(['i%d' % i], coord_dtype=dtype), CoordinateSystem([self.coordmap.function_domain.coord_names[i]], coord_dtype=dtype), np.array([[step, start],[0,1]], dtype=dtype))) shape = [result.shape[0] for result in results] cmap = cmap_product(*cmaps) # Identify the origin in the range of cmap # with the origin in the domain of self.coordmap cmap = shifted_range_origin(cmap, np.zeros(cmap.ndims[1]), self.coordmap.function_domain.name) return ArrayCoordMap(compose(self.coordmap, cmap), tuple(shape)) nipy-0.3.0/nipy/core/reference/coordinate_map.py000066400000000000000000002471001210344137400216770ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This module describes two types of *mappings*: * CoordinateMap: a general function from a domain to a range, with a possible inverse function. * AffineTransform: an affine function from a domain to a range, not necessarily of the same dimension, hence not always invertible. Each of these objects is meant to encapsulate a tuple of (domain, range, function). Each of the mapping objects contain all the details about their domain CoordinateSystem, their range CoordinateSystem and the mapping between them. Common API ---------- They are separate classes, neither one inheriting from the other. They do, however, share some parts of an API, each having methods: * renamed_domain : rename on the coordinates of the domain (returns a new mapping) * renamed_range : rename the coordinates of the range (returns a new mapping) * reordered_domain : reorder the coordinates of the domain (returns a new mapping) * reordered_range : reorder the coordinates of the range (returns a new mapping) * inverse : when appropriate, return the inverse *mapping* These methods are implemented by module level functions of the same name. 
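For example (an illustrative sketch using the classes defined below):

>>> cm = CoordinateMap(CoordinateSystem('ijk'), CoordinateSystem('xyz'),
...                    lambda x: x + 1)
>>> cm.reordered_domain('kji').function_domain.coord_names
('k', 'j', 'i')
>>> cm.renamed_range({'x': 'u'}).function_range.coord_names
('u', 'y', 'z')
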
They also share some attributes: * ndims : the dimensions of the domain and range, respectively * function_domain : CoordinateSystem describing the domain * function_range : CoordinateSystem describing the range Operations on mappings (module level functions) ----------------------------------------------- * compose : Take a sequence of mappings (either CoordinateMaps or AffineTransforms) and return their composition. If they are all AffineTransforms, an AffineTransform is returned. This checks to ensure that domains and ranges of the various mappings agree. * product : Take a sequence of mappings (either CoordinateMaps or AffineTransforms) and return a new mapping that has domain and range given by the concatenation of their domains and ranges, and the mapping simply concatenates the output of each of the individual mappings. If they are all AffineTransforms, an AffineTransform is returned. If they are all AffineTransforms that are in fact linear (i.e. origin=0) then can is represented as a block matrix with the size of the blocks determined by * concat : Take a mapping and prepend a coordinate to its domain and range. For mapping `m`, this is the same as product(AffineTransform.identity('concat'), `m`) """ import warnings import numpy as np import numpy.linalg as npl from nibabel.affines import to_matvec, from_matvec from ...fixes.nibabel import io_orientation from .coordinate_system import(CoordinateSystem, safe_dtype, is_coordsys, product as coordsys_product ) # shorthand CS = CoordinateSystem # Tolerance for small values in affine TINY = 1e-5 class CoordinateMap(object): """A set of domain and range CoordinateSystems and a function between them. For example, the function may represent the mapping of a voxel (the domain of the function) to real space (the range). The function may be an affine or non-affine transformation. Attributes ---------- function_domain : :class:`CoordinateSystem` instance The input coordinate system. function_range : :class:`CoordinateSystem` instance The output coordinate system. function : callable A callable that maps the function_domain to the function_range. inverse_function : None or callable A callable that maps the function_range to the function_domain. Not all functions have an inverse, in which case inverse_function is None. Examples -------- >>> function_domain = CoordinateSystem('ijk', 'voxels') >>> function_range = CoordinateSystem('xyz', 'world') >>> mni_orig = np.array([-90.0, -126.0, -72.0]) >>> function = lambda x: x + mni_orig >>> inv_function = lambda x: x - mni_orig >>> cm = CoordinateMap(function_domain, function_range, function, inv_function) Map the first 3 voxel coordinates, along the x-axis, to mni space: >>> x = np.array([[0,0,0], [1,0,0], [2,0,0]]) >>> cm.function(x) array([[ -90., -126., -72.], [ -89., -126., -72.], [ -88., -126., -72.]]) >>> x = CoordinateSystem('x') >>> y = CoordinateSystem('y') >>> m = CoordinateMap(x, y, np.exp, np.log) >>> m CoordinateMap( function_domain=CoordinateSystem(coord_names=('x',), name='', coord_dtype=float64), function_range=CoordinateSystem(coord_names=('y',), name='', coord_dtype=float64), function=, inverse_function= ) >>> m.inverse() CoordinateMap( function_domain=CoordinateSystem(coord_names=('y',), name='', coord_dtype=float64), function_range=CoordinateSystem(coord_names=('x',), name='', coord_dtype=float64), function=, inverse_function= ) """ _doc = {} function = np.exp _doc['function'] = 'The function from function_domain to function_range.' 
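    # Descriptive note: the class-level attributes here (``function = np.exp``
    # above and those below) are illustrative defaults paired with ``_doc``
    # entries; each instance sets its own read-only values in ``__init__``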
function_domain = CoordinateSystem('x') _doc['function_domain'] = 'The domain of the function, a CoordinateSystem.' function_range = CoordinateSystem('y') _doc['function_range'] = 'The range of the function, a CoordinateSystem.' inverse_function = np.log _doc['inverse_function'] = 'The inverse function from function_range' + \ 'to function_domain, if supplied.' ndims = (1,1) _doc['ndims'] = 'Number of dimensions of domain and range, respectively.' def __init__(self, function_domain, function_range, function, inverse_function=None): """Create a CoordinateMap given function, domain and range. Parameters ---------- function_domain : :class:`CoordinateSystem` The input coordinate system. function_range : :class:`CoordinateSystem` The output coordinate system function : callable The function between function_domain and function_range. It should be a callable that accepts arrays of shape (N, function_domain.ndim) and returns arrays of shape (N, function_range.ndim), where N is the number of points for transformation. inverse_function : None or callable, optional The optional inverse of function, with the intention being ``x = inverse_function(function(x))``. If the function is affine and invertible, then this is true for all x. The default is None Returns ------- coordmap : CoordinateMap """ # These attrs define the structure of the coordmap. if not is_coordsys(function_domain): function_domain = CoordinateSystem(function_domain) self.function_domain = function_domain if not is_coordsys(function_range): function_range = CoordinateSystem(function_range) self.function_range = function_range self.function = function self.inverse_function = inverse_function self.ndims = (function_domain.ndim, function_range.ndim) if not callable(function): raise ValueError('The function must be callable.') if inverse_function is not None: if not callable(inverse_function): raise ValueError('The inverse_function must be callable.') self._checkfunction() # All attributes are read only def __setattr__(self, key, value): if key in self.__dict__: raise AttributeError('the value of %s has already been ' 'set and all attributes are ' 'read-only' % key) object.__setattr__(self, key, value) ################################################################### # # Properties # ################################################################### ################################################################### # # Methods # ################################################################### def reordered_domain(self, order=None): """ Create a new CoordinateMap with the coordinates of function_domain reordered. Default behaviour is to reverse the order of the coordinates. Parameters ---------- order : sequence Order to use, defaults to reverse. The elements can be integers, strings or 2-tuples of strings. If they are strings, they should be in mapping.function_domain.coord_names. Returns ------- newmapping : CoordinateMap A new CoordinateMap with the coordinates of function_domain reordered. Examples -------- >>> input_cs = CoordinateSystem('ijk') >>> output_cs = CoordinateSystem('xyz') >>> cm = CoordinateMap(input_cs, output_cs, lambda x:x+1) >>> cm.reordered_domain('ikj').function_domain CoordinateSystem(coord_names=('i', 'k', 'j'), name='', coord_dtype=float64) """ return reordered_domain(self, order) def reordered_range(self, order=None): """ Nnew CoordinateMap with function_range reordered. Defaults to reversing the coordinates of function_range. Parameters ---------- order : sequence Order to use, defaults to reverse. 
The elements can be integers, strings or 2-tuples of strings. If they are strings, they should be in mapping.function_range.coord_names. Returns ------- newmapping : CoordinateMap A new CoordinateMap with the coordinates of function_range reordered. Examples -------- >>> input_cs = CoordinateSystem('ijk') >>> output_cs = CoordinateSystem('xyz') >>> cm = CoordinateMap(input_cs, output_cs, lambda x:x+1) >>> cm.reordered_range('xzy').function_range CoordinateSystem(coord_names=('x', 'z', 'y'), name='', coord_dtype=float64) >>> cm.reordered_range([0,2,1]).function_range.coord_names ('x', 'z', 'y') >>> newcm = cm.reordered_range('yzx') >>> newcm.function_range.coord_names ('y', 'z', 'x') """ return reordered_range(self, order) def renamed_domain(self, newnames, name=''): """ New CoordinateMap with function_domain renamed Parameters ---------- newnames : dict A dictionary whose keys are integers or are in mapping.function_domain.coord_names and whose values are the new names. Returns ------- newmaping : CoordinateMap A new CoordinateMap with renamed function_domain. Examples -------- >>> domain = CoordinateSystem('ijk') >>> range = CoordinateSystem('xyz') >>> cm = CoordinateMap(domain, range, lambda x:x+1) >>> new_cm = cm.renamed_domain({'i':'phase','k':'freq','j':'slice'}) >>> new_cm.function_domain CoordinateSystem(coord_names=('phase', 'slice', 'freq'), name='', coord_dtype=float64) >>> new_cm = cm.renamed_domain({'i':'phase','k':'freq','l':'slice'}) Traceback (most recent call last): ... ValueError: no domain coordinate named l """ return renamed_domain(self, newnames) def renamed_range(self, newnames, name=''): """ New CoordinateMap with function_domain renamed Parameters ---------- newnames : dict A dictionary whose keys are integers or are in mapping.function_range.coord_names and whose values are the new names. Returns ------- newmapping : CoordinateMap A new CoordinateMap with renamed function_range. Examples -------- >>> domain = CoordinateSystem('ijk') >>> range = CoordinateSystem('xyz') >>> cm = CoordinateMap(domain, range, lambda x:x+1) >>> new_cm = cm.renamed_range({'x':'u'}) >>> new_cm.function_range CoordinateSystem(coord_names=('u', 'y', 'z'), name='', coord_dtype=float64) >>> new_cm = cm.renamed_range({'w':'u'}) Traceback (most recent call last): ... ValueError: no range coordinate named w """ return renamed_range(self, newnames) def inverse(self): """ New CoordinateMap with the functions reversed """ if self.inverse_function is None: return None return CoordinateMap(self.function_range, self.function_domain, self.inverse_function, inverse_function=self.function) def __call__(self, x): """ Return mapping evaluated at x Also, check x and the return value of self.function for compatiblity with function_domain and function_range coordinate systems respectively. Parameters ---------- x : array-like Values in domain coordinate system space that will be mapped to the range coordinate system space, using ``self.mapping``. The last dimension of the array is the coordinate dimension. Thus `x` can be any array that can be reshaped to (N, self.function_domain.ndim), and that matches self.function_domain dtype. Returns ------- y : array Values in range coordinate system space. 
If input `x` was shape S + (self.function_domain.ndim) (where S is a tuple of int and can be ()) - then the output `y` will be shape S + (self.function_range.ndim) Examples -------- >>> input_cs = CoordinateSystem('ijk') >>> output_cs = CoordinateSystem('xyz') >>> function = lambda x:x+1 >>> inverse = lambda x:x-1 >>> cm = CoordinateMap(input_cs, output_cs, function, inverse) >>> cm([2., 3., 4.]) array([ 3., 4., 5.]) >>> cmi = cm.inverse() >>> cmi([2., 6. ,12.]) array([ 1., 5., 11.]) """ x = np.asanyarray(x) out_shape = (self.function_range.ndim,) if x.ndim > 1: out_shape = x.shape[:-1] + out_shape in_vals = self.function_domain._checked_values(x) out_vals = self.function(in_vals) final_vals = self.function_range._checked_values(out_vals) return final_vals.reshape(out_shape) def __copy__(self): """ Create a copy of the coordmap. Returns ------- coordmap : CoordinateMap """ return CoordinateMap(self.function_domain, self.function_range, self.function, inverse_function=self.inverse_function) ################################################################### # # Private methods # ################################################################### def __repr__(self): if self.inverse_function is None: return "CoordinateMap(\n function_domain=%s,\n function_range=%s,\n function=%s\n )" % (self.function_domain, self.function_range, repr(self.function)) else: return "CoordinateMap(\n function_domain=%s,\n function_range=%s,\n function=%s,\n inverse_function=%s\n )" % (self.function_domain, self.function_range, repr(self.function), repr(self.inverse_function)) def _checkfunction(self): """Verify that the domain and range of self.function work can be used for calling self.function. We do this by passing something that should work, through __call__ """ inp = np.zeros((10, self.ndims[0]), dtype=self.function_domain.coord_dtype) out = self(inp) def __eq__(self, other): return ((isinstance(other, self.__class__) or isinstance(self, other.__class__)) and (self.function == other.function) and (self.function_domain == other.function_domain) and (self.function_range == other.function_range) and (self.inverse_function == other.inverse_function)) def __ne__(self, other): return not self.__eq__(other) def similar_to(self, other): """ Does `other` have similar coordinate systems and same mappings? A "similar" coordinate system is one with the same coordinate names and data dtype, but ignoring the coordinate system name. """ return (isinstance(other, self.__class__) and (self.function == other.function) and (self.function_domain.similar_to(other.function_domain)) and (self.function_range.similar_to(other.function_range)) and (self.inverse_function == other.inverse_function)) class AffineTransform(object): """ Class for affine transformation from domain to a range This class has an affine attribute, which is a matrix representing the affine transformation in homogeneous coordinates. This matrix is used to evaluate the function, rather than having an explicit function (as is the case for a CoordinateMap). 
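    In homogeneous form, a point ``x`` in the domain is mapped to
    ``y = A * x + b`` in the range, where ``A`` and ``b`` are the matrix and
    vector parts of ``affine`` - that is, ``affine`` is the block matrix
    ``[[A, b], [0, 1]]``.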
Examples -------- >>> inp_cs = CoordinateSystem('ijk') >>> out_cs = CoordinateSystem('xyz') >>> cm = AffineTransform(inp_cs, out_cs, np.diag([1, 2, 3, 1])) >>> cm AffineTransform( function_domain=CoordinateSystem(coord_names=('i', 'j', 'k'), name='', coord_dtype=float64), function_range=CoordinateSystem(coord_names=('x', 'y', 'z'), name='', coord_dtype=float64), affine=array([[ 1., 0., 0., 0.], [ 0., 2., 0., 0.], [ 0., 0., 3., 0.], [ 0., 0., 0., 1.]]) ) >>> cm.affine array([[ 1., 0., 0., 0.], [ 0., 2., 0., 0.], [ 0., 0., 3., 0.], [ 0., 0., 0., 1.]]) >>> cm([1,1,1]) array([ 1., 2., 3.]) >>> icm = cm.inverse() >>> icm([1,2,3]) array([ 1., 1., 1.]) """ _doc = {} affine = np.diag([3,4,5,1]) _doc['affine'] = 'The matrix representing an affine transformation ' + \ 'homogeneous form.' function_domain = CoordinateSystem('x') _doc['function_domain'] = 'The domain of the affine transformation, ' + \ 'a CoordinateSystem.' function_range = CoordinateSystem('y') _doc['function_range'] = 'The range of the affine transformation, ' + \ 'a CoordinateSystem.' ndims = (3,3) _doc['ndims'] = 'Number of dimensions of domain and range, respectively.' def __init__(self, function_domain, function_range, affine): """ Initialize AffineTransform Parameters ---------- function_domain : :class:`CoordinateSystem` input coordinates function_range : :class:`CoordinateSystem` output coordinates affine : array-like affine homogenous coordinate matrix Notes ----- The dtype of the resulting matrix is determined by finding a safe typecast for the function_domain, function_range and affine. """ if not is_coordsys(function_domain): function_domain = CoordinateSystem(function_domain) if not is_coordsys(function_range): function_range = CoordinateSystem(function_range) affine = np.asarray(affine) dtype = safe_dtype(affine.dtype, function_domain.coord_dtype, function_range.coord_dtype) inaxes = function_domain.coord_names outaxes = function_range.coord_names self.function_domain = CoordinateSystem(inaxes, function_domain.name, dtype) self.function_range = CoordinateSystem(outaxes, function_range.name, dtype) self.ndims = (self.function_domain.ndim, self.function_range.ndim) affine = np.asarray(affine, dtype=dtype) if affine.shape != (self.ndims[1]+1, self.ndims[0]+1): raise ValueError('coordinate lengths do not match ' 'affine matrix shape') # Test that it is actually an affine mapping in homogeneous # form bottom_row = np.array([0]*self.ndims[0] + [1]) if not np.all(affine[-1] == bottom_row): raise ValueError('the homogeneous transform should have bottom=' + \ 'row %s' % repr(bottom_row)) self.affine = affine ################################################################### # # Properties # ################################################################### def inverse(self, preserve_dtype=False): """ Return coordinate map with inverse affine transform or None Parameters ---------- preserve_dtype : bool If False, return affine mapping from inverting the ``affine``. The domain / range dtypes for the inverse may then change as a function of the dtype of the inverted ``affine``. If True, try to invert our ``affine``, and see if it can be cast to the needed data type, which is ``self.function_domain.coord_dtype``. We need this dtype in order for the inverse to preserve the coordinate system dtypes. Returns ------- aff_cm_inv : ``AffineTransform`` instance or None ``AffineTransform`` mapping from the *range* of input `self` to the *domain* of input `self` - the inverse of `self`. If ``self.affine`` was not invertible return None. 
If `preserve_dtype` is True, and the inverse of ``self.affine`` cannot be cast to ``self.function_domain.coord_dtype``, then return None. Otherwise return ``AffineTransform`` inverse mapping. If `preserve_dtype` is False, the domain / range dtypes of the return inverse may well be different from those of the input `self`. Examples -------- >>> input_cs = CoordinateSystem('ijk', coord_dtype=np.int) >>> output_cs = CoordinateSystem('xyz', coord_dtype=np.int) >>> affine = np.array([[1,0,0,1], ... [0,1,0,1], ... [0,0,1,1], ... [0,0,0,1]]) >>> affine_transform = AffineTransform(input_cs, output_cs, affine) >>> affine_transform([2,3,4]) #doctest: +IGNORE_DTYPE array([3, 4, 5]) The inverse transform, by default, generates a floating point inverse matrix and therefore floating point output: >>> affine_transform_inv = affine_transform.inverse() >>> affine_transform_inv([2, 6, 12]) array([ 1., 5., 11.]) You can force it to preserve the coordinate system dtype with the `preserve_dtype` flag: >>> at_inv_preserved = affine_transform.inverse(preserve_dtype=True) >>> at_inv_preserved([2, 6, 12]) #doctest: +IGNORE_DTYPE array([ 1, 5, 11]) If you `preserve_dtype`, and there is no inverse affine preserving the dtype, the inverse is None: >>> affine2 = affine.copy() >>> affine2[0, 0] = 2 # now inverse can't be integer >>> aff_t = AffineTransform(input_cs, output_cs, affine2) >>> aff_t.inverse(preserve_dtype=True) is None True """ aff_dt = self.function_range.coord_dtype try: m_inv = npl.inv(self.affine) except npl.LinAlgError: return None except TypeError: # Try using sympy for the inverse. This might be needed for sympy # symbols in the affine, or Float128 from sympy import Matrix, matrix2numpy sym_inv = Matrix(self.affine).inv() m_inv = matrix2numpy(sym_inv).astype(aff_dt) else: # linalg inverse succeeded if preserve_dtype and aff_dt != np.object: # can we cast back? m_inv_orig = m_inv m_inv = m_inv.astype(aff_dt) if not np.allclose(m_inv_orig, m_inv): return None return AffineTransform(self.function_range, self.function_domain, m_inv) ################################################################### # # Helper constructors # ################################################################### @staticmethod def from_params(innames, outnames, params, domain_name='', range_name=''): """ Create `AffineTransform` from `innames` and `outnames` Parameters ---------- innames : sequence of str or str The names of the axes of the domain. If str, then names given by ``list(innames)`` outnames : seqence of str or str The names of the axes of the range. If str, then names given by ``list(outnames)`` params : AffineTransform, array or (array, array) An affine function between the domain and range. This can be represented either by a single ndarray (which is interpreted as the representation of the function in homogeneous coordinates) or an (A,b) tuple. 
domain_name : str, optional Name of domain CoordinateSystem range_name : str, optional Name of range CoordinateSystem Returns ------- aff : ``AffineTransform`` Notes ----- :Precondition: ``len(shape) == len(names)`` :Raises ValueError: ``if len(shape) != len(names)`` """ if type(params) == type(()): A, b = params params = from_matvec(A, b) ndim = (len(innames) + 1, len(outnames) + 1) if params.shape != ndim[::-1]: raise ValueError('shape and number of axis names do not agree') function_domain = CoordinateSystem(innames, domain_name) function_range = CoordinateSystem(outnames, range_name) return AffineTransform(function_domain, function_range, params) @staticmethod def from_start_step(innames, outnames, start, step, domain_name='', range_name=''): """ New `AffineTransform` from names, start and step. Parameters ---------- innames : sequence of str or str The names of the axes of the domain. If str, then names given by ``list(innames)`` outnames : seqence of str or str The names of the axes of the range. If str, then names given by ``list(outnames)`` start : sequence of float Start vector used in constructing affine transformation step : sequence of float Step vector used in constructing affine transformation domain_name : str, optional Name of domain CoordinateSystem range_name : str, optional Name of range CoordinateSystem Returns ------- cm : `CoordinateMap` Examples -------- >>> cm = AffineTransform.from_start_step('ijk', 'xyz', [1, 2, 3], [4, 5, 6]) >>> cm.affine array([[ 4., 0., 0., 1.], [ 0., 5., 0., 2.], [ 0., 0., 6., 3.], [ 0., 0., 0., 1.]]) Notes ----- ``len(names) == len(start) == len(step)`` """ ndim = len(innames) if len(outnames) != ndim: raise ValueError('len(innames) != len(outnames)') return AffineTransform.from_params(innames, outnames, (np.diag(step), start), domain_name=domain_name, range_name=range_name) @staticmethod def identity(coord_names, name=''): """ Return an identity coordmap of the given shape Parameters ---------- coord_names : sequence of str or str The names of the axes of the domain. If str, then names given by ``list(coord_names)`` name : str, optional Name of origin of coordinate system Returns ------- cm : `CoordinateMap` ``CoordinateMap`` with ``CoordinateSystem`` domain and an identity transform, with identical domain and range. Examples -------- >>> cm = AffineTransform.identity('ijk', 'somewhere') >>> cm.affine array([[ 1., 0., 0., 0.], [ 0., 1., 0., 0.], [ 0., 0., 1., 0.], [ 0., 0., 0., 1.]]) >>> cm.function_domain CoordinateSystem(coord_names=('i', 'j', 'k'), name='somewhere', coord_dtype=float64) >>> cm.function_range CoordinateSystem(coord_names=('i', 'j', 'k'), name='somewhere', coord_dtype=float64) """ return AffineTransform.from_start_step(coord_names, coord_names, [0]*len(coord_names), [1]*len(coord_names), name, name) ################################################################### # # Methods # ################################################################### def reordered_domain(self, order=None): """ New AffineTransform with function_domain reordered Default behaviour is to reverse the order of the coordinates. Parameters ---------- order : sequence Order to use, defaults to reverse. The elements can be integers, strings or 2-tuples of strings. If they are strings, they should be in mapping.function_domain.coord_names. Returns ------- newmapping :AffineTransform A new AffineTransform with the coordinates of function_domain reordered. 
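        Notes
        -----
        For an AffineTransform, reordering the domain is equivalent to
        right-multiplying ``affine`` by the homogeneous permutation matrix
        corresponding to `order`.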
Examples -------- >>> input_cs = CoordinateSystem('ijk') >>> output_cs = CoordinateSystem('xyz') >>> cm = AffineTransform(input_cs, output_cs, np.identity(4)) >>> cm.reordered_domain('ikj').function_domain CoordinateSystem(coord_names=('i', 'k', 'j'), name='', coord_dtype=float64) """ return reordered_domain(self, order) def reordered_range(self, order=None): """ New AffineTransform with function_range reordered Defaults to reversing the coordinates of function_range. Parameters ---------- order : sequence Order to use, defaults to reverse. The elements can be integers, strings or 2-tuples of strings. If they are strings, they should be in mapping.function_range.coord_names. Returns ------- newmapping : AffineTransform A new AffineTransform with the coordinates of function_range reordered. Examples -------- >>> input_cs = CoordinateSystem('ijk') >>> output_cs = CoordinateSystem('xyz') >>> cm = AffineTransform(input_cs, output_cs, np.identity(4)) >>> cm.reordered_range('xzy').function_range CoordinateSystem(coord_names=('x', 'z', 'y'), name='', coord_dtype=float64) >>> cm.reordered_range([0,2,1]).function_range.coord_names ('x', 'z', 'y') >>> newcm = cm.reordered_range('yzx') >>> newcm.function_range.coord_names ('y', 'z', 'x') """ return reordered_range(self, order) def renamed_domain(self, newnames, name=''): """ New AffineTransform with function_domain renamed Parameters ---------- newnames : dict A dictionary whose keys are integers or are in mapping.function_domain.coord_names and whose values are the new names. Returns ------- newmapping : AffineTransform A new AffineTransform with renamed function_domain. Examples -------- >>> affine_domain = CoordinateSystem('ijk') >>> affine_range = CoordinateSystem('xyz') >>> affine_matrix = np.identity(4) >>> affine_mapping = AffineTransform(affine_domain, affine_range, affine_matrix) >>> new_affine_mapping = affine_mapping.renamed_domain({'i':'phase','k':'freq','j':'slice'}) >>> new_affine_mapping.function_domain CoordinateSystem(coord_names=('phase', 'slice', 'freq'), name='', coord_dtype=float64) >>> new_affine_mapping = affine_mapping.renamed_domain({'i':'phase','k':'freq','l':'slice'}) Traceback (most recent call last): ... ValueError: no domain coordinate named l """ return renamed_domain(self, newnames) def renamed_range(self, newnames, name=''): """ New AffineTransform with renamed function_domain Parameters ---------- newnames : dict A dictionary whose keys are integers or are in mapping.function_range.coord_names and whose values are the new names. Returns ------- newmapping : AffineTransform A new AffineTransform with renamed function_range. Examples -------- >>> affine_domain = CoordinateSystem('ijk') >>> affine_range = CoordinateSystem('xyz') >>> affine_matrix = np.identity(4) >>> affine_mapping = AffineTransform(affine_domain, affine_range, affine_matrix) >>> new_affine_mapping = affine_mapping.renamed_range({'x':'u'}) >>> new_affine_mapping.function_range CoordinateSystem(coord_names=('u', 'y', 'z'), name='', coord_dtype=float64) >>> new_affine_mapping = affine_mapping.renamed_range({'w':'u'}) Traceback (most recent call last): ... ValueError: no range coordinate named w """ return renamed_range(self, newnames) def __call__(self, x): """Return mapping evaluated at x Parameters ---------- x : array-like Values in domain coordinate system space that will be mapped to the range coordinate system space, using the homogeneous transform matrix self.affine. The last dimension of the array is the coordinate dimension. 
Thus `x` can be any array that can be reshaped to (N, self.function_domain.ndim), and that matches self.function_domain dtype. Returns ------- y : array Values in range coordinate system space. If input `x` was shape S + (self.function_domain.ndim) (where S is a tuple of int and can be ()) - then the output `y` will be shape S + (self.function_range.ndim) Examples -------- >>> input_cs = CoordinateSystem('ijk', coord_dtype=np.int) >>> output_cs = CoordinateSystem('xyz', coord_dtype=np.int) >>> affine = np.array([[1,0,0,1], ... [0,1,0,1], ... [0,0,1,1], ... [0,0,0,1]]) >>> affine_transform = AffineTransform(input_cs, output_cs, affine) >>> affine_transform([2,3,4]) #doctest: +IGNORE_DTYPE array([3, 4, 5]) """ x = np.asanyarray(x) out_shape = (self.function_range.ndim,) if x.ndim > 1: out_shape = x.shape[:-1] + out_shape in_vals = self.function_domain._checked_values(x) A, b = to_matvec(self.affine) out_vals = np.dot(in_vals, A.T) + b[np.newaxis,:] final_vals = self.function_range._checked_values(out_vals) return final_vals.reshape(out_shape) ################################################################### # # Private methods # ################################################################### def __copy__(self): """ Create a copy of the AffineTransform. Returns ------- affine_transform : AffineTransform Examples -------- >>> import copy >>> cm = AffineTransform(CoordinateSystem('ijk'), CoordinateSystem('xyz'), np.eye(4)) >>> cm_copy = copy.copy(cm) >>> cm is cm_copy False Note that the matrix (affine) is not a pointer to the same data, it's a full independent copy >>> cm.affine[0,0] = 2.0 >>> cm_copy.affine[0,0] 1.0 """ return AffineTransform(self.function_domain, self.function_range, self.affine.copy()) def __repr__(self): return ("AffineTransform(\n" " function_domain=%s,\n" " function_range=%s,\n" " affine=%s\n)" % (self.function_domain, self.function_range, '\n '.join(repr(self.affine).split('\n')))) def __eq__(self, other): # Must be subclasses if not (isinstance(other, self.__class__) or isinstance(self, other.__class__)): return False if np.any(self.affine - other.affine): # for objects if not np.allclose(self.affine, other.affine): # for numerical return False if not self.function_domain == other.function_domain: return False return self.function_range == other.function_range def __ne__(self, other): return not self.__eq__(other) def similar_to(self, other): """ Does `other` have similar coordinate systems and same mappings? A "similar" coordinate system is one with the same coordinate names and data dtype, but ignoring the coordinate system name. """ return (isinstance(other, self.__class__) and (self.function_domain.similar_to(other.function_domain)) and (self.function_range.similar_to(other.function_range)) and np.allclose(self.affine, other.affine)) #################################################################################### # # Module level functions # #################################################################################### def product(*cmaps, **kwargs): """ "topological" product of two or more mappings The mappings can be either AffineTransforms or CoordinateMaps. If they are all AffineTransforms, the result is an AffineTransform, else it is a CoordinateMap. 
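    When all the mappings are affine, the matrix part of the resulting
    affine is block diagonal, one block per input mapping, with the
    translation vectors stacked in the final column (see the examples
    below).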
Parameters ---------- cmaps : sequence of CoordinateMaps or AffineTransforms Returns ------- cmap : ``CoordinateMap`` Examples -------- >>> inc1 = AffineTransform.from_params('i', 'x', np.diag([2,1])) >>> inc2 = AffineTransform.from_params('j', 'y', np.diag([3,1])) >>> inc3 = AffineTransform.from_params('k', 'z', np.diag([4,1])) >>> cmap = product(inc1, inc3, inc2) >>> cmap.function_domain.coord_names ('i', 'k', 'j') >>> cmap.function_range.coord_names ('x', 'z', 'y') >>> cmap.affine array([[ 2., 0., 0., 0.], [ 0., 4., 0., 0.], [ 0., 0., 3., 0.], [ 0., 0., 0., 1.]]) >>> A1 = AffineTransform.from_params('ij', 'xyz', np.array([[2,3,1,0],[3,4,5,0],[7,9,3,1]]).T) >>> A2 = AffineTransform.from_params('xyz', 'de', np.array([[8,6,7,4],[1,-1,13,3],[0,0,0,1]])) >>> A1.affine array([[ 2., 3., 7.], [ 3., 4., 9.], [ 1., 5., 3.], [ 0., 0., 1.]]) >>> A2.affine array([[ 8., 6., 7., 4.], [ 1., -1., 13., 3.], [ 0., 0., 0., 1.]]) >>> p=product(A1, A2) >>> p.affine array([[ 2., 3., 0., 0., 0., 7.], [ 3., 4., 0., 0., 0., 9.], [ 1., 5., 0., 0., 0., 3.], [ 0., 0., 8., 6., 7., 4.], [ 0., 0., 1., -1., 13., 3.], [ 0., 0., 0., 0., 0., 1.]]) >>> np.allclose(p.affine[:3,:2], A1.affine[:3,:2]) True >>> np.allclose(p.affine[:3,-1], A1.affine[:3,-1]) True >>> np.allclose(p.affine[3:5,2:5], A2.affine[:2,:3]) True >>> np.allclose(p.affine[3:5,-1], A2.affine[:2,-1]) True >>> >>> A1([3,4]) array([ 25., 34., 26.]) >>> A2([5,6,7]) array([ 129., 93.]) >>> p([3,4,5,6,7]) array([ 25., 34., 26., 129., 93.]) """ # First, check if they're all Affine allaffine = np.all([isinstance(cmap, AffineTransform) for cmap in cmaps]) if allaffine: return _product_affines(*cmaps, **kwargs) else: warnings.warn("product of non-affine CoordinateMaps is less robust than"+ "the AffineTransform") return _product_cmaps(*[_as_coordinate_map(cmap) for cmap in cmaps], **kwargs) def compose(*cmaps): """ Return the composition of two or more CoordinateMaps. Parameters ---------- cmaps : sequence of CoordinateMaps Returns ------- cmap : ``CoordinateMap`` The resulting CoordinateMap has function_domain == cmaps[-1].function_domain and function_range == cmaps[0].function_range Examples -------- >>> cmap = AffineTransform.from_params('i', 'x', np.diag([2.,1.])) >>> cmapi = cmap.inverse() >>> id1 = compose(cmap,cmapi) >>> id1.affine array([[ 1., 0.], [ 0., 1.]]) >>> id2 = compose(cmapi,cmap) >>> id1.function_domain.coord_names ('x',) >>> id2.function_domain.coord_names ('i',) """ # First check if they're all affine allaffine = np.all([isinstance(cmap, AffineTransform) for cmap in cmaps]) if allaffine: return _compose_affines(*cmaps) else: warnings.warn("composition of non-affine CoordinateMaps is " "less robust than the AffineTransform") return _compose_cmaps(*[_as_coordinate_map(cmap) for cmap in cmaps]) def reordered_domain(mapping, order=None): """ New coordmap with the coordinates of function_domain reordered Default behaviour is to reverse the order of the coordinates. Parameters ---------- order: sequence Order to use, defaults to reverse. The elements can be integers, strings or 2-tuples of strings. If they are strings, they should be in mapping.function_domain.coord_names. Returns ------- newmapping : CoordinateMap or AffineTransform A new CoordinateMap with the coordinates of function_domain reordered. If isinstance(mapping, AffineTransform), newmapping is also an AffineTransform. Otherwise, it is a CoordinateMap. 
Examples -------- >>> input_cs = CoordinateSystem('ijk') >>> output_cs = CoordinateSystem('xyz') >>> cm = AffineTransform(input_cs, output_cs, np.identity(4)) >>> cm.reordered_domain('ikj').function_domain CoordinateSystem(coord_names=('i', 'k', 'j'), name='', coord_dtype=float64) Notes ----- If no reordering is to be performed, it returns a copy of mapping. """ ndim = mapping.ndims[0] if order is None: order = range(ndim)[::-1] elif type(order[0]) == type(''): order = [mapping.function_domain.index(s) for s in order] newaxes = [mapping.function_domain.coord_names[i] for i in order] newincoords = CoordinateSystem(newaxes, mapping.function_domain.name, coord_dtype=mapping.function_domain.coord_dtype) perm = np.zeros((ndim+1,)*2) perm[-1,-1] = 1. for i, j in enumerate(order): perm[j,i] = 1. # If there is no reordering, return mapping if np.allclose(perm, np.identity(perm.shape[0])): import copy return copy.copy(mapping) perm = perm.astype(mapping.function_domain.coord_dtype) A = AffineTransform(newincoords, mapping.function_domain, perm) if isinstance(mapping, AffineTransform): return _compose_affines(mapping, A) else: return _compose_cmaps(mapping, _as_coordinate_map(A)) def shifted_domain_origin(mapping, difference_vector, new_origin): """ Shift the origin of the domain Parameters ---------- difference_vector : array Representing the difference shifted_origin-current_origin in the domain's basis. Examples -------- >>> A = np.random.standard_normal((5,6)) >>> A[-1] = [0,0,0,0,0,1] >>> affine_transform = AffineTransform(CS('ijklm', 'oldorigin'), CS('xyzt'), A) >>> affine_transform.function_domain CoordinateSystem(coord_names=('i', 'j', 'k', 'l', 'm'), name='oldorigin', coord_dtype=float64) A random change of origin >>> difference = np.random.standard_normal(5) The same affine transforation with a different origin for its domain >>> shifted_affine_transform = shifted_domain_origin(affine_transform, difference, 'neworigin') >>> shifted_affine_transform.function_domain CoordinateSystem(coord_names=('i', 'j', 'k', 'l', 'm'), name='neworigin', coord_dtype=float64) Let's check that things work >>> point_in_old_basis = np.random.standard_normal(5) This is the relation ship between coordinates in old and new origins >>> np.allclose(shifted_affine_transform(point_in_old_basis), affine_transform(point_in_old_basis+difference)) True >>> np.allclose(shifted_affine_transform(point_in_old_basis-difference), affine_transform(point_in_old_basis)) True """ new_function_domain = CoordinateSystem(mapping.function_domain.coord_names, new_origin, coord_dtype=mapping.function_domain.coord_dtype) ndim = new_function_domain.ndim shift_matrix = np.identity(ndim+1, mapping.function_domain.coord_dtype) shift_matrix[:-1,-1] = np.array(difference_vector) shift_map = AffineTransform(new_function_domain, mapping.function_domain, shift_matrix) if isinstance(mapping, AffineTransform): return _compose_affines(mapping, shift_map) else: return _compose_cmaps(mapping, _as_coordinate_map(shift_map)) def shifted_range_origin(mapping, difference_vector, new_origin): """ Shift the origin of the range. Parameters ---------- difference_vector : array Representing the difference shifted_origin-current_origin in the range's basis. 
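    new_origin : str
        Name for the range coordinate system with its origin shifted.

    Returns
    -------
    shifted_mapping : CoordinateMap or AffineTransform
        A new mapping whose range coordinate system is renamed to
        `new_origin` and whose output values satisfy
        ``shifted_mapping(x) == mapping(x) - difference_vector``. If
        isinstance(mapping, AffineTransform), the result is also an
        AffineTransform. Otherwise, it is a CoordinateMap.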
Examples -------- >>> A = np.random.standard_normal((5,6)) >>> A[-1] = [0,0,0,0,0,1] >>> affine_transform = AffineTransform(CS('ijklm'), CS('xyzt', 'oldorigin'), A) >>> affine_transform.function_range CoordinateSystem(coord_names=('x', 'y', 'z', 't'), name='oldorigin', coord_dtype=float64) Make a random shift of the origin in the range >>> difference = np.random.standard_normal(4) >>> shifted_affine_transform = shifted_range_origin(affine_transform, difference, 'neworigin') >>> shifted_affine_transform.function_range CoordinateSystem(coord_names=('x', 'y', 'z', 't'), name='neworigin', coord_dtype=float64) >>> Evaluate the transform and verify it does as expected >>> point_in_domain = np.random.standard_normal(5) Check that things work >>> np.allclose(shifted_affine_transform(point_in_domain), affine_transform(point_in_domain) - difference) True >>> np.allclose(shifted_affine_transform(point_in_domain) + difference, affine_transform(point_in_domain)) True """ new_function_range = CoordinateSystem(mapping.function_range.coord_names, new_origin, coord_dtype=mapping.function_range.coord_dtype) ndim = new_function_range.ndim shift_matrix = np.identity(ndim+1, mapping.function_range.coord_dtype) shift_matrix[:-1,-1] = -np.array(difference_vector) shift_map = AffineTransform(mapping.function_range, new_function_range, shift_matrix) if isinstance(mapping, AffineTransform): return _compose_affines(shift_map, mapping) else: return _compose_cmaps(_as_coordinate_map(shift_map), mapping) def renamed_domain(mapping, newnames, name=''): """ New coordmap with the coordinates of function_domain renamed Parameters ---------- newnames: dict A dictionary whose keys are integers or are in mapping.function_range.coord_names and whose values are the new names. Returns ------- newmapping : CoordinateMap or AffineTransform A new mapping with renamed function_domain. If isinstance(mapping, AffineTransform), newmapping is also an AffineTransform. Otherwise, it is a CoordinateMap. Examples -------- >>> affine_domain = CoordinateSystem('ijk') >>> affine_range = CoordinateSystem('xyz') >>> affine_matrix = np.identity(4) >>> affine_mapping = AffineTransform(affine_domain, affine_range, affine_matrix) >>> new_affine_mapping = affine_mapping.renamed_domain({'i':'phase','k':'freq','j':'slice'}) >>> new_affine_mapping.function_domain CoordinateSystem(coord_names=('phase', 'slice', 'freq'), name='', coord_dtype=float64) >>> new_affine_mapping = affine_mapping.renamed_domain({'i':'phase','k':'freq','l':'slice'}) Traceback (most recent call last): ... 
ValueError: no domain coordinate named l """ for key in newnames.keys(): if type(key) == type(0): newnames[mapping.function_domain.coord_names[key]] = \ newnames[key] del(newnames[key]) for key in newnames.keys(): if key not in mapping.function_domain.coord_names: raise ValueError('no domain coordinate named %s' % str(key)) new_coord_names = [] for n in mapping.function_domain.coord_names: if n in newnames: new_coord_names.append(newnames[n]) else: new_coord_names.append(n) new_function_domain = CoordinateSystem(new_coord_names, mapping.function_domain.name, coord_dtype=mapping.function_domain.coord_dtype) ndim = mapping.ndims[0] ident_map = AffineTransform(new_function_domain, mapping.function_domain, np.identity(ndim+1)) if isinstance(mapping, AffineTransform): return _compose_affines(mapping, ident_map) else: return _compose_cmaps(mapping, _as_coordinate_map(ident_map)) def renamed_range(mapping, newnames): """ New coordmap with the coordinates of function_range renamed Parameters ---------- newnames : dict A dictionary whose keys are integers or in mapping.function_range.coord_names and whose values are the new names. Returns ------- newmapping : CoordinateMap or AffineTransform A new CoordinateMap with the coordinates of function_range renamed. If isinstance(mapping, AffineTransform), newmapping is also an AffineTransform. Otherwise, it is a CoordinateMap. Examples -------- >>> affine_domain = CoordinateSystem('ijk') >>> affine_range = CoordinateSystem('xyz') >>> affine_matrix = np.identity(4) >>> affine_mapping = AffineTransform(affine_domain, affine_range, affine_matrix) >>> new_affine_mapping = affine_mapping.renamed_range({'x':'u'}) >>> new_affine_mapping.function_range CoordinateSystem(coord_names=('u', 'y', 'z'), name='', coord_dtype=float64) >>> new_affine_mapping = affine_mapping.renamed_range({'w':'u'}) Traceback (most recent call last): ... ValueError: no range coordinate named w """ for key in newnames.keys(): if type(key) == type(0): newnames[mapping.function_range.coord_names[key]] = \ newnames[key] del(newnames[key]) for key in newnames.keys(): if key not in mapping.function_range.coord_names: raise ValueError('no range coordinate named %s' % str(key)) new_coord_names = [] for n in mapping.function_range.coord_names: if n in newnames: new_coord_names.append(newnames[n]) else: new_coord_names.append(n) new_function_range = CoordinateSystem(new_coord_names, mapping.function_range.name, coord_dtype=mapping.function_range.coord_dtype) ndim = mapping.ndims[1] ident_map = AffineTransform(mapping.function_range, new_function_range, np.identity(ndim+1)) if isinstance(mapping, AffineTransform): return _compose_affines(ident_map, mapping) else: return _compose_cmaps(_as_coordinate_map(ident_map), mapping) def reordered_range(mapping, order=None): """ New coordmap with the coordinates of function_range reordered Defaults to reversing the coordinates of function_range. Parameters ---------- order: sequence Order to use, defaults to reverse. The elements can be integers, strings or 2-tuples of strings. If they are strings, they should be in mapping.function_range.coord_names. Returns ------- newmapping : CoordinateMap or AffineTransform A new CoordinateMap with the coordinates of function_range reordered. If isinstance(mapping, AffineTransform), newmapping is also an AffineTransform. Otherwise, it is a CoordinateMap. 
Examples -------- >>> input_cs = CoordinateSystem('ijk') >>> output_cs = CoordinateSystem('xyz') >>> cm = AffineTransform(input_cs, output_cs, np.identity(4)) >>> cm.reordered_range('xzy').function_range CoordinateSystem(coord_names=('x', 'z', 'y'), name='', coord_dtype=float64) >>> cm.reordered_range([0,2,1]).function_range.coord_names ('x', 'z', 'y') >>> newcm = cm.reordered_range('yzx') >>> newcm.function_range.coord_names ('y', 'z', 'x') Notes ----- If no reordering is to be performed, it returns a copy of mapping. """ ndim = mapping.ndims[1] if order is None: order = range(ndim)[::-1] elif type(order[0]) == type(''): order = [mapping.function_range.index(s) for s in order] newaxes = [mapping.function_range.coord_names[i] for i in order] newoutcoords = CoordinateSystem(newaxes, mapping.function_range.name, mapping.function_range.coord_dtype) perm = np.zeros((ndim+1,)*2) perm[-1,-1] = 1. for i, j in enumerate(order): perm[j,i] = 1. if np.allclose(perm, np.identity(perm.shape[0])): import copy return copy.copy(mapping) perm = perm.astype(mapping.function_range.coord_dtype) A = AffineTransform(mapping.function_range, newoutcoords, perm.T) if isinstance(mapping, AffineTransform): return _compose_affines(A, mapping) else: return _compose_cmaps(_as_coordinate_map(A), mapping) def equivalent(mapping1, mapping2): """ A test to see if mapping1 is equal to mapping2 after possibly reordering the domain and range of mapping. Parameters ---------- mapping1 : CoordinateMap or AffineTransform mapping2 : CoordinateMap or AffineTransform Returns ------- are_they_equal : bool Examples -------- >>> ijk = CoordinateSystem('ijk') >>> xyz = CoordinateSystem('xyz') >>> T = np.random.standard_normal((4,4)) >>> T[-1] = [0,0,0,1] # otherwise AffineTransform raises ... # an exception because ... # it's supposed to represent an ... # affine transform in homogeneous ... # coordinates >>> A = AffineTransform(ijk, xyz, T) >>> B = A.reordered_domain('ikj').reordered_range('xzy') >>> C = B.renamed_domain({'i':'slice'}) >>> equivalent(A, B) True >>> equivalent(A, C) False >>> equivalent(B, C) False >>> >>> D = CoordinateMap(ijk, xyz, np.exp) >>> equivalent(D, D) True >>> E = D.reordered_domain('kij').reordered_range('xzy') >>> # no non-AffineTransform will ever be >>> # equivalent to a reordered version of itself, >>> # because their functions don't evaluate as equal >>> equivalent(D, E) False >>> equivalent(E, E) True >>> >>> # This has not changed the order >>> # of the axes, so the function is still the same >>> >>> F = D.reordered_range('xyz').reordered_domain('ijk') >>> equivalent(F, D) True >>> id(F) == id(D) False """ target_dnames = mapping2.function_domain.coord_names target_rnames = mapping2.function_range.coord_names try: mapping1 = mapping1.reordered_domain(target_dnames)\ .reordered_range(target_rnames) except ValueError: # impossible to rename the domain and ranges of mapping1 to match mapping2 return False return mapping1 == mapping2 ################################################################### # # Private functions # ################################################################### def _as_coordinate_map(cmap): """ Return CoordinateMap from AffineTransform Take a mapping AffineTransform and return a CoordinateMap with the appropriate functions. 
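    Parameters
    ----------
    cmap : CoordinateMap or AffineTransform
        Mapping to pass through or convert.

    Returns
    -------
    coordmap : CoordinateMap
        `cmap` unchanged if it was already a CoordinateMap, otherwise a
        CoordinateMap wrapping the affine. The wrapped mapping has an
        ``inverse_function`` when the affine has an inverse that preserves
        the coordinate dtype (``inverse(preserve_dtype=True)``).

    Raises
    ------
    ValueError
        If `cmap` is neither a CoordinateMap nor an AffineTransform.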
""" if isinstance(cmap, CoordinateMap): return cmap elif isinstance(cmap, AffineTransform): affine_transform = cmap A, b = to_matvec(affine_transform.affine) def _function(x): value = np.dot(x, A.T) value += b return value # Preserve dtype check because the CoordinateMap expects to generate the # expected dtype and checks this on object creation affine_transform_inv = affine_transform.inverse(preserve_dtype=True) if affine_transform_inv: Ainv, binv = to_matvec(affine_transform_inv.affine) def _inverse_function(x): value = np.dot(x, Ainv.T) value += binv return value else: _inverse_function = None return CoordinateMap(affine_transform.function_domain, affine_transform.function_range, _function, _inverse_function) else: raise ValueError('all mappings should be instances of ' 'either CoordinateMap or AffineTransform') def _compose_affines(*affines): """ Composition of sequence of affines Compose hecking the domains and ranges. """ cur = AffineTransform(affines[-1].function_domain, affines[-1].function_domain, np.identity(affines[-1].ndims[0]+1, dtype=affines[-1].affine.dtype)) for cmap in affines[::-1]: if cmap.function_domain == cur.function_range: cur = AffineTransform(cur.function_domain, cmap.function_range, np.dot(cmap.affine, cur.affine)) else: raise ValueError("domains and ranges don't match up correctly") return cur def _compose_cmaps(*cmaps): """ Compute the composition of a sequence of cmaps """ def _compose2(cmap1, cmap2): forward = lambda input: cmap1.function(cmap2.function(input)) cmap1i = cmap1.inverse() cmap2i = cmap2.inverse() if cmap1i is not None and cmap2i is not None: backward = lambda output: cmap2i.function(cmap1i.function(output)) else: backward = None return forward, backward # the identity coordmap cur = CoordinateMap(cmaps[-1].function_domain, cmaps[-1].function_domain, lambda x: x, lambda x: x) for cmap in cmaps[::-1]: if cmap.function_domain == cur.function_range: forward, backward = _compose2(cmap, cur) cur = CoordinateMap(cur.function_domain, cmap.function_range, forward, inverse_function=backward) else: raise ValueError( 'domain and range coordinates do not match: ' 'domain=%s, range=%s' % (cmap.function_domain.dtype, cur.function_range.dtype)) return cur def _product_cmaps(*cmaps, **kwargs): input_name = kwargs.pop('input_name', 'product') output_name = kwargs.pop('output_name', 'product') if kwargs: raise TypeError('Unexpected kwargs %s' % kwargs) ndimin = [cmap.ndims[0] for cmap in cmaps] ndimin.insert(0,0) ndimin = tuple(np.cumsum(ndimin)) def function(x): x = np.atleast_2d(x) y = [] for i in range(len(ndimin)-1): yy = cmaps[i](x[:,ndimin[i]:ndimin[i+1]]) y.append(yy) yy = np.hstack(y) return yy incoords = coordsys_product(*[cmap.function_domain for cmap in cmaps], **{'name': input_name}) outcoords = coordsys_product(*[cmap.function_range for cmap in cmaps], **{'name': output_name}) return CoordinateMap(incoords, outcoords, function) def _product_affines(*affine_mappings, **kwargs): """ Product of affine_mappings. """ input_name = kwargs.pop('input_name', 'product') output_name = kwargs.pop('output_name', 'product') if kwargs: raise TypeError('Unexpected kwargs %s' % kwargs) if input_name is None: input_name = 'product' if output_name is None: output_name = 'product' ndimin = [affine.ndims[0] for affine in affine_mappings] ndimout = [affine.ndims[1] for affine in affine_mappings] M = np.zeros((np.sum(ndimout)+1, np.sum(ndimin)+1), dtype=safe_dtype(*[affine.affine.dtype for affine in affine_mappings])) M[-1,-1] = 1. 
# Fill in the block matrix product_domain = [] product_range = [] i = 0 j = 0 for l, affine in enumerate(affine_mappings): A, b = to_matvec(affine.affine) M[i:(i+ndimout[l]),j:(j+ndimin[l])] = A M[i:(i+ndimout[l]),-1] = b product_domain.extend(affine.function_domain.coord_names) product_range.extend(affine.function_range.coord_names) i += ndimout[l] j += ndimin[l] return AffineTransform( CoordinateSystem(product_domain, name=input_name, coord_dtype=M.dtype), CoordinateSystem(product_range, name=output_name, coord_dtype=M.dtype), M) class AxisError(Exception): """ Error for incorrect axis selection """ def drop_io_dim(cm, axis_id, fix0=True): ''' Drop dimensions `axis_id` from coordinate map, if orthogonal to others If you specify an input dimension, drop that dimension and any corresponding output dimension, as long as all other outputs are orthogonal to dropped input. If you specify an output dimension, drop that dimension and any corresponding input dimension, as long as all other inputs are orthogonal to dropped output. Parameters ---------- cm : class:`AffineTransform` Affine coordinate map instance axis_id : int or str If int, gives index of *input* axis to drop. If str, gives name of input *or* output axis to drop. When specifying an input axis: if given input axis does not affect any output axes, just drop input axis. If input axis affects only one output axis, drop both input and corresponding output. Similarly when specifying an output axis. If `axis_id` is a str, it must be unambiguous - if the named axis exists in both input and output, and they do not correspond, raises a AxisError. See Raises section for checks fix0: bool, optional Whether to fix potential 0 TR in affine Returns ------- cm_redux : Affine Affine coordinate map with orthogonal input + output dimension dropped Raises ------ AxisError: if `axis_id` is a str and does not match any no input or output coordinate names. AxisError: if specified `axis_id` affects more than a single input / output axis. AxisError: if the named `axis_id` exists in both input and output, and they do not correspond. Examples -------- Typical use is in getting a 3D coordinate map from 4D >>> cm4d = AffineTransform.from_params('ijkl', 'xyzt', np.diag([1,2,3,4,1])) >>> cm3d = drop_io_dim(cm4d, 't') >>> cm3d.affine array([[ 1., 0., 0., 0.], [ 0., 2., 0., 0.], [ 0., 0., 3., 0.], [ 0., 0., 0., 1.]]) ''' # Implicit check for affine-type coordinate map aff = cm.affine.copy() # What dimensions did you ask for? in_dim, out_dim = io_axis_indices(cm, axis_id, fix0) if not None in (in_dim, out_dim): if not orth_axes(in_dim, out_dim, aff, allow_zero=fix0): raise AxisError('Input and output dimensions not orthogonal to ' 'rest of affine') M, N = aff.shape rows = range(M) cols = range(N) in_dims = list(cm.function_domain.coord_names) out_dims = list(cm.function_range.coord_names) if not in_dim is None: in_dims.pop(in_dim) cols.pop(in_dim) if not out_dim is None: out_dims.pop(out_dim) rows.pop(out_dim) aff = aff[rows] aff = aff[:,cols] return AffineTransform.from_params(in_dims, out_dims, aff) def _fix0(aff): """ Fix possible 0 time scaling from 0 TR Look in matrix part of affine (3, 3) in a (4, 4) affine). If there is exactly one row and exactly one column in this part of the affine that are all exactly zero, assume this is a 0 scaling from a 0 TR in the header, and fix corresponding row, column index to 1. 
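    Such all-zero rows and columns most often come from affines of NIfTI
    images saved with TR set to 0, which gives the time axis a zero scaling.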
Parameters ---------- aff : (M, N) array-like affine Returns ------- fixed_aff : (M, N) affine which will be `aff` if no fix, and a new affine if fixed, with a 1 instead of the zero in the offending row and column Examples -------- >>> _fix0(np.diag([1, 2, 3, 0])) array([[1, 0, 0, 0], [0, 2, 0, 0], [0, 0, 3, 0], [0, 0, 0, 0]]) >>> _fix0(np.diag([1, 0, 3, 0])) array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 3, 0], [0, 0, 0, 0]]) """ aff = np.asarray(aff) zeros = aff[:-1, :-1] == 0 zrs = np.where(np.all(zeros, axis=1))[0] zcs = np.where(np.all(zeros, axis=0))[0] if len(zrs) != 1 or len(zcs) != 1: return aff fixed_aff = aff.copy() fixed_aff[zrs[0], zcs[0]] = 1 return fixed_aff def append_io_dim(cm, in_name, out_name, start=0, step=1): ''' Append input and output dimension to coordmap Parameters ---------- cm : Affine Affine coordinate map instance to which to append dimension in_name : str Name for new input dimension out_name : str Name for new output dimension start : float, optional Offset for transformed values in new dimension step : float, optional Step, or scale factor for transformed values in new dimension Returns ------- cm_plus : Affine New coordinate map with appended dimension Examples -------- Typical use is creating a 4D coordinate map from a 3D >>> cm3d = AffineTransform.from_params('ijk', 'xyz', np.diag([1,2,3,1])) >>> cm4d = append_io_dim(cm3d, 'l', 't', 9, 5) >>> cm4d.affine array([[ 1., 0., 0., 0., 0.], [ 0., 2., 0., 0., 0.], [ 0., 0., 3., 0., 0.], [ 0., 0., 0., 5., 9.], [ 0., 0., 0., 0., 1.]]) ''' extra_aff = np.array([[step, start], [0, 1]]) extra_cmap = AffineTransform.from_params([in_name], [out_name], extra_aff) return product(cm, extra_cmap) def axmap(coordmap, direction='in2out', fix0=True): """ Return mapping between input and output axes Parameters ---------- coordmap : Affine Affine coordinate map instance for which to get axis mappings direction : {'in2out', 'out2in', 'both'} direction to find mapping. If 'in2out', returned mapping will have keys from the input axis (names and indices) and values of corresponding output axes. If 'out2in' the keys will be output axis names, indices and the values will be input axis indices. If both, return both mappings. fix0: bool, optional Whether to fix potential 0 TR in affine Returns ------- map : dict or tuple * if `direction` == 'in2out' - mapping with keys of input names and input indices, values of output indices. Mapping is to closest matching axis. None means there appears to be no matching axis * if `direction` == 'out2in' - mapping with keys of output names and input indices, values of input indices, as above. 
* if `direction` == 'both' - tuple of (input to output mapping, output to input mapping) """ in2out = direction in ('in2out', 'both') out2in = direction in ('out2in', 'both') if not True in (in2out, out2in): raise ValueError('Direction must be one of "in2out", "out2in", "both"') affine = coordmap.affine affine = _fix0(affine) if fix0 else affine ornts = io_orientation(affine) ornts = [None if np.isnan(R) else int(R) for R in ornts[:, 0]] if in2out: in2out_map = {} for i, name in enumerate(coordmap.function_domain.coord_names): in2out_map[i] = ornts[i] in2out_map[name] = ornts[i] if not out2in: return in2out_map if out2in: out2in_map = {} for i, name in enumerate(coordmap.function_range.coord_names): in_i = ornts.index(i) if i in ornts else None out2in_map[i] = in_i out2in_map[name] = in_i if not in2out: return out2in_map return in2out_map, out2in_map def input_axis_index(coordmap, axis_id, fix0=True): """ Return input axis index for `axis_id` `axis_id` can be integer, or a name of an input axis, or it can be the name of an output axis which maps to an input axis. Parameters ---------- coordmap : AffineTransform axis_id : int or str If int, then an index of an input axis. Can be negative, so that -2 refers to the second to last input axis. If a str can be the name of an input axis, or the name of an output axis that should have a corresponding input axis (see Raises section). fix0: bool, optional Whether to fix potential single 0 on diagonal of affine. This often happens when loading nifti images with TR set to 0. Returns ------- inax : int index of matching input axis. If `axis_id` is the name of an output axis, then `inax` will be the input axis that had a 'best' match with this output axis. The 'best' match algorithm ensures that there can only be one input axis paired with one output axis. Raises ------ AxisError: if no matching name found AxisError : if name exists in both input and output and they do not map to each other AxisError : if name present in output but no matching input """ # Lists for .index in python < 2.6 in_names = list(coordmap.function_domain.coord_names) out_names = list(coordmap.function_range.coord_names) if isinstance(axis_id, int): if axis_id < 0: axis_id = len(out_names) + axis_id return axis_id in_in = axis_id in in_names in_out = axis_id in out_names if not in_in and not in_out: raise AxisError('Name "%s" not in input or output names' % axis_id) if in_in: in_no = in_names.index(axis_id) if not in_out: return in_no out2in = axmap(coordmap, 'out2in', fix0=fix0) if not out2in[axis_id] == in_no: raise AxisError('Name "%s" present in input and output but ' 'they do not appear to match' % axis_id) return in_no in_no = axmap(coordmap, 'out2in', fix0=fix0)[axis_id] if in_no is None: raise AxisError('Name "%s" present in output but this output axis ' 'does not have the best match with any input axis' % axis_id) return in_no def io_axis_indices(coordmap, axis_id, fix0=True): """ Return input and output axis index for id `axis_id` in `coordmap` Parameters ---------- cm : class:`AffineTransform` Affine coordinate map instance axis_id : int or str If int, gives index of *input* axis. Can be negative, so that -2 refers to the second from last input axis. If str, gives name of input *or* output axis. If `axis_id` is a str, it must be unambiguous - if the named axis exists in both input and output, and they do not correspond, raises a AxisError. 
See Raises section for checks fix0: bool, optional Whether to fix potential 0 column / row in affine Returns ------- in_index : None or int index of input axis that corresponds to `axis_id` out_index : None or int index of output axis that corresponds to `axis_id` Raises ------ AxisError: if `axis_id` is a str and does not match any input or output coordinate names. AxisError: if the named `axis_id` exists in both input and output, and they do not correspond. Examples -------- >>> aff = [[0, 1, 0, 10], [1, 0, 0, 11], [0, 0, 1, 12], [0, 0, 0, 1]] >>> cmap = AffineTransform('ijk', 'xyz', aff) >>> io_axis_indices(cmap, 0) (0, 1) >>> io_axis_indices(cmap, 1) (1, 0) >>> io_axis_indices(cmap, -1) (2, 2) >>> io_axis_indices(cmap, 'j') (1, 0) >>> io_axis_indices(cmap, 'y') (0, 1) """ in_dims = list(coordmap.function_domain.coord_names) out_dims = list(coordmap.function_range.coord_names) in_dim, out_dim, is_str = None, None, False if isinstance(axis_id, int): # Integer axis, always input axis # Integers are always input indices in_dim = axis_id if axis_id >=0 else len(in_dims) + axis_id else: # Let's hope they are strings if axis_id in in_dims: in_dim = in_dims.index(axis_id) elif axis_id in out_dims: out_dim = out_dims.index(axis_id) else: raise AxisError('No input or output dimension with name (%s)' % axis_id) is_str = True if out_dim is None: out_dim = axmap(coordmap, 'in2out', fix0=fix0)[in_dim] if (is_str and axis_id in out_dims and out_dim != out_dims.index(axis_id)): raise AxisError('Input and output axes with the same name but ' 'the axes do not appear to correspond') elif in_dim is None: in_dim = axmap(coordmap, 'out2in', fix0=fix0)[out_dim] return in_dim, out_dim def orth_axes(in_ax, out_ax, affine, allow_zero=True, tol=TINY): """ True if `in_ax` related only to `out_ax` in `affine` and vice versa Parameters ---------- in_ax : int Input axis index out_ax : int Output axis index affine : array-like Affine transformation matrix allow_zero : bool, optional Whether to allow zero in ``affine[out_ax, in_ax]``. This means that the two axes are not related, but nor is this pair related to any other part of the affine. Returns ------- tf : bool True if in_ax, out_ax pair are orthogonal to the rest of `affine`, unless `allow_zero` is False, in which case require in addition that ``affine[out_ax, in_ax] != 0``. Examples -------- >>> aff = np.eye(4) >>> orth_axes(1, 1, aff) True >>> orth_axes(1, 2, aff) False """ rzs, trans = to_matvec(affine) nzs = np.abs(rzs) > tol if not allow_zero and not nzs[out_ax, in_ax]: return False nzs[out_ax, in_ax] = 0 return np.all(nzs[out_ax] == 0) and np.all(nzs[:, in_ax] == 0) class CoordMapMakerError(Exception): pass class CoordMapMaker(object): """ Class to create coordinate maps of different dimensions """ generic_maker = CoordinateMap affine_maker = AffineTransform def __init__(self, domain_maker, range_maker): """ Create coordinate map maker Parameters ---------- domain_maker : callable A coordinate system maker, returning a coordinate system with input argument only ``N``, an integer giving the length of the coordinate map. range_maker : callable A coordinate system maker, returning a coordinate system with input argument only ``N``, an integer giving the length of the coordinate map. 
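        Notes
        -----
        Any callable accepting a single integer argument will do; in
        practice `domain_maker` and `range_maker` are typically
        :class:`CoordSysMaker` instances, as in the example below.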
Examples -------- >>> from nipy.core.reference.coordinate_system import CoordSysMaker >>> dmaker = CoordSysMaker('ijkl', 'generic-array') >>> rmaker = CoordSysMaker('xyzt', 'generic-scanner') >>> cm_maker = CoordMapMaker(dmaker, rmaker) """ self.domain_maker = domain_maker self.range_maker = range_maker def make_affine(self, affine, append_zooms=(), append_offsets=()): """ Create affine coordinate map Parameters ---------- affine : (M, N) array-like Array expressing the affine tranformation append_zooms : scalar or sequence length E If scalar, converted to sequence length E==1. Append E entries to the diagonal of `affine` (see examples) append_offsets : scalar or sequence length F If scalar, converted to sequence length F==1. If F==0, and E!=0, use sequence of zeros length E. Append E entries to the translations (final column) of `affine` (see examples). Returns ------- affmap : ``AffineTransform`` coordinate map Examples -------- >>> from nipy.core.reference.coordinate_system import CoordSysMaker >>> dmaker = CoordSysMaker('ijkl', 'generic-array') >>> rmaker = CoordSysMaker('xyzt', 'generic-scanner') >>> cm_maker = CoordMapMaker(dmaker, rmaker) >>> cm_maker.make_affine(np.diag([2,3,4,1])) AffineTransform( function_domain=CoordinateSystem(coord_names=('i', 'j', 'k'), name='generic-array', coord_dtype=float64), function_range=CoordinateSystem(coord_names=('x', 'y', 'z'), name='generic-scanner', coord_dtype=float64), affine=array([[ 2., 0., 0., 0.], [ 0., 3., 0., 0.], [ 0., 0., 4., 0.], [ 0., 0., 0., 1.]]) ) We can add extra orthogonal dimensions, by specifying the diagonal elements: >>> cm_maker.make_affine(np.diag([2,3,4,1]), 6) AffineTransform( function_domain=CoordinateSystem(coord_names=('i', 'j', 'k', 'l'), name='generic-array', coord_dtype=float64), function_range=CoordinateSystem(coord_names=('x', 'y', 'z', 't'), name='generic-scanner', coord_dtype=float64), affine=array([[ 2., 0., 0., 0., 0.], [ 0., 3., 0., 0., 0.], [ 0., 0., 4., 0., 0.], [ 0., 0., 0., 6., 0.], [ 0., 0., 0., 0., 1.]]) ) Or the diagonal elements and the offset elements: >>> cm_maker.make_affine(np.diag([2,3,4,1]), [6], [9]) AffineTransform( function_domain=CoordinateSystem(coord_names=('i', 'j', 'k', 'l'), name='generic-array', coord_dtype=float64), function_range=CoordinateSystem(coord_names=('x', 'y', 'z', 't'), name='generic-scanner', coord_dtype=float64), affine=array([[ 2., 0., 0., 0., 0.], [ 0., 3., 0., 0., 0.], [ 0., 0., 4., 0., 0.], [ 0., 0., 0., 6., 9.], [ 0., 0., 0., 0., 1.]]) ) """ affine = np.asarray(affine) append_zooms = np.atleast_1d(append_zooms) append_offsets = np.atleast_1d(append_offsets) extra_N = len(append_zooms) if len(append_offsets) == 0: append_offsets = np.zeros(extra_N, dtype=append_zooms.dtype) elif len(append_offsets) != extra_N: raise CoordMapMakerError('Need same number of offsets as zooms') o_n_domain = affine.shape[1] - 1 o_n_range = affine.shape[0] - 1 domain = self.domain_maker(o_n_domain + extra_N) range = self.range_maker(o_n_range + extra_N) if extra_N == 0: return self.affine_maker(domain, range, affine) # Combine original and added affine using product cmap0 = self.affine_maker(CS(domain.coord_names[:o_n_domain]), CS(range.coord_names[:o_n_range]), affine) affine1 = from_matvec(np.diag(append_zooms), append_offsets) cmap1 = self.affine_maker(CS(domain.coord_names[o_n_domain:]), CS(range.coord_names[o_n_range:]), affine1) cmap = product(cmap0, cmap1) # Return with original coordinate system names return self.affine_maker(domain, range, cmap.affine) def make_cmap(self, 
domain_N, xform, inv_xform=None): """ Coordinate map with transform function `xform` Parameters ---------- domain_N : int Number of domain coordinates xform : callable Function that transforms points of dimension `domain_N` inv_xform : None or callable, optional Function, such that ``inv_xform(xform(pts))`` returns ``pts`` Returns ------- cmap : ``CoordinateMap`` Examples -------- >>> from nipy.core.reference.coordinate_system import CoordSysMaker >>> dmaker = CoordSysMaker('ijkl', 'generic-array') >>> rmaker = CoordSysMaker('xyzt', 'generic-scanner') >>> cm_maker = CoordMapMaker(dmaker, rmaker) >>> cm_maker.make_cmap(4, lambda x : x+1) #doctest: +ELLIPSIS CoordinateMap( function_domain=CoordinateSystem(coord_names=('i', 'j', 'k', 'l'), name='generic-array', coord_dtype=float64), function_range=CoordinateSystem(coord_names=('x', 'y', 'z', 't'), name='generic-scanner', coord_dtype=float64), function= at ...> ) """ domain_cs = self.domain_maker(domain_N) ex_pt = np.zeros((1, domain_N), dtype=domain_cs.coord_dtype) xformed_pt = xform(ex_pt) range_N = xformed_pt.shape[1] return self.generic_maker(domain_cs, self.range_maker(range_N), xform, inv_xform) def __call__(self, *args, **kwargs): """ Create affine or non-affine coordinate map Parameters ---------- \\*args : Arguments to ``make_affine`` or ``make_cmap`` methods. We check the first argument to see if it is a scalar or an affine, and pass the \\*args, \\*\\*kwargs to ``make_cmap`` or ``make_affine`` respectively \\*\\*kwargs: See above Returns ------- cmap : ``CoordinateMap`` or ``AffineTransform`` Affine if the first \\*arg was an affine array, otherwise a Coordinate Map. """ arg0 = np.asarray(args[0]) if arg0.shape == (): return self.make_cmap(*args, **kwargs) return self.make_affine(*args, **kwargs) nipy-0.3.0/nipy/core/reference/coordinate_system.py000066400000000000000000000403721210344137400224500ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ CoordinateSystems are used to represent the space in which the image resides. A CoordinateSystem contains named coordinates, one for each dimension and a coordinate dtype. The purpose of the CoordinateSystem is to specify the name and order of the coordinate axes for a particular space. This allows one to compare two CoordinateSystems to determine if they are equal. """ __docformat__ = 'restructuredtext' import numpy as np class CoordinateSystemError(Exception): pass class CoordinateSystem(object): """An ordered sequence of named coordinates of a specified dtype. A coordinate system is defined by the names of the coordinates, (attribute ``coord_names``) and the numpy dtype of each coordinate value (attribute ``coord_dtype``). The coordinate system can also have a name. >>> names = ['first', 'second', 'third'] >>> cs = CoordinateSystem(names, 'a coordinate system', np.float) >>> cs.coord_names ('first', 'second', 'third') >>> cs.name 'a coordinate system' >>> cs.coord_dtype dtype('float64') The coordinate system also has a ``dtype`` which is the composite numpy dtype, made from the (``names``, ``coord_dtype``). >>> dtype_template = [(name, np.float) for name in cs.coord_names] >>> dtype_should_be = np.dtype(dtype_template) >>> cs.dtype == dtype_should_be True Two CoordinateSystems are equal if they have the same dtype and the same names and the same name. 
>>> another_cs = CoordinateSystem(names, 'not irrelevant', np.float) >>> cs == another_cs False >>> cs.dtype == another_cs.dtype True >>> cs.name == another_cs.name False """ _doc = {} name = 'world-LPI' _doc['name'] = 'Name describing the CoordinateSystem' coord_names = ('x', 'y', 'z') _doc['coord_names'] = 'Tuple of names describing each coordinate.' coord_dtype = np.float64 _doc['coord_dtype'] = 'The builtin, scalar, dtype of each coordinate.' ndim = 3 _doc['ndim'] = 'The number of dimensions' dtype = np.dtype([('x', np.float), ('y', np.float), ('z', np.float)]) _doc['dtype'] = 'The composite dtype of the CoordinateSystem, ' + \ 'expresses the fact that there are three numbers, the' + \ 'first one corresponds to "x" and the second to "y".' def __init__(self, coord_names, name='', coord_dtype=np.float): """Create a coordinate system with a given name and coordinate names. The CoordinateSystem has two dtype attributes: #. self.coord_dtype is the dtype of the individual coordinate values #. self.dtype is the recarray dtype for the CoordinateSystem which combines the coord_names and the coord_dtype. This functions as the description of the CoordinateSystem. Parameters ---------- coord_names : iterable A sequence of coordinate names. name : string, optional The name of the coordinate system coord_dtype : np.dtype, optional The dtype of the coord_names. This should be a built-in numpy scalar dtype. (default is np.float). The value can by anything that can be passed to the np.dtype constructor. For example ``np.float``, ``np.dtype(np.float)`` or ``f8`` all result in the same ``coord_dtype``. Examples -------- >>> c = CoordinateSystem('ij', name='input') >>> print c CoordinateSystem(coord_names=('i', 'j'), name='input', coord_dtype=float64) >>> c.coord_dtype dtype('float64') """ # this allows coord_names to be an iterator and have a length coord_names = tuple(coord_names) # Make sure each coordinate is unique if len(set(coord_names)) != len(coord_names): raise ValueError('coord_names must have distinct names') # verify that the dtype is coord_dtype for sanity sctypes = (np.sctypes['int'] + np.sctypes['float'] + np.sctypes['complex'] + np.sctypes['uint'] + [np.object]) coord_dtype = np.dtype(coord_dtype) if coord_dtype not in sctypes: raise ValueError('Coordinate dtype should be one of %s' % sctypes) # Set all the attributes self.name = name self.coord_names = coord_names self.coord_dtype = coord_dtype self.ndim = len(coord_names) self.dtype = np.dtype([(name, self.coord_dtype) for name in self.coord_names]) # All attributes are read only def __setattr__(self, key, value): if key in self.__dict__: raise AttributeError('the value of %s has already been set and all attributes are read-only' % key) object.__setattr__(self, key, value) def index(self, coord_name): """Return the index of a given named coordinate. 
>>> c = CoordinateSystem('ij', name='input') >>> c.index('i') 0 >>> c.index('j') 1 """ return list(self.coord_names).index(coord_name) def __ne__(self, other): return not self.__eq__(other) def __eq__(self, other): """Equality is defined by self.dtype and self.name Parameters ---------- other : :class:`CoordinateSystem` The object to be compared with Returns ------- tf: bool """ return (self.dtype == other.dtype) and (self.name == other.name) def similar_to(self, other): """Similarity is defined by self.dtype, ignoring name Parameters ---------- other : :class:`CoordinateSystem` The object to be compared with Returns ------- tf: bool """ return (self.dtype == other.dtype) def __repr__(self): """Create a string representation of the coordinate system Returns ------- s : string """ return ("CoordinateSystem(coord_names=%s, name='%s', coord_dtype=%s)" % (self.coord_names, self.name, self.coord_dtype)) def _checked_values(self, arr): ''' Check ``arr`` for valid dtype and shape as coordinate values. Raise Errors for failed checks. The dtype of ``arr`` has to be castable (without loss of precision) to ``self.coord_dtype``. We use numpy ``can_cast`` for this check. The last (or only) axis of ``arr`` should be of length ``self.ndim``. Parameters ---------- arr : array-like array to check Returns ------- checked_arr : array Possibly reshaped array Examples -------- >>> cs = CoordinateSystem('ijk', coord_dtype=np.float32) >>> arr = np.array([1, 2, 3], dtype=np.int16) >>> cs._checked_values(arr) # 1D is OK with matching dimensions array([[1, 2, 3]], dtype=int16) >>> cs._checked_values(arr.reshape(1,3)) # as is 1 by N array([[1, 2, 3]], dtype=int16) This next is the wrong shape: >>> cs._checked_values(arr.reshape(3,1)) #doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... CoordinateSystemError: Array shape[-1] (1) must match CoordinateSystem ndim (3). CoordinateSystem(coord_names=('i', 'j', 'k'), name='', coord_dtype=float32) Wrong length: >>> cs._checked_values(arr[0:2]) #doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... CoordinateSystemError: Array shape[-1] (2) must match CoordinateSystem ndim (3). CoordinateSystem(coord_names=('i', 'j', 'k'), name='', coord_dtype=float32) The dtype has to be castable: >>> cs._checked_values(np.array([1, 2, 3], dtype=np.float64)) #doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... CoordinateSystemError: Cannot cast array dtype float64 to CoordinateSystem coord_dtype float32. CoordinateSystem(coord_names=('i', 'j', 'k'), name='', coord_dtype=float32) The input array is unchanged, even if a reshape has occurred. The returned array points to the same data. >>> checked = cs._checked_values(arr) >>> checked.shape == arr.shape False >>> checked is arr False >>> arr[0] 1 >>> checked[0,0] = 10 >>> arr[0] 10 For a 1D CoordinateSystem, passing a 1D vector length N could be a mistake (you were expecting an N-dimensional coordinate system), or it could be N points in 1D. Because it is ambiguous, this is an error. >>> cs = CoordinateSystem('x') >>> cs._checked_values(1) array([[1]]) >>> cs._checked_values([1, 2]) #doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... CoordinateSystemError: Array shape[-1] (2) must match CoordinateSystem ndim (1). 
CoordinateSystem(coord_names=('x',), name='', coord_dtype=float64) But of course 2D, N by 1 is OK >>> cs._checked_values(np.array([1,2,3]).reshape(3, 1)) array([[1], [2], [3]]) ''' arr = np.atleast_2d(arr) if arr.shape[-1] != self.ndim: raise CoordinateSystemError('Array shape[-1] (%s) must match ' 'CoordinateSystem ndim (%d).\n %s' % (arr.shape[-1], self.ndim, str(self))) if not np.can_cast(arr.dtype, self.coord_dtype): raise CoordinateSystemError('Cannot cast array dtype %s to ' 'CoordinateSystem coord_dtype %s.\n %s' % (arr.dtype, self.coord_dtype, str(self))) return arr.reshape((-1, self.ndim)) def is_coordsys(obj): """ Test if `obj` has the CoordinateSystem API Parameters ---------- obj : object Object to test Returns ------- tf : bool True if `obj` has the coordinate system API Examples -------- >>> is_coordsys(CoordinateSystem('xyz')) True >>> is_coordsys(CoordSysMaker('ikj')) False """ if not hasattr(obj, 'coord_names'): return False if not hasattr(obj, 'name'): return False if not hasattr(obj, 'coord_dtype'): return False # Distinguish from CoordSysMaker return not callable(obj) def safe_dtype(*dtypes): """Determine a dtype to safely cast all of the given dtypes to. Safe dtypes are valid numpy dtypes or python types which can be cast to numpy dtypes. See numpy.sctypes for a list of valid dtypes. Composite dtypes and string dtypes are not safe dtypes. Parameters ---------- dtypes : sequence of ``np.dtype`` Returns ------- dtype : np.dtype Examples -------- >>> c1 = CoordinateSystem('ij', 'input', coord_dtype=np.float32) >>> c2 = CoordinateSystem('kl', 'input', coord_dtype=np.complex) >>> safe_dtype(c1.coord_dtype, c2.coord_dtype) dtype('complex128') >>> # Strings are invalid dtypes >>> safe_dtype(type('foo')) Traceback (most recent call last): ... TypeError: dtype must be valid numpy dtype int, uint, float, complex or object >>> # Check for a valid dtype >>> myarr = np.zeros(2, np.float32) >>> myarr.dtype.isbuiltin 1 >>> # Composite dtypes are invalid >>> mydtype = np.dtype([('name', 'S32'), ('age', 'i4')]) >>> myarr = np.zeros(2, mydtype) >>> myarr.dtype.isbuiltin 0 >>> safe_dtype(mydtype) Traceback (most recent call last): ... TypeError: dtype must be valid numpy dtype int, uint, float, complex or object """ arrays = [np.zeros(2, dtype) for dtype in dtypes] kinds = [a.dtype.kind for a in arrays] if not set(kinds).issubset('iubfcO'): raise TypeError('dtype must be valid numpy dtype ' 'int, uint, float, complex or object') return np.array(arrays).dtype def product(*coord_systems, **kwargs): """Create the product of a sequence of CoordinateSystems. The coord_dtype of the result will be determined by ``safe_dtype``. Parameters ---------- \*coord_systems : sequence of :class:`CoordinateSystem` name : str Name of ouptut coordinate system Returns ------- product_coord_system : :class:`CoordinateSystem` Examples -------- >>> c1 = CoordinateSystem('ij', 'input', coord_dtype=np.float32) >>> c2 = CoordinateSystem('kl', 'input', coord_dtype=np.complex) >>> c3 = CoordinateSystem('ik', 'in3') >>> print product(c1, c2) CoordinateSystem(coord_names=('i', 'j', 'k', 'l'), name='product', coord_dtype=complex128) >>> print product(c1, c2, name='another name') CoordinateSystem(coord_names=('i', 'j', 'k', 'l'), name='another name', coord_dtype=complex128) >>> product(c2, c3) Traceback (most recent call last): ... 
ValueError: coord_names must have distinct names """ name = kwargs.pop('name', 'product') if kwargs: raise TypeError('Unexpected kwargs %s' % kwargs) coords = [] for c in coord_systems: coords += c.coord_names dtype = safe_dtype(*[c.coord_dtype for c in coord_systems]) return CoordinateSystem(coords, name, coord_dtype=dtype) class CoordSysMakerError(Exception): pass class CoordSysMaker(object): """ Class to create similar coordinate maps of different dimensions """ coord_sys_klass = CoordinateSystem def __init__(self, coord_names, name='', coord_dtype=np.float): """Create a coordsys maker with given axis `coord_names` Parameters ---------- coord_names : iterable A sequence of coordinate names. name : string, optional The name of the coordinate system coord_dtype : np.dtype, optional The dtype of the coord_names. This should be a built-in numpy scalar dtype. (default is np.float). The value can by anything that can be passed to the np.dtype constructor. For example ``np.float``, ``np.dtype(np.float)`` or ``f8`` all result in the same ``coord_dtype``. Examples -------- >>> cmkr = CoordSysMaker('ijk', 'a name') >>> print cmkr(2) CoordinateSystem(coord_names=('i', 'j'), name='a name', coord_dtype=float64) >>> print cmkr(3) CoordinateSystem(coord_names=('i', 'j', 'k'), name='a name', coord_dtype=float64) """ self.coord_names = tuple(coord_names) self.name = name self.coord_dtype = coord_dtype def __call__(self, N, name=None, coord_dtype=None): """ Create coordinate system of length `N` Parameters ---------- N : int length of coordinate map name : None or str, optional Name of coordinate map. Default is ``self.name`` coord_dtype : None or dtype ``coord_dtype`` of returned coordinate system. Default is ``self.coord_dtype`` Returns ------- csys : coordinate system """ if name is None: name = self.name if coord_dtype is None: coord_dtype = self.coord_dtype if N > len(self.coord_names): raise CoordSysMakerError('Not enough axis names (have %d, ' 'you asked for %d)' % (len(self.coord_names), N)) return self.coord_sys_klass(self.coord_names[:N], name, coord_dtype) def is_coordsys_maker(obj): """ Test if `obj` has the CoordSysMaker API Parameters ---------- obj : object Object to test Returns ------- tf : bool True if `obj` has the coordinate system API Examples -------- >>> is_coordsys_maker(CoordSysMaker('ikj')) True >>> is_coordsys_maker(CoordinateSystem('xyz')) False """ if not hasattr(obj, 'coord_names'): return False if not hasattr(obj, 'name'): return False if not hasattr(obj, 'coord_dtype'): return False # Distinguish from CoordinateSystem return callable(obj) nipy-0.3.0/nipy/core/reference/setup.py000066400000000000000000000007171210344137400200540ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('reference', parent_package, top_path) config.add_subpackage('tests') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) nipy-0.3.0/nipy/core/reference/slices.py000066400000000000000000000175231210344137400202010ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ A set of methods to get coordinate maps which represent slices in space. 
""" import numpy as np from nibabel.affines import from_matvec from .coordinate_system import CoordinateSystem from .coordinate_map import AffineTransform from .array_coords import ArrayCoordMap from .spaces import get_world_cs def xslice(x, y_spec, z_spec, world): """ Return an LPS slice through a 3d box with x fixed. Parameters ---------- x : float The value at which x is fixed. y_spec : sequence A sequence with 2 values of form ((float, float), int). The (float, float) components are the min and max y values; the int is the number of points. z_spec : sequence As for `y_spec` but for z world : str or CoordinateSystem CoordSysMaker or XYZSpace World 3D space to which resulting coordmap refers Returns ------- affine_transform : AffineTransform An affine transform that describes an plane in LPS coordinates with x fixed. Examples -------- >>> y_spec = ([-114,114], 115) # voxels of size 2 in y, starting at -114, ending at 114 >>> z_spec = ([-70,100], 86) # voxels of size 2 in z, starting at -70, ending at 100 >>> x30 = xslice(30, y_spec, z_spec, 'scanner') >>> x30([0,0]) array([ 30., -114., -70.]) >>> x30([114,85]) array([ 30., 114., 100.]) >>> x30 AffineTransform( function_domain=CoordinateSystem(coord_names=('i_y', 'i_z'), name='slice', coord_dtype=float64), function_range=CoordinateSystem(coord_names=('scanner-x=L->R', 'scanner-y=P->A', 'scanner-z=I->S'), name='scanner', coord_dtype=float64), affine=array([[ 0., 0., 30.], [ 2., 0., -114.], [ 0., 2., -70.], [ 0., 0., 1.]]) ) >>> bounding_box(x30, (y_spec[1], z_spec[1])) ((30.0, 30.0), (-114.0, 114.0), (-70.0, 100.0)) """ affine_range = get_world_cs(world) (ymin, ymax), yno = y_spec y_tick = (ymax-ymin) / (yno - 1.0) (zmin, zmax), zno = z_spec z_tick = (zmax-zmin) / (zno - 1.0) origin = [x, ymin, zmin] colvectors = np.asarray([[0, 0], [y_tick, 0], [0, z_tick]]) T = from_matvec(colvectors, origin) affine_domain = CoordinateSystem(['i_y', 'i_z'], 'slice') return AffineTransform(affine_domain, affine_range, T) def yslice(y, x_spec, z_spec, world): """ Return a slice through a 3d box with y fixed. Parameters ---------- y : float The value at which y is fixed. x_spec : sequence A sequence with 2 values of form ((float, float), int). The (float, float) components are the min and max x values; the int is the number of points. z_spec : sequence As for `x_spec` but for z world : str or CoordinateSystem CoordSysMaker or XYZSpace World 3D space to which resulting coordmap refers Returns ------- affine_transform : AffineTransform An affine transform that describes an plane in LPS coordinates with y fixed. 
Examples -------- >>> x_spec = ([-92,92], 93) # voxels of size 2 in x, starting at -92, ending at 92 >>> z_spec = ([-70,100], 86) # voxels of size 2 in z, starting at -70, ending at 100 >>> y70 = yslice(70, x_spec, z_spec, 'mni') >>> y70 AffineTransform( function_domain=CoordinateSystem(coord_names=('i_x', 'i_z'), name='slice', coord_dtype=float64), function_range=CoordinateSystem(coord_names=('mni-x=L->R', 'mni-y=P->A', 'mni-z=I->S'), name='mni', coord_dtype=float64), affine=array([[ 2., 0., -92.], [ 0., 0., 70.], [ 0., 2., -70.], [ 0., 0., 1.]]) ) >>> y70([0,0]) array([-92., 70., -70.]) >>> y70([92,85]) array([ 92., 70., 100.]) >>> bounding_box(y70, (x_spec[1], z_spec[1])) ((-92.0, 92.0), (70.0, 70.0), (-70.0, 100.0)) """ affine_range = get_world_cs(world) (xmin, xmax), xno = x_spec x_tick = (xmax-xmin) / (xno - 1.0) (zmin, zmax), zno = z_spec z_tick = (zmax-zmin) / (zno - 1.0) origin = [xmin, y, zmin] colvectors = np.asarray([[x_tick, 0], [0, 0], [0, z_tick]]) T = from_matvec(colvectors, origin) affine_domain = CoordinateSystem(['i_x', 'i_z'], 'slice') return AffineTransform(affine_domain, affine_range, T) def zslice(z, x_spec, y_spec, world): """ Return a slice through a 3d box with z fixed. Parameters ---------- z : float The value at which z is fixed. x_spec : sequence A sequence with 2 values of form ((float, float), int). The (float, float) components are the min and max x values; the int is the number of points. y_spec : sequence As for `x_spec` but for y world : str or CoordinateSystem CoordSysMaker or XYZSpace World 3D space to which resulting coordmap refers Returns ------- affine_transform : AffineTransform An affine transform that describes a plane in LPS coordinates with z fixed. Examples -------- >>> x_spec = ([-92,92], 93) # voxels of size 2 in x, starting at -92, ending at 92 >>> y_spec = ([-114,114], 115) # voxels of size 2 in y, starting at -114, ending at 114 >>> z40 = zslice(40, x_spec, y_spec, 'unknown') >>> z40 AffineTransform( function_domain=CoordinateSystem(coord_names=('i_x', 'i_y'), name='slice', coord_dtype=float64), function_range=CoordinateSystem(coord_names=('unknown-x=L->R', 'unknown-y=P->A', 'unknown-z=I->S'), name='unknown', coord_dtype=float64), affine=array([[ 2., 0., -92.], [ 0., 2., -114.], [ 0., 0., 40.], [ 0., 0., 1.]]) ) >>> z40([0,0]) array([ -92., -114., 40.]) >>> z40([92,114]) array([ 92., 114., 40.]) >>> bounding_box(z40, (x_spec[1], y_spec[1])) ((-92.0, 92.0), (-114.0, 114.0), (40.0, 40.0)) """ affine_range = get_world_cs(world) (xmin, xmax), xno = x_spec x_tick = (xmax-xmin) / (xno - 1.0) (ymin, ymax), yno = y_spec y_tick = (ymax-ymin) / (yno - 1.0) origin = [xmin, ymin, z] colvectors = np.asarray([[x_tick, 0], [0, y_tick], [0, 0]]) T = from_matvec(colvectors, origin) affine_domain = CoordinateSystem(['i_x', 'i_y'], 'slice') return AffineTransform(affine_domain, affine_range, T) def bounding_box(coordmap, shape): """ Determine a valid bounding box from a CoordinateMap and a shape. Parameters ---------- coordmap : CoordinateMap or AffineTransform Containing mapping between voxel coordinates implied by `shape` and physical coordinates. shape : sequence of int shape implying array Returns ------- limits : (N,) tuple of (2,) tuples of float minimum and maximum coordinate values in output space (range) of `coordmap`. N is given by coordmap.ndim[1]. Examples -------- Make a 3D voxel to mni coordmap >>> from nipy.core.api import vox2mni >>> affine = np.array([[1, 0, 0, 2], ... [0, 3, 0, 4], ... [0, 0, 5, 6], ... 
[0, 0, 0, 1]], dtype=np.float64) >>> A = vox2mni(affine) >>> bounding_box(A, (30,40,20)) ((2.0, 31.0), (4.0, 121.0), (6.0, 101.0)) """ e = ArrayCoordMap.from_shape(coordmap, shape) return tuple([(r.min(), r.max()) for r in e.transposed_values]) nipy-0.3.0/nipy/core/reference/spaces.py000066400000000000000000000401641210344137400201720ustar00rootroot00000000000000""" Useful neuroimaging coordinate map makers and utilities """ import numpy as np from nibabel.affines import from_matvec from ...fixes.nibabel import io_orientation from .coordinate_system import CoordSysMaker, is_coordsys, is_coordsys_maker from .coordinate_map import CoordMapMaker class XYZSpace(object): """ Class contains logic for spaces with XYZ coordinate systems >>> sp = XYZSpace('hijo') >>> print sp hijo: [('x', 'hijo-x=L->R'), ('y', 'hijo-y=P->A'), ('z', 'hijo-z=I->S')] >>> csm = sp.to_coordsys_maker() >>> cs = csm(3) >>> cs CoordinateSystem(coord_names=('hijo-x=L->R', 'hijo-y=P->A', 'hijo-z=I->S'), name='hijo', coord_dtype=float64) >>> cs in sp True """ x_suffix = 'x=L->R' y_suffix = 'y=P->A' z_suffix = 'z=I->S' def __init__(self, name): self.name = name @property def x(self): """ x-space coordinate name """ return "%s-%s" % (self.name, self.x_suffix) @property def y(self): """ y-space coordinate name """ return "%s-%s" % (self.name, self.y_suffix) @property def z(self): """ z-space coordinate name """ return "%s-%s" % (self.name, self.z_suffix) def __repr__(self): return "%s('%s')" % (self.__class__.__name__, self.name) def __str__(self): return "%s: %s" % (self.name, sorted(self.as_map().items())) def __eq__(self, other): """ Equality defined as having the same xyz names """ try: otuple = other.as_tuple() except AttributeError: return False return self.as_tuple() == otuple def __ne__(self, other): return not self == other def as_tuple(self): """ Return xyz names as tuple >>> sp = XYZSpace('hijo') >>> sp.as_tuple() ('hijo-x=L->R', 'hijo-y=P->A', 'hijo-z=I->S') """ return self.x, self.y, self.z def as_map(self): """ Return xyz names as dictionary >>> sp = XYZSpace('hijo') >>> sorted(sp.as_map().items()) [('x', 'hijo-x=L->R'), ('y', 'hijo-y=P->A'), ('z', 'hijo-z=I->S')] """ return dict(zip('xyz', self.as_tuple())) def register_to(self, mapping): """ Update `mapping` with key=self.x, value='x' etc pairs The mapping will then have keys that are names we (``self``) identify as being x, or y, or z, values are 'x' or 'y' or 'z'. Note that this is the opposite way round for keys, values, compared to the ``as_map`` method. 
Parameters ---------- mapping : mapping such as a dict Returns ------- None Examples -------- >>> sp = XYZSpace('hijo') >>> mapping = {} >>> sp.register_to(mapping) >>> sorted(mapping.items()) [('hijo-x=L->R', 'x'), ('hijo-y=P->A', 'y'), ('hijo-z=I->S', 'z')] """ mapping.update(dict(zip(self.as_tuple(), 'xyz'))) def to_coordsys_maker(self, extras=()): """ Make a coordinate system maker for this space Parameters ---------- extra : sequence names for any further axes after x, y, z Returns ------- csm : CoordinateSystemMaker Examples -------- >>> sp = XYZSpace('hijo') >>> csm = sp.to_coordsys_maker() >>> csm(3) CoordinateSystem(coord_names=('hijo-x=L->R', 'hijo-y=P->A', 'hijo-z=I->S'), name='hijo', coord_dtype=float64) """ return CoordSysMaker(self.as_tuple() + tuple(extras), name=self.name) def __contains__(self, obj): """ True if `obj` can be thought of as being 'in' this space `obj` is an object that is in some kind of space - it can be a coordinate system, a coordinate map, or an object with a ``coordmap`` attribute. We test the output coordinate system of `obj` against our own space definition. A coordinate system is in our space if it has all the axes of our space. Parameters ---------- obj : object Usually a coordinate system, a coordinate map, or an Image (with a ``coordmap`` attribute) Returns ------- tf : bool True if `obj` is 'in' this space Examples -------- >>> from nipy.core.api import Image, AffineTransform, CoordinateSystem >>> sp = XYZSpace('hijo') >>> names = sp.as_tuple() >>> cs = CoordinateSystem(names) >>> cs in sp True >>> cs = CoordinateSystem(names + ('another_name',)) >>> cs in sp True >>> cmap = AffineTransform('ijk', names, np.eye(4)) >>> cmap in sp True >>> img = Image(np.zeros((3,4,5)), cmap) >>> img in sp True """ try: obj = obj.coordmap except AttributeError: pass try: obj = obj.function_range except AttributeError: pass my_names = self.as_tuple() return set(my_names).issubset(obj.coord_names) # Generic coordinate map maker for voxels (function_domain). Unlike nifti # loading, by default the 4th axis is not time (because we don't know what it # is). voxel_csm = CoordSysMaker('ijklmnop', 'voxels') # Module level mapping from key=name to values in 'x' or 'y' or 'z' known_names = {} known_spaces = [] # Standard spaces defined for _name in ('unknown', 'scanner', 'aligned', 'mni', 'talairach'): _space = XYZSpace(_name) known_spaces.append(_space) _space.register_to(known_names) _csm = _space.to_coordsys_maker('tuvw') _cmm = CoordMapMaker(voxel_csm, _csm) # Put these into the module namespace exec('%s_space = _space' % _name) exec('%s_csm = _csm' % _name) exec('vox2%s = _cmm' % _name) def known_space(obj, spaces=None): """ If `obj` is in a known space, return the space, otherwise return None Parameters ---------- obj : object Object that can be tested against an XYZSpace with ``obj in sp`` spaces : None or sequence, optional spaces to test against. If None, use the module level ``known_spaces`` list to test against. Returns ------- sp : None or XYZSpace If `obj` is not in any of the `known_spaces`, return None. Otherwise return the first matching space in `known_spaces` Examples -------- >>> from nipy.core.api import CoordinateSystem >>> sp0 = XYZSpace('hijo') >>> sp1 = XYZSpace('hija') Make a matching coordinate system >>> cs = sp0.to_coordsys_maker()(3) Test whether this coordinate system is in either of ``(sp0, sp1)`` >>> known_space(cs, (sp0, sp1)) XYZSpace('hijo') So, yes, it's in ``sp0``. How about another generic CoordinateSystem? 
>>> known_space(CoordinateSystem('xyz'), (sp0, sp1)) is None True So, no, that is not in either of ``(sp0, sp1)`` """ if spaces is None: # use module level global spaces = known_spaces for sp in spaces: if obj in sp: return sp return None def get_world_cs(world_id, ndim=3, extras='tuvw', spaces=None): """ Get world coordinate system from `world_id` Parameters ---------- world_id : str, XYZSPace, CoordSysMaker or CoordinateSystem Object defining a world output system. If str, then should be a name of an XYZSpace in the list `spaces`. ndim : int, optional Number of dimensions in this world. Default is 3 extras : sequence, optional Coordinate (axis) names for axes > 3 that are not named by `world_id` spaces : None or sequence, optional List of known (named) spaces to compare a str `world_id` to. If None, use the module level ``known_spaces`` Returns ------- world_cs : CoordinateSystem A world coordinate system Examples -------- >>> get_world_cs('mni') CoordinateSystem(coord_names=('mni-x=L->R', 'mni-y=P->A', 'mni-z=I->S'), name='mni', coord_dtype=float64) >>> get_world_cs(mni_space, 4) CoordinateSystem(coord_names=('mni-x=L->R', 'mni-y=P->A', 'mni-z=I->S', 't'), name='mni', coord_dtype=float64) >>> from nipy.core.api import CoordinateSystem >>> get_world_cs(CoordinateSystem('xyz')) CoordinateSystem(coord_names=('x', 'y', 'z'), name='', coord_dtype=float64) """ if is_coordsys(world_id): if world_id.ndim != ndim: raise SpaceError("Need %d-dimensional CoordinateSystem" % ndim) return world_id if spaces is None: spaces = known_spaces if isinstance(world_id, basestring): space_names = [s.name for s in spaces] if world_id not in space_names: raise SpaceError('Unkown space "%s"; known spaces are %s' % (world_id, ', '.join(space_names))) world_id = spaces[space_names.index(world_id)] if is_xyz_space(world_id): world_id = world_id.to_coordsys_maker(extras) if is_coordsys_maker(world_id): return world_id(ndim) raise ValueError('Expecting CoordinateSystem, CoordSysMaker, ' 'XYZSpace, or str, got %s' % world_id) class SpaceError(Exception): pass class SpaceTypeError(SpaceError): pass class AxesError(SpaceError): pass class AffineError(SpaceError): pass def xyz_affine(coordmap, name2xyz=None): """ Return (4, 4) affine mapping voxel coordinates to XYZ from `coordmap` If no (4, 4) affine "makes sense"(TM) for this `coordmap` then raise errors listed below. A (4, 4) affine makes sense if the first three output axes are recognizably X, Y, and Z in that order AND they there are corresponding input dimensions, AND the corresponding input dimensions are the first three input dimension (in any order). Thus the input axes have to be 3D. Parameters ---------- coordmap : ``CoordinateMap`` instance name2xyz : None or mapping, optional Object such that ``name2xyz[ax_name]`` returns 'x', or 'y' or 'z' or raises a KeyError for a str ``ax_name``. None means use module default. Returns ------- xyz_aff : (4,4) array voxel to X, Y, Z affine mapping Raises ------ SpaceTypeError : if this is not an affine coordinate map AxesError : if not all of x, y, z recognized in `coordmap` output, or they are in the wrong order, or the x, y, z axes do not correspond to the first three input axes. AffineError : if axes dropped from the affine contribute to x, y, z coordinates. Notes ----- We could also try and "make sense" (TM) of a coordmap that had X, Y and Z outputs, but not in that order, nor all in the first three axes. In that case we could just permute the affine to get the output order we need. 
But, that could become confusing if the returned affine has different output coordinates than the passed `coordmap`. And it's more complicated. So, let's not do that for now. Examples -------- >>> cmap = vox2mni(np.diag([2,3,4,5,1])) >>> cmap AffineTransform( function_domain=CoordinateSystem(coord_names=('i', 'j', 'k', 'l'), name='voxels', coord_dtype=float64), function_range=CoordinateSystem(coord_names=('mni-x=L->R', 'mni-y=P->A', 'mni-z=I->S', 't'), name='mni', coord_dtype=float64), affine=array([[ 2., 0., 0., 0., 0.], [ 0., 3., 0., 0., 0.], [ 0., 0., 4., 0., 0.], [ 0., 0., 0., 5., 0.], [ 0., 0., 0., 0., 1.]]) ) >>> xyz_affine(cmap) array([[ 2., 0., 0., 0.], [ 0., 3., 0., 0.], [ 0., 0., 4., 0.], [ 0., 0., 0., 1.]]) """ if name2xyz is None: name2xyz = known_names try: affine = coordmap.affine except AttributeError: raise SpaceTypeError('Need affine coordinate map') order = xyz_order(coordmap.function_range, name2xyz) if order[:3] != [0, 1, 2]: raise AxesError('First 3 output axes must be X, Y, Z') # Check equivalent input axes ornt = io_orientation(affine) if set(ornt[:3, 0]) != set((0, 1, 2)): raise AxesError('First 3 input axes must correspond to X, Y, Z') # Check that dropped dimensions don't provide xyz coordinate info extra_cols = affine[:3,3:-1] if not np.allclose(extra_cols, 0): raise AffineError('Dropped dimensions not orthogonal to xyz') return from_matvec(affine[:3,:3], affine[:3,-1]) def xyz_order(coordsys, name2xyz=None): """ Vector of orders for sorting coordsys axes in xyz first order Parameters ---------- coordsys : ``CoordinateSystem`` instance name2xyz : None or mapping, optional Object such that ``name2xyz[ax_name]`` returns 'x', or 'y' or 'z' or raises a KeyError for a str ``ax_name``. None means use module default. Returns ------- xyz_order : list Ordering of axes to get xyz first ordering. See the examples. Raises ------ AxesError : if there are not all of x, y and z axes Examples -------- >>> from nipy.core.api import CoordinateSystem >>> xyzt_cs = mni_csm(4) # coordsys with t (time) last >>> xyzt_cs CoordinateSystem(coord_names=('mni-x=L->R', 'mni-y=P->A', 'mni-z=I->S', 't'), name='mni', coord_dtype=float64) >>> xyz_order(xyzt_cs) [0, 1, 2, 3] >>> tzyx_cs = CoordinateSystem(xyzt_cs.coord_names[::-1], 'reversed') >>> tzyx_cs CoordinateSystem(coord_names=('t', 'mni-z=I->S', 'mni-y=P->A', 'mni-x=L->R'), name='reversed', coord_dtype=float64) >>> xyz_order(tzyx_cs) [3, 2, 1, 0] """ if name2xyz is None: name2xyz = known_names names = coordsys.coord_names N = len(names) axvals = np.zeros(N, dtype=int) for i, name in enumerate(names): try: xyz_char = name2xyz[name] except KeyError: axvals[i] = N+i else: axvals[i] = 'xyz'.index(xyz_char) if not set(axvals).issuperset(range(3)): raise AxesError("Not all of x, y, z recognized in coordinate map") return list(np.argsort(axvals)) def is_xyz_space(obj): """ True if `obj` appears to be an XYZ space definition """ return (hasattr(obj, 'x') and hasattr(obj, 'y') and hasattr(obj, 'z') and hasattr(obj, 'to_coordsys_maker')) def is_xyz_affable(coordmap, name2xyz=None): """ Return True if the coordap has an xyz affine Parameters ---------- coordmap : ``CoordinateMap`` instance Coordinate map to test name2xyz : None or mapping, optional Object such that ``name2xyz[ax_name]`` returns 'x', or 'y' or 'z' or raises a KeyError for a str ``ax_name``. None means use module default. 
Returns ------- tf : bool True if `coordmap` has an xyz affine, False otherwise Examples -------- >>> cmap = vox2mni(np.diag([2,3,4,5,1])) >>> cmap AffineTransform( function_domain=CoordinateSystem(coord_names=('i', 'j', 'k', 'l'), name='voxels', coord_dtype=float64), function_range=CoordinateSystem(coord_names=('mni-x=L->R', 'mni-y=P->A', 'mni-z=I->S', 't'), name='mni', coord_dtype=float64), affine=array([[ 2., 0., 0., 0., 0.], [ 0., 3., 0., 0., 0.], [ 0., 0., 4., 0., 0.], [ 0., 0., 0., 5., 0.], [ 0., 0., 0., 0., 1.]]) ) >>> is_xyz_affable(cmap) True >>> time0_cmap = cmap.reordered_domain([3,0,1,2]) >>> time0_cmap AffineTransform( function_domain=CoordinateSystem(coord_names=('l', 'i', 'j', 'k'), name='voxels', coord_dtype=float64), function_range=CoordinateSystem(coord_names=('mni-x=L->R', 'mni-y=P->A', 'mni-z=I->S', 't'), name='mni', coord_dtype=float64), affine=array([[ 0., 2., 0., 0., 0.], [ 0., 0., 3., 0., 0.], [ 0., 0., 0., 4., 0.], [ 5., 0., 0., 0., 0.], [ 0., 0., 0., 0., 1.]]) ) >>> is_xyz_affable(time0_cmap) False """ try: xyz_affine(coordmap, name2xyz) except SpaceError: return False return True nipy-0.3.0/nipy/core/reference/tests/000077500000000000000000000000001210344137400174775ustar00rootroot00000000000000nipy-0.3.0/nipy/core/reference/tests/__init__.py000066400000000000000000000000001210344137400215760ustar00rootroot00000000000000nipy-0.3.0/nipy/core/reference/tests/matrix_groups.py000066400000000000000000000202521210344137400227550ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This module is essentially a test of the AffineTransform object to see if it can succinctly describe an object like a matrix group. """ import numpy as np from nipy.core.api import CoordinateSystem, AffineTransform from nipy.core.reference.coordinate_map import compose, product as cmap_product ################################################################################### class Linear(AffineTransform): """ Subclass of AffineTransform that is Linear as opposed to AffineTransform, i.e. the translation is 0. It is instantiated with an matrix of shape (ndim,ndim) instead of (ndim+1,ndim+1) """ def _getmatrix(self): return self.affine[:-1,:-1] matrix = property(_getmatrix) def __init__(self, function_domain, function_range, matrix): ndim = matrix.shape[0] T = np.identity(ndim+1, dtype=matrix.dtype) T[:-1,:-1] = matrix AffineTransform.__init__(self, function_domain, function_range, T) ################################################################################### class MatrixGroup(Linear): """ A matrix group of linear (not affine) transformations with matrices having a specific dtype. 
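    For example, using the concrete ``SO`` subclass defined later in this
    module (an illustrative sketch only, assuming the usual
    ``AffineTransform`` behaviour; comparisons use ``np.allclose`` rather
    than exact reprs):

    >>> rot = SO(np.eye(3), 'xyz')
    >>> np.allclose(rot.matrix, np.eye(3))
    True
    >>> np.allclose(rot.inverse().matrix, np.eye(3))
    True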
""" dtype = np.float def __init__(self, matrix, coords, dtype=None): dtype = dtype or self.dtype if not isinstance(coords, CoordinateSystem): coords = CoordinateSystem(coords, 'space', coord_dtype=dtype) else: coords = CoordinateSystem(coords.coord_names, 'space', dtype) Linear.__init__(self, coords, coords, matrix.astype(dtype)) if not self.validate(): raise ValueError('this matrix is not an element of %s' % `self.__class__`) if not self.coords.coord_dtype == self.dtype: raise ValueError('the input coordinates builtin ' 'dtype should agree with self.dtype') def validate(self, M=None): """ Abstract method: Ensure that a given matrix is a valid member of the group """ raise NotImplementedError def _getcoords(self): return self.function_domain coords = property(_getcoords) def inverse(self): inv_matrix = np.linalg.inv(self.affine[:-1,:-1]) return self.__class__(inv_matrix, self.coords) # inverse = property(_getinverse) ################################################################################### class GLC(MatrixGroup): dtype = np.complex def validate(self, M=None): """ Check that the matrix is invertible. """ if M is None: M = self.matrix return not np.allclose(np.linalg.det(M), 0) ################################################################################### class GLR(GLC): """ The general linear group """ dtype = np.float ################################################################################### class SLR(GLR): """ Special linear group """ def validate(self, M=None): if M is None: M = self.matrix return np.allclose(np.linalg.det(M), 1) ################################################################################### class O(GLR): """ The orthogonal group """ dtype = np.float def validate(self, M=None): """ Check that the matrix is (almost) orthogonal. """ if M is None: M = self.matrix return np.allclose(np.identity(self.ndims[0], dtype=self.dtype), np.dot(M.T, M)) ################################################################################### class SO(O,SLR): """ The special orthogonal group """ dtype = np.float def validate(self, M=None): """ Check that the matrix is (almost) orthogonal. """ if M is None: M = self.matrix return O.validate(self) and np.allclose(np.linalg.det(M), 1) ################################################################################### class GLZ(GLR): """ Matrices with integer entries and determinant \pm 1 """ dtype = np.int def __init__(self, matrix, coords): """ Safely round coordmap.mapping.matrix, creating a new coordmap first. """ M = np.around(matrix).astype(self.dtype) GLR.__init__(self, M, coords, dtype=self.dtype) def validate(self): """ Must have determinant \pm 1 """ M = self.matrix return np.allclose(np.fabs(np.linalg.det(M)), 1) ################################################################################### def product(*elements): """ Compute the group product of a set of elements """ notsame = filter(lambda x: type(x) != type(elements[0]), elements) if notsame: raise ValueError('all elements should be members of the same group') composed_mapping = compose(*elements) matrix = composed_mapping.affine[:-1,:-1] return elements[0].__class__(matrix, elements[0].coords) ################################################################################### def change_basis(element, bchange_linear): """ Matrices can be thought of as representations of linear mappings between two (coordinate-free) vector spaces represented in particular bases. 
Hence, a MatrixGroup instance with matrix.shape = (ndim, ndim) represents a linear transformation L on a vector space of dimension ndim, in a given coordinate system. If we change the basis in which we represent L, the matrix that represents L should also change. A change of basis is represented as a mapping between two coordinate systems and is also represented by a change of basis matrix. This is expressed in this function as bchange_linear.function_range == element.coords This function expresses the same transformation L in a different basis. """ newcm = compose(bchange_linear.inverse(), element, bchange_linear) matrix = newcm.affine[:-1,:-1] if bchange_linear.function_range != element.coords: raise ValueError('expecting the basis change mapping to have the same output coords as element') return element.__class__(matrix, newcm.function_domain) ################################################################################### def same_transformation(element1, element2, basis_change): """ Matrices can be thought of as representations of linear mappings between two (coordinate-free) vector spaces represented in particular bases. Hence, a MatrixGroup instance with matrix.shape = (ndim, ndim) represents a linear transformation L on a vector space of dimension ndim, in a given coordinate system. This function asks the question: Do the two elements of a MatrixGroup (element1, element2) represent the same linear mapping if basis_change represents the change of basis between the two? element1.coords = change_basis(element2, basis_change).coords """ newelement = change_basis(element1, basis_change) return np.allclose(newelement.matrix, element2.matrix) and newelement.coords == element2.coords ################################################################################### def product_homomorphism(*elements): """ Given a sequence of elements of the same subclass of MatrixGroup, they can be thought of as an element of the topological product, which has a natural group structure. If all of the elements are of the same subclass, then there is a natural group homomorphism from the product space to a larger MatrixGroup. The matrices of the elements of the larger group will be block diagonal with blocks of the size corresponding to the dimensions of each corresponding element. This function is that homomorphism. """ notsame = filter(lambda x: type(x) != type(elements[0]), elements) if notsame: raise ValueError, 'all elements should be members of the same group' newcmap = cmap_product(*elements) matrix = newcmap.affine[:-1,:-1] return elements[0].__class__(matrix, newcmap.function_domain) nipy-0.3.0/nipy/core/reference/tests/test_array_coords.py000066400000000000000000000113321210344137400235770ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Testing array coords """ import numpy as np from nipy.core.api import (AffineTransform, CoordinateSystem, CoordinateMap, Grid, ArrayCoordMap) import nipy.core.reference.array_coords as acs from nose.tools import assert_true, assert_false, \ assert_equal, assert_raises from numpy.testing import assert_array_equal, assert_array_almost_equal def test_array_coord_map(): # array coord map recreates the affine when you slice an image. 
In # general, if you take an integer slice in some dimension, the # corresponding column of the affine will go, leaving a row for the # lost dimension, with all zeros, execpt for the translation in the # now-removed dimension, encoding the position of that particular # slice xz = 1.1; yz = 2.3; zz = 3.5 xt = 10.0; yt = 11; zt = 12 aff = np.diag([xz, yz, zz, 1]) aff[:3,3] = [xt, yt, zt] shape = (2,3,4) cmap = AffineTransform.from_params('ijk', 'xyz', aff) acm = acs.ArrayCoordMap(cmap, shape) # slice the coordinate map for the first axis sacm = acm[1] # The affine has lost the first column, but has a remaining row (the # first) encoding the translation to get to this slice assert_array_almost_equal(sacm.coordmap.affine, np.array([ [0, 0, xz+xt], [yz, 0, yt], [0, zz, zt], [0, 0, 1]])) sacm = acm[:,1] # lost second column, remaining second row with translation assert_array_almost_equal(sacm.coordmap.affine, np.array([ [xz, 0, xt], [0, 0, yz+yt], [0, zz, zt], [0, 0, 1]])) sacm = acm[:,:,2] # ditto third column and row assert_array_almost_equal(sacm.coordmap.affine, np.array([ [xz, 0, xt], [0, yz, yt], [0, 0, 2*zz+zt], [0, 0, 1]])) # check ellipsis slicing is the same as [:,: ... sacm = acm[...,2] assert_array_almost_equal(sacm.coordmap.affine, np.array([ [xz, 0, xt], [0, yz, yt], [0, 0, 2*zz+zt], [0, 0, 1]])) # that ellipsis can follow other slice types sacm = acm[:,...,2] assert_array_almost_equal(sacm.coordmap.affine, np.array([ [xz, 0, xt], [0, yz, yt], [0, 0, 2*zz+zt], [0, 0, 1]])) # that there can be only one ellipsis assert_raises(ValueError, acm.__getitem__, ( (Ellipsis, Ellipsis,2))) # that you can integer slice in all three dimensions, leaving only # the translation column sacm = acm[1,0,2] assert_array_almost_equal(sacm.coordmap.affine, np.array([ [xz+xt], [yt], [2*zz+zt], [1]])) # that anything other than an int, slice or Ellipsis is an error assert_raises(ValueError, acm.__getitem__, ([0,2],)) assert_raises(ValueError, acm.__getitem__, (np.array([0,2]),)) def test_grid(): input = CoordinateSystem('ij', 'input') output = CoordinateSystem('xy', 'output') def f(ij): i = ij[:,0] j = ij[:,1] return np.array([i**2+j,j**3+i]).T cmap = CoordinateMap(input, output, f) grid = Grid(cmap) eval = ArrayCoordMap.from_shape(cmap, (50,40)) assert_true(np.allclose(grid[0:50,0:40].values, eval.values)) def test_eval_slice(): input = CoordinateSystem('ij', 'input') output = CoordinateSystem('xy', 'output') def f(ij): i = ij[:,0] j = ij[:,1] return np.array([i**2+j,j**3+i]).T cmap = CoordinateMap(input, output, f) cmap = CoordinateMap(input, output, f) grid = Grid(cmap) e = grid[0:50,0:40] ee = e[0:20:3] yield assert_equal, ee.shape, (7,40) yield assert_equal, ee.values.shape, (280,2) yield assert_equal, ee.transposed_values.shape, (2,7,40) ee = e[0:20:2,3] yield assert_equal, ee.values.shape, (10,2) yield assert_equal, ee.transposed_values.shape, (2,10) yield assert_equal, ee.shape, (10,) nipy-0.3.0/nipy/core/reference/tests/test_coordinate_map.py000066400000000000000000001250211210344137400240750ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from copy import copy import numpy as np # this import line is a little ridiculous... 
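# -- it pulls in most of the public and private coordinate_map API
# exercised by the tests below.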
from ..coordinate_map import (CoordinateMap, AffineTransform, compose, product, append_io_dim, drop_io_dim, equivalent, shifted_domain_origin, shifted_range_origin, CoordMapMaker, CoordMapMakerError, _as_coordinate_map, AxisError, _fix0, axmap, orth_axes, input_axis_index, io_axis_indices) from ..coordinate_system import (CoordinateSystem, CoordinateSystemError, CoordSysMaker, CoordSysMakerError) # shortcut CS = CoordinateSystem from nose.tools import (assert_true, assert_equal, assert_raises, assert_false) from numpy.testing import (assert_array_equal, assert_almost_equal, dec) class empty(object): pass # object to hold module global setup E = empty() def setup(): def f(x): return 2*x def g(x): return x/2.0 x = CoordinateSystem('x', 'x') E.a = CoordinateMap(x, x, f) E.b = CoordinateMap(x, x, f, inverse_function=g) E.c = CoordinateMap(x, x, g) E.d = CoordinateMap(x, x, g, inverse_function=f) E.e = AffineTransform.identity('ijk') A = np.identity(4) A[0:3] = np.random.standard_normal((3,4)) E.mapping = AffineTransform.from_params('ijk' ,'xyz', A) E.singular = AffineTransform.from_params('ijk', 'xyzt', np.array([[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11], [ 8, 9, 10, 11], [ 0, 0, 0, 1]])) def test_shift_origin(): CS = CoordinateSystem A = np.random.standard_normal((5,6)) A[-1] = [0,0,0,0,0,1] aff1 = AffineTransform(CS('ijklm', 'oldorigin'), CS('xyzt'), A) difference = np.random.standard_normal(5) point_in_old_basis = np.random.standard_normal(5) for aff in [aff1, _as_coordinate_map(aff1)]: # The same affine transformation with a different origin for its domain shifted_aff = shifted_domain_origin(aff, difference, 'neworigin') # This is the relationship between coordinates in old and new origins assert_almost_equal(shifted_aff(point_in_old_basis), aff(point_in_old_basis+difference)) assert_almost_equal(shifted_aff(point_in_old_basis-difference), aff(point_in_old_basis)) # OK, now for the range A = np.random.standard_normal((5,6)) A[-1] = [0,0,0,0,0,1] aff2 = AffineTransform(CS('ijklm', 'oldorigin'), CS('xyzt'), A) difference = np.random.standard_normal(4) for aff in [aff2, _as_coordinate_map(aff2)]: # The same affine transformation with a different origin for its domain shifted_aff = shifted_range_origin(aff, difference, 'neworigin') # Let's check that things work point_in_old_basis = np.random.standard_normal(5) # This is the relation ship between coordinates in old and new origins assert_almost_equal(shifted_aff(point_in_old_basis), aff(point_in_old_basis)-difference) assert_almost_equal(shifted_aff(point_in_old_basis)+difference, aff(point_in_old_basis)) def test_renamed(): # Renaming domain and range A = AffineTransform.from_params('ijk', 'xyz', np.identity(4)) ijk = CoordinateSystem('ijk') xyz = CoordinateSystem('xyz') C = CoordinateMap(ijk, xyz, np.log) for B in [A,C]: B_re = B.renamed_domain({'i':'foo'}) assert_equal(B_re.function_domain.coord_names, ('foo', 'j', 'k')) B_re = B.renamed_domain({'i':'foo','j':'bar'}) assert_equal(B_re.function_domain.coord_names, ('foo', 'bar', 'k')) B_re = B.renamed_range({'y':'foo'}) assert_equal(B_re.function_range.coord_names, ('x', 'foo', 'z')) B_re = B.renamed_range({0:'foo',1:'bar'}) assert_equal(B_re.function_range.coord_names, ('foo', 'bar', 'z')) B_re = B.renamed_domain({0:'foo',1:'bar'}) assert_equal(B_re.function_domain.coord_names, ('foo', 'bar', 'k')) B_re = B.renamed_range({'y':'foo','x':'bar'}) assert_equal(B_re.function_range.coord_names, ('bar', 'foo', 'z')) assert_raises(ValueError, B.renamed_range, {'foo':'y'}) 
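        # likewise, renaming a domain coordinate that does not exist must raise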
assert_raises(ValueError, B.renamed_domain, {'foo':'y'}) def test_calling_shapes(): cs2d = CS('ij') cs1d = CS('i') cm2d = CoordinateMap(cs2d, cs2d, lambda x : x+1) cm1d2d = CoordinateMap(cs1d, cs2d, lambda x : np.concatenate((x, x), axis=-1)) at2d = AffineTransform(cs2d, cs2d, np.array([[1, 0, 1], [0, 1, 1], [0, 0, 1]])) at1d2d = AffineTransform(cs1d, cs2d, np.array([[1,0], [0,1], [0,1]])) # test coordinate maps and affine transforms for xfm2d, xfm1d2d in ((cm2d, cm1d2d), (at2d, at1d2d)): arr = np.array([0, 1]) assert_array_equal(xfm2d(arr), [1, 2]) # test lists work too res = xfm2d([0, 1]) assert_array_equal(res, [1, 2]) # and return arrays (by checking shape attribute) assert_equal(res.shape, (2,)) # maintaining input shape arr_long = arr[None, None, :] assert_array_equal(xfm2d(arr_long), arr_long + 1) # wrong shape array raises error assert_raises(CoordinateSystemError, xfm2d, np.zeros((3,))) assert_raises(CoordinateSystemError, xfm2d, np.zeros((3,3))) # 1d to 2d arr = np.array(1) assert_array_equal(xfm1d2d(arr), [1,1] ) arr_long = arr[None, None, None] assert_array_equal(xfm1d2d(arr_long), np.ones((1,1,2))) # wrong shape array raises error. Note 1d input requires size 1 # as final axis assert_raises(CoordinateSystemError, xfm1d2d, np.zeros((3,))) assert_raises(CoordinateSystemError, xfm1d2d, np.zeros((3,2))) def test_call(): value = 10 assert_true(np.allclose(E.a(value), 2*value)) assert_true(np.allclose(E.b(value), 2*value)) # FIXME: this shape just below is not # really expected for a CoordinateMap assert_true(np.allclose(E.b([value]), 2*value)) assert_true(np.allclose(E.c(value), value/2)) assert_true(np.allclose(E.d(value), value/2)) value = np.array([1., 2., 3.]) assert_true(np.allclose(E.e(value), value)) # check that error raised for wrong shape value = np.array([1., 2.,]) assert_raises(CoordinateSystemError, E.e, value) def test_compose(): value = np.array([[1., 2., 3.]]).T aa = compose(E.a, E.a) assert_true(aa.inverse() is None) assert_almost_equal(aa(value), 4*value) ab = compose(E.a,E.b) assert_true(ab.inverse() is None) assert_almost_equal(ab(value), 4*value) ac = compose(E.a,E.c) assert_true(ac.inverse() is None) assert_almost_equal(ac(value), value) bb = compose(E.b,E.b) # yield assert_true, bb.inverse() is not None aff1 = np.diag([1,2,3,1]) affine1 = AffineTransform.from_params('ijk', 'xyz', aff1) aff2 = np.diag([4,5,6,1]) affine2 = AffineTransform.from_params('xyz', 'abc', aff2) # compose mapping from 'ijk' to 'abc' compcm = compose(affine2, affine1) assert_equal(compcm.function_domain.coord_names, ('i', 'j', 'k')) assert_equal(compcm.function_range.coord_names, ('a', 'b', 'c')) assert_almost_equal(compcm.affine, np.dot(aff2, aff1)) # check invalid coordinate mappings assert_raises(ValueError, compose, affine1, affine2) assert_raises(ValueError, compose, affine1, 'foo') cm1 = CoordinateMap(CoordinateSystem('ijk'), CoordinateSystem('xyz'), np.log) cm2 = CoordinateMap(CoordinateSystem('xyz'), CoordinateSystem('abc'), np.exp) assert_raises(ValueError, compose, cm1, cm2) def test__eq__(): yield assert_true, E.a == E.a yield assert_false, E.a != E.a yield assert_false, E.a == E.b yield assert_true, E.a != E.b yield assert_true, E.singular == E.singular yield assert_false, E.singular != E.singular A = AffineTransform.from_params('ijk', 'xyz', np.diag([4,3,2,1])) B = AffineTransform.from_params('ijk', 'xyz', np.diag([4,3,2,1])) yield assert_true, A == B yield assert_false, A != B def test_similar_to(): in_cs = CoordinateSystem('ijk', 'in', np.float32) in_cs2 = 
CoordinateSystem('ijk', 'another name', np.float32) out_cs = CoordinateSystem('xyz', 'out', np.float32) out_cs2 = CoordinateSystem('xyz', 'again another', np.float32) for klass, arg0, arg1 in ((CoordinateMap, lambda x : x + 1, lambda x : x + 2), (AffineTransform, np.eye(4), np.diag([1, 2, 3, 1]))): c0 = klass(in_cs, out_cs, arg0) c1 = klass(in_cs, out_cs, arg0) assert_true(c0.similar_to(c1)) c1b = klass(in_cs, out_cs, arg1) assert_false(c0.similar_to(c1b)) c2 = klass(in_cs2, out_cs, arg0) assert_true(c0.similar_to(c2)) c3 = klass(in_cs, out_cs2, arg0) assert_true(c0.similar_to(c3)) def test_isinvertible(): yield assert_false, E.a.inverse() yield assert_true, E.b.inverse() yield assert_false, E.c.inverse() yield assert_true, E.d.inverse() yield assert_true, E.e.inverse() yield assert_true, E.mapping.inverse() yield assert_false, E.singular.inverse() def test_inverse1(): inv = lambda a: a.inverse() yield assert_true, inv(E.a) is None yield assert_true, inv(E.c) is None inv_b = E.b.inverse() inv_d = E.d.inverse() ident_b = compose(inv_b,E.b) ident_d = compose(inv_d,E.d) value = np.array([[1., 2., 3.]]).T yield assert_true, np.allclose(ident_b(value), value) yield assert_true, np.allclose(ident_d(value), value) def test_compose_cmap(): value = np.array([1., 2., 3.]) b = compose(E.e, E.e) assert_true(np.allclose(b(value), value)) def test_inverse2(): assert_true(np.allclose(E.e.affine, E.e.inverse().inverse().affine)) def voxel_to_world(): # utility function for generating trivial CoordinateMap incs = CoordinateSystem('ijk', 'voxels') outcs = CoordinateSystem('xyz', 'world') map = lambda x: x + 1 inv = lambda x: x - 1 return incs, outcs, map, inv def test_comap_init(): # Test mapping and non-mapping functions incs, outcs, map, inv = voxel_to_world() cm = CoordinateMap(incs, outcs, map, inv) yield assert_equal, cm.function, map yield assert_equal, cm.function_domain, incs yield assert_equal, cm.function_range, outcs yield assert_equal, cm.inverse_function, inv yield assert_raises, ValueError, CoordinateMap, incs, outcs, 'foo', inv yield assert_raises, ValueError, CoordinateMap, incs, outcs, map, 'bar' def test_comap_cosys(): # Check we can pass in coordinate names instead of coordinate systems d_sys = CoordinateSystem('ijk') r_sys = CoordinateSystem('xyz') fn = lambda x : x+1 cm = CoordinateMap(d_sys, r_sys, fn) assert_equal(CoordinateMap('ijk', 'xyz', fn), cm) assert_equal(CoordinateMap(d_sys, 'xyz', fn), cm) assert_equal(CoordinateMap('ijk', r_sys, fn), cm) aff = np.diag([2,3,4,1]) cm = AffineTransform(d_sys, r_sys, aff) assert_equal(AffineTransform('ijk', 'xyz', aff), cm) assert_equal(AffineTransform(d_sys, 'xyz', aff), cm) assert_equal(AffineTransform('ijk', r_sys, aff), cm) def test_comap_copy(): import copy incs, outcs, map, inv = voxel_to_world() cm = CoordinateMap(incs, outcs, inv, map) cmcp = copy.copy(cm) yield assert_equal, cmcp.function, cm.function yield assert_equal, cmcp.function_domain, cm.function_domain yield assert_equal, cmcp.function_range, cm.function_range yield assert_equal, cmcp.inverse_function, cm.inverse_function # # AffineTransform tests # def affine_v2w(): # utility function incs = CoordinateSystem('ijk', 'voxels') outcs = CoordinateSystem('xyz', 'world') aff = np.diag([1, 2, 4, 1]) aff[:3, 3] = [11, 12, 13] """array([[ 1, 0, 0, 11], [ 0, 2, 0, 12], [ 0, 0, 4, 13], [ 0, 0, 0, 1]]) """ return incs, outcs, aff def test_affine_init(): incs, outcs, aff = affine_v2w() cm = AffineTransform(incs, outcs, aff) assert_equal(cm.function_domain, incs) 
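    # the range coordinate system and the affine array should round-trip unchanged as well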
assert_equal(cm.function_range, outcs) assert_array_equal(cm.affine, aff) badaff = np.diag([1,2]) assert_raises(ValueError, AffineTransform, incs, outcs, badaff) def test_affine_bottom_row(): # homogeneous transformations have bottom rows all zero # except the last one assert_raises(ValueError, AffineTransform.from_params, 'ij', 'x', np.array([[3,4,5],[1,1,1]])) def test_affine_inverse(): incs, outcs, aff = affine_v2w() inv = np.linalg.inv(aff) cm = AffineTransform(incs, outcs, aff) x = np.array([10, 20, 30], np.float) x_roundtrip = cm(cm.inverse()(x)) assert_almost_equal(x_roundtrip, x) badaff = np.array([[1,2,3],[0,0,1]]) badcm = AffineTransform(CoordinateSystem('ij'), CoordinateSystem('x'), badaff) assert_equal(badcm.inverse(), None) def test_affine_from_params(): incs, outcs, aff = affine_v2w() cm = AffineTransform.from_params('ijk', 'xyz', aff) assert_array_equal(cm.affine, aff) badaff = np.array([[1,2,3],[4,5,6]]) assert_raises(ValueError, AffineTransform.from_params, 'ijk', 'xyz', badaff) def test_affine_start_step(): incs, outcs, aff = affine_v2w() start = aff[:3, 3] step = aff.diagonal()[:3] cm = AffineTransform.from_start_step(incs.coord_names, outcs.coord_names, start, step) assert_array_equal(cm.affine, aff) assert_raises(ValueError, AffineTransform.from_start_step, 'ijk', 'xy', start, step) def test_affine_identity(): aff = AffineTransform.identity('ijk') assert_array_equal(aff.affine, np.eye(4)) assert_equal(aff.function_domain, aff.function_range) # AffineTransform's aren't CoordinateMaps, so # they don't have "function" attributes assert_false(hasattr(aff, 'function')) def test_affine_copy(): incs, outcs, aff = affine_v2w() cm = AffineTransform(incs, outcs, aff) import copy cmcp = copy.copy(cm) assert_array_equal(cmcp.affine, cm.affine) assert_equal(cmcp.function_domain, cm.function_domain) assert_equal(cmcp.function_range, cm.function_range) # # Module level functions # def test_reordered_domain(): incs, outcs, map, inv = voxel_to_world() cm = CoordinateMap(incs, outcs, map, inv) recm = cm.reordered_domain('jki') yield assert_equal, recm.function_domain.coord_names, ('j', 'k', 'i') yield assert_equal, recm.function_range.coord_names, outcs.coord_names yield assert_equal, recm.function_domain.name, incs.name yield assert_equal, recm.function_range.name, outcs.name # default reverse reorder recm = cm.reordered_domain() yield assert_equal, recm.function_domain.coord_names, ('k', 'j', 'i') # reorder with order as indices recm = cm.reordered_domain([2,0,1]) yield assert_equal, recm.function_domain.coord_names, ('k', 'i', 'j') def test_str(): result = """AffineTransform( function_domain=CoordinateSystem(coord_names=('i', 'j', 'k'), name='', coord_dtype=float64), function_range=CoordinateSystem(coord_names=('x', 'y', 'z'), name='', coord_dtype=float64), affine=array([[ 1., 0., 0., 0.], [ 0., 1., 0., 0.], [ 0., 0., 1., 0.], [ 0., 0., 0., 1.]]) )""" domain = CoordinateSystem('ijk') range = CoordinateSystem('xyz') affine = np.identity(4) affine_mapping = AffineTransform(domain, range, affine) assert_equal(result, str(affine_mapping)) cmap = CoordinateMap(domain, range, np.exp, np.log) result="""CoordinateMap( function_domain=CoordinateSystem(coord_names=('i', 'j', 'k'), name='', coord_dtype=float64), function_range=CoordinateSystem(coord_names=('x', 'y', 'z'), name='', coord_dtype=float64), function=, inverse_function= )""" cmap = CoordinateMap(domain, range, np.exp) result="""CoordinateMap( function_domain=CoordinateSystem(coord_names=('i', 'j', 'k'), name='', coord_dtype=float64), 
function_range=CoordinateSystem(coord_names=('x', 'y', 'z'), name='', coord_dtype=float64), function= )""" assert_equal(result, repr(cmap)) def test_reordered_range(): incs, outcs, map, inv = voxel_to_world() cm = CoordinateMap(incs, outcs, inv, map) recm = cm.reordered_range('yzx') yield assert_equal, recm.function_domain.coord_names, incs.coord_names yield assert_equal, recm.function_range.coord_names, ('y', 'z', 'x') yield assert_equal, recm.function_domain.name, incs.name yield assert_equal, recm.function_range.name, outcs.name # default reverse order recm = cm.reordered_range() yield assert_equal, recm.function_range.coord_names, ('z', 'y', 'x') # reorder with indicies recm = cm.reordered_range([2,0,1]) yield assert_equal, recm.function_range.coord_names, ('z', 'x', 'y') def test_product(): affine1 = AffineTransform.from_params('i', 'x', np.diag([2, 1])) affine2 = AffineTransform.from_params('j', 'y', np.diag([3, 1])) affine = product(affine1, affine2) cm1 = CoordinateMap(CoordinateSystem('i'), CoordinateSystem('x'), np.log) cm2 = CoordinateMap(CoordinateSystem('j'), CoordinateSystem('y'), np.log) cm = product(cm1, cm2) assert_equal(affine.function_domain.coord_names, ('i', 'j')) assert_equal(affine.function_range.coord_names, ('x', 'y')) assert_almost_equal(cm([3,4]), np.log([3,4])) assert_almost_equal(cm.function([[3,4],[5,6]]), np.log([[3,4],[5,6]])) assert_equal(affine.function_domain.coord_names, ('i', 'j')) assert_equal(affine.function_range.coord_names, ('x', 'y')) assert_array_equal(affine.affine, np.diag([2, 3, 1])) # Test name argument for m1, m2 in ((affine1, affine2), (cm1, cm2), (affine1, cm2)): cm = product(m1, m2) assert_equal(cm.function_domain.name, 'product') assert_equal(cm.function_range.name, 'product') cm = product(m1, m2, input_name='name0') assert_equal(cm.function_domain.name, 'name0') assert_equal(cm.function_range.name, 'product') cm = product(m1, m2, output_name='name1') assert_equal(cm.function_domain.name, 'product') assert_equal(cm.function_range.name, 'name1') assert_raises(TypeError, product, m1, m2, whatgains='name0') def test_equivalent(): ijk = CoordinateSystem('ijk') xyz = CoordinateSystem('xyz') T = np.random.standard_normal((4,4)) T[-1] = [0,0,0,1] A = AffineTransform(ijk, xyz, T) # now, cycle through # all possible permutations of # 'ijk' and 'xyz' and confirm that # the mapping is equivalent yield assert_false, equivalent(A, A.renamed_domain({'i':'foo'})) try: import itertools for pijk in itertools.permutations('ijk'): for pxyz in itertools.permutations('xyz'): B = A.reordered_domain(pijk).reordered_range(pxyz) yield assert_true, equivalent(A, B) except (ImportError, AttributeError): # just do some if we can't find itertools, or if itertools # doesn't have permutations for pijk in ['ikj', 'kij']: for pxyz in ['xzy', 'yxz']: B = A.reordered_domain(pijk).reordered_range(pxyz) yield assert_true, equivalent(A, B) def test_as_coordinate_map(): ijk = CoordinateSystem('ijk') xyz = CoordinateSystem('xyz') A = np.random.standard_normal((4,4)) # bottom row of A is not [0,0,0,1] yield assert_raises, ValueError, AffineTransform, ijk, xyz, A A[-1] = [0,0,0,1] aff = AffineTransform(ijk, xyz, A) _cmapA = _as_coordinate_map(aff) yield assert_true, isinstance(_cmapA, CoordinateMap) yield assert_true, _cmapA.inverse_function != None # a non-invertible one B = A[1:] xy = CoordinateSystem('xy') affB = AffineTransform(ijk, xy, B) _cmapB = _as_coordinate_map(affB) yield assert_true, isinstance(_cmapB, CoordinateMap) yield assert_true, _cmapB.inverse_function == None 
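# --- Editor's illustrative sketch (not part of the released nipy sources).
# It spells out the voxel-to-world round trip exercised by the tests above as
# one small stand-alone example.  It reuses names this module already has in
# scope (np, AffineTransform, CoordinateSystem, assumed imported at the top of
# the file); the function name and the particular voxel sizes are illustrative.
def _example_affine_round_trip():
    # A diagonal affine: 2, 3, 4 mm voxels, no translation.
    vox = CoordinateSystem('ijk', 'voxels')
    world = CoordinateSystem('xyz', 'world')
    cmap = AffineTransform(vox, world, np.diag([2, 3, 4, 1]))
    pt = np.array([1, 2, 3])
    world_pt = cmap(pt)                  # roughly [2., 6., 12.]
    # inverse() returns the mapping from world back to voxel coordinates
    voxel_pt = cmap.inverse()(world_pt)  # back to [1., 2., 3.] (as floats)
    assert np.allclose(voxel_pt, pt)
    return world_pt, voxel_pt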
def test_cm__setattr__raise_error(): # CoordinateMap has all read-only attributes # AffineTransform has some properties and it seems # the same __setattr__ doesn't work for it. ijk = CoordinateSystem('ijk') xyz = CoordinateSystem('xyz') cm = CoordinateMap(ijk, xyz, np.exp) yield assert_raises, AttributeError, cm.__setattr__, "function_range", xyz def test_append_io_dim(): aff = np.diag([1,2,3,1]) in_dims = tuple('ijk') out_dims = tuple('xyz') cm = AffineTransform.from_params(in_dims, out_dims, aff) cm2 = append_io_dim(cm, 'l', 't') assert_array_equal(cm2.affine, np.diag([1,2,3,1,1])) assert_equal(cm2.function_range.coord_names, out_dims + ('t',)) assert_equal(cm2.function_domain.coord_names, in_dims + ('l',)) cm2 = append_io_dim(cm, 'l', 't', 9, 5) a2 = np.diag([1,2,3,5,1]) a2[3,4] = 9 assert_array_equal(cm2.affine, a2) assert_equal(cm2.function_range.coord_names, out_dims + ('t',)) assert_equal(cm2.function_domain.coord_names, in_dims + ('l',)) # non square case aff = np.array([[2,0,0], [0,3,0], [0,0,1], [0,0,1]]) cm = AffineTransform.from_params('ij', 'xyz', aff) cm2 = append_io_dim(cm, 'q', 't', 9, 5) a2 = np.array([[2,0,0,0], [0,3,0,0], [0,0,0,1], [0,0,5,9], [0,0,0,1]]) assert_array_equal(cm2.affine, a2) assert_equal(cm2.function_range.coord_names, tuple('xyzt')) assert_equal(cm2.function_domain.coord_names, tuple('ijq')) def test__fix0(): # Test routine to fix possible zero TR in affine assert_array_equal(_fix0(np.diag([1, 2, 3, 1])), np.diag([1, 2, 3, 1])) assert_array_equal(_fix0(np.diag([0, 2, 3, 1])), np.diag([1, 2, 3, 1])) assert_array_equal(_fix0(np.diag([1, 0, 3, 1])), np.diag([1, 1, 3, 1])) assert_array_equal(_fix0(np.diag([1, 2, 0, 1])), np.diag([1, 2, 1, 1])) aff = [[1, 0, 0, 10], [0, 0, 0, 11], [0, 0, 0, 1]] assert_array_equal(_fix0(aff), aff) aff = [[1, 0, 0, 10], [0, 2, 0, 11], [0, 0, 0, 12], [0, 0, 0, 1]] assert_array_equal(_fix0(aff), [[1, 0, 0, 10], [0, 2, 0, 11], [0, 0, 1, 12], [0, 0, 0, 1]]) eps = np.finfo(np.float64).eps aff[2][2] = eps assert_array_equal(_fix0(aff), aff) def test_drop_io_dim(): # test ordinary case of 4d to 3d cm4d = AffineTransform.from_params('ijkl', 'xyzt', np.diag([1,2,3,4,1])) cm3d = drop_io_dim(cm4d, 't') assert_array_equal(cm3d.affine, np.diag([1, 2, 3, 1])) cm3d = drop_io_dim(cm4d, 'l') assert_array_equal(cm3d.affine, np.diag([1, 2, 3, 1])) cm3d = drop_io_dim(cm4d, 3) assert_array_equal(cm3d.affine, np.diag([1, 2, 3, 1])) cm3d = drop_io_dim(cm4d, -1) assert_array_equal(cm3d.affine, np.diag([1, 2, 3, 1])) # 3d to 2d cm3d = AffineTransform.from_params('ijk', 'xyz', np.diag([1,2,3,1])) cm2d = drop_io_dim(cm3d, 'z') assert_array_equal(cm2d.affine, np.diag([1, 2, 1])) # test zero scaling for dropped dimension cm3d = AffineTransform.from_params('ijk', 'xyz', np.diag([1, 2, 0, 1])) cm2d = drop_io_dim(cm3d, 'z') assert_array_equal(cm2d.affine, np.diag([1, 2, 1])) # test not diagonal but orthogonal aff = np.array([[1, 0, 0, 0], [0, 0, 2, 0], [0, 3, 0, 0], [0, 0, 0, 1]]) cm3d = AffineTransform.from_params('ijk', 'xyz', aff) cm2d = drop_io_dim(cm3d, 'z') assert_array_equal(cm2d.affine, np.diag([1, 2, 1])) cm2d = drop_io_dim(cm3d, 'k') assert_array_equal(cm2d.affine, np.diag([1, 3, 1])) # and with zeros scaling fix for orthogonal dropped dimension aff[2] = 0 cm3d = AffineTransform.from_params('ijk', 'xyz', aff) cm2d = drop_io_dim(cm3d, 'z') assert_array_equal(cm2d.affine, np.diag([1, 2, 1])) # Unless told otherwise cm2d = drop_io_dim(cm3d, 'z', fix0=False) # In this case we drop z because it has no matching input assert_array_equal(cm2d.affine, [[1, 0, 
0, 0], [0, 0, 2, 0], [0, 0, 0, 1]]) # Don't zero-fix untested dimensions cm2d = drop_io_dim(cm3d, 'y', fix0=True) assert_array_equal(cm2d.affine, np.diag([1, 0, 1])) # Test test for ambiguous coordinate names # This one is OK because they match cm3d = AffineTransform.from_params('ijk', 'iyz', np.diag([1, 2, 3, 1])) cm2d = drop_io_dim(cm3d, 'i') assert_array_equal(cm2d.affine, np.diag([2, 3, 1])) # Here they don't match and this raises an error cm3d = AffineTransform.from_params('ijk', 'xiz', np.diag([1, 2, 3, 1])) assert_raises(AxisError, drop_io_dim, cm3d, 'i') # Dropping input or outputs that have no matching dimensions is also OK aff = np.array([[1, .1, 0, 10], [.1, 0, 0, 11], [ 0, 3, 0, 12], [ 0, 0, 0, 1]]) cm3d = AffineTransform.from_params('ijk', 'xyz', aff) cm2d = drop_io_dim(cm3d, 'k') assert_array_equal(cm2d.affine, [[1, .1, 10], [.1, 0, 11], [ 0, 3, 12], [ 0, 0, 1]]) aff = np.array([[1, .1, 0, 10], [0, 0, 0, 11], [0, 3, .1, 12], [0, 0, 0, 1]]) cm3d = AffineTransform.from_params('ijk', 'xyz', aff) cm2d = drop_io_dim(cm3d, 'y') assert_array_equal(cm2d.affine, [[1, .1, 0, 10], [ 0, 3, .1, 12], [ 0, 0, 0, 1]]) def test_axmap(): # Test mapping between axes cmap = AffineTransform('ijk', 'xyz', np.eye(4)) assert_equal(axmap(cmap), {0: 0, 1:1, 2:2, 'i': 0, 'j': 1, 'k': 2}) assert_equal(axmap(cmap, 'out2in'), {0: 0, 1:1, 2:2, 'x': 0, 'y': 1, 'z': 2}) assert_equal(axmap(cmap, 'both'), ({0: 0, 1:1, 2:2, 'i': 0, 'j': 1, 'k': 2}, {0: 0, 1:1, 2:2, 'x': 0, 'y': 1, 'z': 2})) cmap = AffineTransform('ijk', 'xyz', [[0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0], [0, 0, 0, 1]]) assert_equal(axmap(cmap), {0: 2, 1: 0, 2: 1, 'i': 2, 'j': 0, 'k': 1}) assert_equal(axmap(cmap, 'out2in'), {2: 0, 0: 1, 1: 2, 'z': 0, 'x': 1, 'y': 2}) # Test in presence of nasty zero cmap = AffineTransform('ijk', 'xyz', np.diag([2, 3, 0, 1])) # Default is to fix zero assert_equal(axmap(cmap), {0: 0, 1: 1, 2: 2, 'i': 0, 'j': 1, 'k': 2}) assert_equal(axmap(cmap, fix0=True), {0: 0, 1: 1, 2: 2, 'i': 0, 'j': 1, 'k': 2}) assert_equal(axmap(cmap, 'out2in'), {0: 0, 1: 1, 2: 2, 'x': 0, 'y': 1, 'z': 2}) # If turned off, we can't find the axis anymore assert_equal(axmap(cmap, fix0=False), {0: 0, 1: 1, 2: None, 'i': 0, 'j': 1, 'k': None}) assert_equal(axmap(cmap, 'out2in', fix0=False), {0: 0, 1: 1, 2: None, 'x': 0, 'y': 1, 'z': None}) # Need in2out or out2in as action strings assert_raises(ValueError, axmap, cmap, 'do what exactly?') # Non-square cmap = AffineTransform('ij', 'xyz', [[0, 1, 0], [0, 0, 0], [1, 0, 0], [0, 0, 1]]) assert_equal(axmap(cmap), {0: 2, 1: 0, 'i': 2, 'j': 0}) assert_equal(axmap(cmap, 'out2in'), {0: 1, 1: None, 2: 0, 'x': 1, 'y': None, 'z': 0}) cmap = AffineTransform('ijk', 'xy', [[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]) assert_equal(axmap(cmap), {0: None, 1: 0, 2: 1, 'i': None, 'j': 0, 'k': 1}) assert_equal(axmap(cmap, 'out2in'), {0: 1, 1: 2, 'x': 1, 'y': 2}) # What happens if there are ties? 
cmap = AffineTransform('ijk', 'xyz', [[0, 1, 0, 0], [0, 1, 0, 0], [1, 0, 0, 0], [0, 0, 0, 1]]) assert_equal(axmap(cmap), {0: 2, 1: 0, 2: None, 'i': 2, 'j': 0, 'k': None}) assert_equal(axmap(cmap, 'out2in'), {0: 1, 1: None, 2: 0, 'x': 1, 'y': None, 'z': 0}) def test_orth_axes(): # Test for test of orthogality of in, out axis to rest of affine # Check 3,3, 2, 3, and that negative values don't confuse for aff in (np.eye(4), np.diag([2, 3, 1]), np.eye(4) * -1): for i in range(aff.shape[0]-1): assert_true(orth_axes(i, i, aff)) assert_true(orth_axes(2, 2, np.diag([2, 3, 0, 1]))) assert_false(orth_axes(2, 2, np.diag([2, 3, 0, 1]), False)) aff = np.eye(4) assert_true(orth_axes(0, 0, aff)) aff[0, 1] = 1e-4 assert_false(orth_axes(0, 0, aff)) assert_true(orth_axes(0, 0, aff, tol=2e-4)) aff[1, 0] = 3e-4 assert_false(orth_axes(0, 0, aff)) def test_input_axis_index(): # Test routine to map name to input axis cmap = AffineTransform('ijk', 'xyz', np.eye(4)) for i, in_name, out_name in zip(range(3), 'ijk', 'xyz'): assert_equal(input_axis_index(cmap, in_name), i) assert_equal(input_axis_index(cmap, out_name), i) flipped = [[0, 0, 1, 1], [0, 1, 0, 2], [1, 0, 0, 3], [0, 0, 0, 1]] cmap_f = AffineTransform('ijk', 'xyz', flipped) for i, in_name, out_name in zip(range(3), 'ijk', 'zyx'): assert_equal(input_axis_index(cmap_f, in_name), i) assert_equal(input_axis_index(cmap_f, out_name), i) # Names can be same in input and output but they must match cmap_m = AffineTransform('ijk', 'kji', flipped) for i, in_name, out_name in zip(range(3), 'ijk', 'ijk'): assert_equal(input_axis_index(cmap_m, in_name), i) assert_equal(input_axis_index(cmap_m, out_name), i) # If they don't match, AxisError cmap_b = AffineTransform('ijk', 'xiz', np.eye(4)) assert_equal(input_axis_index(cmap_m, 'j'), 1) assert_raises(AxisError, input_axis_index, cmap_b, 'i') # Name not found, AxisError assert_raises(AxisError, input_axis_index, cmap_b, 'q') # 0 leads to no match if fix0 turned off cmap_z = AffineTransform('ijk', 'xyz', np.diag([2, 3, 0, 1])) assert_equal(input_axis_index(cmap_z, 'z'), 2) assert_equal(input_axis_index(cmap_z, 'z', fix0=True), 2) assert_raises(AxisError, input_axis_index, cmap_z, 'z', fix0=False) # Other axes not affected in presence of 0 assert_equal(input_axis_index(cmap_z, 'y'), 1) def test_io_axis_indices(): # Test routine to get input and output axis indices cmap = AffineTransform('ijk', 'xyz', np.eye(4)) for i, in_name, out_name in zip(range(3), 'ijk', 'xyz'): assert_equal(io_axis_indices(cmap, i), (i, i)) assert_equal(io_axis_indices(cmap, in_name), (i, i)) assert_equal(io_axis_indices(cmap, out_name), (i, i)) flipped = [[0, 0, 1, 1], [0, 1, 0, 2], [1, 0, 0, 3], [0, 0, 0, 1]] cmap_f = AffineTransform('ijk', 'xyz', flipped) for i, in_name, out_name in zip(range(3), 'ijk', 'xyz'): assert_equal(io_axis_indices(cmap_f, i), (i, 2-i)) assert_equal(io_axis_indices(cmap_f, in_name), (i, 2-i)) assert_equal(io_axis_indices(cmap_f, out_name), (2-i, i)) # Names can be same in input and output but they must match cmap_m = AffineTransform('ijk', 'kji', flipped) for i, in_name, out_name in zip(range(3), 'ijk', 'kji'): assert_equal(io_axis_indices(cmap_m, i), (i, 2-i)) assert_equal(io_axis_indices(cmap_m, in_name), (i, 2-i)) assert_equal(io_axis_indices(cmap_m, out_name), (2-i, i)) # If they don't match, AxisError, if selecting by name cmap_b = AffineTransform('ijk', 'xiz', np.eye(4)) assert_raises(AxisError, io_axis_indices, cmap_b, 'i') # ... but not if name corresponds assert_equal(io_axis_indices(cmap_b, 'k'), (2, 2)) # ... 
or if input name not found in output assert_equal(io_axis_indices(cmap_b, 'j'), (1, 1)) # ... or if selecting by number assert_equal(io_axis_indices(cmap_b, 0), (0, 0)) # Name not found, AxisError assert_raises(AxisError, io_axis_indices, cmap_b, 'q') # 0 leads to no match if fix0 set to false cmap_z = AffineTransform('ijk', 'xyz', np.diag([2, 3, 0, 1])) assert_equal(io_axis_indices(cmap_z, 'y'), (1, 1)) assert_equal(io_axis_indices(cmap_z, 'z'), (2, 2)) assert_equal(io_axis_indices(cmap_z, 'z', fix0=False), (None, 2)) # For either input or output assert_equal(io_axis_indices(cmap_z, 'k'), (2, 2)) assert_equal(io_axis_indices(cmap_z, 'k', fix0=False), (2, None)) # axis name and number access without fix0 cmap = AffineTransform('ijkt', 'xyzt', np.diag([1, 1, 1, 0, 1])) assert_raises(AxisError, io_axis_indices, cmap, 't', fix0=False) in_ax, out_ax = io_axis_indices(cmap, -1, fix0=False) assert_equal((in_ax, out_ax), (3, None)) # Non-square is OK cmap = AffineTransform('ij', 'xyz', [[0, 1, 0], [0, 0, 0], [1, 0, 0], [0, 0, 1]]) assert_equal(io_axis_indices(cmap, 'j'), (1, 0)) assert_equal(io_axis_indices(cmap, 'y'), (None, 1)) assert_equal(io_axis_indices(cmap, 'z'), (0, 2)) cmap = AffineTransform('ijk', 'xy', [[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]) assert_equal(io_axis_indices(cmap, 'i'), (0, None)) assert_equal(io_axis_indices(cmap, 'j'), (1, 0)) assert_equal(io_axis_indices(cmap, 'y'), (2, 1)) def test_make_cmap(): # Routine to put the guessing back into making coordinate maps d_names = list('ijklm') r_names = list('xyztu') domain_maker = CoordSysMaker(d_names, 'voxels') range_maker = CoordSysMaker(r_names, 'world') cmm = CoordMapMaker(domain_maker, range_maker) # Making with generic functions and with affines xform = lambda x : x+1 inv_xform = lambda x : x-1 diag_vals = range(2,8) for i in range(1, 6): dcs = CS(d_names[:i], 'voxels') rcs = CS(r_names[:i], 'world') # Generic assert_equal(cmm.make_cmap(i, xform, inv_xform), CoordinateMap(dcs, rcs, xform, inv_xform)) assert_equal(cmm.make_cmap(i, xform), CoordinateMap(dcs, rcs, xform)) # Affines aff = np.diag(diag_vals[:i] + [1]) assert_equal(cmm.make_affine(aff), AffineTransform(dcs, rcs, aff)) # Test that the call method selects what it got correctly assert_equal(cmm(i, xform, inv_xform), CoordinateMap(dcs, rcs, xform, inv_xform)) assert_equal(cmm(i, xform), CoordinateMap(dcs, rcs, xform)) assert_equal(cmm(aff), AffineTransform(dcs, rcs, aff)) # For affines, we can append dimensions by adding on the diagonal aff = np.diag([2,3,4,1]) dcs = CS(d_names[:4], 'voxels') rcs = CS(r_names[:4], 'world') assert_equal(cmm.make_affine(aff, 5), AffineTransform(CS(d_names[:4], 'voxels'), CS(r_names[:4], 'world'), np.diag([2,3,4,5,1]))) assert_equal(cmm.make_affine(aff, [5,6]), AffineTransform(CS(d_names[:5], 'voxels'), CS(r_names[:5], 'world'), np.diag([2,3,4,5,6,1]))) # we can add offsets too exp_aff = np.diag([2,3,4,5,6,1]) exp_aff[3:5,-1] = [7,8] assert_equal(cmm.make_affine(aff, [5,6],[7,8]), AffineTransform(CS(d_names[:5], 'voxels'), CS(r_names[:5], 'world'), exp_aff)) # The zooms (diagonal elements) and offsets must match in length assert_raises(CoordMapMakerError, cmm.make_affine, aff, [5,6], 7) # Check non-square affines aff = np.array([[2,0,0], [0,3,0], [0,0,1], [0,0,1]]) dcs = CS(d_names[:2], 'voxels') rcs = CS(r_names[:3], 'world') assert_equal(cmm.make_affine(aff), AffineTransform(dcs, rcs, aff)) dcs = CS(d_names[:3], 'voxels') rcs = CS(r_names[:4], 'world') exp_aff = np.array([[2,0,0,0], [0,3,0,0], [0,0,0,1], [0,0,4,0], [0,0,0,1]]) 
assert_equal(cmm.make_affine(aff, 4), AffineTransform(dcs, rcs, exp_aff)) def test_dtype_cmap_inverses(): # Check that we can make functional inverses of AffineTransforms, and # CoordinateMap versions of AffineTransforms dtypes = (np.sctypes['int'] + np.sctypes['uint'] + np.sctypes['float'] + np.sctypes['complex'] + [np.object]) arr_p1 = np.eye(4)[:, [0, 2, 1, 3]] in_list = [0, 1, 2] out_list = [0, 2, 1] for dt in dtypes: in_cs = CoordinateSystem('ijk', coord_dtype=dt) out_cs = CoordinateSystem('xyz', coord_dtype=dt) cmap = AffineTransform(in_cs, out_cs, arr_p1.astype(dt)) coord = np.array(in_list, dtype=dt) out_coord = np.array(out_list, dtype=dt) # Expected output type of inverse, not preserving if dt in np.sctypes['int'] + np.sctypes['uint']: exp_i_dt = np.float64 else: exp_i_dt = dt # Default inverse cmap may alter coordinate types r_cmap = cmap.inverse() res = r_cmap(out_coord) assert_array_equal(res, coord) assert_equal(res.dtype, exp_i_dt) # Default behavior is preserve_type=False r_cmap = cmap.inverse(preserve_dtype=False) res = r_cmap(out_coord) assert_array_equal(res, coord) assert_equal(res.dtype, exp_i_dt) # Preserve_dtype=True - preserves dtype r_cmap = cmap.inverse(preserve_dtype=True) res = r_cmap(out_coord) assert_array_equal(res, coord) assert_equal(res.dtype, dt) # Preserve_dtype=True is default for conversion to CoordinateMap cm_cmap = _as_coordinate_map(cmap) assert_array_equal(cm_cmap(coord), out_list) rcm_cmap = cm_cmap.inverse() assert_array_equal(rcm_cmap(coord), out_list) res = rcm_cmap(out_coord) assert_array_equal(res, coord) assert_equal(res.dtype, dt) # For integer types, where there is no integer inverse, return floatey # inverse by default, and None for inverse when preserve_dtype=True arr_p2 = arr_p1 * 2 arr_p2[-1, -1] = 1 out_list = [0, 4, 2] for dt in np.sctypes['int'] + np.sctypes['uint']: in_cs = CoordinateSystem('ijk', coord_dtype=dt) out_cs = CoordinateSystem('xyz', coord_dtype=dt) cmap = AffineTransform(in_cs, out_cs, arr_p2.astype(dt)) coord = np.array(in_list, dtype=dt) out_coord = np.array(out_list, dtype=dt) # Default r_cmap = cmap.inverse() res = r_cmap(out_coord) assert_array_equal(res, coord) assert_equal(res.dtype, np.float64) # Default is preserve_type=False r_cmap = cmap.inverse(preserve_dtype=False) res = r_cmap(out_coord) assert_array_equal(res, coord) assert_equal(res.dtype, np.float64) # preserve_dtype=True means there is no valid inverse for non integer # affine inverses, as here assert_equal(cmap.inverse(preserve_dtype=True), None) def test_subtype_equalities(): # Check cmap compare equal if subtypes, on either side in_cs = CoordinateSystem('ijk') out_cs = CoordinateSystem('xyz') f = lambda x : x + 1 cmap = CoordinateMap(in_cs, out_cs, f) class CM2(CoordinateMap): pass cmap2 = CM2(in_cs, out_cs, f) assert_equal(cmap, cmap2) assert_equal(cmap2, cmap) cmap = AffineTransform(in_cs, out_cs, np.eye(4)) class AT2(AffineTransform): pass cmap2 = AT2(in_cs, out_cs, np.eye(4)) assert_equal(cmap, cmap2) assert_equal(cmap2, cmap) def test_cmap_coord_types(): # Check that we can use full range of coordinate system types. 
The inverse # of an AffineTransform should generate coordinates in the input coordinate # system dtype dtypes = (np.sctypes['int'] + np.sctypes['uint'] + np.sctypes['float'] + np.sctypes['complex'] + [np.object]) arr_p1 = np.eye(4) arr_p1[:3, 3] = 1 for dt in dtypes: in_cs = CoordinateSystem('ijk', coord_dtype=dt) out_cs = CoordinateSystem('xyz', coord_dtype=dt) # CoordinateMap cmap = CoordinateMap(in_cs, out_cs, lambda x : x + 1) assert_equal(cmap, copy(cmap)) res = cmap(np.array([0, 1, 2], dtype=dt)) assert_array_equal(res, [1, 2, 3]) assert_equal(res.dtype, in_cs.coord_dtype) # Check reordering works rcmap = cmap.reordered_domain('ikj').reordered_range('yxz') res = rcmap(np.array([0, 1, 2], dtype=dt)) assert_array_equal(res, [3, 1, 2]) assert_equal(res.dtype, in_cs.coord_dtype) # AffineTransform cmap = AffineTransform(in_cs, out_cs, arr_p1.astype(dt)) res = cmap(np.array([0, 1, 2], dtype=dt)) assert_array_equal(res, [1, 2, 3]) assert_equal(res.dtype, in_cs.coord_dtype) assert_equal(cmap, copy(cmap)) # Check reordering works rcmap = cmap.reordered_domain('ikj').reordered_range('yxz') res = rcmap(np.array([0, 1, 2], dtype=dt)) assert_array_equal(res, [3, 1, 2]) assert_equal(res.dtype, in_cs.coord_dtype) nipy-0.3.0/nipy/core/reference/tests/test_coordinate_system.py000066400000000000000000000221031210344137400246410ustar00rootroot00000000000000""" Tests for coordinate_system module """ import numpy as np from ..coordinate_system import (CoordinateSystem, CoordinateSystemError, is_coordsys, is_coordsys_maker, product, safe_dtype, CoordSysMaker, CoordSysMakerError) from nose.tools import (assert_true, assert_false, assert_equal, assert_raises, assert_not_equal) class empty(object): pass E = empty() def setup(): E.name = "test" E.axes = ('i', 'j', 'k') E.coord_dtype = np.float32 E.cs = CoordinateSystem(E.axes, E.name, E.coord_dtype) def test_CoordinateSystem(): assert_equal(E.cs.name, E.name) assert_equal(E.cs.coord_names, E.axes) assert_equal(E.cs.coord_dtype, E.coord_dtype) def test_iterator_coordinate(): def gen(): yield 'i' yield 'j' yield 'k' coordsys = CoordinateSystem(gen(), name='test_iter') assert_equal(coordsys.coord_names, ('i','j','k')) def test_ndim(): cs = CoordinateSystem('xy') assert_equal(cs.ndim, 2) cs = CoordinateSystem('ijk') assert_equal(cs.ndim, 3) def test_unique_coord_names(): unique = ('i','j','k') notuniq = ('i','i','k') coordsys = CoordinateSystem(unique) assert_equal(coordsys.coord_names, unique) assert_raises(ValueError, CoordinateSystem, notuniq) def test_dtypes(): # invalid dtypes dtypes = np.sctypes['others'] dtypes.remove(np.object) for dt in dtypes: assert_raises(ValueError, CoordinateSystem, 'ijk', 'test', dt) # compound dtype dtype = np.dtype([('field1', ' int64 ax1 = CoordinateSystem('x', coord_dtype=np.int32) ax2 = CoordinateSystem('y', coord_dtype=np.int64) cs = product(ax1, ax2) # assert up-casting of dtype assert_equal(cs.coord_dtype, np.dtype(np.int64)) # assert composed dtype assert_equal(cs.dtype, np.dtype([('x', np.int64), ('y', np.int64)])) # the axes should be typecast in the CoordinateSystem but # uneffected themselves assert_equal(ax1.dtype, np.dtype([('x', np.int32)])) assert_equal(ax2.dtype, np.dtype([('y', np.int64)])) # float32 + int64 => float64 ax1 = CoordinateSystem('x', coord_dtype=np.float32) cs = product(ax1, ax2) assert_equal(cs.coord_dtype, np.dtype(np.float64)) assert_equal(cs.dtype, np.dtype([('x', np.float64), ('y', np.float64)])) # int16 + complex64 => complex64 ax1 = CoordinateSystem('x', coord_dtype=np.int16) ax2 = 
CoordinateSystem('y', coord_dtype=np.complex64) # Order of the params effects order of dtype but not resulting value type cs = product(ax2, ax1) assert_equal(cs.coord_dtype, np.complex64) assert_equal(cs.dtype, np.dtype([('y', np.complex64), ('x', np.complex64)])) # Passing name as argument cs = product(ax2, ax1, name='a name') assert_equal(cs.name, 'a name') # Anything else as kwarg -> error assert_raises(TypeError, product, ax2, ax1, newarg='a name') def test_coordsys_maker(): # Things that help making coordinate maps ax_names = list('ijklm') nl = len(ax_names) cs_maker = CoordSysMaker(ax_names, 'myname') for i in range(1,nl+1): assert_equal(cs_maker(i), CoordinateSystem(ax_names[:i], 'myname', np.float)) assert_raises(CoordSysMakerError, cs_maker, nl+1) # You can pass in your own name assert_equal(cs_maker(i, 'anothername'), CoordinateSystem(ax_names[:i+1], 'anothername', np.float)) # And your own dtype if you really want assert_equal(cs_maker(i, coord_dtype=np.int32), CoordinateSystem(ax_names[:i+1], 'myname', np.int32)) def test_is_coordsys_maker(): # Test coordinate system check cm = CoordSysMaker('xyz') assert_true(is_coordsys_maker(cm)) class C(object): pass c = C() assert_false(is_coordsys_maker(c)) c.coord_names = [] assert_false(is_coordsys_maker(c)) c.name = '' assert_false(is_coordsys_maker(c)) c.coord_dtype = np.float assert_false(is_coordsys_maker(c)) # Distinguish from CoordinateSystem class C(object): coord_names = [] name = '' coord_dtype=np.float def __call__(self): pass assert_true(is_coordsys_maker(C())) assert_false(is_coordsys_maker(CoordinateSystem('ijk'))) nipy-0.3.0/nipy/core/reference/tests/test_matrix_groups.py000066400000000000000000000074151210344137400240220ustar00rootroot00000000000000 import numpy as np from scipy.linalg import expm from nose.tools import assert_true, assert_equal, assert_raises import nipy.core.reference.tests.matrix_groups as MG from nipy.core.api import ArrayCoordMap A = np.array([[0,1], [1,0]]) B = np.array([[5,4], [4,3]]) D = np.array([[25,4], [31,5]]) def test_init(): """ Test that we can initialize the MatrixGroup subclasses """ O_A = MG.O(A, 'xy') GLR_A = MG.GLR(A, 'xy') GLZ_A = MG.GLZ(A, 'xy') SO_A = MG.SO(np.identity(2), 'xy') B = np.array([[np.sin(0.75), np.cos(0.75)], [-np.cos(0.75), np.sin(0.75)]]) O_B = MG.O(B, 'xy') SO_B = MG.O(B, 'xy') B[1] = -B[1] assert_raises(ValueError, MG.SO, B, 'xy') O_B = MG.O(B, 'xy') def test2(): Z = np.random.standard_normal((3,3)) GL_Z = MG.GLR(Z, 'xyz') assert_raises(ValueError, MG.SO, Z, 'zxy') detZ = np.linalg.det(Z) if detZ < 0: W = -Z else: W = Z f = np.fabs(detZ)**(1/3.) 
SL_Z = MG.SLR(W/f, 'xyz') orth = expm(Z - Z.T) O_Z = MG.O(orth, 'xyz') def random_orth(dim=3, names=None): Z = np.random.standard_normal((3,3)) orth = expm(Z - Z.T) if not names: names = ['e%d' % i for i in range(dim)] else: if len(names) != dim: raise ValueError('len(names) != dim') return MG.O(orth, names) def test_basis_change(): basis1 = random_orth(names='xyz') basis2 = random_orth(names='uvw') bchange = MG.Linear(basis2.coords, basis1.coords, random_orth(dim=3).matrix) #print basis1.coords new = MG.change_basis(basis1, bchange) assert_true(MG.same_transformation(basis1, new, bchange)) def test_product(): GLZ_A = MG.GLZ(A, 'xy') GLZ_B = MG.GLZ(B, 'xy') GLZ_C = MG.GLZ(B, 'ij') GLZ_AB = MG.product(GLZ_A, GLZ_B) yield (assert_true, np.allclose(GLZ_AB.matrix, np.dot(GLZ_A.matrix, GLZ_B.matrix))) # different coordinates: can't make the product yield assert_raises, ValueError, MG.product, GLZ_A, GLZ_C def test_product2(): O_1 = random_orth(names='xyz') O_2 = random_orth(names='xyz') O_21 = MG.product(O_2, O_1) #print type(O_21) def test_homomorphism(): GLZ_B = MG.GLZ(B, 'xy') GLZ_D = MG.GLZ(D, 'ij') GLZ_BD = MG.product_homomorphism(GLZ_B, GLZ_D) yield assert_true, np.allclose(GLZ_BD.matrix[:2,:2], GLZ_B.matrix) yield assert_true, np.allclose(GLZ_BD.matrix[2:,2:], GLZ_D.matrix) yield assert_true, np.allclose(GLZ_BD.matrix[2:,:2], 0) yield assert_true, np.allclose(GLZ_BD.matrix[:2,2:], 0) GLZ_C = MG.GLZ(D, 'xy') # have the same axisnames, an exception will be raised yield assert_raises, ValueError, MG.product_homomorphism, GLZ_C, GLZ_B E = np.array([[7,8], [8,9]]) GLZ_E = MG.GLZ(E, 'ij') F = np.array([[6,7], [5,6]]) GLZ_F = MG.GLZ(E, 'xy') GLZ_FE = MG.product_homomorphism(GLZ_F, GLZ_E) test1 = MG.product(GLZ_FE, GLZ_BD) test2 = MG.product_homomorphism(MG.product(GLZ_F, GLZ_B), MG.product(GLZ_E, GLZ_D)) yield assert_true, np.allclose(test1.matrix, test2.matrix) def test_32(): class O32(MG.O): dtype = np.float32 def validate(self, M=None): """ Check that the matrix is (almost) orthogonal. 
""" if M is None: M = self.matrix return np.allclose(np.identity(self.ndims[0], dtype=self.dtype), np.dot(M.T, M), atol=1.0e-06) a = random_orth(3).matrix.astype(np.float32) A = O32(a, 'xyz') B = O32(random_orth(3).matrix.astype(np.float32), 'xyz') C = MG.product(A, B) yield assert_equal, C.dtype, np.float32 ev = ArrayCoordMap.from_shape(C, (20,30,40)) yield assert_equal, ev.values.dtype, np.float32 nipy-0.3.0/nipy/core/reference/tests/test_parametrized_surface.py000066400000000000000000000051671210344137400253200ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Parametrized surfaces using a CoordinateMap """ import numpy as np from nose.tools import assert_equal from nipy.core.api import CoordinateMap, CoordinateSystem from nipy.core.api import Grid uv = CoordinateSystem('uv', 'input') xyz = CoordinateSystem('xyz', 'output') def parametric_mapping(vals): """ Parametrization of the surface x**2-y**2*z**2+z**3=0 """ u = vals[:,0] v = vals[:, 1] o = np.array([v*(u**2-v**2), u, u**2-v**2]).T return o """ Let's check that indeed this is a parametrization of that surface """ def implicit(vals): x = vals[:,0]; y = vals[:,1]; z = vals[:,2] return x**2-y**2*z**2+z**3 surface_param = CoordinateMap(uv, xyz, parametric_mapping) def test_surface(): assert np.allclose( implicit( parametric_mapping( np.random.standard_normal((40,2)) ) ), 0) def test_grid(): g = Grid(surface_param) xyz_grid = g[-1:1:201j,-1:1:101j] x, y, z = xyz_grid.transposed_values yield assert_equal, x.shape, (201,101) yield assert_equal, y.shape, (201,101) yield assert_equal, z.shape, (201,101) def test_grid32(): # Check that we can use a float32 input and output uv32 = CoordinateSystem('uv', 'input', np.float32) xyz32 = CoordinateSystem('xyz', 'output', np.float32) surface32 = CoordinateMap(uv32, xyz32, parametric_mapping) g = Grid(surface32) xyz_grid = g[-1:1:201j,-1:1:101j] x, y, z = xyz_grid.transposed_values yield assert_equal, x.shape, (201,101) yield assert_equal, y.shape, (201,101) yield assert_equal, z.shape, (201,101) yield assert_equal, x.dtype, np.dtype(np.float32) def test_grid32_c128(): # Check that we can use a float32 input and complex128 output uv32 = CoordinateSystem('uv', 'input', np.float32) xyz128 = CoordinateSystem('xyz', 'output', np.complex128) def par_c128(x): return parametric_mapping(x).astype(np.complex128) surface = CoordinateMap(uv32, xyz128, par_c128) g = Grid(surface) xyz_grid = g[-1:1:201j,-1:1:101j] x, y, z = xyz_grid.transposed_values yield assert_equal, x.shape, (201,101) yield assert_equal, y.shape, (201,101) yield assert_equal, z.shape, (201,101) yield assert_equal, x.dtype, np.dtype(np.complex128) def view_surface(): from enthought.mayavi import mlab g = Grid(surface_param) xyz_grid = g[-1:1:201j,-1:1:101j] x, y, z = xyz_grid.transposed_values mlab.mesh(x, y, z) mlab.draw() nipy-0.3.0/nipy/core/reference/tests/test_slices.py000066400000000000000000000037021210344137400223740ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from ..slices import bounding_box, zslice, yslice, xslice from ..coordinate_map import AffineTransform from ..coordinate_system import CoordinateSystem as CS from ..spaces import scanner_csm, scanner_space, mni_csm from nose.tools import ( assert_equal, assert_true, assert_false) from numpy.testing import ( assert_array_equal, assert_array_almost_equal) # Names for a 3D axis set names = 
['xspace', 'yspace', 'zspace'] def test_bounding_box(): shape = (10, 14, 16) coordmap = AffineTransform.identity(names) assert_equal(bounding_box(coordmap, shape), ((0., 9.), (0, 13), (0, 15))) def test_box_slice(): t = xslice(5, ([0, 9], 10), ([0, 9], 10), scanner_space) assert_array_almost_equal(t.affine, [[ 0., 0., 5.], [ 1., 0., 0.], [ 0., 1., 0.], [ 0., 0., 1.]]) assert_equal(t.function_domain, CS(['i_y', 'i_z'], 'slice')) assert_equal(t.function_range, scanner_csm(3)) t = yslice(4, ([0, 9], 10), ([0, 9], 10), 'mni') assert_array_almost_equal(t.affine, [[ 1., 0., 0.], [ 0., 0., 4.], [ 0., 1., 0.], [ 0., 0., 1.]]) assert_equal(t.function_domain, CS(['i_x', 'i_z'], 'slice')) assert_equal(t.function_range, mni_csm(3)) t = zslice(3, ([0, 9], 10), ([0, 9], 10), mni_csm(3)) assert_array_almost_equal(t.affine, [[ 1., 0., 0.], [ 0., 1., 0.], [ 0., 0., 3.], [ 0., 0., 1.]]) assert_equal(t.function_domain, CS(['i_x', 'i_y'], 'slice')) assert_equal(t.function_range, mni_csm(3)) nipy-0.3.0/nipy/core/reference/tests/test_spaces.py000066400000000000000000000257251210344137400224010ustar00rootroot00000000000000""" Testing coordinate map defined spaces """ import numpy as np from nibabel.affines import from_matvec from ...image.image import Image from ..coordinate_system import CoordinateSystem as CS, CoordSysMakerError from ..coordinate_map import AffineTransform, CoordinateMap from ..spaces import (vox2mni, vox2scanner, vox2talairach, vox2unknown, vox2aligned, xyz_affine, xyz_order, SpaceTypeError, AxesError, AffineError, XYZSpace, known_space, known_spaces, is_xyz_space, SpaceError, is_xyz_affable, get_world_cs, mni_csm, mni_space) from numpy.testing import (assert_array_almost_equal, assert_array_equal) from nose.tools import (assert_true, assert_false, assert_equal, assert_raises, assert_not_equal) VARS = {} def setup(): d_names = list('ijkl') xyzs = 'x=L->R', 'y=P->A', 'z=I->S' mni_xyzs = ['mni-' + suff for suff in xyzs] scanner_xyzs = ['scanner-' + suff for suff in xyzs] unknown_xyzs = ['unknown-' + suff for suff in xyzs] aligned_xyzs = ['aligned-' + suff for suff in xyzs] talairach_xyzs = ['talairach-' + suff for suff in xyzs] r_names = mni_xyzs + ['t'] d_cs_r3 = CS(d_names[:3], 'voxels') d_cs_r4 = CS(d_names[:4], 'voxels') r_cs_r3 = CS(r_names[:3], 'mni') r_cs_r4 = CS(r_names[:4], 'mni') VARS.update(locals()) def test_xyz_space(): # Space objects sp = XYZSpace('hijo') assert_equal(sp.name, 'hijo') exp_labels = ['hijo-' + L for L in 'x=L->R', 'y=P->A', 'z=I->S'] exp_map = dict(zip('xyz', exp_labels)) assert_equal([sp.x, sp.y, sp.z], exp_labels) assert_equal(sp.as_tuple(), tuple(exp_labels)) assert_equal(sp.as_map(), exp_map) known = {} sp.register_to(known) assert_equal(known, dict(zip(exp_labels, 'xyz'))) # Coordinate system making, and __contains__ tests csm = sp.to_coordsys_maker() cs = csm(2) assert_equal(cs, CS(exp_labels[:2], 'hijo')) # This is only 2 dimensions, not fully in space assert_false(cs in sp) cs = csm(3) assert_equal(cs, CS(exp_labels, 'hijo')) # We now have all 3, this in in the space assert_true(cs in sp) # More dimensions than default, error assert_raises(CoordSysMakerError, csm, 4) # But we can pass in names for further dimensions csm = sp.to_coordsys_maker('tuv') cs = csm(6) assert_equal(cs, CS(exp_labels + list('tuv'), 'hijo')) # These are also in the space, because they contain xyz assert_true(cs in sp) # The axes can be in any order as long as they are a subset cs = CS(exp_labels, 'hijo') assert_true(cs in sp) cs = CS(exp_labels[::-1], 'hijo') assert_true(cs in sp) cs 
= CS(['t'] + exp_labels, 'hijo') assert_true(cs in sp) # The coordinate system name doesn't matter cs = CS(exp_labels, 'hija') assert_true(cs in sp) # Images, and coordinate maps, also work cmap = AffineTransform('ijk', cs, np.eye(4)) assert_true(cmap in sp) img = Image(np.zeros((2,3,4)), cmap) assert_true(img in sp) # equality assert_equal(XYZSpace('hijo'), XYZSpace('hijo')) assert_not_equal(XYZSpace('hijo'), XYZSpace('hija')) def test_is_xyz_space(): # test test for xyz space assert_true(is_xyz_space(XYZSpace('hijo'))) for sp in known_spaces: assert_true(is_xyz_space(sp)) for obj in ([], {}, object(), CS('xyz')): assert_false(is_xyz_space(obj)) def test_known_space(): # Known space utility routine for sp in known_spaces: cs = sp.to_coordsys_maker()(3) assert_equal(known_space(cs), sp) cs = CS('xyz') assert_equal(known_space(cs), None) sp0 = XYZSpace('hijo') sp1 = XYZSpace('hija') custom_spaces = (sp0, sp1) for sp in custom_spaces: cs = sp.to_coordsys_maker()(3) assert_equal(known_space(cs, custom_spaces), sp) def test_image_creation(): # 3D image arr = np.arange(24).reshape(2,3,4) aff = np.diag([2,3,4,1]) img = Image(arr, vox2mni(aff)) assert_equal(img.shape, (2,3,4)) assert_array_equal(img.affine, aff) assert_array_equal(img.coordmap, AffineTransform(VARS['d_cs_r3'], VARS['r_cs_r3'], aff)) # 4D image arr = np.arange(24).reshape(2,3,4,1) img = Image(arr, vox2mni(aff, 7)) exp_aff = np.diag([2,3,4,7,1]) assert_equal(img.shape, (2,3,4,1)) exp_cmap = AffineTransform(VARS['d_cs_r4'], VARS['r_cs_r4'], exp_aff) assert_equal(img.coordmap, exp_cmap) def test_default_makers(): # Tests that the makers make expected coordinate maps for csm, r_names, r_name in ( (vox2scanner, VARS['scanner_xyzs'] + ['t'], 'scanner'), (vox2unknown, VARS['unknown_xyzs'] + ['t'], 'unknown'), (vox2aligned, VARS['aligned_xyzs'] + ['t'], 'aligned'), (vox2mni, VARS['mni_xyzs'] + ['t'], 'mni'), (vox2talairach, VARS['talairach_xyzs'] + ['t'], 'talairach')): for i in range(1,5): dom_cs = CS('ijkl'[:i], 'voxels') ran_cs = CS(r_names[:i], r_name) aff = np.diag(range(i) + [1]) assert_equal(csm(aff), AffineTransform(dom_cs, ran_cs, aff)) def test_get_world_cs(): # Utility to get world from a variety of inputs assert_equal(get_world_cs('mni'), mni_csm(3)) mnit = mni_space.to_coordsys_maker('t')(4) assert_equal(get_world_cs(mni_space, 4), mnit) assert_equal(get_world_cs(mni_csm, 4), mni_csm(4)) assert_equal(get_world_cs(CS('xyz')), CS('xyz')) hija = XYZSpace('hija') maker = hija.to_coordsys_maker('qrs') assert_equal(get_world_cs('hija', ndim = 5, extras='qrs', spaces=[hija]), maker(5)) assert_raises(SpaceError, get_world_cs, 'hijo') assert_raises(SpaceError, get_world_cs, 'hijo', spaces=[hija]) assert_raises(ValueError, get_world_cs, 0) def test_xyz_affine(): # Getting an xyz affine from coordmaps aff3d = from_matvec(np.arange(9).reshape((3,3)), [15,16,17]) cmap3d = AffineTransform(VARS['d_cs_r3'], VARS['r_cs_r3'], aff3d) rzs = np.c_[np.arange(12).reshape((4,3)), [0,0,0,12]] aff4d = from_matvec(rzs, [15,16,17,18]) cmap4d = AffineTransform(VARS['d_cs_r4'], VARS['r_cs_r4'], aff4d) # Simplest case of 3D affine -> affine unchanged assert_array_equal(xyz_affine(cmap3d), aff3d) # 4D (5, 5) affine -> 3D equivalent assert_array_equal(xyz_affine(cmap4d), aff3d) # Any dimensions not spatial, AxesError r_cs = CS(('mni-x', 'mni-y', 'mni-q'), 'mni') funny_cmap = AffineTransform(VARS['d_cs_r3'],r_cs, aff3d) assert_raises(AxesError, xyz_affine, funny_cmap) r_cs = CS(('mni-x', 'mni-q', 'mni-z'), 'mni') funny_cmap = 
AffineTransform(VARS['d_cs_r3'],r_cs, aff3d) assert_raises(AxesError, xyz_affine, funny_cmap) # We insist that the coordmap is in output xyz order permutations = (0, 2, 1), (1, 0, 2), (1, 2, 0), (2, 0, 1), (2, 1, 0) for perm in permutations: assert_raises(AxesError, xyz_affine, cmap3d.reordered_range(perm)) # The input order doesn't matter, as long as the xyz axes map to the first # three input axes for perm in permutations: assert_array_equal(xyz_affine( cmap3d.reordered_domain(perm)), aff3d[:, perm + (-1,)]) # But if the corresponding input axes not in the first three, an axis error wrong_inputs = cmap4d.reordered_domain([0, 1, 3, 2]) assert_raises(AxesError, xyz_affine, wrong_inputs) # xyzs must be orthogonal to dropped axis for i in range(3): aff = aff4d.copy() aff[i,3] = 1 cmap = AffineTransform(VARS['d_cs_r4'], VARS['r_cs_r4'], aff) assert_raises(AffineError, xyz_affine, cmap) # And if reordered assert_raises(AxesError, xyz_affine, cmap.reordered_range([2,0,1,3])) # Non-square goes to square aff54 = np.array([[0, 1, 2, 15], [3, 4, 5, 16], [6, 7, 8, 17], [0, 0, 0, 18], [0, 0, 0, 1]]) cmap = AffineTransform(VARS['d_cs_r3'], VARS['r_cs_r4'], aff54) assert_array_equal(xyz_affine(cmap), aff3d) aff57 = np.array([[0, 1, 2, 0, 0, 0, 15], [3, 4, 5, 0, 0, 0, 16], [6, 7, 8, 0, 0, 0, 17], [0, 0, 0, 0, 0, 0, 18], [0, 0, 0, 0, 0, 0, 1]]) d_cs_r6 = CS('ijklmn', 'voxels') cmap = AffineTransform(d_cs_r6, VARS['r_cs_r4'], aff57) assert_array_equal(xyz_affine(cmap), aff3d) # Non-affine raises SpaceTypeError cmap_cmap = CoordinateMap(VARS['d_cs_r4'], VARS['r_cs_r4'], lambda x:x*3) assert_raises(SpaceTypeError, xyz_affine, cmap_cmap) # Not enough dimensions - SpaceTypeError d_cs_r2 = CS('ij', 'voxels') r_cs_r2 = CS(VARS['r_names'][:2], 'mni') cmap = AffineTransform(d_cs_r2, r_cs_r2, np.array([[2,0,10],[0,3,11],[0,0,1]])) assert_raises(AxesError, xyz_affine, cmap) # Can pass in own validator my_valtor = dict(blind='x', leading='y', ditch='z') r_cs = CS(('blind', 'leading', 'ditch'), 'fall') cmap = AffineTransform(VARS['d_cs_r3'],r_cs, aff3d) assert_raises(AxesError, xyz_affine, cmap) assert_array_equal(xyz_affine(cmap, my_valtor), aff3d) # Slices in x, y, z coordmaps raise error because of missing spatial # dimensions arr = np.arange(120).reshape((2, 3, 4, 5)) aff = np.diag([2, 3, 4, 5, 1]) img = Image(arr, vox2mni(aff)) assert_raises(AxesError, xyz_affine, img[1].coordmap) assert_raises(AxesError, xyz_affine, img[:,1].coordmap) assert_raises(AxesError, xyz_affine, img[:,:,1].coordmap) def test_xyz_order(): # Getting xyz ordering from a coordinate system assert_array_equal(xyz_order(VARS['r_cs_r3']), [0,1,2]) assert_array_equal(xyz_order(VARS['r_cs_r4']), [0,1,2,3]) r_cs = CS(('mni-x=L->R', 'mni-y=P->A', 'mni-q'), 'mni') assert_raises(AxesError, xyz_order, r_cs) r_cs = CS(('t', 'mni-x=L->R', 'mni-z=I->S', 'mni-y=P->A'), 'mni') assert_array_equal(xyz_order(r_cs), [1, 3, 2, 0]) # Can pass in own validator my_valtor = dict(ditch='x', leading='y', blind='z') r_cs = CS(('blind', 'leading', 'ditch'), 'fall') assert_raises(AxesError, xyz_order, r_cs) assert_array_equal(xyz_order(r_cs, my_valtor), [2,1,0]) def test_is_xyz_affable(): # Whether there exists an xyz affine for this coordmap affine = np.diag([2,4,5,6,1]) cmap = AffineTransform(VARS['d_cs_r4'], VARS['r_cs_r4'], affine) assert_true(is_xyz_affable(cmap)) assert_false(is_xyz_affable(cmap.reordered_range([3,0,1,2]))) assert_false(is_xyz_affable(cmap.reordered_domain([3,0,1,2]))) # Can pass in own validator my_valtor = dict(blind='x', leading='y', 
ditch='z') r_cs = CS(('blind', 'leading', 'ditch'), 'fall') affine = from_matvec(np.arange(9).reshape((3, 3)), [11, 12, 13]) cmap = AffineTransform(VARS['d_cs_r3'], r_cs, affine) # No xyz affine if we don't use our custom dictionary assert_false(is_xyz_affable(cmap)) # Is if we do assert_true(is_xyz_affable(cmap, my_valtor)) nipy-0.3.0/nipy/core/setup.py000066400000000000000000000010241210344137400161060ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('core', parent_package, top_path) config.add_subpackage('image') config.add_subpackage('utils') config.add_subpackage('reference') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) nipy-0.3.0/nipy/core/utils/000077500000000000000000000000001210344137400155375ustar00rootroot00000000000000nipy-0.3.0/nipy/core/utils/__init__.py000066400000000000000000000000471210344137400176510ustar00rootroot00000000000000""" Utilities for core subpackage """ nipy-0.3.0/nipy/core/utils/generators.py000066400000000000000000000154211210344137400202650ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This module defines a few common generators for slicing over arrays. They are defined on ndarray, so they do not depend on Image. * data_generator: return (item, data[item]) tuples from an iterable object * slice_generator: return slices through an ndarray, possibly over many indices * f_generator: return a generator that applies a function to the output of another generator The above three generators return 2-tuples. * write_data: write the output of a generator to an ndarray * parcels: return binary array of the unique components of data """ import numpy as np def parcels(data, labels=None, exclude=[]): """ Return a generator for ``[data == label for label in labels]`` If labels is None, labels = numpy.unique(data). Each label in labels can be a sequence, in which case the value returned for that label union:: [numpy.equal(data, l) for l in label] Examples -------- >>> for p in parcels([[1,1],[2,1]]): ... print p ... [[ True True] [False True]] [[False False] [ True False]] >>> for p in parcels([[1,1],[2,3]], labels=[2,3]): ... print p ... [[False False] [ True False]] [[False False] [False True]] >>> for p in parcels([[1,1],[2,3]], labels=[(2,3),2]): ... print p ... [[False False] [ True True]] [[False False] [ True False]] """ try: data = data.get_data() except AttributeError: data = np.asarray(data) if labels is None: labels = np.unique(data) if exclude: labels = set(labels) for e in exclude: if e in labels: labels.remove(e) for label in labels: if type(label) not in [type(()), type([])]: yield np.equal(data, label) else: v = 0 for l in label: v += np.equal(data, l) yield v.astype(bool) def data_generator(data, iterable=None): """ Return generator for ``[(i, data[i]) for i in iterable]`` If iterable is None, it defaults to range(data.shape[0]) Examples -------- >>> a = np.asarray([[True,False],[False,True]]) >>> b = np.asarray([[False,False],[True,False]]) >>> for i, d in data_generator(np.asarray([[1,2],[3,4]]), [a,b]): ... print d ... 
[1 4] [3] """ data = np.asarray(data) if iterable is None: iterable = range(data.shape[0]) for index in iterable: yield index, data[index] def write_data(output, iterable): """ Write (index, data) iterable to `output` Write some data to `output`. Iterable should return 2-tuples of the form index, data such that:: output[index] = data makes sense. Examples -------- >>> a=np.zeros((2,2)) >>> write_data(a, data_generator(np.asarray([[1,2],[3,4]]))) >>> a array([[ 1., 2.], [ 3., 4.]]) """ for index, data in iterable: output[index] = data def slice_generator(data, axis=0): """ Return generator for yielding slices along `axis` Examples -------- >>> for i,d in slice_generator([[1,2],[3,4]]): ... print i, d ... (0,) [1 2] (1,) [3 4] >>> for i,d in slice_generator([[1,2],[3,4]], axis=1): ... print i, d ... (slice(None, None, None), 0) [1 3] (slice(None, None, None), 1) [2 4] """ data = np.asarray(data) if type(axis) is type(1): for j in range(data.shape[axis]): ij = (slice(None,None,None),)*axis + (j,) yield ij, data[(slice(None,None,None),)*axis + (j,)] elif type(axis) in [type(()),type([])]: data = np.asarray(data) # the total number of iterations to be made nmax = np.product(np.asarray(data.shape)[axis]) # calculate the 'divmod' paramter which is used to work out # which index to use to use for each axis during iteration mods = np.cumprod(np.asarray(data.shape)[axis]) divs = [1] + list(mods[:-1]) # set up a full set of slices for the image, to be modified # at each iteration slices = [slice(0, s) for s in data.shape] n = 0 while True: if n >= nmax: raise StopIteration for (a, div, mod) in zip(axis, divs, mods): x = n / div % mod slices[a] = x n += 1 yield slices, data[slices] def f_generator(f, iterable): """ Return a generator for ``[(i, f(x)) for i, x in iterable]`` Examples -------- >>> for i, d in f_generator(lambda x: x**2, data_generator([[1,2],[3,4]])): ... print i, d ... 0 [1 4] 1 [ 9 16] """ for i, x in iterable: yield i, np.asarray(f(x)) def slice_parcels(data, labels=None, axis=0): """ A generator for slicing through parcels and slices of data... hmmm... a better description is needed >>> x=np.array([[0,0,0,1],[0,1,0,1],[2,2,0,1]]) >>> for a in slice_parcels(x): ... print a, x[a] ... ((0,), array([ True, True, True, False], dtype=bool)) [0 0 0] ((0,), array([False, False, False, True], dtype=bool)) [1] ((1,), array([ True, False, True, False], dtype=bool)) [0 0] ((1,), array([False, True, False, True], dtype=bool)) [1 1] ((2,), array([False, False, True, False], dtype=bool)) [0] ((2,), array([False, False, False, True], dtype=bool)) [1] ((2,), array([ True, True, False, False], dtype=bool)) [2 2] >>> for a in slice_parcels(x, axis=1): ... b, c = a ... print a, x[b][c] ... 
((slice(None, None, None), 0), array([ True, True, False], dtype=bool)) [0 0] ((slice(None, None, None), 0), array([False, False, True], dtype=bool)) [2] ((slice(None, None, None), 1), array([ True, False, False], dtype=bool)) [0] ((slice(None, None, None), 1), array([False, True, False], dtype=bool)) [1] ((slice(None, None, None), 1), array([False, False, True], dtype=bool)) [2] ((slice(None, None, None), 2), array([ True, True, True], dtype=bool)) [0 0 0] ((slice(None, None, None), 3), array([ True, True, True], dtype=bool)) [1 1 1] """ for i, d in slice_generator(data, axis=axis): for p in parcels(d, labels=labels): yield (i, p) def matrix_generator(img): """ From a generator of items (i, r), return (i, rp) where rp is a 2d array with rp.shape = (r.shape[0], prod(r.shape[1:])) """ for i, r in img: r.shape = (r.shape[0], np.product(r.shape[1:])) yield i, r def shape_generator(img, shape): """ From a generator of items (i, r), return (i, r.reshape(shape)) """ for i, r in img: r.shape = shape yield i, r nipy-0.3.0/nipy/core/utils/setup.py000066400000000000000000000007131210344137400172520ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('utils', parent_package, top_path) config.add_subpackage('tests') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) nipy-0.3.0/nipy/core/utils/tests/000077500000000000000000000000001210344137400167015ustar00rootroot00000000000000nipy-0.3.0/nipy/core/utils/tests/__init__.py000066400000000000000000000000271210344137400210110ustar00rootroot00000000000000# Make tests a package nipy-0.3.0/nipy/core/utils/tests/test_generators.py000066400000000000000000000101741210344137400224660ustar00rootroot00000000000000# EMAcs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import numpy as np from ...api import write_data, slice_generator from .. 
import generators as gen from nose.tools import assert_equal from numpy.testing import assert_almost_equal, assert_array_equal shape = (10,20,30) DATA = np.zeros(shape) DATA2 = np.ones(shape) shape = (3,5,4) DATA3 = np.zeros(shape) def test_read_slices(): for _, d in slice_generator(DATA): assert_equal(d.shape, (20, 30)) for _, d in slice_generator(DATA, axis=1): assert_equal(d.shape, (10, 30)) for _, d in slice_generator(DATA, axis=2): assert_equal(d.shape, (10, 20)) def test_write_slices(): tmp = np.zeros(DATA.shape) write_data(tmp, slice_generator(DATA)) assert_almost_equal(tmp, np.asarray(DATA)) tmp = np.zeros(DATA.shape) write_data(tmp, slice_generator(DATA, axis=1)) assert_almost_equal(tmp, np.asarray(DATA)) tmp = np.zeros(DATA.shape) write_data(tmp, slice_generator(DATA, axis=2)) assert_almost_equal(tmp, np.asarray(DATA)) def test_multi_slice(): for _, d in slice_generator(DATA, axis=[0, 1]): assert_equal(d.shape, (30,)) for _, d in slice_generator(DATA, axis=[2, 1]): assert_equal(d.shape, (10,)) def test_multi_slice_write(): a = np.zeros(DATA.shape) write_data(a, slice_generator(DATA, axis=[0, 1])) assert_almost_equal(a, np.asarray(DATA)) def test_parcel(): parcelmap = np.zeros(DATA3.shape) parcelmap[0,0,0] = 1 parcelmap[1,1,1] = 1 parcelmap[2,2,2] = 1 parcelmap[1,2,1] = 2 parcelmap[2,3,2] = 2 parcelmap[0,1,0] = 2 parcelseq = (0, 1, 2, 3) expected = [np.product(DATA3.shape) - 6, 3, 3, 0] iterator = gen.data_generator(DATA3, gen.parcels(parcelmap, labels=parcelseq)) for i, pair in enumerate(iterator): s, d = pair assert_equal((expected[i],), d.shape) iterator = gen.data_generator(DATA3, gen.parcels(parcelmap)) for i, pair in enumerate(iterator): s, d = pair assert_equal((expected[i],), d.shape) def test_parcel_write(): parcelmap = np.zeros(DATA3.shape) parcelmap[0,0,0] = 1 parcelmap[1,1,1] = 1 parcelmap[2,2,2] = 1 parcelmap[1,2,1] = 2 parcelmap[2,3,2] = 2 parcelmap[0,1,0] = 2 parcelseq = (0, 1, 2, 3) expected = [np.product(DATA3.shape) - 6, 3, 3, 0] iterator = gen.parcels(parcelmap, labels=parcelseq) for i, s in enumerate(iterator): value = np.arange(expected[i]) DATA3[s] = value iterator = gen.parcels(parcelmap, labels=parcelseq) for i, pair in enumerate(gen.data_generator(DATA3, iterator)): s, d = pair assert_equal((expected[i],), d.shape) assert_array_equal(d, np.arange(expected[i])) iterator = gen.parcels(parcelmap) for i, s in enumerate(iterator): value = np.arange(expected[i]) DATA3[s] = value iterator = gen.parcels(parcelmap) for i, pair in enumerate(gen.data_generator(DATA3, iterator)): s, d = pair assert_equal((expected[i],), d.shape) assert_array_equal(d, np.arange(expected[i])) def test_parcel_copy(): parcelmap = np.zeros(DATA3.shape) parcelmap[0,0,0] = 1 parcelmap[1,1,1] = 1 parcelmap[2,2,2] = 1 parcelmap[1,2,1] = 2 parcelmap[2,3,2] = 2 parcelmap[0,1,0] = 2 parcelseq = (0, 1, 2, 3) expected = [np.product(DATA3.shape) - 6, 3, 3, 0] tmp = DATA3.copy() gen_parcels = gen.parcels(parcelmap, labels=parcelseq) new_iterator = gen.data_generator(tmp, gen_parcels) for i, slice_ in enumerate(new_iterator): assert_equal((expected[i],), slice_[1].shape) def test_sliceparcel(): parcelmap = np.asarray([[0,0,0,1,2],[0,0,1,1,2],[0,0,0,0,2]]) parcelseq = ((1, 2), 0, 2) o = np.zeros(parcelmap.shape) iterator = gen.slice_parcels(parcelmap, labels=parcelseq) for i, pair in enumerate(iterator): a, s = pair o[a][s] = i assert_array_equal(o, np.array([[1,1,1,0,2], [4,4,3,3,5], [7,7,7,7,8]])) 
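# --- Editor's illustrative sketch (not part of the released nipy sources).
# A minimal end-to-end use of the generator utilities defined in
# nipy/core/utils/generators.py, relying only on names this test module
# already imports (np, write_data, slice_generator and the ``gen`` alias for
# the generators module).  The function name and the small example array are
# purely illustrative.
def _example_generator_usage():
    data = np.array([[1, 1, 2],
                     [2, 3, 3]])
    # Copy `data` into `out` one slice at a time along axis 0.
    out = np.zeros(data.shape, dtype=data.dtype)
    write_data(out, slice_generator(data))
    assert np.all(out == data)
    # parcels() yields one boolean mask per unique label; data_generator()
    # pairs each mask with the values it selects from `data`.
    sizes = [d.size for _, d in gen.data_generator(data, gen.parcels(data))]
    assert sizes == [2, 2, 2]   # labels 1, 2 and 3 each cover two elements
    return out, sizes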
nipy-0.3.0/nipy/externals/000077500000000000000000000000001210344137400154545ustar00rootroot00000000000000nipy-0.3.0/nipy/externals/__init__.py000066400000000000000000000002761210344137400175720ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: # init for externals package from . import argparse from . import configobj nipy-0.3.0/nipy/externals/argparse.py000066400000000000000000002462611210344137400176450ustar00rootroot00000000000000# emacs: -*- coding: utf-8; mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: # Copyright 2006-2009 Steven J. Bethard . # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Command-line parsing library This module is an optparse-inspired command-line parsing library that: - handles both optional and positional arguments - produces highly informative usage messages - supports parsers that dispatch to sub-parsers The following is a simple usage example that sums integers from the command-line and writes the result to a file:: parser = argparse.ArgumentParser( description='sum the integers at the command line') parser.add_argument( 'integers', metavar='int', nargs='+', type=int, help='an integer to be summed') parser.add_argument( '--log', default=sys.stdout, type=argparse.FileType('w'), help='the file where the sum should be written') args = parser.parse_args() args.log.write('%s' % sum(args.integers)) args.log.close() The module contains the following public classes: - ArgumentParser -- The main entry point for command-line parsing. As the example above shows, the add_argument() method is used to populate the parser with actions for optional and positional arguments. Then the parse_args() method is invoked to convert the args at the command-line into an object with attributes. - ArgumentError -- The exception raised by ArgumentParser objects when there are errors with the parser's actions. Errors raised while parsing the command-line are caught by ArgumentParser and emitted as command-line messages. - FileType -- A factory for defining types of files to be created. As the example above shows, instances of FileType are typically passed as the type= argument of add_argument() calls. 
- Action -- The base class for parser actions. Typically actions are selected by passing strings like 'store_true' or 'append_const' to the action= argument of add_argument(). However, for greater customization of ArgumentParser actions, subclasses of Action may be defined and passed as the action= argument. - HelpFormatter, RawDescriptionHelpFormatter, RawTextHelpFormatter, ArgumentDefaultsHelpFormatter -- Formatter classes which may be passed as the formatter_class= argument to the ArgumentParser constructor. HelpFormatter is the default, RawDescriptionHelpFormatter and RawTextHelpFormatter tell the parser not to change the formatting for help text, and ArgumentDefaultsHelpFormatter adds information about argument defaults to the help. All other classes in this module are considered implementation details. (Also note that HelpFormatter and RawDescriptionHelpFormatter are only considered public as object names -- the API of the formatter objects is still considered an implementation detail.) """ __version__ = '1.0.1' __all__ = [ 'ArgumentParser', 'ArgumentError', 'Namespace', 'Action', 'FileType', 'HelpFormatter', 'RawDescriptionHelpFormatter', 'RawTextHelpFormatter' 'ArgumentDefaultsHelpFormatter', ] import copy as _copy import os as _os import re as _re import sys as _sys import textwrap as _textwrap from gettext import gettext as _ try: _set = set except NameError: from sets import Set as _set try: _basestring = basestring except NameError: _basestring = str try: _sorted = sorted except NameError: def _sorted(iterable, reverse=False): result = list(iterable) result.sort() if reverse: result.reverse() return result # silence Python 2.6 buggy warnings about Exception.message if _sys.version_info[:2] == (2, 6): import warnings warnings.filterwarnings( action='ignore', message='BaseException.message has been deprecated as of Python 2.6', category=DeprecationWarning, module='argparse') SUPPRESS = '==SUPPRESS==' OPTIONAL = '?' ZERO_OR_MORE = '*' ONE_OR_MORE = '+' PARSER = '==PARSER==' # ============================= # Utility functions and classes # ============================= class _AttributeHolder(object): """Abstract base class that provides __repr__. The __repr__ method returns a string in the format:: ClassName(attr=name, attr=name, ...) The attributes are determined either by a class-level attribute, '_kwarg_names', or by inspecting the instance __dict__. """ def __repr__(self): type_name = type(self).__name__ arg_strings = [] for arg in self._get_args(): arg_strings.append(repr(arg)) for name, value in self._get_kwargs(): arg_strings.append('%s=%r' % (name, value)) return '%s(%s)' % (type_name, ', '.join(arg_strings)) def _get_kwargs(self): return _sorted(self.__dict__.items()) def _get_args(self): return [] def _ensure_value(namespace, name, value): if getattr(namespace, name, None) is None: setattr(namespace, name, value) return getattr(namespace, name) # =============== # Formatting Help # =============== class HelpFormatter(object): """Formatter for generating usage messages and argument help strings. Only the name of this class is considered a public API. All the methods provided by the class are considered an implementation detail. 
""" def __init__(self, prog, indent_increment=2, max_help_position=24, width=None): # default setting for width if width is None: try: width = int(_os.environ['COLUMNS']) except (KeyError, ValueError): width = 80 width -= 2 self._prog = prog self._indent_increment = indent_increment self._max_help_position = max_help_position self._width = width self._current_indent = 0 self._level = 0 self._action_max_length = 0 self._root_section = self._Section(self, None) self._current_section = self._root_section self._whitespace_matcher = _re.compile(r'\s+') self._long_break_matcher = _re.compile(r'\n\n\n+') # =============================== # Section and indentation methods # =============================== def _indent(self): self._current_indent += self._indent_increment self._level += 1 def _dedent(self): self._current_indent -= self._indent_increment assert self._current_indent >= 0, 'Indent decreased below 0.' self._level -= 1 class _Section(object): def __init__(self, formatter, parent, heading=None): self.formatter = formatter self.parent = parent self.heading = heading self.items = [] def format_help(self): # format the indented section if self.parent is not None: self.formatter._indent() join = self.formatter._join_parts for func, args in self.items: func(*args) item_help = join([func(*args) for func, args in self.items]) if self.parent is not None: self.formatter._dedent() # return nothing if the section was empty if not item_help: return '' # add the heading if the section was non-empty if self.heading is not SUPPRESS and self.heading is not None: current_indent = self.formatter._current_indent heading = '%*s%s:\n' % (current_indent, '', self.heading) else: heading = '' # join the section-initial newline, the heading and the help return join(['\n', heading, item_help, '\n']) def _add_item(self, func, args): self._current_section.items.append((func, args)) # ======================== # Message building methods # ======================== def start_section(self, heading): self._indent() section = self._Section(self, self._current_section, heading) self._add_item(section.format_help, []) self._current_section = section def end_section(self): self._current_section = self._current_section.parent self._dedent() def add_text(self, text): if text is not SUPPRESS and text is not None: self._add_item(self._format_text, [text]) def add_usage(self, usage, actions, groups, prefix=None): if usage is not SUPPRESS: args = usage, actions, groups, prefix self._add_item(self._format_usage, args) def add_argument(self, action): if action.help is not SUPPRESS: # find all invocations get_invocation = self._format_action_invocation invocations = [get_invocation(action)] for subaction in self._iter_indented_subactions(action): invocations.append(get_invocation(subaction)) # update the maximum item length invocation_length = max([len(s) for s in invocations]) action_length = invocation_length + self._current_indent self._action_max_length = max(self._action_max_length, action_length) # add the item to the list self._add_item(self._format_action, [action]) def add_arguments(self, actions): for action in actions: self.add_argument(action) # ======================= # Help-formatting methods # ======================= def format_help(self): help = self._root_section.format_help() if help: help = self._long_break_matcher.sub('\n\n', help) help = help.strip('\n') + '\n' return help def _join_parts(self, part_strings): return ''.join([part for part in part_strings if part and part is not SUPPRESS]) def _format_usage(self, 
usage, actions, groups, prefix): if prefix is None: prefix = _('usage: ') # if usage is specified, use that if usage is not None: usage = usage % dict(prog=self._prog) # if no optionals or positionals are available, usage is just prog elif usage is None and not actions: usage = '%(prog)s' % dict(prog=self._prog) # if optionals and positionals are available, calculate usage elif usage is None: prog = '%(prog)s' % dict(prog=self._prog) # split optionals from positionals optionals = [] positionals = [] for action in actions: if action.option_strings: optionals.append(action) else: positionals.append(action) # build full usage string format = self._format_actions_usage action_usage = format(optionals + positionals, groups) usage = ' '.join([s for s in [prog, action_usage] if s]) # wrap the usage parts if it's too long text_width = self._width - self._current_indent if len(prefix) + len(usage) > text_width: # break usage into wrappable parts part_regexp = r'\(.*?\)+|\[.*?\]+|\S+' opt_usage = format(optionals, groups) pos_usage = format(positionals, groups) opt_parts = _re.findall(part_regexp, opt_usage) pos_parts = _re.findall(part_regexp, pos_usage) assert ' '.join(opt_parts) == opt_usage assert ' '.join(pos_parts) == pos_usage # helper for wrapping lines def get_lines(parts, indent, prefix=None): lines = [] line = [] if prefix is not None: line_len = len(prefix) - 1 else: line_len = len(indent) - 1 for part in parts: if line_len + 1 + len(part) > text_width: lines.append(indent + ' '.join(line)) line = [] line_len = len(indent) - 1 line.append(part) line_len += len(part) + 1 if line: lines.append(indent + ' '.join(line)) if prefix is not None: lines[0] = lines[0][len(indent):] return lines # if prog is short, follow it with optionals or positionals if len(prefix) + len(prog) <= 0.75 * text_width: indent = ' ' * (len(prefix) + len(prog) + 1) if opt_parts: lines = get_lines([prog] + opt_parts, indent, prefix) lines.extend(get_lines(pos_parts, indent)) elif pos_parts: lines = get_lines([prog] + pos_parts, indent, prefix) else: lines = [prog] # if prog is long, put it on its own line else: indent = ' ' * len(prefix) parts = opt_parts + pos_parts lines = get_lines(parts, indent) if len(lines) > 1: lines = [] lines.extend(get_lines(opt_parts, indent)) lines.extend(get_lines(pos_parts, indent)) lines = [prog] + lines # join lines into usage usage = '\n'.join(lines) # prefix with 'usage:' return '%s%s\n\n' % (prefix, usage) def _format_actions_usage(self, actions, groups): # find group indices and identify actions in groups group_actions = _set() inserts = {} for group in groups: try: start = actions.index(group._group_actions[0]) except ValueError: continue else: end = start + len(group._group_actions) if actions[start:end] == group._group_actions: for action in group._group_actions: group_actions.add(action) if not group.required: inserts[start] = '[' inserts[end] = ']' else: inserts[start] = '(' inserts[end] = ')' for i in range(start + 1, end): inserts[i] = '|' # collect all actions format strings parts = [] for i, action in enumerate(actions): # suppressed arguments are marked with None # remove | separators for suppressed arguments if action.help is SUPPRESS: parts.append(None) if inserts.get(i) == '|': inserts.pop(i) elif inserts.get(i + 1) == '|': inserts.pop(i + 1) # produce all arg strings elif not action.option_strings: part = self._format_args(action, action.dest) # if it's in a group, strip the outer [] if action in group_actions: if part[0] == '[' and part[-1] == ']': part = part[1:-1] 
# add the action string to the list parts.append(part) # produce the first way to invoke the option in brackets else: option_string = action.option_strings[0] # if the Optional doesn't take a value, format is: # -s or --long if action.nargs == 0: part = '%s' % option_string # if the Optional takes a value, format is: # -s ARGS or --long ARGS else: default = action.dest.upper() args_string = self._format_args(action, default) part = '%s %s' % (option_string, args_string) # make it look optional if it's not required or in a group if not action.required and action not in group_actions: part = '[%s]' % part # add the action string to the list parts.append(part) # insert things at the necessary indices for i in _sorted(inserts, reverse=True): parts[i:i] = [inserts[i]] # join all the action items with spaces text = ' '.join([item for item in parts if item is not None]) # clean up separators for mutually exclusive groups open = r'[\[(]' close = r'[\])]' text = _re.sub(r'(%s) ' % open, r'\1', text) text = _re.sub(r' (%s)' % close, r'\1', text) text = _re.sub(r'%s *%s' % (open, close), r'', text) text = _re.sub(r'\(([^|]*)\)', r'\1', text) text = text.strip() # return the text return text def _format_text(self, text): text_width = self._width - self._current_indent indent = ' ' * self._current_indent return self._fill_text(text, text_width, indent) + '\n\n' def _format_action(self, action): # determine the required width and the entry label help_position = min(self._action_max_length + 2, self._max_help_position) help_width = self._width - help_position action_width = help_position - self._current_indent - 2 action_header = self._format_action_invocation(action) # ho nelp; start on same line and add a final newline if not action.help: tup = self._current_indent, '', action_header action_header = '%*s%s\n' % tup # short action name; start on the same line and pad two spaces elif len(action_header) <= action_width: tup = self._current_indent, '', action_width, action_header action_header = '%*s%-*s ' % tup indent_first = 0 # long action name; start on the next line else: tup = self._current_indent, '', action_header action_header = '%*s%s\n' % tup indent_first = help_position # collect the pieces of the action help parts = [action_header] # if there was help for the action, add lines of help text if action.help: help_text = self._expand_help(action) help_lines = self._split_lines(help_text, help_width) parts.append('%*s%s\n' % (indent_first, '', help_lines[0])) for line in help_lines[1:]: parts.append('%*s%s\n' % (help_position, '', line)) # or add a newline if the description doesn't end with one elif not action_header.endswith('\n'): parts.append('\n') # if there are any sub-actions, add their help as well for subaction in self._iter_indented_subactions(action): parts.append(self._format_action(subaction)) # return a single string return self._join_parts(parts) def _format_action_invocation(self, action): if not action.option_strings: metavar, = self._metavar_formatter(action, action.dest)(1) return metavar else: parts = [] # if the Optional doesn't take a value, format is: # -s, --long if action.nargs == 0: parts.extend(action.option_strings) # if the Optional takes a value, format is: # -s ARGS, --long ARGS else: default = action.dest.upper() args_string = self._format_args(action, default) for option_string in action.option_strings: parts.append('%s %s' % (option_string, args_string)) return ', '.join(parts) def _metavar_formatter(self, action, default_metavar): if action.metavar is not None: 
result = action.metavar elif action.choices is not None: choice_strs = [str(choice) for choice in action.choices] result = '{%s}' % ','.join(choice_strs) else: result = default_metavar def format(tuple_size): if isinstance(result, tuple): return result else: return (result, ) * tuple_size return format def _format_args(self, action, default_metavar): get_metavar = self._metavar_formatter(action, default_metavar) if action.nargs is None: result = '%s' % get_metavar(1) elif action.nargs == OPTIONAL: result = '[%s]' % get_metavar(1) elif action.nargs == ZERO_OR_MORE: result = '[%s [%s ...]]' % get_metavar(2) elif action.nargs == ONE_OR_MORE: result = '%s [%s ...]' % get_metavar(2) elif action.nargs is PARSER: result = '%s ...' % get_metavar(1) else: formats = ['%s' for _ in range(action.nargs)] result = ' '.join(formats) % get_metavar(action.nargs) return result def _expand_help(self, action): params = dict(vars(action), prog=self._prog) for name in list(params): if params[name] is SUPPRESS: del params[name] if params.get('choices') is not None: choices_str = ', '.join([str(c) for c in params['choices']]) params['choices'] = choices_str return self._get_help_string(action) % params def _iter_indented_subactions(self, action): try: get_subactions = action._get_subactions except AttributeError: pass else: self._indent() for subaction in get_subactions(): yield subaction self._dedent() def _split_lines(self, text, width): text = self._whitespace_matcher.sub(' ', text).strip() return _textwrap.wrap(text, width) def _fill_text(self, text, width, indent): text = self._whitespace_matcher.sub(' ', text).strip() return _textwrap.fill(text, width, initial_indent=indent, subsequent_indent=indent) def _get_help_string(self, action): return action.help class RawDescriptionHelpFormatter(HelpFormatter): """Help message formatter which retains any formatting in descriptions. Only the name of this class is considered a public API. All the methods provided by the class are considered an implementation detail. """ def _fill_text(self, text, width, indent): return ''.join([indent + line for line in text.splitlines(True)]) class RawTextHelpFormatter(RawDescriptionHelpFormatter): """Help message formatter which retains formatting of all help text. Only the name of this class is considered a public API. All the methods provided by the class are considered an implementation detail. """ def _split_lines(self, text, width): return text.splitlines() class ArgumentDefaultsHelpFormatter(HelpFormatter): """Help message formatter which adds default values to argument help. Only the name of this class is considered a public API. All the methods provided by the class are considered an implementation detail. """ def _get_help_string(self, action): help = action.help if '%(default)' not in action.help: if action.default is not SUPPRESS: defaulting_nargs = [OPTIONAL, ZERO_OR_MORE] if action.option_strings or action.nargs in defaulting_nargs: help += ' (default: %(default)s)' return help # ===================== # Options and Arguments # ===================== def _get_action_name(argument): if argument is None: return None elif argument.option_strings: return '/'.join(argument.option_strings) elif argument.metavar not in (None, SUPPRESS): return argument.metavar elif argument.dest not in (None, SUPPRESS): return argument.dest else: return None class ArgumentError(Exception): """An error from creating or using an argument (optional or positional). 
The string value of this exception is the message, augmented with information about the argument that caused it. """ def __init__(self, argument, message): self.argument_name = _get_action_name(argument) self.message = message def __str__(self): if self.argument_name is None: format = '%(message)s' else: format = 'argument %(argument_name)s: %(message)s' return format % dict(message=self.message, argument_name=self.argument_name) # ============== # Action classes # ============== class Action(_AttributeHolder): """Information about how to convert command line strings to Python objects. Action objects are used by an ArgumentParser to represent the information needed to parse a single argument from one or more strings from the command line. The keyword arguments to the Action constructor are also all attributes of Action instances. Keyword Arguments: - option_strings -- A list of command-line option strings which should be associated with this action. - dest -- The name of the attribute to hold the created object(s) - nargs -- The number of command-line arguments that should be consumed. By default, one argument will be consumed and a single value will be produced. Other values include: - N (an integer) consumes N arguments (and produces a list) - '?' consumes zero or one arguments - '*' consumes zero or more arguments (and produces a list) - '+' consumes one or more arguments (and produces a list) Note that the difference between the default and nargs=1 is that with the default, a single value will be produced, while with nargs=1, a list containing a single value will be produced. - const -- The value to be produced if the option is specified and the option uses an action that takes no values. - default -- The value to be produced if the option is not specified. - type -- The type which the command-line arguments should be converted to, should be one of 'string', 'int', 'float', 'complex' or a callable object that accepts a single string argument. If None, 'string' is assumed. - choices -- A container of values that should be allowed. If not None, after a command-line argument has been converted to the appropriate type, an exception will be raised if it is not a member of this collection. - required -- True if the action must always be specified at the command line. This is only meaningful for optional command-line arguments. - help -- The help string describing the argument. - metavar -- The name to be used for the option's argument with the help string. If None, the 'dest' value will be used as the name. 
""" def __init__(self, option_strings, dest, nargs=None, const=None, default=None, type=None, choices=None, required=False, help=None, metavar=None): self.option_strings = option_strings self.dest = dest self.nargs = nargs self.const = const self.default = default self.type = type self.choices = choices self.required = required self.help = help self.metavar = metavar def _get_kwargs(self): names = [ 'option_strings', 'dest', 'nargs', 'const', 'default', 'type', 'choices', 'help', 'metavar', ] return [(name, getattr(self, name)) for name in names] def __call__(self, parser, namespace, values, option_string=None): raise NotImplementedError(_('.__call__() not defined')) class _StoreAction(Action): def __init__(self, option_strings, dest, nargs=None, const=None, default=None, type=None, choices=None, required=False, help=None, metavar=None): if nargs == 0: raise ValueError('nargs for store actions must be > 0; if you ' 'have nothing to store, actions such as store ' 'true or store const may be more appropriate') if const is not None and nargs != OPTIONAL: raise ValueError('nargs must be %r to supply const' % OPTIONAL) super(_StoreAction, self).__init__( option_strings=option_strings, dest=dest, nargs=nargs, const=const, default=default, type=type, choices=choices, required=required, help=help, metavar=metavar) def __call__(self, parser, namespace, values, option_string=None): setattr(namespace, self.dest, values) class _StoreConstAction(Action): def __init__(self, option_strings, dest, const, default=None, required=False, help=None, metavar=None): super(_StoreConstAction, self).__init__( option_strings=option_strings, dest=dest, nargs=0, const=const, default=default, required=required, help=help) def __call__(self, parser, namespace, values, option_string=None): setattr(namespace, self.dest, self.const) class _StoreTrueAction(_StoreConstAction): def __init__(self, option_strings, dest, default=False, required=False, help=None): super(_StoreTrueAction, self).__init__( option_strings=option_strings, dest=dest, const=True, default=default, required=required, help=help) class _StoreFalseAction(_StoreConstAction): def __init__(self, option_strings, dest, default=True, required=False, help=None): super(_StoreFalseAction, self).__init__( option_strings=option_strings, dest=dest, const=False, default=default, required=required, help=help) class _AppendAction(Action): def __init__(self, option_strings, dest, nargs=None, const=None, default=None, type=None, choices=None, required=False, help=None, metavar=None): if nargs == 0: raise ValueError('nargs for append actions must be > 0; if arg ' 'strings are not supplying the value to append, ' 'the append const action may be more appropriate') if const is not None and nargs != OPTIONAL: raise ValueError('nargs must be %r to supply const' % OPTIONAL) super(_AppendAction, self).__init__( option_strings=option_strings, dest=dest, nargs=nargs, const=const, default=default, type=type, choices=choices, required=required, help=help, metavar=metavar) def __call__(self, parser, namespace, values, option_string=None): items = _copy.copy(_ensure_value(namespace, self.dest, [])) items.append(values) setattr(namespace, self.dest, items) class _AppendConstAction(Action): def __init__(self, option_strings, dest, const, default=None, required=False, help=None, metavar=None): super(_AppendConstAction, self).__init__( option_strings=option_strings, dest=dest, nargs=0, const=const, default=default, required=required, help=help, metavar=metavar) def __call__(self, parser, 
namespace, values, option_string=None): items = _copy.copy(_ensure_value(namespace, self.dest, [])) items.append(self.const) setattr(namespace, self.dest, items) class _CountAction(Action): def __init__(self, option_strings, dest, default=None, required=False, help=None): super(_CountAction, self).__init__( option_strings=option_strings, dest=dest, nargs=0, default=default, required=required, help=help) def __call__(self, parser, namespace, values, option_string=None): new_count = _ensure_value(namespace, self.dest, 0) + 1 setattr(namespace, self.dest, new_count) class _HelpAction(Action): def __init__(self, option_strings, dest=SUPPRESS, default=SUPPRESS, help=None): super(_HelpAction, self).__init__( option_strings=option_strings, dest=dest, default=default, nargs=0, help=help) def __call__(self, parser, namespace, values, option_string=None): parser.print_help() parser.exit() class _VersionAction(Action): def __init__(self, option_strings, dest=SUPPRESS, default=SUPPRESS, help=None): super(_VersionAction, self).__init__( option_strings=option_strings, dest=dest, default=default, nargs=0, help=help) def __call__(self, parser, namespace, values, option_string=None): parser.print_version() parser.exit() class _SubParsersAction(Action): class _ChoicesPseudoAction(Action): def __init__(self, name, help): sup = super(_SubParsersAction._ChoicesPseudoAction, self) sup.__init__(option_strings=[], dest=name, help=help) def __init__(self, option_strings, prog, parser_class, dest=SUPPRESS, help=None, metavar=None): self._prog_prefix = prog self._parser_class = parser_class self._name_parser_map = {} self._choices_actions = [] super(_SubParsersAction, self).__init__( option_strings=option_strings, dest=dest, nargs=PARSER, choices=self._name_parser_map, help=help, metavar=metavar) def add_parser(self, name, **kwargs): # set prog from the existing prefix if kwargs.get('prog') is None: kwargs['prog'] = '%s %s' % (self._prog_prefix, name) # create a pseudo-action to hold the choice help if 'help' in kwargs: help = kwargs.pop('help') choice_action = self._ChoicesPseudoAction(name, help) self._choices_actions.append(choice_action) # create the parser and add it to the map parser = self._parser_class(**kwargs) self._name_parser_map[name] = parser return parser def _get_subactions(self): return self._choices_actions def __call__(self, parser, namespace, values, option_string=None): parser_name = values[0] arg_strings = values[1:] # set the parser name if requested if self.dest is not SUPPRESS: setattr(namespace, self.dest, parser_name) # select the parser try: parser = self._name_parser_map[parser_name] except KeyError: tup = parser_name, ', '.join(self._name_parser_map) msg = _('unknown parser %r (choices: %s)' % tup) raise ArgumentError(self, msg) # parse all the remaining options into the namespace parser.parse_args(arg_strings, namespace) # ============== # Type classes # ============== class FileType(object): """Factory for creating file object types Instances of FileType are typically passed as type= arguments to the ArgumentParser add_argument() method. Keyword Arguments: - mode -- A string indicating how the file is to be opened. Accepts the same values as the builtin open() function. - bufsize -- The file's desired buffer size. Accepts the same values as the builtin open() function. 
""" def __init__(self, mode='r', bufsize=None): self._mode = mode self._bufsize = bufsize def __call__(self, string): # the special argument "-" means sys.std{in,out} if string == '-': if 'r' in self._mode: return _sys.stdin elif 'w' in self._mode: return _sys.stdout else: msg = _('argument "-" with mode %r' % self._mode) raise ValueError(msg) # all other arguments are used as file names if self._bufsize: return open(string, self._mode, self._bufsize) else: return open(string, self._mode) def __repr__(self): args = [self._mode, self._bufsize] args_str = ', '.join([repr(arg) for arg in args if arg is not None]) return '%s(%s)' % (type(self).__name__, args_str) # =========================== # Optional and Positional Parsing # =========================== class Namespace(_AttributeHolder): """Simple object for storing attributes. Implements equality by attribute names and values, and provides a simple string representation. """ def __init__(self, **kwargs): for name in kwargs: setattr(self, name, kwargs[name]) def __eq__(self, other): return vars(self) == vars(other) def __ne__(self, other): return not (self == other) class _ActionsContainer(object): def __init__(self, description, prefix_chars, argument_default, conflict_handler): super(_ActionsContainer, self).__init__() self.description = description self.argument_default = argument_default self.prefix_chars = prefix_chars self.conflict_handler = conflict_handler # set up registries self._registries = {} # register actions self.register('action', None, _StoreAction) self.register('action', 'store', _StoreAction) self.register('action', 'store_const', _StoreConstAction) self.register('action', 'store_true', _StoreTrueAction) self.register('action', 'store_false', _StoreFalseAction) self.register('action', 'append', _AppendAction) self.register('action', 'append_const', _AppendConstAction) self.register('action', 'count', _CountAction) self.register('action', 'help', _HelpAction) self.register('action', 'version', _VersionAction) self.register('action', 'parsers', _SubParsersAction) # raise an exception if the conflict handler is invalid self._get_handler() # action storage self._actions = [] self._option_string_actions = {} # groups self._action_groups = [] self._mutually_exclusive_groups = [] # defaults storage self._defaults = {} # determines whether an "option" looks like a negative number self._negative_number_matcher = _re.compile(r'^-\d+|-\d*.\d+$') # whether or not there are any optionals that look like negative # numbers -- uses a list so it can be shared and edited self._has_negative_number_optionals = [] # ==================== # Registration methods # ==================== def register(self, registry_name, value, object): registry = self._registries.setdefault(registry_name, {}) registry[value] = object def _registry_get(self, registry_name, value, default=None): return self._registries[registry_name].get(value, default) # ================================== # Namespace default settings methods # ================================== def set_defaults(self, **kwargs): self._defaults.update(kwargs) # if these defaults match any existing arguments, replace # the previous default on the object with the new one for action in self._actions: if action.dest in kwargs: action.default = kwargs[action.dest] # ======================= # Adding argument actions # ======================= def add_argument(self, *args, **kwargs): """ add_argument(dest, ..., name=value, ...) add_argument(option_string, option_string, ..., name=value, ...) 
""" # if no positional args are supplied or only one is supplied and # it doesn't look like an option string, parse a positional # argument chars = self.prefix_chars if not args or len(args) == 1 and args[0][0] not in chars: kwargs = self._get_positional_kwargs(*args, **kwargs) # otherwise, we're adding an optional argument else: kwargs = self._get_optional_kwargs(*args, **kwargs) # if no default was supplied, use the parser-level default if 'default' not in kwargs: dest = kwargs['dest'] if dest in self._defaults: kwargs['default'] = self._defaults[dest] elif self.argument_default is not None: kwargs['default'] = self.argument_default # create the action object, and add it to the parser action_class = self._pop_action_class(kwargs) action = action_class(**kwargs) return self._add_action(action) def add_argument_group(self, *args, **kwargs): group = _ArgumentGroup(self, *args, **kwargs) self._action_groups.append(group) return group def add_mutually_exclusive_group(self, **kwargs): group = _MutuallyExclusiveGroup(self, **kwargs) self._mutually_exclusive_groups.append(group) return group def _add_action(self, action): # resolve any conflicts self._check_conflict(action) # add to actions list self._actions.append(action) action.container = self # index the action by any option strings it has for option_string in action.option_strings: self._option_string_actions[option_string] = action # set the flag if any option strings look like negative numbers for option_string in action.option_strings: if self._negative_number_matcher.match(option_string): if not self._has_negative_number_optionals: self._has_negative_number_optionals.append(True) # return the created action return action def _remove_action(self, action): self._actions.remove(action) def _add_container_actions(self, container): # collect groups by titles title_group_map = {} for group in self._action_groups: if group.title in title_group_map: msg = _('cannot merge actions - two groups are named %r') raise ValueError(msg % (group.title)) title_group_map[group.title] = group # map each action to its group group_map = {} for group in container._action_groups: # if a group with the title exists, use that, otherwise # create a new group matching the container's group if group.title not in title_group_map: title_group_map[group.title] = self.add_argument_group( title=group.title, description=group.description, conflict_handler=group.conflict_handler) # map the actions to their new group for action in group._group_actions: group_map[action] = title_group_map[group.title] # add container's mutually exclusive groups # NOTE: if add_mutually_exclusive_group ever gains title= and # description= then this code will need to be expanded as above for group in container._mutually_exclusive_groups: mutex_group = self.add_mutually_exclusive_group( required=group.required) # map the actions to their new mutex group for action in group._group_actions: group_map[action] = mutex_group # add all actions to this container or their group for action in container._actions: group_map.get(action, self)._add_action(action) def _get_positional_kwargs(self, dest, **kwargs): # make sure required is not specified if 'required' in kwargs: msg = _("'required' is an invalid argument for positionals") raise TypeError(msg) # mark positional arguments as required if at least one is # always required if kwargs.get('nargs') not in [OPTIONAL, ZERO_OR_MORE]: kwargs['required'] = True if kwargs.get('nargs') == ZERO_OR_MORE and 'default' not in kwargs: kwargs['required'] = True # 
return the keyword arguments with no option strings return dict(kwargs, dest=dest, option_strings=[]) def _get_optional_kwargs(self, *args, **kwargs): # determine short and long option strings option_strings = [] long_option_strings = [] for option_string in args: # error on one-or-fewer-character option strings if len(option_string) < 2: msg = _('invalid option string %r: ' 'must be at least two characters long') raise ValueError(msg % option_string) # error on strings that don't start with an appropriate prefix if not option_string[0] in self.prefix_chars: msg = _('invalid option string %r: ' 'must start with a character %r') tup = option_string, self.prefix_chars raise ValueError(msg % tup) # error on strings that are all prefix characters if not (_set(option_string) - _set(self.prefix_chars)): msg = _('invalid option string %r: ' 'must contain characters other than %r') tup = option_string, self.prefix_chars raise ValueError(msg % tup) # strings starting with two prefix characters are long options option_strings.append(option_string) if option_string[0] in self.prefix_chars: if option_string[1] in self.prefix_chars: long_option_strings.append(option_string) # infer destination, '--foo-bar' -> 'foo_bar' and '-x' -> 'x' dest = kwargs.pop('dest', None) if dest is None: if long_option_strings: dest_option_string = long_option_strings[0] else: dest_option_string = option_strings[0] dest = dest_option_string.lstrip(self.prefix_chars) dest = dest.replace('-', '_') # return the updated keyword arguments return dict(kwargs, dest=dest, option_strings=option_strings) def _pop_action_class(self, kwargs, default=None): action = kwargs.pop('action', default) return self._registry_get('action', action, action) def _get_handler(self): # determine function from conflict handler string handler_func_name = '_handle_conflict_%s' % self.conflict_handler try: return getattr(self, handler_func_name) except AttributeError: msg = _('invalid conflict_resolution value: %r') raise ValueError(msg % self.conflict_handler) def _check_conflict(self, action): # find all options that conflict with this option confl_optionals = [] for option_string in action.option_strings: if option_string in self._option_string_actions: confl_optional = self._option_string_actions[option_string] confl_optionals.append((option_string, confl_optional)) # resolve any conflicts if confl_optionals: conflict_handler = self._get_handler() conflict_handler(action, confl_optionals) def _handle_conflict_error(self, action, conflicting_actions): message = _('conflicting option string(s): %s') conflict_string = ', '.join([option_string for option_string, action in conflicting_actions]) raise ArgumentError(action, message % conflict_string) def _handle_conflict_resolve(self, action, conflicting_actions): # remove all conflicting options for option_string, action in conflicting_actions: # remove the conflicting option action.option_strings.remove(option_string) self._option_string_actions.pop(option_string, None) # if the option now has no option string, remove it from the # container holding it if not action.option_strings: action.container._remove_action(action) class _ArgumentGroup(_ActionsContainer): def __init__(self, container, title=None, description=None, **kwargs): # add any missing keyword arguments by checking the container update = kwargs.setdefault update('conflict_handler', container.conflict_handler) update('prefix_chars', container.prefix_chars) update('argument_default', container.argument_default) super_init = 
super(_ArgumentGroup, self).__init__ super_init(description=description, **kwargs) # group attributes self.title = title self._group_actions = [] # share most attributes with the container self._registries = container._registries self._actions = container._actions self._option_string_actions = container._option_string_actions self._defaults = container._defaults self._has_negative_number_optionals = \ container._has_negative_number_optionals def _add_action(self, action): action = super(_ArgumentGroup, self)._add_action(action) self._group_actions.append(action) return action def _remove_action(self, action): super(_ArgumentGroup, self)._remove_action(action) self._group_actions.remove(action) class _MutuallyExclusiveGroup(_ArgumentGroup): def __init__(self, container, required=False): super(_MutuallyExclusiveGroup, self).__init__(container) self.required = required self._container = container def _add_action(self, action): if action.required: msg = _('mutually exclusive arguments must be optional') raise ValueError(msg) action = self._container._add_action(action) self._group_actions.append(action) return action def _remove_action(self, action): self._container._remove_action(action) self._group_actions.remove(action) class ArgumentParser(_AttributeHolder, _ActionsContainer): """Object for parsing command line strings into Python objects. Keyword Arguments: - prog -- The name of the program (default: sys.argv[0]) - usage -- A usage message (default: auto-generated from arguments) - description -- A description of what the program does - epilog -- Text following the argument descriptions - version -- Add a -v/--version option with the given version string - parents -- Parsers whose arguments should be copied into this one - formatter_class -- HelpFormatter class for printing help messages - prefix_chars -- Characters that prefix optional arguments - fromfile_prefix_chars -- Characters that prefix files containing additional arguments - argument_default -- The default value for all arguments - conflict_handler -- String indicating how to handle conflicts - add_help -- Add a -h/-help option """ def __init__(self, prog=None, usage=None, description=None, epilog=None, version=None, parents=[], formatter_class=HelpFormatter, prefix_chars='-', fromfile_prefix_chars=None, argument_default=None, conflict_handler='error', add_help=True): superinit = super(ArgumentParser, self).__init__ superinit(description=description, prefix_chars=prefix_chars, argument_default=argument_default, conflict_handler=conflict_handler) # default setting for prog if prog is None: prog = _os.path.basename(_sys.argv[0]) self.prog = prog self.usage = usage self.epilog = epilog self.version = version self.formatter_class = formatter_class self.fromfile_prefix_chars = fromfile_prefix_chars self.add_help = add_help add_group = self.add_argument_group self._positionals = add_group(_('positional arguments')) self._optionals = add_group(_('optional arguments')) self._subparsers = None # register types def identity(string): return string self.register('type', None, identity) # add help and version arguments if necessary # (using explicit default to override global argument_default) if self.add_help: self.add_argument( '-h', '--help', action='help', default=SUPPRESS, help=_('show this help message and exit')) if self.version: self.add_argument( '-v', '--version', action='version', default=SUPPRESS, help=_("show program's version number and exit")) # add parent arguments and defaults for parent in parents: 
self._add_container_actions(parent) try: defaults = parent._defaults except AttributeError: pass else: self._defaults.update(defaults) # ======================= # Pretty __repr__ methods # ======================= def _get_kwargs(self): names = [ 'prog', 'usage', 'description', 'version', 'formatter_class', 'conflict_handler', 'add_help', ] return [(name, getattr(self, name)) for name in names] # ================================== # Optional/Positional adding methods # ================================== def add_subparsers(self, **kwargs): if self._subparsers is not None: self.error(_('cannot have multiple subparser arguments')) # add the parser class to the arguments if it's not present kwargs.setdefault('parser_class', type(self)) if 'title' in kwargs or 'description' in kwargs: title = _(kwargs.pop('title', 'subcommands')) description = _(kwargs.pop('description', None)) self._subparsers = self.add_argument_group(title, description) else: self._subparsers = self._positionals # prog defaults to the usage message of this parser, skipping # optional arguments and with no "usage:" prefix if kwargs.get('prog') is None: formatter = self._get_formatter() positionals = self._get_positional_actions() groups = self._mutually_exclusive_groups formatter.add_usage(self.usage, positionals, groups, '') kwargs['prog'] = formatter.format_help().strip() # create the parsers action and add it to the positionals list parsers_class = self._pop_action_class(kwargs, 'parsers') action = parsers_class(option_strings=[], **kwargs) self._subparsers._add_action(action) # return the created parsers action return action def _add_action(self, action): if action.option_strings: self._optionals._add_action(action) else: self._positionals._add_action(action) return action def _get_optional_actions(self): return [action for action in self._actions if action.option_strings] def _get_positional_actions(self): return [action for action in self._actions if not action.option_strings] # ===================================== # Command line argument parsing methods # ===================================== def parse_args(self, args=None, namespace=None): args, argv = self.parse_known_args(args, namespace) if argv: msg = _('unrecognized arguments: %s') self.error(msg % ' '.join(argv)) return args def parse_known_args(self, args=None, namespace=None): # args default to the system args if args is None: args = _sys.argv[1:] # default Namespace built from parser defaults if namespace is None: namespace = Namespace() # add any action defaults that aren't present for action in self._actions: if action.dest is not SUPPRESS: if not hasattr(namespace, action.dest): if action.default is not SUPPRESS: default = action.default if isinstance(action.default, _basestring): default = self._get_value(action, default) setattr(namespace, action.dest, default) # add any parser defaults that aren't present for dest in self._defaults: if not hasattr(namespace, dest): setattr(namespace, dest, self._defaults[dest]) # parse the arguments and exit if there are any errors try: return self._parse_known_args(args, namespace) except ArgumentError: err = _sys.exc_info()[1] self.error(str(err)) def _parse_known_args(self, arg_strings, namespace): # replace arg strings that are file references if self.fromfile_prefix_chars is not None: arg_strings = self._read_args_from_files(arg_strings) # map all mutually exclusive arguments to the other arguments # they can't occur with action_conflicts = {} for mutex_group in self._mutually_exclusive_groups: group_actions = 
mutex_group._group_actions for i, mutex_action in enumerate(mutex_group._group_actions): conflicts = action_conflicts.setdefault(mutex_action, []) conflicts.extend(group_actions[:i]) conflicts.extend(group_actions[i + 1:]) # find all option indices, and determine the arg_string_pattern # which has an 'O' if there is an option at an index, # an 'A' if there is an argument, or a '-' if there is a '--' option_string_indices = {} arg_string_pattern_parts = [] arg_strings_iter = iter(arg_strings) for i, arg_string in enumerate(arg_strings_iter): # all args after -- are non-options if arg_string == '--': arg_string_pattern_parts.append('-') for arg_string in arg_strings_iter: arg_string_pattern_parts.append('A') # otherwise, add the arg to the arg strings # and note the index if it was an option else: option_tuple = self._parse_optional(arg_string) if option_tuple is None: pattern = 'A' else: option_string_indices[i] = option_tuple pattern = 'O' arg_string_pattern_parts.append(pattern) # join the pieces together to form the pattern arg_strings_pattern = ''.join(arg_string_pattern_parts) # converts arg strings to the appropriate and then takes the action seen_actions = _set() seen_non_default_actions = _set() def take_action(action, argument_strings, option_string=None): seen_actions.add(action) argument_values = self._get_values(action, argument_strings) # error if this argument is not allowed with other previously # seen arguments, assuming that actions that use the default # value don't really count as "present" if argument_values is not action.default: seen_non_default_actions.add(action) for conflict_action in action_conflicts.get(action, []): if conflict_action in seen_non_default_actions: msg = _('not allowed with argument %s') action_name = _get_action_name(conflict_action) raise ArgumentError(action, msg % action_name) # take the action if we didn't receive a SUPPRESS value # (e.g. from a default) if argument_values is not SUPPRESS: action(self, namespace, argument_values, option_string) # function to convert arg_strings into an optional action def consume_optional(start_index): # get the optional identified at this index option_tuple = option_string_indices[start_index] action, option_string, explicit_arg = option_tuple # identify additional optionals in the same arg string # (e.g. 
-xyz is the same as -x -y -z if no args are required) match_argument = self._match_argument action_tuples = [] while True: # if we found no optional action, skip it if action is None: extras.append(arg_strings[start_index]) return start_index + 1 # if there is an explicit argument, try to match the # optional's string arguments to only this if explicit_arg is not None: arg_count = match_argument(action, 'A') # if the action is a single-dash option and takes no # arguments, try to parse more single-dash options out # of the tail of the option string chars = self.prefix_chars if arg_count == 0 and option_string[1] not in chars: action_tuples.append((action, [], option_string)) for char in self.prefix_chars: option_string = char + explicit_arg[0] explicit_arg = explicit_arg[1:] or None optionals_map = self._option_string_actions if option_string in optionals_map: action = optionals_map[option_string] break else: msg = _('ignored explicit argument %r') raise ArgumentError(action, msg % explicit_arg) # if the action expect exactly one argument, we've # successfully matched the option; exit the loop elif arg_count == 1: stop = start_index + 1 args = [explicit_arg] action_tuples.append((action, args, option_string)) break # error if a double-dash option did not use the # explicit argument else: msg = _('ignored explicit argument %r') raise ArgumentError(action, msg % explicit_arg) # if there is no explicit argument, try to match the # optional's string arguments with the following strings # if successful, exit the loop else: start = start_index + 1 selected_patterns = arg_strings_pattern[start:] arg_count = match_argument(action, selected_patterns) stop = start + arg_count args = arg_strings[start:stop] action_tuples.append((action, args, option_string)) break # add the Optional to the list and return the index at which # the Optional's string args stopped assert action_tuples for action, args, option_string in action_tuples: take_action(action, args, option_string) return stop # the list of Positionals left to be parsed; this is modified # by consume_positionals() positionals = self._get_positional_actions() # function to convert arg_strings into positional actions def consume_positionals(start_index): # match as many Positionals as possible match_partial = self._match_arguments_partial selected_pattern = arg_strings_pattern[start_index:] arg_counts = match_partial(positionals, selected_pattern) # slice off the appropriate arg strings for each Positional # and add the Positional and its args to the list for action, arg_count in zip(positionals, arg_counts): args = arg_strings[start_index: start_index + arg_count] start_index += arg_count take_action(action, args) # slice off the Positionals that we just parsed and return the # index at which the Positionals' string args stopped positionals[:] = positionals[len(arg_counts):] return start_index # consume Positionals and Optionals alternately, until we have # passed the last option string extras = [] start_index = 0 if option_string_indices: max_option_string_index = max(option_string_indices) else: max_option_string_index = -1 while start_index <= max_option_string_index: # consume any Positionals preceding the next option next_option_string_index = min([ index for index in option_string_indices if index >= start_index]) if start_index != next_option_string_index: positionals_end_index = consume_positionals(start_index) # only try to parse the next optional if we didn't consume # the option string during the positionals parsing if 
positionals_end_index > start_index: start_index = positionals_end_index continue else: start_index = positionals_end_index # if we consumed all the positionals we could and we're not # at the index of an option string, there were extra arguments if start_index not in option_string_indices: strings = arg_strings[start_index:next_option_string_index] extras.extend(strings) start_index = next_option_string_index # consume the next optional and any arguments for it start_index = consume_optional(start_index) # consume any positionals following the last Optional stop_index = consume_positionals(start_index) # if we didn't consume all the argument strings, there were extras extras.extend(arg_strings[stop_index:]) # if we didn't use all the Positional objects, there were too few # arg strings supplied. if positionals: self.error(_('too few arguments')) # make sure all required actions were present for action in self._actions: if action.required: if action not in seen_actions: name = _get_action_name(action) self.error(_('argument %s is required') % name) # make sure all required groups had one option present for group in self._mutually_exclusive_groups: if group.required: for action in group._group_actions: if action in seen_non_default_actions: break # if no actions were used, report the error else: names = [_get_action_name(action) for action in group._group_actions if action.help is not SUPPRESS] msg = _('one of the arguments %s is required') self.error(msg % ' '.join(names)) # return the updated namespace and the extra arguments return namespace, extras def _read_args_from_files(self, arg_strings): # expand arguments referencing files new_arg_strings = [] for arg_string in arg_strings: # for regular arguments, just add them back into the list if arg_string[0] not in self.fromfile_prefix_chars: new_arg_strings.append(arg_string) # replace arguments referencing files with the file content else: try: args_file = open(arg_string[1:]) try: arg_strings = args_file.read().splitlines() arg_strings = self._read_args_from_files(arg_strings) new_arg_strings.extend(arg_strings) finally: args_file.close() except IOError: err = _sys.exc_info()[1] self.error(str(err)) # return the modified argument list return new_arg_strings def _match_argument(self, action, arg_strings_pattern): # match the pattern for this action to the arg strings nargs_pattern = self._get_nargs_pattern(action) match = _re.match(nargs_pattern, arg_strings_pattern) # raise an exception if we weren't able to find a match if match is None: nargs_errors = { None: _('expected one argument'), OPTIONAL: _('expected at most one argument'), ONE_OR_MORE: _('expected at least one argument'), } default = _('expected %s argument(s)') % action.nargs msg = nargs_errors.get(action.nargs, default) raise ArgumentError(action, msg) # return the number of arguments matched return len(match.group(1)) def _match_arguments_partial(self, actions, arg_strings_pattern): # progressively shorten the actions list by slicing off the # final actions until we find a match result = [] for i in range(len(actions), 0, -1): actions_slice = actions[:i] pattern = ''.join([self._get_nargs_pattern(action) for action in actions_slice]) match = _re.match(pattern, arg_strings_pattern) if match is not None: result.extend([len(string) for string in match.groups()]) break # return the list of arg string counts return result def _parse_optional(self, arg_string): # if it's an empty string, it was meant to be a positional if not arg_string: return None # if it doesn't start with a 
prefix, it was meant to be positional if not arg_string[0] in self.prefix_chars: return None # if it's just dashes, it was meant to be positional if not arg_string.strip('-'): return None # if the option string is present in the parser, return the action if arg_string in self._option_string_actions: action = self._option_string_actions[arg_string] return action, arg_string, None # search through all possible prefixes of the option string # and all actions in the parser for possible interpretations option_tuples = self._get_option_tuples(arg_string) # if multiple actions match, the option string was ambiguous if len(option_tuples) > 1: options = ', '.join([option_string for action, option_string, explicit_arg in option_tuples]) tup = arg_string, options self.error(_('ambiguous option: %s could match %s') % tup) # if exactly one action matched, this segmentation is good, # so return the parsed action elif len(option_tuples) == 1: option_tuple, = option_tuples return option_tuple # if it was not found as an option, but it looks like a negative # number, it was meant to be positional # unless there are negative-number-like options if self._negative_number_matcher.match(arg_string): if not self._has_negative_number_optionals: return None # if it contains a space, it was meant to be a positional if ' ' in arg_string: return None # it was meant to be an optional but there is no such option # in this parser (though it might be a valid option in a subparser) return None, arg_string, None def _get_option_tuples(self, option_string): result = [] # option strings starting with two prefix characters are only # split at the '=' chars = self.prefix_chars if option_string[0] in chars and option_string[1] in chars: if '=' in option_string: option_prefix, explicit_arg = option_string.split('=', 1) else: option_prefix = option_string explicit_arg = None for option_string in self._option_string_actions: if option_string.startswith(option_prefix): action = self._option_string_actions[option_string] tup = action, option_string, explicit_arg result.append(tup) # single character options can be concatenated with their arguments # but multiple character options always have to have their argument # separate elif option_string[0] in chars and option_string[1] not in chars: option_prefix = option_string explicit_arg = None short_option_prefix = option_string[:2] short_explicit_arg = option_string[2:] for option_string in self._option_string_actions: if option_string == short_option_prefix: action = self._option_string_actions[option_string] tup = action, option_string, short_explicit_arg result.append(tup) elif option_string.startswith(option_prefix): action = self._option_string_actions[option_string] tup = action, option_string, explicit_arg result.append(tup) # shouldn't ever get here else: self.error(_('unexpected option string: %s') % option_string) # return the collected option tuples return result def _get_nargs_pattern(self, action): # in all examples below, we have to allow for '--' args # which are represented as '-' in the pattern nargs = action.nargs # the default (None) is assumed to be a single argument if nargs is None: nargs_pattern = '(-*A-*)' # allow zero or one arguments elif nargs == OPTIONAL: nargs_pattern = '(-*A?-*)' # allow zero or more arguments elif nargs == ZERO_OR_MORE: nargs_pattern = '(-*[A-]*)' # allow one or more arguments elif nargs == ONE_OR_MORE: nargs_pattern = '(-*A[A-]*)' # allow one argument followed by any number of options or arguments elif nargs is PARSER: nargs_pattern = 
'(-*A[-AO]*)' # all others should be integers else: nargs_pattern = '(-*%s-*)' % '-*'.join('A' * nargs) # if this is an optional action, -- is not allowed if action.option_strings: nargs_pattern = nargs_pattern.replace('-*', '') nargs_pattern = nargs_pattern.replace('-', '') # return the pattern return nargs_pattern # ======================== # Value conversion methods # ======================== def _get_values(self, action, arg_strings): # for everything but PARSER args, strip out '--' if action.nargs is not PARSER: arg_strings = [s for s in arg_strings if s != '--'] # optional argument produces a default when not present if not arg_strings and action.nargs == OPTIONAL: if action.option_strings: value = action.const else: value = action.default if isinstance(value, _basestring): value = self._get_value(action, value) self._check_value(action, value) # when nargs='*' on a positional, if there were no command-line # args, use the default if it is anything other than None elif (not arg_strings and action.nargs == ZERO_OR_MORE and not action.option_strings): if action.default is not None: value = action.default else: value = arg_strings self._check_value(action, value) # single argument or optional argument produces a single value elif len(arg_strings) == 1 and action.nargs in [None, OPTIONAL]: arg_string, = arg_strings value = self._get_value(action, arg_string) self._check_value(action, value) # PARSER arguments convert all values, but check only the first elif action.nargs is PARSER: value = [self._get_value(action, v) for v in arg_strings] self._check_value(action, value[0]) # all other types of nargs produce a list else: value = [self._get_value(action, v) for v in arg_strings] for v in value: self._check_value(action, v) # return the converted value return value def _get_value(self, action, arg_string): type_func = self._registry_get('type', action.type, action.type) if not hasattr(type_func, '__call__'): if not hasattr(type_func, '__bases__'): # classic classes msg = _('%r is not callable') raise ArgumentError(action, msg % type_func) # convert the value to the appropriate type try: result = type_func(arg_string) # TypeErrors or ValueErrors indicate errors except (TypeError, ValueError): name = getattr(action.type, '__name__', repr(action.type)) msg = _('invalid %s value: %r') raise ArgumentError(action, msg % (name, arg_string)) # return the converted value return result def _check_value(self, action, value): # converted value must be one of the choices (if specified) if action.choices is not None and value not in action.choices: tup = value, ', '.join(map(repr, action.choices)) msg = _('invalid choice: %r (choose from %s)') % tup raise ArgumentError(action, msg) # ======================= # Help-formatting methods # ======================= def format_usage(self): formatter = self._get_formatter() formatter.add_usage(self.usage, self._actions, self._mutually_exclusive_groups) return formatter.format_help() def format_help(self): formatter = self._get_formatter() # usage formatter.add_usage(self.usage, self._actions, self._mutually_exclusive_groups) # description formatter.add_text(self.description) # positionals, optionals and user-defined groups for action_group in self._action_groups: formatter.start_section(action_group.title) formatter.add_text(action_group.description) formatter.add_arguments(action_group._group_actions) formatter.end_section() # epilog formatter.add_text(self.epilog) # determine help from format above return formatter.format_help() def format_version(self): 
formatter = self._get_formatter() formatter.add_text(self.version) return formatter.format_help() def _get_formatter(self): return self.formatter_class(prog=self.prog) # ===================== # Help-printing methods # ===================== def print_usage(self, file=None): self._print_message(self.format_usage(), file) def print_help(self, file=None): self._print_message(self.format_help(), file) def print_version(self, file=None): self._print_message(self.format_version(), file) def _print_message(self, message, file=None): if message: if file is None: file = _sys.stderr file.write(message) # =============== # Exiting methods # =============== def exit(self, status=0, message=None): if message: _sys.stderr.write(message) _sys.exit(status) def error(self, message): """error(message: string) Prints a usage message incorporating the message to stderr and exits. If you override this in a subclass, it should not return -- it should either exit or raise an exception. """ self.print_usage(_sys.stderr) self.exit(2, _('%s: error: %s\n') % (self.prog, message)) nipy-0.3.0/nipy/externals/configobj.py000066400000000000000000002534431210344137400200010ustar00rootroot00000000000000# configobj.py # A config file reader/writer that supports nested sections in config files. # Copyright (C) 2005-2010 Michael Foord, Nicola Larosa # E-mail: fuzzyman AT voidspace DOT org DOT uk # nico AT tekNico DOT net # ConfigObj 4 # http://www.voidspace.org.uk/python/configobj.html # Released subject to the BSD License # Please see http://www.voidspace.org.uk/python/license.shtml # Scripts maintained at http://www.voidspace.org.uk/python/index.shtml # For information about bugfixes, updates and support, please join the # ConfigObj mailing list: # http://lists.sourceforge.net/lists/listinfo/configobj-develop # Comments, suggestions and bug reports welcome. from __future__ import generators # disable nose tests here try: from nose.plugins.skip import SkipTest except ImportError: pass else: def setup_module(): raise SkipTest('Doctests fail for externals.configobj') import os import re import sys from codecs import BOM_UTF8, BOM_UTF16, BOM_UTF16_BE, BOM_UTF16_LE # imported lazily to avoid startup performance hit if it isn't used compiler = None # A dictionary mapping BOM to # the encoding to decode with, and what to set the # encoding attribute to. BOMS = { BOM_UTF8: ('utf_8', None), BOM_UTF16_BE: ('utf16_be', 'utf_16'), BOM_UTF16_LE: ('utf16_le', 'utf_16'), BOM_UTF16: ('utf_16', 'utf_16'), } # All legal variants of the BOM codecs. # TODO: the list of aliases is not meant to be exhaustive, is there a # better way ? BOM_LIST = { 'utf_16': 'utf_16', 'u16': 'utf_16', 'utf16': 'utf_16', 'utf-16': 'utf_16', 'utf16_be': 'utf16_be', 'utf_16_be': 'utf16_be', 'utf-16be': 'utf16_be', 'utf16_le': 'utf16_le', 'utf_16_le': 'utf16_le', 'utf-16le': 'utf16_le', 'utf_8': 'utf_8', 'u8': 'utf_8', 'utf': 'utf_8', 'utf8': 'utf_8', 'utf-8': 'utf_8', } # Map of encodings to the BOM to write. 
BOM_SET = { 'utf_8': BOM_UTF8, 'utf_16': BOM_UTF16, 'utf16_be': BOM_UTF16_BE, 'utf16_le': BOM_UTF16_LE, None: BOM_UTF8 } def match_utf8(encoding): return BOM_LIST.get(encoding.lower()) == 'utf_8' # Quote strings used for writing values squot = "'%s'" dquot = '"%s"' noquot = "%s" wspace_plus = ' \r\n\v\t\'"' tsquot = '"""%s"""' tdquot = "'''%s'''" # Sentinel for use in getattr calls to replace hasattr MISSING = object() __version__ = '4.7.1' try: any except NameError: def any(iterable): for entry in iterable: if entry: return True return False __all__ = ( '__version__', 'DEFAULT_INDENT_TYPE', 'DEFAULT_INTERPOLATION', 'ConfigObjError', 'NestingError', 'ParseError', 'DuplicateError', 'ConfigspecError', 'ConfigObj', 'SimpleVal', 'InterpolationError', 'InterpolationLoopError', 'MissingInterpolationOption', 'RepeatSectionError', 'ReloadError', 'UnreprError', 'UnknownType', 'flatten_errors', 'get_extra_values' ) DEFAULT_INTERPOLATION = 'configparser' DEFAULT_INDENT_TYPE = ' ' MAX_INTERPOL_DEPTH = 10 OPTION_DEFAULTS = { 'interpolation': True, 'raise_errors': False, 'list_values': True, 'create_empty': False, 'file_error': False, 'configspec': None, 'stringify': True, # option may be set to one of ('', ' ', '\t') 'indent_type': None, 'encoding': None, 'default_encoding': None, 'unrepr': False, 'write_empty_values': False, } def getObj(s): global compiler if compiler is None: import compiler s = "a=" + s p = compiler.parse(s) return p.getChildren()[1].getChildren()[0].getChildren()[1] class UnknownType(Exception): pass class Builder(object): def build(self, o): m = getattr(self, 'build_' + o.__class__.__name__, None) if m is None: raise UnknownType(o.__class__.__name__) return m(o) def build_List(self, o): return map(self.build, o.getChildren()) def build_Const(self, o): return o.value def build_Dict(self, o): d = {} i = iter(map(self.build, o.getChildren())) for el in i: d[el] = i.next() return d def build_Tuple(self, o): return tuple(self.build_List(o)) def build_Name(self, o): if o.name == 'None': return None if o.name == 'True': return True if o.name == 'False': return False # An undefined Name raise UnknownType('Undefined Name') def build_Add(self, o): real, imag = map(self.build_Const, o.getChildren()) try: real = float(real) except TypeError: raise UnknownType('Add') if not isinstance(imag, complex) or imag.real != 0.0: raise UnknownType('Add') return real+imag def build_Getattr(self, o): parent = self.build(o.expr) return getattr(parent, o.attrname) def build_UnarySub(self, o): return -self.build_Const(o.getChildren()[0]) def build_UnaryAdd(self, o): return self.build_Const(o.getChildren()[0]) _builder = Builder() def unrepr(s): if not s: return s return _builder.build(getObj(s)) class ConfigObjError(SyntaxError): """ This is the base class for all errors that ConfigObj raises. It is a subclass of SyntaxError. """ def __init__(self, message='', line_number=None, line=''): self.line = line self.line_number = line_number SyntaxError.__init__(self, message) class NestingError(ConfigObjError): """ This error indicates a level of nesting that doesn't match. """ class ParseError(ConfigObjError): """ This error indicates that a line is badly written. It is neither a valid ``key = value`` line, nor a valid section marker line. """ class ReloadError(IOError): """ A 'reload' operation failed. This exception is a subclass of ``IOError``. 
""" def __init__(self): IOError.__init__(self, 'reload failed, filename is not set.') class DuplicateError(ConfigObjError): """ The keyword or section specified already exists. """ class ConfigspecError(ConfigObjError): """ An error occured whilst parsing a configspec. """ class InterpolationError(ConfigObjError): """Base class for the two interpolation errors.""" class InterpolationLoopError(InterpolationError): """Maximum interpolation depth exceeded in string interpolation.""" def __init__(self, option): InterpolationError.__init__( self, 'interpolation loop detected in value "%s".' % option) class RepeatSectionError(ConfigObjError): """ This error indicates additional sections in a section with a ``__many__`` (repeated) section. """ class MissingInterpolationOption(InterpolationError): """A value specified for interpolation was missing.""" def __init__(self, option): msg = 'missing option "%s" in interpolation.' % option InterpolationError.__init__(self, msg) class UnreprError(ConfigObjError): """An error parsing in unrepr mode.""" class InterpolationEngine(object): """ A helper class to help perform string interpolation. This class is an abstract base class; its descendants perform the actual work. """ # compiled regexp to use in self.interpolate() _KEYCRE = re.compile(r"%\(([^)]*)\)s") _cookie = '%' def __init__(self, section): # the Section instance that "owns" this engine self.section = section def interpolate(self, key, value): # short-cut if not self._cookie in value: return value def recursive_interpolate(key, value, section, backtrail): """The function that does the actual work. ``value``: the string we're trying to interpolate. ``section``: the section in which that string was found ``backtrail``: a dict to keep track of where we've been, to detect and prevent infinite recursion loops This is similar to a depth-first-search algorithm. """ # Have we been here already? if (key, section.name) in backtrail: # Yes - infinite loop detected raise InterpolationLoopError(key) # Place a marker on our backtrail so we won't come back here again backtrail[(key, section.name)] = 1 # Now start the actual work match = self._KEYCRE.search(value) while match: # The actual parsing of the match is implementation-dependent, # so delegate to our helper function k, v, s = self._parse_match(match) if k is None: # That's the signal that no further interpolation is needed replacement = v else: # Further interpolation may be needed to obtain final value replacement = recursive_interpolate(k, v, s, backtrail) # Replace the matched string with its final value start, end = match.span() value = ''.join((value[:start], replacement, value[end:])) new_search_start = start + len(replacement) # Pick up the next interpolation key, if any, for next time # through the while loop match = self._KEYCRE.search(value, new_search_start) # Now safe to come back here again; remove marker from backtrail del backtrail[(key, section.name)] return value # Back in interpolate(), all we have to do is kick off the recursive # function with appropriate starting values value = recursive_interpolate(key, value, self.section, {}) return value def _fetch(self, key): """Helper function to fetch values from owning section. Returns a 2-tuple: the value, and the section where it was found. """ # switch off interpolation before we try and fetch anything ! 
save_interp = self.section.main.interpolation self.section.main.interpolation = False # Start at section that "owns" this InterpolationEngine current_section = self.section while True: # try the current section first val = current_section.get(key) if val is not None: break # try "DEFAULT" next val = current_section.get('DEFAULT', {}).get(key) if val is not None: break # move up to parent and try again # top-level's parent is itself if current_section.parent is current_section: # reached top level, time to give up break current_section = current_section.parent # restore interpolation to previous value before returning self.section.main.interpolation = save_interp if val is None: raise MissingInterpolationOption(key) return val, current_section def _parse_match(self, match): """Implementation-dependent helper function. Will be passed a match object corresponding to the interpolation key we just found (e.g., "%(foo)s" or "$foo"). Should look up that key in the appropriate config file section (using the ``_fetch()`` helper function) and return a 3-tuple: (key, value, section) ``key`` is the name of the key we're looking for ``value`` is the value found for that key ``section`` is a reference to the section where it was found ``key`` and ``section`` should be None if no further interpolation should be performed on the resulting value (e.g., if we interpolated "$$" and returned "$"). """ raise NotImplementedError() class ConfigParserInterpolation(InterpolationEngine): """Behaves like ConfigParser.""" _cookie = '%' _KEYCRE = re.compile(r"%\(([^)]*)\)s") def _parse_match(self, match): key = match.group(1) value, section = self._fetch(key) return key, value, section class TemplateInterpolation(InterpolationEngine): """Behaves like string.Template.""" _cookie = '$' _delimiter = '$' _KEYCRE = re.compile(r""" \$(?: (?P\$) | # Two $ signs (?P[_a-z][_a-z0-9]*) | # $name format {(?P[^}]*)} # ${name} format ) """, re.IGNORECASE | re.VERBOSE) def _parse_match(self, match): # Valid name (in or out of braces): fetch value from section key = match.group('named') or match.group('braced') if key is not None: value, section = self._fetch(key) return key, value, section # Escaped delimiter (e.g., $$): return single delimiter if match.group('escaped') is not None: # Return None for key and section to indicate it's time to stop return None, self._delimiter, None # Anything else: ignore completely, just return it unchanged return None, match.group(), None interpolation_engines = { 'configparser': ConfigParserInterpolation, 'template': TemplateInterpolation, } def __newobj__(cls, *args): # Hack for pickle return cls.__new__(cls, *args) class Section(dict): """ A dictionary-like object that represents a section in a config file. It does string interpolation if the 'interpolation' attribute of the 'main' object is set to True. Interpolation is tried first from this object, then from the 'DEFAULT' section of this object, next from the parent and its 'DEFAULT' section, and so on until the main object is reached. A Section will behave like an ordered dictionary - following the order of the ``scalars`` and ``sections`` attributes. You can use this to change the order of members. Iteration follows the order: scalars, then sections. 
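    A short sketch of interpolation in use (the section and key names
    below are made up for illustration, and assume the default
    ConfigParser-style ``%(name)s`` syntax):

    >>> cfg = ConfigObj()
    >>> cfg['DEFAULT'] = {'home': '/data'}
    >>> cfg['paths'] = {'raw': '%(home)s/raw'}
    >>> cfg['paths']['raw']
    '/data/raw'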
""" def __setstate__(self, state): dict.update(self, state[0]) self.__dict__.update(state[1]) def __reduce__(self): state = (dict(self), self.__dict__) return (__newobj__, (self.__class__,), state) def __init__(self, parent, depth, main, indict=None, name=None): """ * parent is the section above * depth is the depth level of this section * main is the main ConfigObj * indict is a dictionary to initialise the section with """ if indict is None: indict = {} dict.__init__(self) # used for nesting level *and* interpolation self.parent = parent # used for the interpolation attribute self.main = main # level of nesting depth of this Section self.depth = depth # purely for information self.name = name # self._initialise() # we do this explicitly so that __setitem__ is used properly # (rather than just passing to ``dict.__init__``) for entry, value in indict.iteritems(): self[entry] = value def _initialise(self): # the sequence of scalar values in this Section self.scalars = [] # the sequence of sections in this Section self.sections = [] # for comments :-) self.comments = {} self.inline_comments = {} # the configspec self.configspec = None # for defaults self.defaults = [] self.default_values = {} self.extra_values = [] self._created = False def _interpolate(self, key, value): try: # do we already have an interpolation engine? engine = self._interpolation_engine except AttributeError: # not yet: first time running _interpolate(), so pick the engine name = self.main.interpolation if name == True: # note that "if name:" would be incorrect here # backwards-compatibility: interpolation=True means use default name = DEFAULT_INTERPOLATION name = name.lower() # so that "Template", "template", etc. all work class_ = interpolation_engines.get(name, None) if class_ is None: # invalid value for self.main.interpolation self.main.interpolation = False return value else: # save reference to engine so we don't have to do this again engine = self._interpolation_engine = class_(self) # let the engine do the actual work return engine.interpolate(key, value) def __getitem__(self, key): """Fetch the item and do string interpolation.""" val = dict.__getitem__(self, key) if self.main.interpolation: if isinstance(val, basestring): return self._interpolate(key, val) if isinstance(val, list): def _check(entry): if isinstance(entry, basestring): return self._interpolate(key, entry) return entry return [_check(entry) for entry in val] return val def __setitem__(self, key, value, unrepr=False): """ Correctly set a value. Making dictionary values Section instances. (We have to special case 'Section' instances - which are also dicts) Keys must be strings. Values need only be strings (or lists of strings) if ``main.stringify`` is set. ``unrepr`` must be set when setting a value to a dictionary, without creating a new sub-section. """ if not isinstance(key, basestring): raise ValueError('The key "%s" is not a string.' 
% key) # add the comment if key not in self.comments: self.comments[key] = [] self.inline_comments[key] = '' # remove the entry from defaults if key in self.defaults: self.defaults.remove(key) # if isinstance(value, Section): if key not in self: self.sections.append(key) dict.__setitem__(self, key, value) elif isinstance(value, dict) and not unrepr: # First create the new depth level, # then create the section if key not in self: self.sections.append(key) new_depth = self.depth + 1 dict.__setitem__( self, key, Section( self, new_depth, self.main, indict=value, name=key)) else: if key not in self: self.scalars.append(key) if not self.main.stringify: if isinstance(value, basestring): pass elif isinstance(value, (list, tuple)): for entry in value: if not isinstance(entry, basestring): raise TypeError('Value is not a string "%s".' % entry) else: raise TypeError('Value is not a string "%s".' % value) dict.__setitem__(self, key, value) def __delitem__(self, key): """Remove items from the sequence when deleting.""" dict. __delitem__(self, key) if key in self.scalars: self.scalars.remove(key) else: self.sections.remove(key) del self.comments[key] del self.inline_comments[key] def get(self, key, default=None): """A version of ``get`` that doesn't bypass string interpolation.""" try: return self[key] except KeyError: return default def update(self, indict): """ A version of update that uses our ``__setitem__``. """ for entry in indict: self[entry] = indict[entry] def pop(self, key, *args): """ 'D.pop(k[,d]) -> v, remove specified key and return the corresponding value. If key is not found, d is returned if given, otherwise KeyError is raised' """ val = dict.pop(self, key, *args) if key in self.scalars: del self.comments[key] del self.inline_comments[key] self.scalars.remove(key) elif key in self.sections: del self.comments[key] del self.inline_comments[key] self.sections.remove(key) if self.main.interpolation and isinstance(val, basestring): return self._interpolate(key, val) return val def popitem(self): """Pops the first (key,val)""" sequence = (self.scalars + self.sections) if not sequence: raise KeyError(": 'popitem(): dictionary is empty'") key = sequence[0] val = self[key] del self[key] return key, val def clear(self): """ A version of clear that also affects scalars/sections Also clears comments and configspec. 
Leaves other attributes alone : depth/main/parent are not affected """ dict.clear(self) self.scalars = [] self.sections = [] self.comments = {} self.inline_comments = {} self.configspec = None self.defaults = [] self.extra_values = [] def setdefault(self, key, default=None): """A version of setdefault that sets sequence if appropriate.""" try: return self[key] except KeyError: self[key] = default return self[key] def items(self): """D.items() -> list of D's (key, value) pairs, as 2-tuples""" return zip((self.scalars + self.sections), self.values()) def keys(self): """D.keys() -> list of D's keys""" return (self.scalars + self.sections) def values(self): """D.values() -> list of D's values""" return [self[key] for key in (self.scalars + self.sections)] def iteritems(self): """D.iteritems() -> an iterator over the (key, value) items of D""" return iter(self.items()) def iterkeys(self): """D.iterkeys() -> an iterator over the keys of D""" return iter((self.scalars + self.sections)) __iter__ = iterkeys def itervalues(self): """D.itervalues() -> an iterator over the values of D""" return iter(self.values()) def __repr__(self): """x.__repr__() <==> repr(x)""" return '{%s}' % ', '.join([('%s: %s' % (repr(key), repr(self[key]))) for key in (self.scalars + self.sections)]) __str__ = __repr__ __str__.__doc__ = "x.__str__() <==> str(x)" # Extra methods - not in a normal dictionary def dict(self): """ Return a deepcopy of self as a dictionary. All members that are ``Section`` instances are recursively turned to ordinary dictionaries - by calling their ``dict`` method. >>> n = a.dict() >>> n == a 1 >>> n is a 0 """ newdict = {} for entry in self: this_entry = self[entry] if isinstance(this_entry, Section): this_entry = this_entry.dict() elif isinstance(this_entry, list): # create a copy rather than a reference this_entry = list(this_entry) elif isinstance(this_entry, tuple): # create a copy rather than a reference this_entry = tuple(this_entry) newdict[entry] = this_entry return newdict def merge(self, indict): """ A recursive update - useful for merging config files. >>> a = '''[section1] ... option1 = True ... [[subsection]] ... more_options = False ... # end of file'''.splitlines() >>> b = '''# File is user.ini ... [section1] ... option1 = False ... # end of file'''.splitlines() >>> c1 = ConfigObj(b) >>> c2 = ConfigObj(a) >>> c2.merge(c1) >>> c2 ConfigObj({'section1': {'option1': 'False', 'subsection': {'more_options': 'False'}}}) """ for key, val in indict.items(): if (key in self and isinstance(self[key], dict) and isinstance(val, dict)): self[key].merge(val) else: self[key] = val def rename(self, oldkey, newkey): """ Change a keyname to another, without changing position in sequence. Implemented so that transformations can be made on keys, as well as on values. (used by encode and decode) Also renames comments. """ if oldkey in self.scalars: the_list = self.scalars elif oldkey in self.sections: the_list = self.sections else: raise KeyError('Key "%s" not found.' % oldkey) pos = the_list.index(oldkey) # val = self[oldkey] dict.__delitem__(self, oldkey) dict.__setitem__(self, newkey, val) the_list.remove(oldkey) the_list.insert(pos, newkey) comm = self.comments[oldkey] inline_comment = self.inline_comments[oldkey] del self.comments[oldkey] del self.inline_comments[oldkey] self.comments[newkey] = comm self.inline_comments[newkey] = inline_comment def walk(self, function, raise_errors=True, call_on_sections=False, **keywargs): """ Walk every member and call a function on the keyword and value. 
Return a dictionary of the return values If the function raises an exception, raise the errror unless ``raise_errors=False``, in which case set the return value to ``False``. Any unrecognised keyword arguments you pass to walk, will be pased on to the function you pass in. Note: if ``call_on_sections`` is ``True`` then - on encountering a subsection, *first* the function is called for the *whole* subsection, and then recurses into it's members. This means your function must be able to handle strings, dictionaries and lists. This allows you to change the key of subsections as well as for ordinary members. The return value when called on the whole subsection has to be discarded. See the encode and decode methods for examples, including functions. .. admonition:: caution You can use ``walk`` to transform the names of members of a section but you mustn't add or delete members. >>> config = '''[XXXXsection] ... XXXXkey = XXXXvalue'''.splitlines() >>> cfg = ConfigObj(config) >>> cfg ConfigObj({'XXXXsection': {'XXXXkey': 'XXXXvalue'}}) >>> def transform(section, key): ... val = section[key] ... newkey = key.replace('XXXX', 'CLIENT1') ... section.rename(key, newkey) ... if isinstance(val, (tuple, list, dict)): ... pass ... else: ... val = val.replace('XXXX', 'CLIENT1') ... section[newkey] = val >>> cfg.walk(transform, call_on_sections=True) {'CLIENT1section': {'CLIENT1key': None}} >>> cfg ConfigObj({'CLIENT1section': {'CLIENT1key': 'CLIENT1value'}}) """ out = {} # scalars first for i in range(len(self.scalars)): entry = self.scalars[i] try: val = function(self, entry, **keywargs) # bound again in case name has changed entry = self.scalars[i] out[entry] = val except Exception: if raise_errors: raise else: entry = self.scalars[i] out[entry] = False # then sections for i in range(len(self.sections)): entry = self.sections[i] if call_on_sections: try: function(self, entry, **keywargs) except Exception: if raise_errors: raise else: entry = self.sections[i] out[entry] = False # bound again in case name has changed entry = self.sections[i] # previous result is discarded out[entry] = self[entry].walk( function, raise_errors=raise_errors, call_on_sections=call_on_sections, **keywargs) return out def as_bool(self, key): """ Accepts a key as input. The corresponding value must be a string or the objects (``True`` or 1) or (``False`` or 0). We allow 0 and 1 to retain compatibility with Python 2.2. If the string is one of ``True``, ``On``, ``Yes``, or ``1`` it returns ``True``. If the string is one of ``False``, ``Off``, ``No``, or ``0`` it returns ``False``. ``as_bool`` is not case sensitive. Any other input will raise a ``ValueError``. >>> a = ConfigObj() >>> a['a'] = 'fish' >>> a.as_bool('a') Traceback (most recent call last): ValueError: Value "fish" is neither True nor False >>> a['b'] = 'True' >>> a.as_bool('b') 1 >>> a['b'] = 'off' >>> a.as_bool('b') 0 """ val = self[key] if val == True: return True elif val == False: return False else: try: if not isinstance(val, basestring): # TODO: Why do we raise a KeyError here? raise KeyError() else: return self.main._bools[val.lower()] except KeyError: raise ValueError('Value "%s" is neither True nor False' % val) def as_int(self, key): """ A convenience method which coerces the specified value to an integer. If the value is an invalid literal for ``int``, a ``ValueError`` will be raised. 
>>> a = ConfigObj() >>> a['a'] = 'fish' >>> a.as_int('a') Traceback (most recent call last): ValueError: invalid literal for int() with base 10: 'fish' >>> a['b'] = '1' >>> a.as_int('b') 1 >>> a['b'] = '3.2' >>> a.as_int('b') Traceback (most recent call last): ValueError: invalid literal for int() with base 10: '3.2' """ return int(self[key]) def as_float(self, key): """ A convenience method which coerces the specified value to a float. If the value is an invalid literal for ``float``, a ``ValueError`` will be raised. >>> a = ConfigObj() >>> a['a'] = 'fish' >>> a.as_float('a') Traceback (most recent call last): ValueError: invalid literal for float(): fish >>> a['b'] = '1' >>> a.as_float('b') 1.0 >>> a['b'] = '3.2' >>> a.as_float('b') 3.2000000000000002 """ return float(self[key]) def as_list(self, key): """ A convenience method which fetches the specified value, guaranteeing that it is a list. >>> a = ConfigObj() >>> a['a'] = 1 >>> a.as_list('a') [1] >>> a['a'] = (1,) >>> a.as_list('a') [1] >>> a['a'] = [1] >>> a.as_list('a') [1] """ result = self[key] if isinstance(result, (tuple, list)): return list(result) return [result] def restore_default(self, key): """ Restore (and return) default value for the specified key. This method will only work for a ConfigObj that was created with a configspec and has been validated. If there is no default value for this key, ``KeyError`` is raised. """ default = self.default_values[key] dict.__setitem__(self, key, default) if key not in self.defaults: self.defaults.append(key) return default def restore_defaults(self): """ Recursively restore default values to all members that have them. This method will only work for a ConfigObj that was created with a configspec and has been validated. It doesn't delete or modify entries without default values. """ for key in self.default_values: self.restore_default(key) for section in self.sections: self[section].restore_defaults() class ConfigObj(Section): """An object to read, create, and write config files.""" _keyword = re.compile(r'''^ # line start (\s*) # indentation ( # keyword (?:".*?")| # double quotes (?:'.*?')| # single quotes (?:[^'"=].*?) # no quotes ) \s*=\s* # divider (.*) # value (including list values and comments) $ # line end ''', re.VERBOSE) _sectionmarker = re.compile(r'''^ (\s*) # 1: indentation ((?:\[\s*)+) # 2: section marker open ( # 3: section name open (?:"\s*\S.*?\s*")| # at least one non-space with double quotes (?:'\s*\S.*?\s*')| # at least one non-space with single quotes (?:[^'"\s].*?) # at least one non-space unquoted ) # section name close ((?:\s*\])+) # 4: section marker close \s*(\#.*)? # 5: optional comment $''', re.VERBOSE) # this regexp pulls list values out as a single string # or single values and comments # FIXME: this regex adds a '' to the end of comma terminated lists # workaround in ``_handle_value`` _valueexp = re.compile(r'''^ (?: (?: ( (?: (?: (?:".*?")| # double quotes (?:'.*?')| # single quotes (?:[^'",\#][^,\#]*?) # unquoted ) \s*,\s* # comma )* # match all list items ending in a comma (if any) ) ( (?:".*?")| # double quotes (?:'.*?')| # single quotes (?:[^'",\#\s][^,]*?)| # unquoted (?:(? 
1: msg = "Parsing failed with several errors.\nFirst error %s" % info error = ConfigObjError(msg) else: error = self._errors[0] # set the errors attribute; it's a list of tuples: # (error_type, message, line_number) error.errors = self._errors # set the config attribute error.config = self raise error # delete private attributes del self._errors if configspec is None: self.configspec = None else: self._handle_configspec(configspec) def _initialise(self, options=None): if options is None: options = OPTION_DEFAULTS # initialise a few variables self.filename = None self._errors = [] self.raise_errors = options['raise_errors'] self.interpolation = options['interpolation'] self.list_values = options['list_values'] self.create_empty = options['create_empty'] self.file_error = options['file_error'] self.stringify = options['stringify'] self.indent_type = options['indent_type'] self.encoding = options['encoding'] self.default_encoding = options['default_encoding'] self.BOM = False self.newlines = None self.write_empty_values = options['write_empty_values'] self.unrepr = options['unrepr'] self.initial_comment = [] self.final_comment = [] self.configspec = None if self._inspec: self.list_values = False # Clear section attributes as well Section._initialise(self) def __repr__(self): return ('ConfigObj({%s})' % ', '.join([('%s: %s' % (repr(key), repr(self[key]))) for key in (self.scalars + self.sections)])) def _handle_bom(self, infile): """ Handle any BOM, and decode if necessary. If an encoding is specified, that *must* be used - but the BOM should still be removed (and the BOM attribute set). (If the encoding is wrongly specified, then a BOM for an alternative encoding won't be discovered or removed.) If an encoding is not specified, UTF8 or UTF16 BOM will be detected and removed. The BOM attribute will be set. UTF16 will be decoded to unicode. NOTE: This method must not be called with an empty ``infile``. Specifying the *wrong* encoding is likely to cause a ``UnicodeDecodeError``. ``infile`` must always be returned as a list of lines, but may be passed in as a single string. """ if ((self.encoding is not None) and (self.encoding.lower() not in BOM_LIST)): # No need to check for a BOM # the encoding specified doesn't have one # just decode return self._decode(infile, self.encoding) if isinstance(infile, (list, tuple)): line = infile[0] else: line = infile if self.encoding is not None: # encoding explicitly supplied # And it could have an associated BOM # TODO: if encoding is just UTF16 - we ought to check for both # TODO: big endian and little endian versions. 
enc = BOM_LIST[self.encoding.lower()] if enc == 'utf_16': # For UTF16 we try big endian and little endian for BOM, (encoding, final_encoding) in BOMS.items(): if not final_encoding: # skip UTF8 continue if infile.startswith(BOM): ### BOM discovered ##self.BOM = True # Don't need to remove BOM return self._decode(infile, encoding) # If we get this far, will *probably* raise a DecodeError # As it doesn't appear to start with a BOM return self._decode(infile, self.encoding) # Must be UTF8 BOM = BOM_SET[enc] if not line.startswith(BOM): return self._decode(infile, self.encoding) newline = line[len(BOM):] # BOM removed if isinstance(infile, (list, tuple)): infile[0] = newline else: infile = newline self.BOM = True return self._decode(infile, self.encoding) # No encoding specified - so we need to check for UTF8/UTF16 for BOM, (encoding, final_encoding) in BOMS.items(): if not line.startswith(BOM): continue else: # BOM discovered self.encoding = final_encoding if not final_encoding: self.BOM = True # UTF8 # remove BOM newline = line[len(BOM):] if isinstance(infile, (list, tuple)): infile[0] = newline else: infile = newline # UTF8 - don't decode if isinstance(infile, basestring): return infile.splitlines(True) else: return infile # UTF16 - have to decode return self._decode(infile, encoding) # No BOM discovered and no encoding specified, just return if isinstance(infile, basestring): # infile read from a file will be a single string return infile.splitlines(True) return infile def _a_to_u(self, aString): """Decode ASCII strings to unicode if a self.encoding is specified.""" if self.encoding: return aString.decode('ascii') else: return aString def _decode(self, infile, encoding): """ Decode infile to unicode. Using the specified encoding. if is a string, it also needs converting to a list. """ if isinstance(infile, basestring): # can't be unicode # NOTE: Could raise a ``UnicodeDecodeError`` return infile.decode(encoding).splitlines(True) for i, line in enumerate(infile): if not isinstance(line, unicode): # NOTE: The isinstance test here handles mixed lists of unicode/string # NOTE: But the decode will break on any non-string values # NOTE: Or could raise a ``UnicodeDecodeError`` infile[i] = line.decode(encoding) return infile def _decode_element(self, line): """Decode element to unicode if necessary.""" if not self.encoding: return line if isinstance(line, str) and self.default_encoding: return line.decode(self.default_encoding) return line def _str(self, value): """ Used by ``stringify`` within validate, to turn non-string values into strings. """ if not isinstance(value, basestring): return str(value) else: return value def _parse(self, infile): """Actually parse the config file.""" temp_list_values = self.list_values if self.unrepr: self.list_values = False comment_list = [] done_start = False this_section = self maxline = len(infile) - 1 cur_index = -1 reset_comment = False while cur_index < maxline: if reset_comment: comment_list = [] cur_index += 1 line = infile[cur_index] sline = line.strip() # do we have anything on the line ? 
if not sline or sline.startswith('#'): reset_comment = False comment_list.append(line) continue if not done_start: # preserve initial comment self.initial_comment = comment_list comment_list = [] done_start = True reset_comment = True # first we check if it's a section marker mat = self._sectionmarker.match(line) if mat is not None: # is a section line (indent, sect_open, sect_name, sect_close, comment) = mat.groups() if indent and (self.indent_type is None): self.indent_type = indent cur_depth = sect_open.count('[') if cur_depth != sect_close.count(']'): self._handle_error("Cannot compute the section depth at line %s.", NestingError, infile, cur_index) continue if cur_depth < this_section.depth: # the new section is dropping back to a previous level try: parent = self._match_depth(this_section, cur_depth).parent except SyntaxError: self._handle_error("Cannot compute nesting level at line %s.", NestingError, infile, cur_index) continue elif cur_depth == this_section.depth: # the new section is a sibling of the current section parent = this_section.parent elif cur_depth == this_section.depth + 1: # the new section is a child the current section parent = this_section else: self._handle_error("Section too nested at line %s.", NestingError, infile, cur_index) sect_name = self._unquote(sect_name) if sect_name in parent: self._handle_error('Duplicate section name at line %s.', DuplicateError, infile, cur_index) continue # create the new section this_section = Section( parent, cur_depth, self, name=sect_name) parent[sect_name] = this_section parent.inline_comments[sect_name] = comment parent.comments[sect_name] = comment_list continue # # it's not a section marker, # so it should be a valid ``key = value`` line mat = self._keyword.match(line) if mat is None: # it neither matched as a keyword # or a section marker self._handle_error( 'Invalid line at line "%s".', ParseError, infile, cur_index) else: # is a keyword value # value will include any inline comment (indent, key, value) = mat.groups() if indent and (self.indent_type is None): self.indent_type = indent # check for a multiline value if value[:3] in ['"""', "'''"]: try: value, comment, cur_index = self._multiline( value, infile, cur_index, maxline) except SyntaxError: self._handle_error( 'Parse error in value at line %s.', ParseError, infile, cur_index) continue else: if self.unrepr: comment = '' try: value = unrepr(value) except Exception, e: if type(e) == UnknownType: msg = 'Unknown name or type in value at line %s.' else: msg = 'Parse error in value at line %s.' self._handle_error(msg, UnreprError, infile, cur_index) continue else: if self.unrepr: comment = '' try: value = unrepr(value) except Exception, e: if isinstance(e, UnknownType): msg = 'Unknown name or type in value at line %s.' else: msg = 'Parse error in value at line %s.' self._handle_error(msg, UnreprError, infile, cur_index) continue else: # extract comment and lists try: (value, comment) = self._handle_value(value) except SyntaxError: self._handle_error( 'Parse error in value at line %s.', ParseError, infile, cur_index) continue # key = self._unquote(key) if key in this_section: self._handle_error( 'Duplicate keyword name at line %s.', DuplicateError, infile, cur_index) continue # add the key. 
# we set unrepr because if we have got this far we will never # be creating a new section this_section.__setitem__(key, value, unrepr=True) this_section.inline_comments[key] = comment this_section.comments[key] = comment_list continue # if self.indent_type is None: # no indentation used, set the type accordingly self.indent_type = '' # preserve the final comment if not self and not self.initial_comment: self.initial_comment = comment_list elif not reset_comment: self.final_comment = comment_list self.list_values = temp_list_values def _match_depth(self, sect, depth): """ Given a section and a depth level, walk back through the sections parents to see if the depth level matches a previous section. Return a reference to the right section, or raise a SyntaxError. """ while depth < sect.depth: if sect is sect.parent: # we've reached the top level already raise SyntaxError() sect = sect.parent if sect.depth == depth: return sect # shouldn't get here raise SyntaxError() def _handle_error(self, text, ErrorClass, infile, cur_index): """ Handle an error according to the error settings. Either raise the error or store it. The error will have occured at ``cur_index`` """ line = infile[cur_index] cur_index += 1 message = text % cur_index error = ErrorClass(message, cur_index, line) if self.raise_errors: # raise the error - parsing stops here raise error # store the error # reraise when parsing has finished self._errors.append(error) def _unquote(self, value): """Return an unquoted version of a value""" if not value: # should only happen during parsing of lists raise SyntaxError if (value[0] == value[-1]) and (value[0] in ('"', "'")): value = value[1:-1] return value def _quote(self, value, multiline=True): """ Return a safely quoted version of a value. Raise a ConfigObjError if the value cannot be safely quoted. If multiline is ``True`` (default) then use triple quotes if necessary. * Don't quote values that don't need it. * Recursively quote members of a list and return a comma joined list. * Multiline is ``False`` for lists. * Obey list syntax for empty and single member lists. If ``list_values=False`` then the value is only quoted if it contains a ``\\n`` (is multiline) or '#'. If ``write_empty_values`` is set, and the value is an empty string, it won't be quoted. """ if multiline and self.write_empty_values and value == '': # Only if multiline is set, so that it is used for values not # keys, and not values that are part of a list return '' if multiline and isinstance(value, (list, tuple)): if not value: return ',' elif len(value) == 1: return self._quote(value[0], multiline=False) + ',' return ', '.join([self._quote(val, multiline=False) for val in value]) if not isinstance(value, basestring): if self.stringify: value = str(value) else: raise TypeError('Value "%s" is not a string.' % value) if not value: return '""' no_lists_no_quotes = not self.list_values and '\n' not in value and '#' not in value need_triple = multiline and ((("'" in value) and ('"' in value)) or ('\n' in value )) hash_triple_quote = multiline and not need_triple and ("'" in value) and ('"' in value) and ('#' in value) check_for_single = (no_lists_no_quotes or not need_triple) and not hash_triple_quote if check_for_single: if not self.list_values: # we don't quote if ``list_values=False`` quot = noquot # for normal values either single or double quotes will do elif '\n' in value: # will only happen if multiline is off - e.g. '\n' in key raise ConfigObjError('Value "%s" cannot be safely quoted.' 
% value) elif ((value[0] not in wspace_plus) and (value[-1] not in wspace_plus) and (',' not in value)): quot = noquot else: quot = self._get_single_quote(value) else: # if value has '\n' or "'" *and* '"', it will need triple quotes quot = self._get_triple_quote(value) if quot == noquot and '#' in value and self.list_values: quot = self._get_single_quote(value) return quot % value def _get_single_quote(self, value): if ("'" in value) and ('"' in value): raise ConfigObjError('Value "%s" cannot be safely quoted.' % value) elif '"' in value: quot = squot else: quot = dquot return quot def _get_triple_quote(self, value): if (value.find('"""') != -1) and (value.find("'''") != -1): raise ConfigObjError('Value "%s" cannot be safely quoted.' % value) if value.find('"""') == -1: quot = tdquot else: quot = tsquot return quot def _handle_value(self, value): """ Given a value string, unquote, remove comment, handle lists. (including empty and single member lists) """ if self._inspec: # Parsing a configspec so don't handle comments return (value, '') # do we look for lists in values ? if not self.list_values: mat = self._nolistvalue.match(value) if mat is None: raise SyntaxError() # NOTE: we don't unquote here return mat.groups() # mat = self._valueexp.match(value) if mat is None: # the value is badly constructed, probably badly quoted, # or an invalid list raise SyntaxError() (list_values, single, empty_list, comment) = mat.groups() if (list_values == '') and (single is None): # change this if you want to accept empty values raise SyntaxError() # NOTE: note there is no error handling from here if the regex # is wrong: then incorrect values will slip through if empty_list is not None: # the single comma - meaning an empty list return ([], comment) if single is not None: # handle empty values if list_values and not single: # FIXME: the '' is a workaround because our regex now matches # '' at the end of a list if it has a trailing comma single = None else: single = single or '""' single = self._unquote(single) if list_values == '': # not a list value return (single, comment) the_list = self._listvalueexp.findall(list_values) the_list = [self._unquote(val) for val in the_list] if single is not None: the_list += [single] return (the_list, comment) def _multiline(self, value, infile, cur_index, maxline): """Extract the value, where we are in a multiline situation.""" quot = value[:3] newvalue = value[3:] single_line = self._triple_quote[quot][0] multi_line = self._triple_quote[quot][1] mat = single_line.match(value) if mat is not None: retval = list(mat.groups()) retval.append(cur_index) return retval elif newvalue.find(quot) != -1: # somehow the triple quote is missing raise SyntaxError() # while cur_index < maxline: cur_index += 1 newvalue += '\n' line = infile[cur_index] if line.find(quot) == -1: newvalue += line else: # end of multiline, process it break else: # we've got to the end of the config, oops... raise SyntaxError() mat = multi_line.match(line) if mat is None: # a badly formed line raise SyntaxError() (value, comment) = mat.groups() return (newvalue + value, comment, cur_index) def _handle_configspec(self, configspec): """Parse the configspec.""" # FIXME: Should we check that the configspec was created with the # correct settings ? (i.e. 
``list_values=False``) if not isinstance(configspec, ConfigObj): try: configspec = ConfigObj(configspec, raise_errors=True, file_error=True, _inspec=True) except ConfigObjError, e: # FIXME: Should these errors have a reference # to the already parsed ConfigObj ? raise ConfigspecError('Parsing configspec failed: %s' % e) except IOError, e: raise IOError('Reading configspec failed: %s' % e) self.configspec = configspec def _set_configspec(self, section, copy): """ Called by validate. Handles setting the configspec on subsections including sections to be validated by __many__ """ configspec = section.configspec many = configspec.get('__many__') if isinstance(many, dict): for entry in section.sections: if entry not in configspec: section[entry].configspec = many for entry in configspec.sections: if entry == '__many__': continue if entry not in section: section[entry] = {} section[entry]._created = True if copy: # copy comments section.comments[entry] = configspec.comments.get(entry, []) section.inline_comments[entry] = configspec.inline_comments.get(entry, '') # Could be a scalar when we expect a section if isinstance(section[entry], Section): section[entry].configspec = configspec[entry] def _write_line(self, indent_string, entry, this_entry, comment): """Write an individual line, for the write method""" # NOTE: the calls to self._quote here handles non-StringType values. if not self.unrepr: val = self._decode_element(self._quote(this_entry)) else: val = repr(this_entry) return '%s%s%s%s%s' % (indent_string, self._decode_element(self._quote(entry, multiline=False)), self._a_to_u(' = '), val, self._decode_element(comment)) def _write_marker(self, indent_string, depth, entry, comment): """Write a section marker line""" return '%s%s%s%s%s' % (indent_string, self._a_to_u('[' * depth), self._quote(self._decode_element(entry), multiline=False), self._a_to_u(']' * depth), self._decode_element(comment)) def _handle_comment(self, comment): """Deal with a comment.""" if not comment: return '' start = self.indent_type if not comment.startswith('#'): start += self._a_to_u(' # ') return (start + comment) # Public methods def write(self, outfile=None, section=None): """ Write the current ConfigObj as a file tekNico: FIXME: use StringIO instead of real files >>> filename = a.filename >>> a.filename = 'test.ini' >>> a.write() >>> a.filename = filename >>> a == ConfigObj('test.ini', raise_errors=True) 1 """ if self.indent_type is None: # this can be true if initialised from a dictionary self.indent_type = DEFAULT_INDENT_TYPE out = [] cs = self._a_to_u('#') csp = self._a_to_u('# ') if section is None: int_val = self.interpolation self.interpolation = False section = self for line in self.initial_comment: line = self._decode_element(line) stripped_line = line.strip() if stripped_line and not stripped_line.startswith(cs): line = csp + line out.append(line) indent_string = self.indent_type * section.depth for entry in (section.scalars + section.sections): if entry in section.defaults: # don't write out default values continue for comment_line in section.comments[entry]: comment_line = self._decode_element(comment_line.lstrip()) if comment_line and not comment_line.startswith(cs): comment_line = csp + comment_line out.append(indent_string + comment_line) this_entry = section[entry] comment = self._handle_comment(section.inline_comments[entry]) if isinstance(this_entry, dict): # a section out.append(self._write_marker( indent_string, this_entry.depth, entry, comment)) out.extend(self.write(section=this_entry)) 
else: out.append(self._write_line( indent_string, entry, this_entry, comment)) if section is self: for line in self.final_comment: line = self._decode_element(line) stripped_line = line.strip() if stripped_line and not stripped_line.startswith(cs): line = csp + line out.append(line) self.interpolation = int_val if section is not self: return out if (self.filename is None) and (outfile is None): # output a list of lines # might need to encode # NOTE: This will *screw* UTF16, each line will start with the BOM if self.encoding: out = [l.encode(self.encoding) for l in out] if (self.BOM and ((self.encoding is None) or (BOM_LIST.get(self.encoding.lower()) == 'utf_8'))): # Add the UTF8 BOM if not out: out.append('') out[0] = BOM_UTF8 + out[0] return out # Turn the list to a string, joined with correct newlines newline = self.newlines or os.linesep output = self._a_to_u(newline).join(out) if self.encoding: output = output.encode(self.encoding) if self.BOM and ((self.encoding is None) or match_utf8(self.encoding)): # Add the UTF8 BOM output = BOM_UTF8 + output if not output.endswith(newline): output += newline if outfile is not None: outfile.write(output) else: h = open(self.filename, 'wb') h.write(output) h.close() def validate(self, validator, preserve_errors=False, copy=False, section=None): """ Test the ConfigObj against a configspec. It uses the ``validator`` object from *validate.py*. To run ``validate`` on the current ConfigObj, call: :: test = config.validate(validator) (Normally having previously passed in the configspec when the ConfigObj was created - you can dynamically assign a dictionary of checks to the ``configspec`` attribute of a section though). It returns ``True`` if everything passes, or a dictionary of pass/fails (True/False). If every member of a subsection passes, it will just have the value ``True``. (It also returns ``False`` if all members fail). In addition, it converts the values from strings to their native types if their checks pass (and ``stringify`` is set). If ``preserve_errors`` is ``True`` (``False`` is default) then instead of a marking a fail with a ``False``, it will preserve the actual exception object. This can contain info about the reason for failure. For example the ``VdtValueTooSmallError`` indicates that the value supplied was too small. If a value (or section) is missing it will still be marked as ``False``. You must have the validate module to use ``preserve_errors=True``. You can then use the ``flatten_errors`` function to turn your nested results dictionary into a flattened list of failures - useful for displaying meaningful error messages. """ if section is None: if self.configspec is None: raise ValueError('No configspec supplied.') if preserve_errors: # We do this once to remove a top level dependency on the validate module # Which makes importing configobj faster from validate import VdtMissingValue self._vdtMissingValue = VdtMissingValue section = self if copy: section.initial_comment = section.configspec.initial_comment section.final_comment = section.configspec.final_comment section.encoding = section.configspec.encoding section.BOM = section.configspec.BOM section.newlines = section.configspec.newlines section.indent_type = section.configspec.indent_type # # section.default_values.clear() #?? 
configspec = section.configspec self._set_configspec(section, copy) def validate_entry(entry, spec, val, missing, ret_true, ret_false): section.default_values.pop(entry, None) try: section.default_values[entry] = validator.get_default_value(configspec[entry]) except (KeyError, AttributeError, validator.baseErrorClass): # No default, bad default or validator has no 'get_default_value' # (e.g. SimpleVal) pass try: check = validator.check(spec, val, missing=missing ) except validator.baseErrorClass, e: if not preserve_errors or isinstance(e, self._vdtMissingValue): out[entry] = False else: # preserve the error out[entry] = e ret_false = False ret_true = False else: ret_false = False out[entry] = True if self.stringify or missing: # if we are doing type conversion # or the value is a supplied default if not self.stringify: if isinstance(check, (list, tuple)): # preserve lists check = [self._str(item) for item in check] elif missing and check is None: # convert the None from a default to a '' check = '' else: check = self._str(check) if (check != val) or missing: section[entry] = check if not copy and missing and entry not in section.defaults: section.defaults.append(entry) return ret_true, ret_false # out = {} ret_true = True ret_false = True unvalidated = [k for k in section.scalars if k not in configspec] incorrect_sections = [k for k in configspec.sections if k in section.scalars] incorrect_scalars = [k for k in configspec.scalars if k in section.sections] for entry in configspec.scalars: if entry in ('__many__', '___many___'): # reserved names continue if (not entry in section.scalars) or (entry in section.defaults): # missing entries # or entries from defaults missing = True val = None if copy and entry not in section.scalars: # copy comments section.comments[entry] = ( configspec.comments.get(entry, [])) section.inline_comments[entry] = ( configspec.inline_comments.get(entry, '')) # else: missing = False val = section[entry] ret_true, ret_false = validate_entry(entry, configspec[entry], val, missing, ret_true, ret_false) many = None if '__many__' in configspec.scalars: many = configspec['__many__'] elif '___many___' in configspec.scalars: many = configspec['___many___'] if many is not None: for entry in unvalidated: val = section[entry] ret_true, ret_false = validate_entry(entry, many, val, False, ret_true, ret_false) unvalidated = [] for entry in incorrect_scalars: ret_true = False if not preserve_errors: out[entry] = False else: ret_false = False msg = 'Value %r was provided as a section' % entry out[entry] = validator.baseErrorClass(msg) for entry in incorrect_sections: ret_true = False if not preserve_errors: out[entry] = False else: ret_false = False msg = 'Section %r was provided as a single value' % entry out[entry] = validator.baseErrorClass(msg) # Missing sections will have been created as empty ones when the # configspec was read. 
for entry in section.sections: # FIXME: this means DEFAULT is not copied in copy mode if section is self and entry == 'DEFAULT': continue if section[entry].configspec is None: unvalidated.append(entry) continue if copy: section.comments[entry] = configspec.comments.get(entry, []) section.inline_comments[entry] = configspec.inline_comments.get(entry, '') check = self.validate(validator, preserve_errors=preserve_errors, copy=copy, section=section[entry]) out[entry] = check if check == False: ret_true = False elif check == True: ret_false = False else: ret_true = False section.extra_values = unvalidated if preserve_errors and not section._created: # If the section wasn't created (i.e. it wasn't missing) # then we can't return False, we need to preserve errors ret_false = False # if ret_false and preserve_errors and out: # If we are preserving errors, but all # the failures are from missing sections / values # then we can return False. Otherwise there is a # real failure that we need to preserve. ret_false = not any(out.values()) if ret_true: return True elif ret_false: return False return out def reset(self): """Clear ConfigObj instance and restore to 'freshly created' state.""" self.clear() self._initialise() # FIXME: Should be done by '_initialise', but ConfigObj constructor (and reload) # requires an empty dictionary self.configspec = None # Just to be sure ;-) self._original_configspec = None def reload(self): """ Reload a ConfigObj from file. This method raises a ``ReloadError`` if the ConfigObj doesn't have a filename attribute pointing to a file. """ if not isinstance(self.filename, basestring): raise ReloadError() filename = self.filename current_options = {} for entry in OPTION_DEFAULTS: if entry == 'configspec': continue current_options[entry] = getattr(self, entry) configspec = self._original_configspec current_options['configspec'] = configspec self.clear() self._initialise(current_options) self._load(filename, configspec) class SimpleVal(object): """ A simple validator. Can be used to check that all members expected are present. To use it, provide a configspec with all your members in (the value given will be ignored). Pass an instance of ``SimpleVal`` to the ``validate`` method of your ``ConfigObj``. ``validate`` will return ``True`` if all members are present, or a dictionary with True/False meaning present/missing. (Whole missing sections will be replaced with ``False``) """ def __init__(self): self.baseErrorClass = ConfigObjError def check(self, check, member, missing=False): """A dummy check method, always returns the value unchanged.""" if missing: raise self.baseErrorClass() return member def flatten_errors(cfg, res, levels=None, results=None): """ An example function that will turn a nested dictionary of results (as returned by ``ConfigObj.validate``) into a flat list. ``cfg`` is the ConfigObj instance being checked, ``res`` is the results dictionary returned by ``validate``. (This is a recursive function, so you shouldn't use the ``levels`` or ``results`` arguments - they are used by the function.) Returns a list of keys that failed. Each member of the list is a tuple:: ([list of sections...], key, result) If ``validate`` was called with ``preserve_errors=False`` (the default) then ``result`` will always be ``False``. *list of sections* is a flattened list of sections that the key was found in. If the section was missing (or a section was expected and a scalar provided - or vice-versa) then key will be ``None``. 
If the value (or section) was missing then ``result`` will be ``False``. If ``validate`` was called with ``preserve_errors=True`` and a value was present, but failed the check, then ``result`` will be the exception object returned. You can use this as a string that describes the failure. For example *The value "3" is of the wrong type*. """ if levels is None: # first time called levels = [] results = [] if res == True: return results if res == False or isinstance(res, Exception): results.append((levels[:], None, res)) if levels: levels.pop() return results for (key, val) in res.items(): if val == True: continue if isinstance(cfg.get(key), dict): # Go down one level levels.append(key) flatten_errors(cfg[key], val, levels, results) continue results.append((levels[:], key, val)) # # Go up one level if levels: levels.pop() # return results def get_extra_values(conf, _prepend=()): """ Find all the values and sections not in the configspec from a validated ConfigObj. ``get_extra_values`` returns a list of tuples where each tuple represents either an extra section, or an extra value. The tuples contain two values, a tuple representing the section the value is in and the name of the extra values. For extra values in the top level section the first member will be an empty tuple. For values in the 'foo' section the first member will be ``('foo',)``. For members in the 'bar' subsection of the 'foo' section the first member will be ``('foo', 'bar')``. NOTE: If you call ``get_extra_values`` on a ConfigObj instance that hasn't been validated it will return an empty list. """ out = [] out.extend((_prepend, name) for name in conf.extra_values) for name in conf.sections: if name not in conf.extra_values: out.extend(get_extra_values(conf[name], _prepend + (name,))) return out """*A programming language is a medium of expression.* - Paul Graham""" nipy-0.3.0/nipy/externals/setup.py000066400000000000000000000007261210344137400171730ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('externals', parent_package, top_path) config.add_subpackage('transforms3d') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) nipy-0.3.0/nipy/externals/transforms3d/000077500000000000000000000000001210344137400201015ustar00rootroot00000000000000nipy-0.3.0/nipy/externals/transforms3d/__init__.py000066400000000000000000000001661210344137400222150ustar00rootroot00000000000000''' Copies from transforms3d package Please see github.com/matthew-brett/transforms3d ''' from . import quaternions nipy-0.3.0/nipy/externals/transforms3d/quaternions.py000066400000000000000000000307661210344137400230370ustar00rootroot00000000000000'''Functions to operate on, or return, quaternions. The module also includes functions for the closely related angle, axis pair as a specification for rotation. Quaternions here consist of 4 values ``w, x, y, z``, where ``w`` is the real (scalar) part, and ``x, y, z`` are the complex (vector) part. Note - rotation matrices here apply to column vectors, that is, they are applied on the left of the vector. 
For example: >>> import numpy as np >>> q = [0, 1, 0, 0] # 180 degree rotation around axis 0 >>> M = quat2mat(q) # from this module >>> vec = np.array([1, 2, 3]).reshape((3,1)) # column vector >>> tvec = np.dot(M, vec) ''' import math import numpy as np _MAX_FLOAT = np.maximum_sctype(np.float) _FLOAT_EPS = np.finfo(np.float).eps def fillpositive(xyz, w2_thresh=None): ''' Compute unit quaternion from last 3 values Parameters ---------- xyz : iterable iterable containing 3 values, corresponding to quaternion x, y, z w2_thresh : None or float, optional threshold to determine if w squared is really negative. If None (default) then w2_thresh set equal to ``-np.finfo(xyz.dtype).eps``, if possible, otherwise ``-np.finfo(np.float).eps`` Returns ------- wxyz : array shape (4,) Full 4 values of quaternion Notes ----- If w, x, y, z are the values in the full quaternion, assumes w is positive. Gives error if w*w is estimated to be negative w = 0 corresponds to a 180 degree rotation The unit quaternion specifies that np.dot(wxyz, wxyz) == 1. If w is positive (assumed here), w is given by: w = np.sqrt(1.0-(x*x+y*y+z*z)) w2 = 1.0-(x*x+y*y+z*z) can be near zero, which will lead to numerical instability in sqrt. Here we use the system maximum float type to reduce numerical instability Examples -------- >>> import numpy as np >>> wxyz = fillpositive([0,0,0]) >>> np.all(wxyz == [1, 0, 0, 0]) True >>> wxyz = fillpositive([1,0,0]) # Corner case; w is 0 >>> np.all(wxyz == [0, 1, 0, 0]) True >>> np.dot(wxyz, wxyz) 1.0 ''' # Check inputs (force error if < 3 values) if len(xyz) != 3: raise ValueError('xyz should have length 3') # If necessary, guess precision of input if w2_thresh is None: try: # trap errors for non-array, integer array w2_thresh = -np.finfo(xyz.dtype).eps except (AttributeError, ValueError): w2_thresh = -_FLOAT_EPS # Use maximum precision xyz = np.asarray(xyz, dtype=_MAX_FLOAT) # Calculate w w2 = 1.0 - np.dot(xyz, xyz) if w2 < 0: if w2 < w2_thresh: raise ValueError('w2 should be positive, but is %f' % w2) w = 0 else: w = np.sqrt(w2) return np.r_[w, xyz] def quat2mat(q): ''' Calculate rotation matrix corresponding to quaternion Parameters ---------- q : 4 element array-like Returns ------- M : (3,3) array Rotation matrix corresponding to input quaternion *q* Notes ----- Rotation matrix applies to column vectors, and is applied to the left of coordinate vectors. The algorithm here allows non-unit quaternions. References ---------- Algorithm from http://en.wikipedia.org/wiki/Rotation_matrix#Quaternion Examples -------- >>> import numpy as np >>> M = quat2mat([1, 0, 0, 0]) # Identity quaternion >>> np.allclose(M, np.eye(3)) True >>> M = quat2mat([0, 1, 0, 0]) # 180 degree rotn around axis 0 >>> np.allclose(M, np.diag([1, -1, -1])) True ''' w, x, y, z = q Nq = w*w + x*x + y*y + z*z if Nq == 0.0: return np.eye(3) s = 2.0/Nq X = x*s Y = y*s Z = z*s wX = w*X; wY = w*Y; wZ = w*Z xX = x*X; xY = x*Y; xZ = x*Z yY = y*Y; yZ = y*Z; zZ = z*Z return np.array( [[ 1.0-(yY+zZ), xY-wZ, xZ+wY ], [ xY+wZ, 1.0-(xX+zZ), yZ-wX ], [ xZ-wY, yZ+wX, 1.0-(xX+yY) ]]) def mat2quat(M): ''' Calculate quaternion corresponding to given rotation matrix Parameters ---------- M : array-like 3x3 rotation matrix Returns ------- q : (4,) array closest quaternion to input matrix, having positive q[0] Notes ----- Method claimed to be robust to numerical errors in M Constructs quaternion by calculating maximum eigenvector for matrix K (constructed from input `M`). 
Although this is not tested, a maximum eigenvalue of 1 corresponds to a valid rotation. A quaternion q*-1 corresponds to the same rotation as q; thus the sign of the reconstructed quaternion is arbitrary, and we return quaternions with positive w (q[0]). Examples -------- >>> import numpy as np >>> q = mat2quat(np.eye(3)) # Identity rotation >>> np.allclose(q, [1, 0, 0, 0]) True >>> q = mat2quat(np.diag([1, -1, -1])) >>> np.allclose(q, [0, 1, 0, 0]) # 180 degree rotn around axis 0 True Notes ----- http://en.wikipedia.org/wiki/Rotation_matrix#Quaternion Bar-Itzhack, Itzhack Y. (2000), "New method for extracting the quaternion from a rotation matrix", AIAA Journal of Guidance, Control and Dynamics 23(6):1085-1087 (Engineering Note), ISSN 0731-5090 ''' # Qyx refers to the contribution of the y input vector component to # the x output vector component. Qyx is therefore the same as # M[0,1]. The notation is from the Wikipedia article. Qxx,Qyx,Qzx,Qxy,Qyy,Qzy,Qxz,Qyz,Qzz=M.flat # Fill only lower half of symmetric matrix K = np.array([ [Qxx-Qyy-Qzz, 0, 0, 0], [Qyx+Qxy, Qyy-Qxx-Qzz, 0, 0], [Qzx+Qxz, Qzy+Qyz, Qzz-Qxx-Qyy, 0], [Qyz-Qzy, Qzx-Qxz, Qxy-Qyx, Qxx+Qyy+Qzz]]) / 3 # Use Hermitian eigenvectors, values for speed vals, vecs = np.linalg.eigh(K) # Select largest eigenvector, reorder to w,x,y,z quaternion q = vecs[[3, 0, 1, 2],np.argmax(vals)] # Prefer quaternion with positive w # (q * -1 corresponds to same rotation as q) if q[0]<0: q *= -1 return q def mult(q1, q2): ''' Multiply two quaternions Parameters ---------- q1 : 4 element sequence q2 : 4 element sequence Returns ------- q12 : shape (4,) array Notes ----- See : http://en.wikipedia.org/wiki/Quaternions#Hamilton_product ''' w1, x1, y1, z1 = q1 w2, x2, y2, z2 = q2 w = w1*w2 - x1*x2 - y1*y2 - z1*z2 x = w1*x2 + x1*w2 + y1*z2 - z1*y2 y = w1*y2 + y1*w2 + z1*x2 - x1*z2 z = w1*z2 + z1*w2 + x1*y2 - y1*x2 return np.array([w, x, y, z]) def conjugate(q): ''' Conjugate of quaternion Parameters ---------- q : 4 element sequence w, i, j, k of quaternion Returns ------- conjq : array shape (4,) w, i, j, k of conjugate of `q` ''' return np.array(q) * np.array([1.0,-1,-1,-1]) def norm(q): ''' Return norm of quaternion Parameters ---------- q : 4 element sequence w, i, j, k of quaternion Returns ------- n : scalar quaternion norm ''' return np.dot(q,q) def isunit(q): ''' Return True is this is very nearly a unit quaternion ''' return np.allclose(norm(q),1) def inverse(q): ''' Return multiplicative inverse of quaternion `q` Parameters ---------- q : 4 element sequence w, i, j, k of quaternion Returns ------- invq : array shape (4,) w, i, j, k of quaternion inverse ''' return conjugate(q) / norm(q) def eye(): ''' Return identity quaternion ''' return np.array([1.0,0,0,0]) def rotate_vector(v, q): ''' Apply transformation in quaternion `q` to vector `v` Parameters ---------- v : 3 element sequence 3 dimensional vector q : 4 element sequence w, i, j, k of quaternion Returns ------- vdash : array shape (3,) `v` rotated by quaternion `q` Notes ----- See: http://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation#Describing_rotations_with_quaternions ''' varr = np.zeros((4,)) varr[1:] = v return mult(q, mult(varr, conjugate(q)))[1:] def nearly_equivalent(q1, q2, rtol=1e-5, atol=1e-8): ''' Returns True if `q1` and `q2` give near equivalent transforms q1 may be nearly numerically equal to q2, or nearly equal to q2 * -1 (becuase a quaternion multiplied by -1 gives the same transform). 
Parameters ---------- q1 : 4 element sequence w, x, y, z of first quaternion q2 : 4 element sequence w, x, y, z of second quaternion Returns ------- equiv : bool True if `q1` and `q2` are nearly equivalent, False otherwise Examples -------- >>> q1 = [1, 0, 0, 0] >>> nearly_equivalent(q1, [0, 1, 0, 0]) False >>> nearly_equivalent(q1, [1, 0, 0, 0]) True >>> nearly_equivalent(q1, [-1, 0, 0, 0]) True ''' q1 = np.array(q1) q2 = np.array(q2) if np.allclose(q1, q2, rtol, atol): return True return np.allclose(q1 * -1, q2, rtol, atol) def axangle2quat(vector, theta, is_normalized=False): ''' Quaternion for rotation of angle `theta` around `vector` Parameters ---------- vector : 3 element sequence vector specifying axis for rotation. theta : scalar angle of rotation is_normalized : bool, optional True if vector is already normalized (has norm of 1). Default False Returns ------- quat : 4 element sequence of symbols quaternion giving specified rotation Examples -------- >>> q = axangle2quat([1, 0, 0], np.pi) >>> np.allclose(q, [0, 1, 0, 0]) True Notes ----- Formula from http://mathworld.wolfram.com/EulerParameters.html ''' vector = np.array(vector) if not is_normalized: # Not in place to avoid numpy's stricter casting rules vector = vector / math.sqrt(np.dot(vector, vector)) t2 = theta / 2.0 st2 = math.sin(t2) return np.concatenate(([math.cos(t2)], vector * st2)) def axangle2rmat(vector, theta): ''' Rotation matrix of angle `theta` around `vector` Parameters ---------- vector : 3 element sequence vector specifying axis for rotation. theta : scalar angle of rotation Returns ------- mat : array shape (3,3) rotation matrix specified rotation Notes ----- From: http://en.wikipedia.org/wiki/Rotation_matrix#Axis_and_angle ''' x, y, z = vector[:3] n = math.sqrt(x*x + y*y + z*z) x = x/n y = y/n z = z/n c = math.cos(theta); s = math.sin(theta); C = 1-c xs = x*s; ys = y*s; zs = z*s xC = x*C; yC = y*C; zC = z*C xyC = x*yC; yzC = y*zC; zxC = z*xC return np.array([ [ x*xC+c, xyC-zs, zxC+ys ], [ xyC+zs, y*yC+c, yzC-xs ], [ zxC-ys, yzC+xs, z*zC+c ]]) def quat2axangle(quat, identity_thresh=None): ''' Convert quaternion to rotation of angle around axis Parameters ---------- quat : 4 element sequence w, x, y, z forming quaternion identity_thresh : None or scalar, optional threshold below which the norm of the vector part of the quaternion (x, y, z) is deemed to be 0, leading to the identity rotation. None (the default) leads to a threshold estimated based on the precision of the input. Returns ------- theta : scalar angle of rotation vector : array shape (3,) axis around which rotation occurs Examples -------- >>> vec, theta = quat2axangle([0, 1, 0, 0]) >>> vec array([ 1., 0., 0.]) >>> np.allclose(theta, np.pi) True If this is an identity rotation, we return a zero angle and an arbitrary vector >>> quat2axangle([1, 0, 0, 0]) (array([ 1., 0., 0.]), 0.0) Notes ----- A quaternion for which x, y, z are all equal to 0, is an identity rotation. 
In this case we return a 0 angle and an arbitrary vector, here [1, 0, 0] ''' w, x, y, z = quat vec = np.asarray([x, y, z]) if identity_thresh is None: try: identity_thresh = np.finfo(vec.dtype).eps * 3 except ValueError: # integer type identity_thresh = _FLOAT_EPS * 3 len2 = x * x + y * y + z * z if len2 < identity_thresh ** 2: # if vec is nearly 0,0,0, this is an identity rotation return np.array([1.0, 0, 0]), 0.0 theta = 2 * math.acos(max(min(w, 1), -1)) if len2 == float('inf'): return np.zeros((3,)), theta return vec / math.sqrt(len2), theta nipy-0.3.0/nipy/externals/transforms3d/setup.py000066400000000000000000000007211210344137400216130ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('transforms3d', parent_package, top_path) config.add_subpackage('tests') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) nipy-0.3.0/nipy/externals/transforms3d/taitbryan.py000066400000000000000000000302311210344137400224470ustar00rootroot00000000000000''' Module implementing Euler angle rotations and their conversions See: * http://en.wikipedia.org/wiki/Rotation_matrix * http://en.wikipedia.org/wiki/Euler_angles * http://mathworld.wolfram.com/EulerAngles.html See also: *Representing Attitude with Euler Angles and Quaternions: A Reference* (2006) by James Diebel. A cached PDF link last found here: http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.110.5134 Euler's rotation theorem tells us that any rotation in 3D can be described by 3 angles. Let's call the 3 angles the *Euler angle vector* and call the angles in the vector :math:`alpha`, :math:`beta` and :math:`gamma`. The vector is [ :math:`alpha`, :math:`beta`. :math:`gamma` ] and, in this description, the order of the parameters specifies the order in which the rotations occur (so the rotation corresponding to :math:`alpha` is applied first). In order to specify the meaning of an *Euler angle vector* we need to specify the axes around which each of the rotations corresponding to :math:`alpha`, :math:`beta` and :math:`gamma` will occur. There are therefore three axes for the rotations :math:`alpha`, :math:`beta` and :math:`gamma`; let's call them :math:`i` :math:`j`, :math:`k`. Let us express the rotation :math:`alpha` around axis `i` as a 3 by 3 rotation matrix `A`. Similarly :math:`beta` around `j` becomes 3 x 3 matrix `B` and :math:`gamma` around `k` becomes matrix `G`. Then the whole rotation expressed by the Euler angle vector [ :math:`alpha`, :math:`beta`. :math:`gamma` ], `R` is given by:: R = np.dot(G, np.dot(B, A)) See http://mathworld.wolfram.com/EulerAngles.html The order :math:`G B A` expresses the fact that the rotations are performed in the order of the vector (:math:`alpha` around axis `i` = `A` first). To convert a given Euler angle vector to a meaningful rotation, and a rotation matrix, we need to define: * the axes `i`, `j`, `k` * whether a rotation matrix should be applied on the left of a vector to be transformed (vectors are column vectors) or on the right (vectors are row vectors). 
* whether the rotations move the axes as they are applied (intrinsic rotations) - compared the situation where the axes stay fixed and the vectors move within the axis frame (extrinsic) * the handedness of the coordinate system See: http://en.wikipedia.org/wiki/Rotation_matrix#Ambiguities We are using the following conventions: * axes `i`, `j`, `k` are the `z`, `y`, and `x` axes respectively. Thus an Euler angle vector [ :math:`alpha`, :math:`beta`. :math:`gamma` ] in our convention implies a :math:`alpha` radian rotation around the `z` axis, followed by a :math:`beta` rotation around the `y` axis, followed by a :math:`gamma` rotation around the `x` axis. * the rotation matrix applies on the left, to column vectors on the right, so if `R` is the rotation matrix, and `v` is a 3 x N matrix with N column vectors, the transformed vector set `vdash` is given by ``vdash = np.dot(R, v)``. * extrinsic rotations - the axes are fixed, and do not move with the rotations. * a right-handed coordinate system The convention of rotation around ``z``, followed by rotation around ``y``, followed by rotation around ``x``, is known (confusingly) as "xyz", pitch-roll-yaw, Cardan angles, or Tait-Bryan angles. ''' import math import numpy as np _FLOAT_EPS_4 = np.finfo(float).eps * 4.0 def euler2mat(z=0, y=0, x=0): ''' Return matrix for rotations around z, y and x axes Uses the z, then y, then x convention above Parameters ---------- z : scalar Rotation angle in radians around z-axis (performed first) y : scalar Rotation angle in radians around y-axis x : scalar Rotation angle in radians around x-axis (performed last) Returns ------- M : array shape (3,3) Rotation matrix giving same rotation as for given angles Examples -------- >>> zrot = 1.3 # radians >>> yrot = -0.1 >>> xrot = 0.2 >>> M = euler2mat(zrot, yrot, xrot) >>> M.shape (3, 3) The output rotation matrix is equal to the composition of the individual rotations >>> M1 = euler2mat(zrot) >>> M2 = euler2mat(0, yrot) >>> M3 = euler2mat(0, 0, xrot) >>> composed_M = np.dot(M3, np.dot(M2, M1)) >>> np.allclose(M, composed_M) True You can specify rotations by named arguments >>> np.all(M3 == euler2mat(x=xrot)) True When applying M to a vector, the vector should column vector to the right of M. If the right hand side is a 2D array rather than a vector, then each column of the 2D array represents a vector. >>> vec = np.array([1, 0, 0]).reshape((3,1)) >>> v2 = np.dot(M, vec) >>> vecs = np.array([[1, 0, 0],[0, 1, 0]]).T # giving 3x2 array >>> vecs2 = np.dot(M, vecs) Rotations are counter-clockwise. >>> zred = np.dot(euler2mat(z=np.pi/2), np.eye(3)) >>> np.allclose(zred, [[0, -1, 0],[1, 0, 0], [0, 0, 1]]) True >>> yred = np.dot(euler2mat(y=np.pi/2), np.eye(3)) >>> np.allclose(yred, [[0, 0, 1],[0, 1, 0], [-1, 0, 0]]) True >>> xred = np.dot(euler2mat(x=np.pi/2), np.eye(3)) >>> np.allclose(xred, [[1, 0, 0],[0, 0, -1], [0, 1, 0]]) True Notes ----- The direction of rotation is given by the right-hand rule (orient the thumb of the right hand along the axis around which the rotation occurs, with the end of the thumb at the positive end of the axis; curl your fingers; the direction your fingers curl is the direction of rotation). Therefore, the rotations are counterclockwise if looking along the axis of rotation from positive to negative. 
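As a small worked illustration of the right-hand rule above (a sketch using
the same column-vector convention as the examples earlier in this docstring),
a quarter turn about the x axis carries the y axis onto the z axis:

>>> np.allclose(np.dot(euler2mat(x=np.pi/2), [0, 1, 0]), [0, 0, 1])
True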
''' Ms = [] if z: cosz = math.cos(z) sinz = math.sin(z) Ms.append(np.array( [[cosz, -sinz, 0], [sinz, cosz, 0], [0, 0, 1]])) if y: cosy = math.cos(y) siny = math.sin(y) Ms.append(np.array( [[cosy, 0, siny], [0, 1, 0], [-siny, 0, cosy]])) if x: cosx = math.cos(x) sinx = math.sin(x) Ms.append(np.array( [[1, 0, 0], [0, cosx, -sinx], [0, sinx, cosx]])) if Ms: return reduce(np.dot, Ms[::-1]) return np.eye(3) def mat2euler(M, cy_thresh=None): ''' Discover Euler angle vector from 3x3 matrix Uses the conventions above. Parameters ---------- M : array-like, shape (3,3) cy_thresh : None or scalar, optional threshold below which to give up on straightforward arctan for estimating x rotation. If None (default), estimate from precision of input. Returns ------- z : scalar y : scalar x : scalar Rotations in radians around z, y, x axes, respectively Notes ----- If there was no numerical error, the routine could be derived using Sympy expression for z then y then x rotation matrix, (see ``eulerangles.py`` in ``derivations`` subdirectory):: [ cos(y)*cos(z), -cos(y)*sin(z), sin(y)], [cos(x)*sin(z) + cos(z)*sin(x)*sin(y), cos(x)*cos(z) - sin(x)*sin(y)*sin(z), -cos(y)*sin(x)], [sin(x)*sin(z) - cos(x)*cos(z)*sin(y), cos(z)*sin(x) + cos(x)*sin(y)*sin(z), cos(x)*cos(y)] with the obvious derivations for z, y, and x z = atan2(-r12, r11) y = asin(r13) x = atan2(-r23, r33) Problems arise when cos(y) is close to zero, because both of:: z = atan2(cos(y)*sin(z), cos(y)*cos(z)) x = atan2(cos(y)*sin(x), cos(x)*cos(y)) will be close to atan2(0, 0), and highly unstable. The ``cy`` fix for numerical instability below is from: *Graphics Gems IV*, Paul Heckbert (editor), Academic Press, 1994, ISBN: 0123361559. Specifically it comes from EulerAngles.c by Ken Shoemake, and deals with the case where cos(y) is close to zero: See: http://www.graphicsgems.org/ The code appears to be licensed (from the website) as "can be used without restrictions". 
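A quick round-trip sketch (staying away from the cos(y) close to zero
degenerate case discussed above), using ``euler2mat`` from this module:

>>> M = euler2mat(0.1, -0.2, 0.3)
>>> z, y, x = mat2euler(M)
>>> np.allclose([z, y, x], [0.1, -0.2, 0.3])
True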
''' M = np.asarray(M) if cy_thresh is None: try: cy_thresh = np.finfo(M.dtype).eps * 4 except ValueError: cy_thresh = _FLOAT_EPS_4 r11, r12, r13, r21, r22, r23, r31, r32, r33 = M.flat # cy: sqrt((cos(y)*cos(z))**2 + (cos(x)*cos(y))**2) cy = math.sqrt(r33*r33 + r23*r23) if cy > cy_thresh: # cos(y) not close to zero, standard form z = math.atan2(-r12, r11) # atan2(cos(y)*sin(z), cos(y)*cos(z)) y = math.atan2(r13, cy) # atan2(sin(y), cy) x = math.atan2(-r23, r33) # atan2(cos(y)*sin(x), cos(x)*cos(y)) else: # cos(y) (close to) zero, so x -> 0.0 (see above) # so r21 -> sin(z), r22 -> cos(z) and z = math.atan2(r21, r22) y = math.atan2(r13, cy) # atan2(sin(y), cy) x = 0.0 return z, y, x def euler2quat(z=0, y=0, x=0): ''' Return quaternion corresponding to these Euler angles Uses the z, then y, then x convention above Parameters ---------- z : scalar Rotation angle in radians around z-axis (performed first) y : scalar Rotation angle in radians around y-axis x : scalar Rotation angle in radians around x-axis (performed last) Returns ------- quat : array shape (4,) Quaternion in w, x, y z (real, then vector) format Notes ----- Formula from Sympy - see ``eulerangles.py`` in ``derivations`` subdirectory ''' z = z/2.0 y = y/2.0 x = x/2.0 cz = math.cos(z) sz = math.sin(z) cy = math.cos(y) sy = math.sin(y) cx = math.cos(x) sx = math.sin(x) return np.array([ cx*cy*cz - sx*sy*sz, cx*sy*sz + cy*cz*sx, cx*cz*sy - sx*cy*sz, cx*cy*sz + sx*cz*sy]) def quat2euler(q): ''' Return Euler angles corresponding to quaternion `q` Parameters ---------- q : 4 element sequence w, x, y, z of quaternion Returns ------- z : scalar Rotation angle in radians around z-axis (performed first) y : scalar Rotation angle in radians around y-axis x : scalar Rotation angle in radians around x-axis (performed last) Notes ----- It's possible to reduce the amount of calculation a little, by combining parts of the ``quat2mat`` and ``mat2euler`` functions, but the reduction in computation is small, and the code repetition is large. ''' # delayed import to avoid cyclic dependencies from . import quaternions as nq return mat2euler(nq.quat2mat(q)) def euler2axangle(z=0, y=0, x=0): ''' Return axis, angle corresponding to these Euler angles Uses the z, then y, then x convention above Parameters ---------- z : scalar Rotation angle in radians around z-axis (performed first) y : scalar Rotation angle in radians around y-axis x : scalar Rotation angle in radians around x-axis (performed last) Returns ------- vector : array shape (3,) axis around which rotation occurs theta : scalar angle of rotation Examples -------- >>> vec, theta = euler2axangle(0, 1.5, 0) >>> np.allclose(vec, [0, 1, 0]) True >>> print(theta) 1.5 ''' # delayed import to avoid cyclic dependencies from . import quaternions as nq return nq.quat2axangle(euler2quat(z, y, x)) def axangle2euler(vector, theta): ''' Convert axis, angle pair to Euler angles Parameters ---------- vector : 3 element sequence vector specifying axis for rotation. theta : scalar angle of rotation Returns ------- z : scalar y : scalar x : scalar Rotations in radians around z, y, x axes, respectively Examples -------- >>> z, y, x = axangle2euler([1, 0, 0], 0) >>> np.allclose((z, y, x), 0) True Notes ----- It's possible to reduce the amount of calculation a little, by combining parts of the ``angle_axis2mat`` and ``mat2euler`` functions, but the reduction in computation is small, and the code repetition is large. ''' # delayed import to avoid cyclic dependencies from . 
import quaternions as nq M = nq.axangle2rmat(vector, theta) return mat2euler(M) nipy-0.3.0/nipy/externals/transforms3d/tests/000077500000000000000000000000001210344137400212435ustar00rootroot00000000000000nipy-0.3.0/nipy/externals/transforms3d/tests/__init__.py000066400000000000000000000000541210344137400233530ustar00rootroot00000000000000# Init to make tests directory into package nipy-0.3.0/nipy/externals/transforms3d/tests/samples.py000066400000000000000000000012561210344137400232650ustar00rootroot00000000000000import numpy as np from ..utils import inique, permuted_signs, permuted_with_signs from ..taitbryan import euler2mat # Regular points around a sphere _r13 = np.sqrt(1/3.0) _r12 = np.sqrt(0.5) sphere_points = ( tuple(inique(permuted_with_signs([1, 0, 0]))) + tuple(inique(permuted_with_signs([_r12, _r12, 0]))) + tuple(inique(permuted_signs([_r13, _r13, _r13]))) ) # Example rotations ''' euler_tuples = [] params = np.arange(-np.pi,np.pi,np.pi/2) euler_tuples = tuple((x, y, z) for x in params for y in params for z in params) euler_mats = tuple(euler2mat(*t) for t in euler_tuples) nipy-0.3.0/nipy/externals/transforms3d/tests/test_quaternions.py000066400000000000000000000140421210344137400252250ustar00rootroot00000000000000''' Test quaternion calculations ''' import math import numpy as np # Recent (1.2) versions of numpy have this decorator try: from numpy.testing.decorators import slow except ImportError: def slow(t): t.slow = True return t from nose.tools import assert_raises, assert_true, assert_false, \ assert_equal from numpy.testing import assert_array_almost_equal, assert_array_equal from .. import quaternions as tq from .samples import euler_mats # Example quaternions (from rotations) euler_quats = [] for M in euler_mats: euler_quats.append(tq.mat2quat(M)) # M, quaternion pairs eg_pairs = zip(euler_mats, euler_quats) # Set of arbitrary unit quaternions unit_quats = set() params = range(-2,3) for w in params: for x in params: for y in params: for z in params: q = (w, x, y, z) Nq = np.sqrt(np.dot(q, q)) if not Nq == 0: q = tuple([e / Nq for e in q]) unit_quats.add(q) def test_fillpos(): # Takes np array xyz = np.zeros((3,)) w,x,y,z = tq.fillpositive(xyz) yield assert_true, w == 1 # Or lists xyz = [0] * 3 w,x,y,z = tq.fillpositive(xyz) yield assert_true, w == 1 # Errors with wrong number of values yield assert_raises, ValueError, tq.fillpositive, [0, 0] yield assert_raises, ValueError, tq.fillpositive, [0]*4 # Errors with negative w2 yield assert_raises, ValueError, tq.fillpositive, [1.0]*3 # Test corner case where w is near zero wxyz = tq.fillpositive([1,0,0]) yield assert_true, wxyz[0] == 0.0 def test_conjugate(): # Takes sequence cq = tq.conjugate((1, 0, 0, 0)) # Returns float type yield assert_true, cq.dtype.kind == 'f' def test_quat2mat(): # also tested in roundtrip case below M = tq.quat2mat([1, 0, 0, 0]) yield assert_array_almost_equal, M, np.eye(3) M = tq.quat2mat([3, 0, 0, 0]) yield assert_array_almost_equal, M, np.eye(3) M = tq.quat2mat([0, 1, 0, 0]) yield assert_array_almost_equal, M, np.diag([1, -1, -1]) M = tq.quat2mat([0, 2, 0, 0]) yield assert_array_almost_equal, M, np.diag([1, -1, -1]) M = tq.quat2mat([0, 0, 0, 0]) yield assert_array_almost_equal, M, np.eye(3) def test_inverse(): # Takes sequence iq = tq.inverse((1, 0, 0, 0)) # Returns float type yield assert_true, iq.dtype.kind == 'f' for M, q in eg_pairs: iq = tq.inverse(q) iqM = tq.quat2mat(iq) iM = np.linalg.inv(M) yield assert_true, np.allclose(iM, iqM) def test_eye(): qi = tq.eye() yield assert_true, 
qi.dtype.kind == 'f' yield assert_true, np.all([1,0,0,0]==qi) yield assert_true, np.allclose(tq.quat2mat(qi), np.eye(3)) def test_norm(): qi = tq.eye() yield assert_true, tq.norm(qi) == 1 yield assert_true, tq.isunit(qi) qi[1] = 0.2 yield assert_true, not tq.isunit(qi) @slow def test_mult(): # Test that quaternion * same as matrix * for M1, q1 in eg_pairs[0::4]: for M2, q2 in eg_pairs[1::4]: q21 = tq.mult(q2, q1) yield assert_array_almost_equal, np.dot(M2,M1), tq.quat2mat(q21) @slow def test_qrotate(): vecs = np.eye(3) for vec in np.eye(3): for M, q in eg_pairs: vdash = tq.rotate_vector(vec, q) vM = np.dot(M, vec.reshape(3,1))[:,0] yield assert_array_almost_equal, vdash, vM @slow def test_quaternion_reconstruction(): # Test reconstruction of arbitrary unit quaternions for q in unit_quats: M = tq.quat2mat(q) qt = tq.mat2quat(M) # Accept positive or negative match posm = np.allclose(q, qt) negm = np.allclose(q, -qt) yield assert_true, posm or negm def test_angle_axis2quat(): q = tq.axangle2quat([1, 0, 0], 0) yield assert_array_equal, q, [1, 0, 0, 0] q = tq.axangle2quat([1, 0, 0], np.pi) yield assert_array_almost_equal, q, [0, 1, 0, 0] q = tq.axangle2quat([1, 0, 0], np.pi, True) yield assert_array_almost_equal, q, [0, 1, 0, 0] q = tq.axangle2quat([2, 0, 0], np.pi, False) yield assert_array_almost_equal, q, [0, 1, 0, 0] def sympy_aa2mat(vec, theta): # sympy expression derived from quaternion formulae v0, v1, v2 = vec # assumed normalized sin = math.sin cos = math.cos return np.array([ [ 1 - 2*v1**2*sin(0.5*theta)**2 - 2*v2**2*sin(0.5*theta)**2, -2*v2*cos(0.5*theta)*sin(0.5*theta) + 2*v0*v1*sin(0.5*theta)**2, 2*v1*cos(0.5*theta)*sin(0.5*theta) + 2*v0*v2*sin(0.5*theta)**2], [ 2*v2*cos(0.5*theta)*sin(0.5*theta) + 2*v0*v1*sin(0.5*theta)**2, 1 - 2*v0**2*sin(0.5*theta)**2 - 2*v2**2*sin(0.5*theta)**2, -2*v0*cos(0.5*theta)*sin(0.5*theta) + 2*v1*v2*sin(0.5*theta)**2], [-2*v1*cos(0.5*theta)*sin(0.5*theta) + 2*v0*v2*sin(0.5*theta)**2, 2*v0*cos(0.5*theta)*sin(0.5*theta) + 2*v1*v2*sin(0.5*theta)**2, 1 - 2*v0**2*sin(0.5*theta)**2 - 2*v1**2*sin(0.5*theta)**2]]) def sympy_aa2mat2(vec, theta): # sympy expression derived from direct formula v0, v1, v2 = vec # assumed normalized sin = math.sin cos = math.cos return np.array([ [v0**2*(1 - cos(theta)) + cos(theta), -v2*sin(theta) + v0*v1*(1 - cos(theta)), v1*sin(theta) + v0*v2*(1 - cos(theta))], [v2*sin(theta) + v0*v1*(1 - cos(theta)), v1**2*(1 - cos(theta)) + cos(theta), -v0*sin(theta) + v1*v2*(1 - cos(theta))], [-v1*sin(theta) + v0*v2*(1 - cos(theta)), v0*sin(theta) + v1*v2*(1 - cos(theta)), v2**2*(1 - cos(theta)) + cos(theta)]]) def test_axis_angle(): for M, q in eg_pairs: vec, theta = tq.quat2axangle(q) q2 = tq.axangle2quat(vec, theta) yield tq.nearly_equivalent, q, q2 aa_mat = tq.axangle2rmat(vec, theta) yield assert_array_almost_equal, aa_mat, M aa_mat2 = sympy_aa2mat(vec, theta) yield assert_array_almost_equal, aa_mat, aa_mat2 aa_mat22 = sympy_aa2mat2(vec, theta) yield assert_array_almost_equal, aa_mat, aa_mat22 nipy-0.3.0/nipy/externals/transforms3d/tests/test_taitbryan.py000066400000000000000000000122061210344137400246520ustar00rootroot00000000000000''' Tests for Euler angles ''' import math import numpy as np from numpy import pi from .. import quaternions as tq from .. 
import taitbryan as ttb from nose.tools import assert_true, assert_false, assert_equal from numpy.testing import assert_array_equal, assert_array_almost_equal from .samples import euler_tuples FLOAT_EPS = np.finfo(np.float).eps def x_only(x): cosx = np.cos(x) sinx = np.sin(x) return np.array( [[1, 0, 0], [0, cosx, -sinx], [0, sinx, cosx]]) def y_only(y): cosy = np.cos(y) siny = np.sin(y) return np.array( [[cosy, 0, siny], [0, 1, 0], [-siny, 0, cosy]]) def z_only(z): cosz = np.cos(z) sinz = np.sin(z) return np.array( [[cosz, -sinz, 0], [sinz, cosz, 0], [0, 0, 1]]) def sympy_euler(z, y, x): # The whole matrix formula for z,y,x rotations from Sympy cos = math.cos sin = math.sin # the following copy / pasted from Sympy - see derivations subdirectory return [ [ cos(y)*cos(z), -cos(y)*sin(z), sin(y)], [cos(x)*sin(z) + cos(z)*sin(x)*sin(y), cos(x)*cos(z) - sin(x)*sin(y)*sin(z), -cos(y)*sin(x)], [sin(x)*sin(z) - cos(x)*cos(z)*sin(y), cos(z)*sin(x) + cos(x)*sin(y)*sin(z), cos(x)*cos(y)] ] def is_valid_rotation(M): if not np.allclose(np.linalg.det(M), 1): return False return np.allclose(np.eye(3), np.dot(M, M.T)) def test_basic_euler(): # some example rotations, in radians zr = 0.05 yr = -0.4 xr = 0.2 # Rotation matrix composing the three rotations M = ttb.euler2mat(zr, yr, xr) # Corresponding individual rotation matrices M1 = ttb.euler2mat(zr) M2 = ttb.euler2mat(0, yr) M3 = ttb.euler2mat(0, 0, xr) # which are all valid rotation matrices yield assert_true, is_valid_rotation(M) yield assert_true, is_valid_rotation(M1) yield assert_true, is_valid_rotation(M2) yield assert_true, is_valid_rotation(M3) # Full matrix is composition of three individual matrices yield assert_true, np.allclose(M, np.dot(M3, np.dot(M2, M1))) # Rotations can be specified with named args, default 0 yield assert_true, np.all(ttb.euler2mat(zr) == ttb.euler2mat(z=zr)) yield assert_true, np.all(ttb.euler2mat(0, yr) == ttb.euler2mat(y=yr)) yield assert_true, np.all(ttb.euler2mat(0, 0, xr) == ttb.euler2mat(x=xr)) # Applying an opposite rotation same as inverse (the inverse is # the same as the transpose, but just for clarity) yield assert_true, np.allclose(ttb.euler2mat(x=-xr), np.linalg.inv(ttb.euler2mat(x=xr))) def test_euler_mat(): M = ttb.euler2mat() yield assert_array_equal, M, np.eye(3) for x, y, z in euler_tuples: M1 = ttb.euler2mat(z, y, x) M2 = sympy_euler(z, y, x) yield assert_array_almost_equal, M1, M2 M3 = np.dot(x_only(x), np.dot(y_only(y), z_only(z))) yield assert_array_almost_equal, M1, M3 zp, yp, xp = ttb.mat2euler(M1) # The parameters may not be the same as input, but they give the # same rotation matrix M4 = ttb.euler2mat(zp, yp, xp) yield assert_array_almost_equal, M1, M4 def sympy_euler2quat(z=0, y=0, x=0): # direct formula for z,y,x quaternion rotations using sympy # see derivations subfolder cos = math.cos sin = math.sin # the following copy / pasted from Sympy output return (cos(0.5*x)*cos(0.5*y)*cos(0.5*z) - sin(0.5*x)*sin(0.5*y)*sin(0.5*z), cos(0.5*x)*sin(0.5*y)*sin(0.5*z) + cos(0.5*y)*cos(0.5*z)*sin(0.5*x), cos(0.5*x)*cos(0.5*z)*sin(0.5*y) - cos(0.5*y)*sin(0.5*x)*sin(0.5*z), cos(0.5*x)*cos(0.5*y)*sin(0.5*z) + cos(0.5*z)*sin(0.5*x)*sin(0.5*y)) def crude_mat2euler(M): ''' The simplest possible - ignoring atan2 instability ''' r11, r12, r13, r21, r22, r23, r31, r32, r33 = M.flat return math.atan2(-r12, r11), math.asin(r13), math.atan2(-r23, r33) def test_euler_instability(): # Test for numerical errors in mat2euler # problems arise for cos(y) near 0 po2 = pi / 2 zyx = po2, po2, po2 M = ttb.euler2mat(*zyx) # 
Round trip M_back = ttb.euler2mat(*ttb.mat2euler(M)) yield assert_true, np.allclose(M, M_back) # disturb matrix slightly M_e = M - FLOAT_EPS # round trip to test - OK M_e_back = ttb.euler2mat(*ttb.mat2euler(M_e)) yield assert_true, np.allclose(M_e, M_e_back) # not so with crude routine M_e_back = ttb.euler2mat(*crude_mat2euler(M_e)) yield assert_false, np.allclose(M_e, M_e_back) def test_quats(): for x, y, z in euler_tuples: M1 = ttb.euler2mat(z, y, x) quatM = tq.mat2quat(M1) quat = ttb.euler2quat(z, y, x) yield tq.nearly_equivalent, quatM, quat quatS = sympy_euler2quat(z, y, x) yield tq.nearly_equivalent, quat, quatS zp, yp, xp = ttb.quat2euler(quat) # The parameters may not be the same as input, but they give the # same rotation matrix M2 = ttb.euler2mat(zp, yp, xp) yield assert_array_almost_equal, M1, M2 nipy-0.3.0/nipy/externals/transforms3d/utils.py000066400000000000000000000101321210344137400216100ustar00rootroot00000000000000''' Utilities for transforms3d ''' import math import numpy as np def normalized_vector(vec): ''' Return vector divided by Euclidean (L2) norm See :term:`unit vector` and :term:`Euclidean norm` Parameters ---------- vec : array-like shape (3,) Returns ------- nvec : array shape (3,) vector divided by L2 norm Examples -------- >>> vec = [1, 2, 3] >>> l2n = np.sqrt(np.dot(vec, vec)) >>> nvec = normalized_vector(vec) >>> np.allclose(np.array(vec) / l2n, nvec) True >>> vec = np.array([[1, 2, 3]]) >>> vec.shape (1, 3) >>> normalized_vector(vec).shape (3,) ''' vec = np.asarray(vec).squeeze() return vec / math.sqrt((vec**2).sum()) def vector_norm(vec): ''' Return vector Euclidaan (L2) norm See :term:`unit vector` and :term:`Euclidean norm` Parameters ---------- vec : array-like shape (3,) Returns ------- norm : scalar Examples -------- >>> vec = [1, 2, 3] >>> l2n = np.sqrt(np.dot(vec, vec)) >>> nvec = vector_norm(vec) >>> np.allclose(nvec, np.sqrt(np.dot(vec, vec))) True ''' vec = np.asarray(vec) return math.sqrt((vec**2).sum()) def permutations(iterable, r=None): ''' Generate all permutations of `iterable`, of length `r` From Python docs, expressing 2.6 ``itertools.permutations`` algorithm. If the elements are unique, then the resulting permutations will also be unique. 
Parameters ---------- iterable : iterable returning elements that will be permuted r : None or int length of sequence to return, if None (default) use length of `iterable` Returns ------- gen : generator generator that yields permutations Examples -------- >>> tuple(permutations(range(3))) ((0, 1, 2), (0, 2, 1), (1, 0, 2), (1, 2, 0), (2, 0, 1), (2, 1, 0)) >>> tuple(permutations(range(3), 2)) ((0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)) ''' pool = tuple(iterable) n = len(pool) r = n if r is None else r if r > n: return indices = range(n) cycles = range(n, n-r, -1) yield tuple(pool[i] for i in indices[:r]) while n: for i in reversed(range(r)): cycles[i] -= 1 if cycles[i] == 0: indices[i:] = indices[i+1:] + indices[i:i+1] cycles[i] = n - i else: j = cycles[i] indices[i], indices[-j] = indices[-j], indices[i] yield tuple(pool[i] for i in indices[:r]) break else: return def inique(iterable): ''' Generate unique elements from `iterable` Parameters ---------- iterable : iterable Returns ------- gen : generator generator that yields unique elements from `iterable` Examples -------- >>> tuple(inique([0, 1, 2, 0, 2, 3])) (0, 1, 2, 3) ''' history = [] for val in iterable: if val not in history: history.append(val) yield val def permuted_signs(seq): ''' Generate permuted signs for sequence `seq` Parameters ---------- seq : sequence Returns ------- gen : generator generator returning `seq` with signs permuted Examples -------- >>> tuple(permuted_signs([1, -2, 0])) ((1, -2, 0), (1, -2, 0), (1, 2, 0), (1, 2, 0), (-1, -2, 0), (-1, -2, 0), (-1, 2, 0), (-1, 2, 0)) ''' seq = tuple(seq) n = len(seq) for fs in inique(permutations([1]*n + [-1]*n, n)): yield tuple(e * f for e, f in zip(seq, fs)) def permuted_with_signs(seq): ''' Return all permutations of `seq` with all sign permutations Parameters ---------- seq : sequence Returns ------- gen : generator generator returning permutations and sign permutations Examples -------- >>> tuple(permuted_with_signs((1,2))) ((1, 2), (1, -2), (-1, 2), (-1, -2), (2, 1), (2, -1), (-2, 1), (-2, -1)) ''' for pseq in permutations(seq): for sseq in permuted_signs(pseq): yield sseq nipy-0.3.0/nipy/externals/validate.py000066400000000000000000001334201210344137400176220ustar00rootroot00000000000000# validate.py # A Validator object # Copyright (C) 2005-2010 Michael Foord, Mark Andrews, Nicola Larosa # E-mail: fuzzyman AT voidspace DOT org DOT uk # mark AT la-la DOT com # nico AT tekNico DOT net # This software is licensed under the terms of the BSD license. # http://www.voidspace.org.uk/python/license.shtml # Basically you're free to copy, modify, distribute and relicense it, # So long as you keep a copy of the license with it. # Scripts maintained at http://www.voidspace.org.uk/python/index.shtml # For information about bugfixes, updates and support, please join the # ConfigObj mailing list: # http://lists.sourceforge.net/lists/listinfo/configobj-develop # Comments, suggestions and bug reports welcome. # disable nose tests here try: from nose.plugins.skip import SkipTest except ImportError: pass else: def setup_module(): raise SkipTest('Doctests fail for externals.validator') """ The Validator object is used to check that supplied values conform to a specification. The value can be supplied as a string - e.g. from a config file. In this case the check will also *convert* the value to the required type. This allows you to add validation as a transparent layer to access data stored as strings. 
The validation checks that the data is correct *and* converts it to the expected type. Some standard checks are provided for basic data types. Additional checks are easy to write. They can be provided when the ``Validator`` is instantiated or added afterwards. The standard functions work with the following basic data types : * integers * floats * booleans * strings * ip_addr plus lists of these datatypes Adding additional checks is done through coding simple functions. The full set of standard checks are : * 'integer': matches integer values (including negative) Takes optional 'min' and 'max' arguments : :: integer() integer(3, 9) # any value from 3 to 9 integer(min=0) # any positive value integer(max=9) * 'float': matches float values Has the same parameters as the integer check. * 'boolean': matches boolean values - ``True`` or ``False`` Acceptable string values for True are : true, on, yes, 1 Acceptable string values for False are : false, off, no, 0 Any other value raises an error. * 'ip_addr': matches an Internet Protocol address, v.4, represented by a dotted-quad string, i.e. '1.2.3.4'. * 'string': matches any string. Takes optional keyword args 'min' and 'max' to specify min and max lengths of the string. * 'list': matches any list. Takes optional keyword args 'min', and 'max' to specify min and max sizes of the list. (Always returns a list.) * 'tuple': matches any tuple. Takes optional keyword args 'min', and 'max' to specify min and max sizes of the tuple. (Always returns a tuple.) * 'int_list': Matches a list of integers. Takes the same arguments as list. * 'float_list': Matches a list of floats. Takes the same arguments as list. * 'bool_list': Matches a list of boolean values. Takes the same arguments as list. * 'ip_addr_list': Matches a list of IP addresses. Takes the same arguments as list. * 'string_list': Matches a list of strings. Takes the same arguments as list. * 'mixed_list': Matches a list with different types in specific positions. List size must match the number of arguments. Each position can be one of : 'integer', 'float', 'ip_addr', 'string', 'boolean' So to specify a list with two strings followed by two integers, you write the check as : :: mixed_list('string', 'string', 'integer', 'integer') * 'pass': This check matches everything ! It never fails and the value is unchanged. It is also the default if no check is specified. * 'option': This check matches any from a list of options. You specify this check with : :: option('option 1', 'option 2', 'option 3') You can supply a default value (returned if no value is supplied) using the default keyword argument. You specify a list argument for default using a list constructor syntax in the check : :: checkname(arg1, arg2, default=list('val 1', 'val 2', 'val 3')) A badly formatted set of arguments will raise a ``VdtParamError``. 
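For example, using the ``Validator`` class defined below, the 'integer' check
both validates a string value and converts it to an ``int``:

>>> vtor = Validator()
>>> vtor.check('integer(0, 9)', '5')
5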
""" __version__ = '1.0.1' __all__ = ( '__version__', 'dottedQuadToNum', 'numToDottedQuad', 'ValidateError', 'VdtUnknownCheckError', 'VdtParamError', 'VdtTypeError', 'VdtValueError', 'VdtValueTooSmallError', 'VdtValueTooBigError', 'VdtValueTooShortError', 'VdtValueTooLongError', 'VdtMissingValue', 'Validator', 'is_integer', 'is_float', 'is_boolean', 'is_list', 'is_tuple', 'is_ip_addr', 'is_string', 'is_int_list', 'is_bool_list', 'is_float_list', 'is_string_list', 'is_ip_addr_list', 'is_mixed_list', 'is_option', '__docformat__', ) import re _list_arg = re.compile(r''' (?: ([a-zA-Z_][a-zA-Z0-9_]*)\s*=\s*list\( ( (?: \s* (?: (?:".*?")| # double quotes (?:'.*?')| # single quotes (?:[^'",\s\)][^,\)]*?) # unquoted ) \s*,\s* )* (?: (?:".*?")| # double quotes (?:'.*?')| # single quotes (?:[^'",\s\)][^,\)]*?) # unquoted )? # last one ) \) ) ''', re.VERBOSE | re.DOTALL) # two groups _list_members = re.compile(r''' ( (?:".*?")| # double quotes (?:'.*?')| # single quotes (?:[^'",\s=][^,=]*?) # unquoted ) (?: (?:\s*,\s*)|(?:\s*$) # comma ) ''', re.VERBOSE | re.DOTALL) # one group _paramstring = r''' (?: ( (?: [a-zA-Z_][a-zA-Z0-9_]*\s*=\s*list\( (?: \s* (?: (?:".*?")| # double quotes (?:'.*?')| # single quotes (?:[^'",\s\)][^,\)]*?) # unquoted ) \s*,\s* )* (?: (?:".*?")| # double quotes (?:'.*?')| # single quotes (?:[^'",\s\)][^,\)]*?) # unquoted )? # last one \) )| (?: (?:".*?")| # double quotes (?:'.*?')| # single quotes (?:[^'",\s=][^,=]*?)| # unquoted (?: # keyword argument [a-zA-Z_][a-zA-Z0-9_]*\s*=\s* (?: (?:".*?")| # double quotes (?:'.*?')| # single quotes (?:[^'",\s=][^,=]*?) # unquoted ) ) ) ) (?: (?:\s*,\s*)|(?:\s*$) # comma ) ) ''' _matchstring = '^%s*' % _paramstring # Python pre 2.2.1 doesn't have bool try: bool except NameError: def bool(val): """Simple boolean equivalent function. """ if val: return 1 else: return 0 def dottedQuadToNum(ip): """ Convert decimal dotted quad string to long integer >>> int(dottedQuadToNum('1 ')) 1 >>> int(dottedQuadToNum(' 1.2')) 16777218 >>> int(dottedQuadToNum(' 1.2.3 ')) 16908291 >>> int(dottedQuadToNum('1.2.3.4')) 16909060 >>> dottedQuadToNum('1.2.3. 
4') 16909060 >>> dottedQuadToNum('255.255.255.255') 4294967295L >>> dottedQuadToNum('255.255.255.256') Traceback (most recent call last): ValueError: Not a good dotted-quad IP: 255.255.255.256 """ # import here to avoid it when ip_addr values are not used import socket, struct try: return struct.unpack('!L', socket.inet_aton(ip.strip()))[0] except socket.error: # bug in inet_aton, corrected in Python 2.3 if ip.strip() == '255.255.255.255': return 0xFFFFFFFFL else: raise ValueError('Not a good dotted-quad IP: %s' % ip) return def numToDottedQuad(num): """ Convert long int to dotted quad string >>> numToDottedQuad(-1L) Traceback (most recent call last): ValueError: Not a good numeric IP: -1 >>> numToDottedQuad(1L) '0.0.0.1' >>> numToDottedQuad(16777218L) '1.0.0.2' >>> numToDottedQuad(16908291L) '1.2.0.3' >>> numToDottedQuad(16909060L) '1.2.3.4' >>> numToDottedQuad(4294967295L) '255.255.255.255' >>> numToDottedQuad(4294967296L) Traceback (most recent call last): ValueError: Not a good numeric IP: 4294967296 """ # import here to avoid it when ip_addr values are not used import socket, struct # no need to intercept here, 4294967295L is fine if num > 4294967295L or num < 0: raise ValueError('Not a good numeric IP: %s' % num) try: return socket.inet_ntoa( struct.pack('!L', long(num))) except (socket.error, struct.error, OverflowError): raise ValueError('Not a good numeric IP: %s' % num) class ValidateError(Exception): """ This error indicates that the check failed. It can be the base class for more specific errors. Any check function that fails ought to raise this error. (or a subclass) >>> raise ValidateError Traceback (most recent call last): ValidateError """ class VdtMissingValue(ValidateError): """No value was supplied to a check that needed one.""" class VdtUnknownCheckError(ValidateError): """An unknown check function was requested""" def __init__(self, value): """ >>> raise VdtUnknownCheckError('yoda') Traceback (most recent call last): VdtUnknownCheckError: the check "yoda" is unknown. """ ValidateError.__init__(self, 'the check "%s" is unknown.' % (value,)) class VdtParamError(SyntaxError): """An incorrect parameter was passed""" def __init__(self, name, value): """ >>> raise VdtParamError('yoda', 'jedi') Traceback (most recent call last): VdtParamError: passed an incorrect value "jedi" for parameter "yoda". """ SyntaxError.__init__(self, 'passed an incorrect value "%s" for parameter "%s".' % (value, name)) class VdtTypeError(ValidateError): """The value supplied was of the wrong type""" def __init__(self, value): """ >>> raise VdtTypeError('jedi') Traceback (most recent call last): VdtTypeError: the value "jedi" is of the wrong type. """ ValidateError.__init__(self, 'the value "%s" is of the wrong type.' % (value,)) class VdtValueError(ValidateError): """The value supplied was of the correct type, but was not an allowed value.""" def __init__(self, value): """ >>> raise VdtValueError('jedi') Traceback (most recent call last): VdtValueError: the value "jedi" is unacceptable. """ ValidateError.__init__(self, 'the value "%s" is unacceptable.' % (value,)) class VdtValueTooSmallError(VdtValueError): """The value supplied was of the correct type, but was too small.""" def __init__(self, value): """ >>> raise VdtValueTooSmallError('0') Traceback (most recent call last): VdtValueTooSmallError: the value "0" is too small. """ ValidateError.__init__(self, 'the value "%s" is too small.' 
% (value,)) class VdtValueTooBigError(VdtValueError): """The value supplied was of the correct type, but was too big.""" def __init__(self, value): """ >>> raise VdtValueTooBigError('1') Traceback (most recent call last): VdtValueTooBigError: the value "1" is too big. """ ValidateError.__init__(self, 'the value "%s" is too big.' % (value,)) class VdtValueTooShortError(VdtValueError): """The value supplied was of the correct type, but was too short.""" def __init__(self, value): """ >>> raise VdtValueTooShortError('jed') Traceback (most recent call last): VdtValueTooShortError: the value "jed" is too short. """ ValidateError.__init__( self, 'the value "%s" is too short.' % (value,)) class VdtValueTooLongError(VdtValueError): """The value supplied was of the correct type, but was too long.""" def __init__(self, value): """ >>> raise VdtValueTooLongError('jedie') Traceback (most recent call last): VdtValueTooLongError: the value "jedie" is too long. """ ValidateError.__init__(self, 'the value "%s" is too long.' % (value,)) class Validator(object): """ Validator is an object that allows you to register a set of 'checks'. These checks take input and test that it conforms to the check. This can also involve converting the value from a string into the correct datatype. The ``check`` method takes an input string which configures which check is to be used and applies that check to a supplied value. An example input string would be: 'int_range(param1, param2)' You would then provide something like: >>> def int_range_check(value, min, max): ... # turn min and max from strings to integers ... min = int(min) ... max = int(max) ... # check that value is of the correct type. ... # possible valid inputs are integers or strings ... # that represent integers ... if not isinstance(value, (int, long, basestring)): ... raise VdtTypeError(value) ... elif isinstance(value, basestring): ... # if we are given a string ... # attempt to convert to an integer ... try: ... value = int(value) ... except ValueError: ... raise VdtValueError(value) ... # check the value is between our constraints ... if not min <= value: ... raise VdtValueTooSmallError(value) ... if not value <= max: ... raise VdtValueTooBigError(value) ... return value >>> fdict = {'int_range': int_range_check} >>> vtr1 = Validator(fdict) >>> vtr1.check('int_range(20, 40)', '30') 30 >>> vtr1.check('int_range(20, 40)', '60') Traceback (most recent call last): VdtValueTooBigError: the value "60" is too big. New functions can be added with : :: >>> vtr2 = Validator() >>> vtr2.functions['int_range'] = int_range_check Or by passing in a dictionary of functions when Validator is instantiated. Your functions *can* use keyword arguments, but the first argument should always be 'value'. If the function doesn't take additional arguments, the parentheses are optional in the check. It can be written with either of : :: keyword = function_name keyword = function_name() The first program to utilise Validator() was Michael Foord's ConfigObj, an alternative to ConfigParser which supports lists and can validate a config file using a config schema. For more details on using Validator with ConfigObj see: http://www.voidspace.org.uk/python/configobj.html """ # this regex does the initial parsing of the checks _func_re = re.compile(r'(.+?)\((.*)\)', re.DOTALL) # this regex takes apart keyword arguments _key_arg = re.compile(r'^([a-zA-Z_][a-zA-Z0-9_]*)\s*=\s*(.*)$', re.DOTALL) # this regex finds keyword=list(....) 
type values _list_arg = _list_arg # this regex takes individual values out of lists - in one pass _list_members = _list_members # These regexes check a set of arguments for validity # and then pull the members out _paramfinder = re.compile(_paramstring, re.VERBOSE | re.DOTALL) _matchfinder = re.compile(_matchstring, re.VERBOSE | re.DOTALL) def __init__(self, functions=None): """ >>> vtri = Validator() """ self.functions = { '': self._pass, 'integer': is_integer, 'float': is_float, 'boolean': is_boolean, 'ip_addr': is_ip_addr, 'string': is_string, 'list': is_list, 'tuple': is_tuple, 'int_list': is_int_list, 'float_list': is_float_list, 'bool_list': is_bool_list, 'ip_addr_list': is_ip_addr_list, 'string_list': is_string_list, 'mixed_list': is_mixed_list, 'pass': self._pass, 'option': is_option, 'force_list': force_list, } if functions is not None: self.functions.update(functions) # tekNico: for use by ConfigObj self.baseErrorClass = ValidateError self._cache = {} def check(self, check, value, missing=False): """ Usage: check(check, value) Arguments: check: string representing check to apply (including arguments) value: object to be checked Returns value, converted to correct type if necessary If the check fails, raises a ``ValidateError`` subclass. >>> vtor.check('yoda', '') Traceback (most recent call last): VdtUnknownCheckError: the check "yoda" is unknown. >>> vtor.check('yoda()', '') Traceback (most recent call last): VdtUnknownCheckError: the check "yoda" is unknown. >>> vtor.check('string(default="")', '', missing=True) '' """ fun_name, fun_args, fun_kwargs, default = self._parse_with_caching(check) if missing: if default is None: # no information needed here - to be handled by caller raise VdtMissingValue() value = self._handle_none(default) if value is None: return None return self._check_value(value, fun_name, fun_args, fun_kwargs) def _handle_none(self, value): if value == 'None': value = None elif value in ("'None'", '"None"'): # Special case a quoted None value = self._unquote(value) return value def _parse_with_caching(self, check): if check in self._cache: fun_name, fun_args, fun_kwargs, default = self._cache[check] # We call list and dict below to work with *copies* of the data # rather than the original (which are mutable of course) fun_args = list(fun_args) fun_kwargs = dict(fun_kwargs) else: fun_name, fun_args, fun_kwargs, default = self._parse_check(check) fun_kwargs = dict([(str(key), value) for (key, value) in fun_kwargs.items()]) self._cache[check] = fun_name, list(fun_args), dict(fun_kwargs), default return fun_name, fun_args, fun_kwargs, default def _check_value(self, value, fun_name, fun_args, fun_kwargs): try: fun = self.functions[fun_name] except KeyError: raise VdtUnknownCheckError(fun_name) else: return fun(value, *fun_args, **fun_kwargs) def _parse_check(self, check): fun_match = self._func_re.match(check) if fun_match: fun_name = fun_match.group(1) arg_string = fun_match.group(2) arg_match = self._matchfinder.match(arg_string) if arg_match is None: # Bad syntax raise VdtParamError('Bad syntax in check "%s".' 
% check) fun_args = [] fun_kwargs = {} # pull out args of group 2 for arg in self._paramfinder.findall(arg_string): # args may need whitespace removing (before removing quotes) arg = arg.strip() listmatch = self._list_arg.match(arg) if listmatch: key, val = self._list_handle(listmatch) fun_kwargs[key] = val continue keymatch = self._key_arg.match(arg) if keymatch: val = keymatch.group(2) if not val in ("'None'", '"None"'): # Special case a quoted None val = self._unquote(val) fun_kwargs[keymatch.group(1)] = val continue fun_args.append(self._unquote(arg)) else: # allows for function names without (args) return check, (), {}, None # Default must be deleted if the value is specified too, # otherwise the check function will get a spurious "default" keyword arg try: default = fun_kwargs.pop('default', None) except AttributeError: # Python 2.2 compatibility default = None try: default = fun_kwargs['default'] del fun_kwargs['default'] except KeyError: pass return fun_name, fun_args, fun_kwargs, default def _unquote(self, val): """Unquote a value if necessary.""" if (len(val) >= 2) and (val[0] in ("'", '"')) and (val[0] == val[-1]): val = val[1:-1] return val def _list_handle(self, listmatch): """Take apart a ``keyword=list('val, 'val')`` type string.""" out = [] name = listmatch.group(1) args = listmatch.group(2) for arg in self._list_members.findall(args): out.append(self._unquote(arg)) return name, out def _pass(self, value): """ Dummy check that always passes >>> vtor.check('', 0) 0 >>> vtor.check('', '0') '0' """ return value def get_default_value(self, check): """ Given a check, return the default value for the check (converted to the right type). If the check doesn't specify a default value then a ``KeyError`` will be raised. """ fun_name, fun_args, fun_kwargs, default = self._parse_with_caching(check) if default is None: raise KeyError('Check "%s" has no default value.' % check) value = self._handle_none(default) if value is None: return value return self._check_value(value, fun_name, fun_args, fun_kwargs) def _is_num_param(names, values, to_float=False): """ Return numbers from inputs or raise VdtParamError. Lets ``None`` pass through. Pass in keyword argument ``to_float=True`` to use float for the conversion rather than int. >>> _is_num_param(('', ''), (0, 1.0)) [0, 1] >>> _is_num_param(('', ''), (0, 1.0), to_float=True) [0.0, 1.0] >>> _is_num_param(('a'), ('a')) Traceback (most recent call last): VdtParamError: passed an incorrect value "a" for parameter "a". """ fun = to_float and float or int out_params = [] for (name, val) in zip(names, values): if val is None: out_params.append(val) elif isinstance(val, (int, long, float, basestring)): try: out_params.append(fun(val)) except ValueError, e: raise VdtParamError(name, val) else: raise VdtParamError(name, val) return out_params # built in checks # you can override these by setting the appropriate name # in Validator.functions # note: if the params are specified wrongly in your input string, # you will also raise errors. def is_integer(value, min=None, max=None): """ A check that tests that a given value is an integer (int, or long) and optionally, between bounds. A negative value is accepted, while a float will fail. If the value is a string, then the conversion is done - if possible. Otherwise a VdtError is raised. >>> vtor.check('integer', '-1') -1 >>> vtor.check('integer', '0') 0 >>> vtor.check('integer', 9) 9 >>> vtor.check('integer', 'a') Traceback (most recent call last): VdtTypeError: the value "a" is of the wrong type. 
>>> vtor.check('integer', '2.2') Traceback (most recent call last): VdtTypeError: the value "2.2" is of the wrong type. >>> vtor.check('integer(10)', '20') 20 >>> vtor.check('integer(max=20)', '15') 15 >>> vtor.check('integer(10)', '9') Traceback (most recent call last): VdtValueTooSmallError: the value "9" is too small. >>> vtor.check('integer(10)', 9) Traceback (most recent call last): VdtValueTooSmallError: the value "9" is too small. >>> vtor.check('integer(max=20)', '35') Traceback (most recent call last): VdtValueTooBigError: the value "35" is too big. >>> vtor.check('integer(max=20)', 35) Traceback (most recent call last): VdtValueTooBigError: the value "35" is too big. >>> vtor.check('integer(0, 9)', False) 0 """ (min_val, max_val) = _is_num_param(('min', 'max'), (min, max)) if not isinstance(value, (int, long, basestring)): raise VdtTypeError(value) if isinstance(value, basestring): # if it's a string - does it represent an integer ? try: value = int(value) except ValueError: raise VdtTypeError(value) if (min_val is not None) and (value < min_val): raise VdtValueTooSmallError(value) if (max_val is not None) and (value > max_val): raise VdtValueTooBigError(value) return value def is_float(value, min=None, max=None): """ A check that tests that a given value is a float (an integer will be accepted), and optionally - that it is between bounds. If the value is a string, then the conversion is done - if possible. Otherwise a VdtError is raised. This can accept negative values. >>> vtor.check('float', '2') 2.0 From now on we multiply the value to avoid comparing decimals >>> vtor.check('float', '-6.8') * 10 -68.0 >>> vtor.check('float', '12.2') * 10 122.0 >>> vtor.check('float', 8.4) * 10 84.0 >>> vtor.check('float', 'a') Traceback (most recent call last): VdtTypeError: the value "a" is of the wrong type. >>> vtor.check('float(10.1)', '10.2') * 10 102.0 >>> vtor.check('float(max=20.2)', '15.1') * 10 151.0 >>> vtor.check('float(10.0)', '9.0') Traceback (most recent call last): VdtValueTooSmallError: the value "9.0" is too small. >>> vtor.check('float(max=20.0)', '35.0') Traceback (most recent call last): VdtValueTooBigError: the value "35.0" is too big. """ (min_val, max_val) = _is_num_param( ('min', 'max'), (min, max), to_float=True) if not isinstance(value, (int, long, float, basestring)): raise VdtTypeError(value) if not isinstance(value, float): # if it's a string - does it represent a float ? try: value = float(value) except ValueError: raise VdtTypeError(value) if (min_val is not None) and (value < min_val): raise VdtValueTooSmallError(value) if (max_val is not None) and (value > max_val): raise VdtValueTooBigError(value) return value bool_dict = { True: True, 'on': True, '1': True, 'true': True, 'yes': True, False: False, 'off': False, '0': False, 'false': False, 'no': False, } def is_boolean(value): """ Check if the value represents a boolean. 
>>> vtor.check('boolean', 0) 0 >>> vtor.check('boolean', False) 0 >>> vtor.check('boolean', '0') 0 >>> vtor.check('boolean', 'off') 0 >>> vtor.check('boolean', 'false') 0 >>> vtor.check('boolean', 'no') 0 >>> vtor.check('boolean', 'nO') 0 >>> vtor.check('boolean', 'NO') 0 >>> vtor.check('boolean', 1) 1 >>> vtor.check('boolean', True) 1 >>> vtor.check('boolean', '1') 1 >>> vtor.check('boolean', 'on') 1 >>> vtor.check('boolean', 'true') 1 >>> vtor.check('boolean', 'yes') 1 >>> vtor.check('boolean', 'Yes') 1 >>> vtor.check('boolean', 'YES') 1 >>> vtor.check('boolean', '') Traceback (most recent call last): VdtTypeError: the value "" is of the wrong type. >>> vtor.check('boolean', 'up') Traceback (most recent call last): VdtTypeError: the value "up" is of the wrong type. """ if isinstance(value, basestring): try: return bool_dict[value.lower()] except KeyError: raise VdtTypeError(value) # we do an equality test rather than an identity test # this ensures Python 2.2 compatibilty # and allows 0 and 1 to represent True and False if value == False: return False elif value == True: return True else: raise VdtTypeError(value) def is_ip_addr(value): """ Check that the supplied value is an Internet Protocol address, v.4, represented by a dotted-quad string, i.e. '1.2.3.4'. >>> vtor.check('ip_addr', '1 ') '1' >>> vtor.check('ip_addr', ' 1.2') '1.2' >>> vtor.check('ip_addr', ' 1.2.3 ') '1.2.3' >>> vtor.check('ip_addr', '1.2.3.4') '1.2.3.4' >>> vtor.check('ip_addr', '0.0.0.0') '0.0.0.0' >>> vtor.check('ip_addr', '255.255.255.255') '255.255.255.255' >>> vtor.check('ip_addr', '255.255.255.256') Traceback (most recent call last): VdtValueError: the value "255.255.255.256" is unacceptable. >>> vtor.check('ip_addr', '1.2.3.4.5') Traceback (most recent call last): VdtValueError: the value "1.2.3.4.5" is unacceptable. >>> vtor.check('ip_addr', 0) Traceback (most recent call last): VdtTypeError: the value "0" is of the wrong type. """ if not isinstance(value, basestring): raise VdtTypeError(value) value = value.strip() try: dottedQuadToNum(value) except ValueError: raise VdtValueError(value) return value def is_list(value, min=None, max=None): """ Check that the value is a list of values. You can optionally specify the minimum and maximum number of members. It does no check on list members. >>> vtor.check('list', ()) [] >>> vtor.check('list', []) [] >>> vtor.check('list', (1, 2)) [1, 2] >>> vtor.check('list', [1, 2]) [1, 2] >>> vtor.check('list(3)', (1, 2)) Traceback (most recent call last): VdtValueTooShortError: the value "(1, 2)" is too short. >>> vtor.check('list(max=5)', (1, 2, 3, 4, 5, 6)) Traceback (most recent call last): VdtValueTooLongError: the value "(1, 2, 3, 4, 5, 6)" is too long. >>> vtor.check('list(min=3, max=5)', (1, 2, 3, 4)) [1, 2, 3, 4] >>> vtor.check('list', 0) Traceback (most recent call last): VdtTypeError: the value "0" is of the wrong type. >>> vtor.check('list', '12') Traceback (most recent call last): VdtTypeError: the value "12" is of the wrong type. """ (min_len, max_len) = _is_num_param(('min', 'max'), (min, max)) if isinstance(value, basestring): raise VdtTypeError(value) try: num_members = len(value) except TypeError: raise VdtTypeError(value) if min_len is not None and num_members < min_len: raise VdtValueTooShortError(value) if max_len is not None and num_members > max_len: raise VdtValueTooLongError(value) return list(value) def is_tuple(value, min=None, max=None): """ Check that the value is a tuple of values. 
You can optionally specify the minimum and maximum number of members. It does no check on members. >>> vtor.check('tuple', ()) () >>> vtor.check('tuple', []) () >>> vtor.check('tuple', (1, 2)) (1, 2) >>> vtor.check('tuple', [1, 2]) (1, 2) >>> vtor.check('tuple(3)', (1, 2)) Traceback (most recent call last): VdtValueTooShortError: the value "(1, 2)" is too short. >>> vtor.check('tuple(max=5)', (1, 2, 3, 4, 5, 6)) Traceback (most recent call last): VdtValueTooLongError: the value "(1, 2, 3, 4, 5, 6)" is too long. >>> vtor.check('tuple(min=3, max=5)', (1, 2, 3, 4)) (1, 2, 3, 4) >>> vtor.check('tuple', 0) Traceback (most recent call last): VdtTypeError: the value "0" is of the wrong type. >>> vtor.check('tuple', '12') Traceback (most recent call last): VdtTypeError: the value "12" is of the wrong type. """ return tuple(is_list(value, min, max)) def is_string(value, min=None, max=None): """ Check that the supplied value is a string. You can optionally specify the minimum and maximum number of members. >>> vtor.check('string', '0') '0' >>> vtor.check('string', 0) Traceback (most recent call last): VdtTypeError: the value "0" is of the wrong type. >>> vtor.check('string(2)', '12') '12' >>> vtor.check('string(2)', '1') Traceback (most recent call last): VdtValueTooShortError: the value "1" is too short. >>> vtor.check('string(min=2, max=3)', '123') '123' >>> vtor.check('string(min=2, max=3)', '1234') Traceback (most recent call last): VdtValueTooLongError: the value "1234" is too long. """ if not isinstance(value, basestring): raise VdtTypeError(value) (min_len, max_len) = _is_num_param(('min', 'max'), (min, max)) try: num_members = len(value) except TypeError: raise VdtTypeError(value) if min_len is not None and num_members < min_len: raise VdtValueTooShortError(value) if max_len is not None and num_members > max_len: raise VdtValueTooLongError(value) return value def is_int_list(value, min=None, max=None): """ Check that the value is a list of integers. You can optionally specify the minimum and maximum number of members. Each list member is checked that it is an integer. >>> vtor.check('int_list', ()) [] >>> vtor.check('int_list', []) [] >>> vtor.check('int_list', (1, 2)) [1, 2] >>> vtor.check('int_list', [1, 2]) [1, 2] >>> vtor.check('int_list', [1, 'a']) Traceback (most recent call last): VdtTypeError: the value "a" is of the wrong type. """ return [is_integer(mem) for mem in is_list(value, min, max)] def is_bool_list(value, min=None, max=None): """ Check that the value is a list of booleans. You can optionally specify the minimum and maximum number of members. Each list member is checked that it is a boolean. >>> vtor.check('bool_list', ()) [] >>> vtor.check('bool_list', []) [] >>> check_res = vtor.check('bool_list', (True, False)) >>> check_res == [True, False] 1 >>> check_res = vtor.check('bool_list', [True, False]) >>> check_res == [True, False] 1 >>> vtor.check('bool_list', [True, 'a']) Traceback (most recent call last): VdtTypeError: the value "a" is of the wrong type. """ return [is_boolean(mem) for mem in is_list(value, min, max)] def is_float_list(value, min=None, max=None): """ Check that the value is a list of floats. You can optionally specify the minimum and maximum number of members. Each list member is checked that it is a float. 
>>> vtor.check('float_list', ()) [] >>> vtor.check('float_list', []) [] >>> vtor.check('float_list', (1, 2.0)) [1.0, 2.0] >>> vtor.check('float_list', [1, 2.0]) [1.0, 2.0] >>> vtor.check('float_list', [1, 'a']) Traceback (most recent call last): VdtTypeError: the value "a" is of the wrong type. """ return [is_float(mem) for mem in is_list(value, min, max)] def is_string_list(value, min=None, max=None): """ Check that the value is a list of strings. You can optionally specify the minimum and maximum number of members. Each list member is checked that it is a string. >>> vtor.check('string_list', ()) [] >>> vtor.check('string_list', []) [] >>> vtor.check('string_list', ('a', 'b')) ['a', 'b'] >>> vtor.check('string_list', ['a', 1]) Traceback (most recent call last): VdtTypeError: the value "1" is of the wrong type. >>> vtor.check('string_list', 'hello') Traceback (most recent call last): VdtTypeError: the value "hello" is of the wrong type. """ if isinstance(value, basestring): raise VdtTypeError(value) return [is_string(mem) for mem in is_list(value, min, max)] def is_ip_addr_list(value, min=None, max=None): """ Check that the value is a list of IP addresses. You can optionally specify the minimum and maximum number of members. Each list member is checked that it is an IP address. >>> vtor.check('ip_addr_list', ()) [] >>> vtor.check('ip_addr_list', []) [] >>> vtor.check('ip_addr_list', ('1.2.3.4', '5.6.7.8')) ['1.2.3.4', '5.6.7.8'] >>> vtor.check('ip_addr_list', ['a']) Traceback (most recent call last): VdtValueError: the value "a" is unacceptable. """ return [is_ip_addr(mem) for mem in is_list(value, min, max)] def force_list(value, min=None, max=None): """ Check that a value is a list, coercing strings into a list with one member. Useful where users forget the trailing comma that turns a single value into a list. You can optionally specify the minimum and maximum number of members. A minumum of greater than one will fail if the user only supplies a string. >>> vtor.check('force_list', ()) [] >>> vtor.check('force_list', []) [] >>> vtor.check('force_list', 'hello') ['hello'] """ if not isinstance(value, (list, tuple)): value = [value] return is_list(value, min, max) fun_dict = { 'integer': is_integer, 'float': is_float, 'ip_addr': is_ip_addr, 'string': is_string, 'boolean': is_boolean, } def is_mixed_list(value, *args): """ Check that the value is a list. Allow specifying the type of each member. Work on lists of specific lengths. You specify each member as a positional argument specifying type Each type should be one of the following strings : 'integer', 'float', 'ip_addr', 'string', 'boolean' So you can specify a list of two strings, followed by two integers as : mixed_list('string', 'string', 'integer', 'integer') The length of the list must match the number of positional arguments you supply. >>> mix_str = "mixed_list('integer', 'float', 'ip_addr', 'string', 'boolean')" >>> check_res = vtor.check(mix_str, (1, 2.0, '1.2.3.4', 'a', True)) >>> check_res == [1, 2.0, '1.2.3.4', 'a', True] 1 >>> check_res = vtor.check(mix_str, ('1', '2.0', '1.2.3.4', 'a', 'True')) >>> check_res == [1, 2.0, '1.2.3.4', 'a', True] 1 >>> vtor.check(mix_str, ('b', 2.0, '1.2.3.4', 'a', True)) Traceback (most recent call last): VdtTypeError: the value "b" is of the wrong type. >>> vtor.check(mix_str, (1, 2.0, '1.2.3.4', 'a')) Traceback (most recent call last): VdtValueTooShortError: the value "(1, 2.0, '1.2.3.4', 'a')" is too short. 
>>> vtor.check(mix_str, (1, 2.0, '1.2.3.4', 'a', 1, 'b')) Traceback (most recent call last): VdtValueTooLongError: the value "(1, 2.0, '1.2.3.4', 'a', 1, 'b')" is too long. >>> vtor.check(mix_str, 0) Traceback (most recent call last): VdtTypeError: the value "0" is of the wrong type. This test requires an elaborate setup, because of a change in error string output from the interpreter between Python 2.2 and 2.3 . >>> res_seq = ( ... 'passed an incorrect value "', ... 'yoda', ... '" for parameter "mixed_list".', ... ) >>> res_str = "'".join(res_seq) >>> try: ... vtor.check('mixed_list("yoda")', ('a')) ... except VdtParamError, err: ... str(err) == res_str 1 """ try: length = len(value) except TypeError: raise VdtTypeError(value) if length < len(args): raise VdtValueTooShortError(value) elif length > len(args): raise VdtValueTooLongError(value) try: return [fun_dict[arg](val) for arg, val in zip(args, value)] except KeyError, e: raise VdtParamError('mixed_list', e) def is_option(value, *options): """ This check matches the value to any of a set of options. >>> vtor.check('option("yoda", "jedi")', 'yoda') 'yoda' >>> vtor.check('option("yoda", "jedi")', 'jed') Traceback (most recent call last): VdtValueError: the value "jed" is unacceptable. >>> vtor.check('option("yoda", "jedi")', 0) Traceback (most recent call last): VdtTypeError: the value "0" is of the wrong type. """ if not isinstance(value, basestring): raise VdtTypeError(value) if not value in options: raise VdtValueError(value) return value def _test(value, *args, **keywargs): """ A function that exists for test purposes. >>> checks = [ ... '3, 6, min=1, max=3, test=list(a, b, c)', ... '3', ... '3, 6', ... '3,', ... 'min=1, test="a b c"', ... 'min=5, test="a, b, c"', ... 'min=1, max=3, test="a, b, c"', ... 'min=-100, test=-99', ... 'min=1, max=3', ... '3, 6, test="36"', ... '3, 6, test="a, b, c"', ... '3, max=3, test=list("a", "b", "c")', ... '''3, max=3, test=list("'a'", 'b', "x=(c)")''', ... "test='x=fish(3)'", ... ] >>> v = Validator({'test': _test}) >>> for entry in checks: ... print v.check(('test(%s)' % entry), 3) (3, ('3', '6'), {'test': ['a', 'b', 'c'], 'max': '3', 'min': '1'}) (3, ('3',), {}) (3, ('3', '6'), {}) (3, ('3',), {}) (3, (), {'test': 'a b c', 'min': '1'}) (3, (), {'test': 'a, b, c', 'min': '5'}) (3, (), {'test': 'a, b, c', 'max': '3', 'min': '1'}) (3, (), {'test': '-99', 'min': '-100'}) (3, (), {'max': '3', 'min': '1'}) (3, ('3', '6'), {'test': '36'}) (3, ('3', '6'), {'test': 'a, b, c'}) (3, ('3',), {'test': ['a', 'b', 'c'], 'max': '3'}) (3, ('3',), {'test': ["'a'", 'b', 'x=(c)'], 'max': '3'}) (3, (), {'test': 'x=fish(3)'}) >>> v = Validator() >>> v.check('integer(default=6)', '3') 3 >>> v.check('integer(default=6)', None, True) 6 >>> v.get_default_value('integer(default=6)') 6 >>> v.get_default_value('float(default=6)') 6.0 >>> v.get_default_value('pass(default=None)') >>> v.get_default_value("string(default='None')") 'None' >>> v.get_default_value('pass') Traceback (most recent call last): KeyError: 'Check "pass" has no default value.' 
>>> v.get_default_value('pass(default=list(1, 2, 3, 4))') ['1', '2', '3', '4'] >>> v = Validator() >>> v.check("pass(default=None)", None, True) >>> v.check("pass(default='None')", None, True) 'None' >>> v.check('pass(default="None")', None, True) 'None' >>> v.check('pass(default=list(1, 2, 3, 4))', None, True) ['1', '2', '3', '4'] Bug test for unicode arguments >>> v = Validator() >>> v.check(u'string(min=4)', u'test') u'test' >>> v = Validator() >>> v.get_default_value(u'string(min=4, default="1234")') u'1234' >>> v.check(u'string(min=4, default="1234")', u'test') u'test' >>> v = Validator() >>> default = v.get_default_value('string(default=None)') >>> default == None 1 """ return (value, args, keywargs) def _test2(): """ >>> >>> v = Validator() >>> v.get_default_value('string(default="#ff00dd")') '#ff00dd' >>> v.get_default_value('integer(default=3) # comment') 3 """ def _test3(): r""" >>> vtor.check('string(default="")', '', missing=True) '' >>> vtor.check('string(default="\n")', '', missing=True) '\n' >>> print vtor.check('string(default="\n")', '', missing=True), >>> vtor.check('string()', '\n') '\n' >>> vtor.check('string(default="\n\n\n")', '', missing=True) '\n\n\n' >>> vtor.check('string()', 'random \n text goes here\n\n') 'random \n text goes here\n\n' >>> vtor.check('string(default=" \nrandom text\ngoes \n here\n\n ")', ... '', missing=True) ' \nrandom text\ngoes \n here\n\n ' >>> vtor.check("string(default='\n\n\n')", '', missing=True) '\n\n\n' >>> vtor.check("option('\n','a','b',default='\n')", '', missing=True) '\n' >>> vtor.check("string_list()", ['foo', '\n', 'bar']) ['foo', '\n', 'bar'] >>> vtor.check("string_list(default=list('\n'))", '', missing=True) ['\n'] """ if __name__ == '__main__': # run the code tests in doctest format import sys import doctest m = sys.modules.get('__main__') globs = m.__dict__.copy() globs.update({ 'vtor': Validator(), }) doctest.testmod(m, globs=globs) nipy-0.3.0/nipy/fixes/000077500000000000000000000000001210344137400145655ustar00rootroot00000000000000nipy-0.3.0/nipy/fixes/README.txt000066400000000000000000000006061210344137400162650ustar00rootroot00000000000000This directory is meant to contain fixes to external packages, such as scipy, numpy that are meant to eventually be moved upstream to these packages. When these changes find their way upstream and are released, they can be deleted from the "fixes" directory when new versions of NIPY are released. PACKAGES/MODULES: --------- scipy/stats_models: corresponds to module "scipy.stats.models"nipy-0.3.0/nipy/fixes/__init__.py000066400000000000000000000012671210344137400167040ustar00rootroot00000000000000# We import numpy fixes during init of the testing package. We need to delay # import of the testing package until after it has initialized from os.path import dirname # Cache for the actual testing functin _tester = None def test(*args, **kwargs): """ test function for fixes subpackage This function defers import of the testing machinery so it can import from us first. 
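# A minimal usage sketch of the Validator API defined above, mirroring its own
# doctests; the check strings are illustrative and assume the `Validator` class
# from the preceding module is in scope.
vtor = Validator()
# parse the check string, convert the string value and enforce the bounds
assert vtor.check('integer(0, 100)', '42') == 42
# when the value is missing, the declared default is converted and returned
assert vtor.check('string(default="unknown")', '', missing=True) == 'unknown'
# defaults can also be queried directly, already converted to the right type
assert vtor.get_default_value('integer(default=6)') == 6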
See nipy.test docstring for parameters and return values """ global _tester if _tester is None: from nipy.testing import Tester _tester = Tester(dirname(__file__)).test return _tester(*args, **kwargs) # Remind nose not to test the test function test.__test__ = False nipy-0.3.0/nipy/fixes/nibabel/000077500000000000000000000000001210344137400161615ustar00rootroot00000000000000nipy-0.3.0/nipy/fixes/nibabel/__init__.py000066400000000000000000000001021210344137400202630ustar00rootroot00000000000000# Init for nibabel fixes from .orientations import io_orientation nipy-0.3.0/nipy/fixes/nibabel/orientations.py000066400000000000000000000074451210344137400212630ustar00rootroot00000000000000# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## # # See COPYING file distributed along with the NiBabel package for the # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """ Copy of nibabel io_orientation function from nibabel > 1.2.0 This copy fixes a bug when there are columns of all zero in the affine. See https://github.com/nipy/nibabel/pull/128 Remove when we depend on nibabel > 1.2.0 """ import numpy as np import numpy.linalg as npl def io_orientation(affine, tol=None): ''' Orientation of input axes in terms of output axes for `affine` Valid for an affine transformation from ``p`` dimensions to ``q`` dimensions (``affine.shape == (q + 1, p + 1)``). The calculated orientations can be used to transform associated arrays to best match the output orientations. If ``p`` > ``q``, then some of the output axes should be considered dropped in this orientation. Parameters ---------- affine : (q+1, p+1) ndarray-like Transformation affine from ``p`` inputs to ``q`` outputs. Usually this will be a shape (4,4) matrix, transforming 3 inputs to 3 outputs, but the code also handles the more general case tol : {None, float}, optional threshold below which SVD values of the affine are considered zero. If `tol` is None, and ``S`` is an array with singular values for `affine`, and ``eps`` is the epsilon value for datatype of ``S``, then `tol` set to ``S.max() * eps``. Returns ------- orientations : (p, 2) ndarray one row per input axis, where the first value in each row is the closest corresponding output axis. The second value in each row is 1 if the input axis is in the same direction as the corresponding output axis and -1 if it is in the opposite direction. If a row is [np.nan, np.nan], which can happen when p > q, then this row should be considered dropped. ''' affine = np.asarray(affine) q, p = affine.shape[0]-1, affine.shape[1]-1 # extract the underlying rotation, zoom, shear matrix RZS = affine[:q, :p] zooms = np.sqrt(np.sum(RZS * RZS, axis=0)) # Zooms can be zero, in which case all elements in the column are zero, and # we can leave them as they are zooms[zooms == 0] = 1 RS = RZS / zooms # Transform below is polar decomposition, returning the closest # shearless matrix R to RS P, S, Qs = npl.svd(RS) # Threshold the singular values to determine the rank. if tol is None: tol = S.max() * np.finfo(S.dtype).eps keep = (S > tol) R = np.dot(P[:, keep], Qs[keep]) # the matrix R is such that np.dot(R,R.T) is projection onto the # columns of P[:,keep] and np.dot(R.T,R) is projection onto the rows # of Qs[keep]. R (== np.dot(R, np.eye(p))) gives rotation of the # unit input vectors to output coordinates. 
Therefore, the row # index of abs max R[:,N], is the output axis changing most as input # axis N changes. In case there are ties, we choose the axes # iteratively, removing used axes from consideration as we go ornt = np.ones((p, 2), dtype=np.int8) * np.nan for in_ax in range(p): col = R[:, in_ax] if not np.alltrue(np.equal(col, 0)): out_ax = np.argmax(np.abs(col)) ornt[in_ax, 0] = out_ax assert col[out_ax] != 0 if col[out_ax] < 0: ornt[in_ax, 1] = -1 else: ornt[in_ax, 1] = 1 # remove the identified axis from further consideration, by # zeroing out the corresponding row in R R[out_ax, :] = 0 return ornt nipy-0.3.0/nipy/fixes/numpy/000077500000000000000000000000001210344137400157355ustar00rootroot00000000000000nipy-0.3.0/nipy/fixes/numpy/__init__.py000066400000000000000000000000261210344137400200440ustar00rootroot00000000000000# numpy fixes package nipy-0.3.0/nipy/fixes/numpy/testing/000077500000000000000000000000001210344137400174125ustar00rootroot00000000000000nipy-0.3.0/nipy/fixes/numpy/testing/__init__.py000066400000000000000000000000471210344137400215240ustar00rootroot00000000000000# Package init for fixes.numpy.testing nipy-0.3.0/nipy/fixes/numpy/testing/noseclasses.py000066400000000000000000000340311210344137400223070ustar00rootroot00000000000000# These classes implement a doctest runner plugin for nose, a "known failure" # error class, and a customized TestProgram for NumPy. # Because this module imports nose directly, it should not # be used except by nosetester.py to avoid a general NumPy # dependency on nose. import os import doctest import nose from nose.plugins import doctests as npd from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin from nose.plugins.base import Plugin from nose.util import src import numpy from nosetester import get_package_name import inspect # Some of the classes in this module begin with 'Numpy' to clearly distinguish # them from the plethora of very similar names from nose/unittest/doctest #----------------------------------------------------------------------------- # Modified version of the one in the stdlib, that fixes a python bug (doctests # not found in extension modules, http://bugs.python.org/issue3158) class NumpyDocTestFinder(doctest.DocTestFinder): def _from_module(self, module, object): """ Return true if the given object is defined in the given module. """ if module is None: #print '_fm C1' # dbg return True elif inspect.isfunction(object): #print '_fm C2' # dbg return module.__dict__ is object.func_globals elif inspect.isbuiltin(object): #print '_fm C2-1' # dbg return module.__name__ == object.__module__ elif inspect.isclass(object): #print '_fm C3' # dbg return module.__name__ == object.__module__ elif inspect.ismethod(object): # This one may be a bug in cython that fails to correctly set the # __module__ attribute of methods, but since the same error is easy # to make by extension code writers, having this safety in place # isn't such a bad idea #print '_fm C3-1' # dbg return module.__name__ == object.im_class.__module__ elif inspect.getmodule(object) is not None: #print '_fm C4' # dbg #print 'C4 mod',module,'obj',object # dbg return module is inspect.getmodule(object) elif hasattr(object, '__module__'): #print '_fm C5' # dbg return module.__name__ == object.__module__ elif isinstance(object, property): #print '_fm C6' # dbg return True # [XX] no way not be sure. 
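# A short sketch of how the io_orientation copy above is used, following its
# docstring; the identity affine is just an illustrative input, not data from
# this archive.
import numpy as np

affine = np.eye(4)             # a 4x4 voxel-to-world affine with no flips or shears
ornt = io_orientation(affine)  # one (closest output axis, direction) row per input axis
# for the identity affine each input axis maps to itself with direction +1
assert np.allclose(ornt, [[0, 1], [1, 1], [2, 1]])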
else: raise ValueError("object must be a class or function") def _find(self, tests, obj, name, module, source_lines, globs, seen): """ Find tests for the given object and any contained objects, and add them to `tests`. """ doctest.DocTestFinder._find(self,tests, obj, name, module, source_lines, globs, seen) # Below we re-run pieces of the above method with manual modifications, # because the original code is buggy and fails to correctly identify # doctests in extension modules. # Local shorthands from inspect import isroutine, isclass, ismodule, isfunction, \ ismethod # Look for tests in a module's contained objects. if ismodule(obj) and self._recurse: for valname, val in obj.__dict__.items(): valname1 = '%s.%s' % (name, valname) if ( (isroutine(val) or isclass(val)) and self._from_module(module, val) ): self._find(tests, val, valname1, module, source_lines, globs, seen) # Look for tests in a class's contained objects. if isclass(obj) and self._recurse: #print 'RECURSE into class:',obj # dbg for valname, val in obj.__dict__.items(): #valname1 = '%s.%s' % (name, valname) # dbg #print 'N',name,'VN:',valname,'val:',str(val)[:77] # dbg # Special handling for staticmethod/classmethod. if isinstance(val, staticmethod): val = getattr(obj, valname) if isinstance(val, classmethod): val = getattr(obj, valname).im_func # Recurse to methods, properties, and nested classes. if ((isfunction(val) or isclass(val) or ismethod(val) or isinstance(val, property)) and self._from_module(module, val)): valname = '%s.%s' % (name, valname) self._find(tests, val, valname, module, source_lines, globs, seen) # second-chance checker; if the default comparison doesn't # pass, then see if the expected output string contains flags that # tell us to ignore the output class NumpyOutputChecker(doctest.OutputChecker): def check_output(self, want, got, optionflags): ret = doctest.OutputChecker.check_output(self, want, got, optionflags) if not ret: if "#random" in want: return True # it would be useful to normalize endianness so that # bigendian machines don't fail all the tests (and there are # actually some bigendian examples in the doctests). Let's try # making them all little endian got = got.replace("'>","'<") want= want.replace("'>","'<") # try to normalize out 32 and 64 bit default int sizes for sz in [4,8]: got = got.replace("'>> np.testing.nosetester.get_package_name('nonsense') 'numpy' """ fullpath = filepath[:] pkg_name = [] while 'site-packages' in filepath or 'dist-packages' in filepath: filepath, p2 = os.path.split(filepath) if p2 in ('site-packages', 'dist-packages'): break pkg_name.append(p2) # if package name determination failed, just default to numpy/scipy if not pkg_name: if 'scipy' in fullpath: return 'scipy' else: return 'numpy' # otherwise, reverse to get correct order and return pkg_name.reverse() # don't include the outer egg directory if pkg_name[0].endswith('.egg'): pkg_name.pop(0) return '.'.join(pkg_name) def import_nose(): """ Import nose only when needed. 
""" fine_nose = True minimum_nose_version = (0,10,0) try: import nose from nose.tools import raises except ImportError: fine_nose = False else: if nose.__versioninfo__ < minimum_nose_version: fine_nose = False if not fine_nose: msg = 'Need nose >= %d.%d.%d for tests - see ' \ 'http://somethingaboutorange.com/mrl/projects/nose' % \ minimum_nose_version raise ImportError(msg) return nose def run_module_suite(file_to_run = None): if file_to_run is None: f = sys._getframe(1) file_to_run = f.f_locals.get('__file__', None) if file_to_run is None: raise AssertionError import_nose().run(argv=['',file_to_run]) class NoseTester(object): """ Nose test runner. This class is made available as numpy.testing.Tester, and a test function is typically added to a package's __init__.py like so:: from numpy.testing import Tester test = Tester().test Calling this test function finds and runs all tests associated with the package and all its sub-packages. Attributes ---------- package_path : str Full path to the package to test. package_name : str Name of the package to test. Parameters ---------- package : module, str or None The package to test. If a string, this should be the full path to the package. If None (default), `package` is set to the module from which `NoseTester` is initialized. """ # Stuff to exclude from tests. These are from numpy.distutils excludes = ['f2py_ext', 'f2py_f90_ext', 'gen_ext', 'pyrex_ext', 'swig_ext'] def __init__(self, package=None): ''' Test class init Parameters ---------- package : string or module If string, gives full path to package If None, extract calling module path Default is None ''' package_name = None if package is None: f = sys._getframe(1) package_path = f.f_locals.get('__file__', None) if package_path is None: raise AssertionError package_path = os.path.dirname(package_path) package_name = f.f_locals.get('__name__', None) elif isinstance(package, type(os)): package_path = os.path.dirname(package.__file__) package_name = getattr(package, '__name__', None) else: package_path = str(package) self.package_path = package_path # find the package name under test; this name is used to limit coverage # reporting (if enabled) if package_name is None: package_name = get_package_name(package_path) self.package_name = package_name def _test_argv(self, label, verbose, extra_argv): ''' Generate argv for nosetest command Parameters ---------- label : {'fast', 'full', '', attribute identifier}, optional see ``test`` docstring verbose : int, optional Verbosity value for test outputs, in the range 1-10. Default is 1. extra_argv : list, optional List with any extra arguments to pass to nosetests. 
Returns ------- argv : list command line arguments that will be passed to nose ''' argv = [__file__, self.package_path, '-s'] if label and label != 'full': if not isinstance(label, basestring): raise TypeError('Selection label should be a string') if label == 'fast': label = 'not slow' argv += ['-A', label] argv += ['--verbosity', str(verbose)] if extra_argv: argv += extra_argv return argv def _show_system_info(self): nose = import_nose() import numpy print "NumPy version %s" % numpy.__version__ npdir = os.path.dirname(numpy.__file__) print "NumPy is installed in %s" % npdir if 'scipy' in self.package_name: import scipy print "SciPy version %s" % scipy.__version__ spdir = os.path.dirname(scipy.__file__) print "SciPy is installed in %s" % spdir pyversion = sys.version.replace('\n','') print "Python version %s" % pyversion print "nose version %d.%d.%d" % nose.__versioninfo__ def _get_custom_doctester(self): """ Return instantiated plugin for doctests Allows subclassing of this class to override doctester A return value of None means use the nose builtin doctest plugin """ from noseclasses import NumpyDoctest return NumpyDoctest() def prepare_test_args(self, label='fast', verbose=1, extra_argv=None, doctests=False, coverage=False): """ Run tests for module using nose. This method does the heavy lifting for the `test` method. It takes all the same arguments, for details see `test`. See Also -------- test """ # fail with nice error message if nose is not present import_nose() # compile argv argv = self._test_argv(label, verbose, extra_argv) # bypass tests noted for exclude for ename in self.excludes: argv += ['--exclude', ename] # our way of doing coverage if coverage: argv+=['--cover-package=%s' % self.package_name, '--with-coverage', '--cover-tests', '--cover-inclusive', '--cover-erase'] # construct list of plugins import nose.plugins.builtin from noseclasses import KnownFailure, Unplugger plugins = [KnownFailure()] plugins += [p() for p in nose.plugins.builtin.plugins] # add doctesting if required doctest_argv = '--with-doctest' in argv if doctests == False and doctest_argv: doctests = True plug = self._get_custom_doctester() if plug is None: # use standard doctesting if doctests and not doctest_argv: argv += ['--with-doctest'] else: # custom doctesting if doctest_argv: # in fact the unplugger would take care of this argv.remove('--with-doctest') plugins += [Unplugger('doctest'), plug] if doctests: argv += ['--with-' + plug.name] return argv, plugins def test(self, label='fast', verbose=1, extra_argv=None, doctests=False, coverage=False): """ Run tests for module using nose. Parameters ---------- label : {'fast', 'full', '', attribute identifier}, optional Identifies the tests to run. This can be a string to pass to the nosetests executable with the '-A' option, or one of several special values. Special values are: * 'fast' - the default - which corresponds to the ``nosetests -A`` option of 'not slow'. * 'full' - fast (as above) and slow tests as in the 'no -A' option to nosetests - this is the same as ''. * None or '' - run all tests. attribute_identifier - string passed directly to nosetests as '-A'. verbose : int, optional Verbosity value for test outputs, in the range 1-10. Default is 1. extra_argv : list, optional List with any extra arguments to pass to nosetests. doctests : bool, optional If True, run doctests in module. Default is False. coverage : bool, optional If True, report coverage of NumPy code. Default is False. (This requires the `coverage module: `_). 
Returns ------- result : object Returns the result of running the tests as a ``nose.result.TextTestResult`` object. Notes ----- Each NumPy module exposes `test` in its namespace to run all tests for it. For example, to run all tests for numpy.lib: >>> np.lib.test() #doctest: +SKIP Examples -------- >>> result = np.lib.test() #doctest: +SKIP Running unit tests for numpy.lib ... Ran 976 tests in 3.933s OK >>> result.errors #doctest: +SKIP [] >>> result.knownfail #doctest: +SKIP [] """ # cap verbosity at 3 because nose becomes *very* verbose beyond that verbose = min(verbose, 3) import utils utils.verbose = verbose if doctests: print "Running unit tests and doctests for %s" % self.package_name else: print "Running unit tests for %s" % self.package_name self._show_system_info() # reset doctest state on every run import doctest doctest.master = None argv, plugins = self.prepare_test_args(label, verbose, extra_argv, doctests, coverage) from noseclasses import NumpyTestProgram t = NumpyTestProgram(argv=argv, exit=False, plugins=plugins) return t.result def bench(self, label='fast', verbose=1, extra_argv=None): """ Run benchmarks for module using nose. Parameters ---------- label : {'fast', 'full', '', attribute identifier}, optional Identifies the benchmarks to run. This can be a string to pass to the nosetests executable with the '-A' option, or one of several special values. Special values are: * 'fast' - the default - which corresponds to the ``nosetests -A`` option of 'not slow'. * 'full' - fast (as above) and slow benchmarks as in the 'no -A' option to nosetests - this is the same as ''. * None or '' - run all tests. attribute_identifier - string passed directly to nosetests as '-A'. verbose : int, optional Verbosity value for benchmark outputs, in the range 1-10. Default is 1. extra_argv : list, optional List with any extra arguments to pass to nosetests. Returns ------- success : bool Returns True if running the benchmarks works, False if an error occurred. Notes ----- Benchmarks are like tests, but have names starting with "bench" instead of "test", and can be found under the "benchmarks" sub-directory of the module. Each NumPy module exposes `bench` in its namespace to run all benchmarks for it. Examples -------- >>> success = np.lib.bench() #doctest: +SKIP Running benchmarks for numpy.lib ... using 562341 items: unique: 0.11 unique1d: 0.11 ratio: 1.0 nUnique: 56230 == 56230 ... 
OK >>> success #doctest: +SKIP True """ print "Running benchmarks for %s" % self.package_name self._show_system_info() argv = self._test_argv(label, verbose, extra_argv) argv += ['--match', r'(?:^|[\\b_\\.%s-])[Bb]ench' % os.sep] # import nose or make informative error nose = import_nose() # get plugin to disable doctests from noseclasses import Unplugger add_plugins = [Unplugger('doctest')] return nose.run(argv=argv, addplugins=add_plugins) nipy-0.3.0/nipy/fixes/numpy/testing/utils.py000066400000000000000000000001361210344137400211240ustar00rootroot00000000000000# Allow numpy fixes noseclasses to do local import of utils from numpy.testing.utils import * nipy-0.3.0/nipy/fixes/setup.py000066400000000000000000000011511210344137400162750ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('fixes', parent_package, top_path) config.add_subpackage('sympy') config.add_subpackage('sympy.utilities') config.add_subpackage('numpy') config.add_subpackage('numpy.testing') config.add_subpackage('nibabel') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) nipy-0.3.0/nipy/fixes/sympy/000077500000000000000000000000001210344137400157465ustar00rootroot00000000000000nipy-0.3.0/nipy/fixes/sympy/__init__.py000066400000000000000000000001031210344137400200510ustar00rootroot00000000000000# Package for sympy fixes from .utilities.lambdify import lambdify nipy-0.3.0/nipy/fixes/sympy/utilities/000077500000000000000000000000001210344137400177615ustar00rootroot00000000000000nipy-0.3.0/nipy/fixes/sympy/utilities/__init__.py000066400000000000000000000000441210344137400220700ustar00rootroot00000000000000# Package for sympy utilities fixes nipy-0.3.0/nipy/fixes/sympy/utilities/compatibility.py000066400000000000000000000056121210344137400232100ustar00rootroot00000000000000""" Copy out of the top of sympy.core.compatibility as of b8aa2de87d537eddde044c13f2eaaab99a5dcfe7 This can be deleted when we depend on sympy 0.7.0 or later """ """ Reimplementations of constructs introduced in later versions of Python than we support. """ # These are in here because telling if something is an iterable just by calling # hasattr(obj, "__iter__") behaves differently in Python 2 and Python 3. In # particular, hasattr(str, "__iter__") is False in Python 2 and True in Python 3. # I think putting them here also makes it easier to use them in the core. def iterable(i, exclude=(basestring, dict)): """ Return a boolean indicating whether i is an iterable in the sympy sense. When sympy is working with iterables, it is almost always assuming that the iterable is not a string or a mapping, so those are excluded by default. If you want a pure python definition, make exclude=None. To exclude multiple items, pass them as a tuple. See also: is_sequence Examples: >>> things = [[1], (1,), set([1]), (j for j in [1, 2]), {1:2}, '1', 1] In the test below, Python 2 prints output including e.g ````. >>> for i in things: #doctest: +IGNORE_OUTPUT ... 
print iterable(i), type(i) True True True True False False False >>> iterable({}, exclude=None) True >>> iterable({}, exclude=str) True >>> iterable("no", exclude=str) False """ try: iter(i) except TypeError: return False if exclude: return not isinstance(i, exclude) return True def is_sequence(i, include=None): """ Return a boolean indicating whether i is a sequence in the sympy sense. If anything that fails the test below should be included as being a sequence for your application, set 'include' to that object's type; multiple types should be passed as a tuple of types. Note: although generators can generate a sequence, they often need special handling to make sure their elements are captured before the generator is exhausted, so these are not included by default in the definition of a sequence. See also: iterable Examples: >>> from nipy.fixes.sympy.utilities.compatibility import is_sequence >>> from types import GeneratorType >>> is_sequence([]) True >>> is_sequence(set()) False >>> is_sequence('abc') False >>> is_sequence('abc', include=str) True >>> generator = (c for c in 'abc') >>> is_sequence(generator) False >>> is_sequence(generator, include=(str, GeneratorType)) True """ return (hasattr(i, '__getitem__') and iterable(i) or bool(include) and isinstance(i, include)) nipy-0.3.0/nipy/fixes/sympy/utilities/lambdify.py000066400000000000000000000334101210344137400221230ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ An implementation of Functions in sympy that allow 'implemented' functions that can be evaluated when 'lambdified'. This code is in sympy 0.7.0. We'll remove this copy in favor of the code in sympy when - say - 0.7 is in the current and the previous version of Ubuntu. This module licensed as is sympy, under the BSD license Sympy docstring: This module provides convenient functions to transform sympy expressions to lambda functions which can be used to calculate numerical values very fast. """ from __future__ import division from .compatibility import is_sequence # There follows a literal copy of sympy.utilities.lambdify # commit b8aa2de87d537eddde044c13f2eaaab99a5dcfe7 # except where marked by "#NIPY-EDIT" import inspect # These are the namespaces the lambda functions will use. MATH = {} MPMATH = {} NUMPY = {} SYMPY = {} # Default namespaces, letting us define translations that can't be defined # by simple variable maps, like I => 1j # These are separate from the names above because the above names are modified # throughout this file, whereas these should remain unmodified. MATH_DEFAULT = {} MPMATH_DEFAULT = {} NUMPY_DEFAULT = {"I": 1j} SYMPY_DEFAULT = {} # Mappings between sympy and other modules function names. 
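# A compact sketch of what this module adds over plain sympy.lambdify: numerical
# implementations attached with implemented_function (defined below in this file)
# are picked up when the expression is lambdified. It mirrors the doctests given
# later in the module.
import sympy
from sympy.abc import x

f = implemented_function(sympy.Function('f'), lambda v: v + 1)  # attaches _imp_
func = lambdify(x, f(x))   # the generated lambda resolves f to its implementation
assert func(4) == 5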
MATH_TRANSLATIONS = { "Abs":"fabs", "ceiling":"ceil", "E":"e", "ln":"log", } MPMATH_TRANSLATIONS = { "ceiling":"ceil", "chebyshevt":"chebyt", "chebyshevu":"chebyu", "E":"e", "I":"j", "ln":"log", #"lowergamma":"lower_gamma", "oo":"inf", #"uppergamma":"upper_gamma", "LambertW":"lambertw", "Matrix":"matrix", "conjugate":"conj", } NUMPY_TRANSLATIONS = { "acos":"arccos", "acosh":"arccosh", "arg":"angle", "asin":"arcsin", "asinh":"arcsinh", "atan":"arctan", "atan2":"arctan2", "atanh":"arctanh", "ceiling":"ceil", "E":"e", "im":"imag", "ln":"log", "Matrix":"matrix", "Max":"amax", "Min":"amin", "oo":"inf", "re":"real", } # Available modules: MODULES = { "math":(MATH, MATH_DEFAULT, MATH_TRANSLATIONS, ("from math import *",)), "mpmath":(MPMATH, MPMATH_DEFAULT, MPMATH_TRANSLATIONS, ("from sympy.mpmath import *",)), "numpy":(NUMPY, NUMPY_DEFAULT, NUMPY_TRANSLATIONS, ("from numpy import *",)), "sympy":(SYMPY, SYMPY_DEFAULT, {}, ("from sympy.functions import *", "from sympy.matrices import Matrix", "from sympy import Integral, pi, oo, nan, zoo, E, I",)), } def _import(module, reload="False"): """ Creates a global translation dictionary for module. The argument module has to be one of the following strings: "math", "mpmath", "numpy", "sympy". These dictionaries map names of python functions to their equivalent in other modules. """ # TODO: rewrite this using import_module from sympy.external if not module in MODULES: raise NameError("This module can't be used for lambdification.") namespace, namespace_default, translations, import_commands = MODULES[module] # Clear namespace or exit if namespace != namespace_default: # The namespace was already generated, don't do it again if not forced. if reload: namespace.clear() namespace.update(namespace_default) else: return # It's possible that numpy is not available. for import_command in import_commands: try: exec import_command in {}, namespace except ImportError: raise ImportError("Can't import %s with command %s" % (module, import_command)) # Add translated names to namespace for sympyname, translation in translations.iteritems(): namespace[sympyname] = namespace[translation] def lambdify(args, expr, modules=None, printer=None, use_imps=True): """ Returns a lambda function for fast calculation of numerical values. Usage: >>> from sympy import sqrt, sin >>> from nipy.fixes.sympy.utilities.lambdify import lambdify >>> from sympy.abc import x, y, z >>> f = lambdify(x, x**2) >>> f(2) 4 >>> f = lambdify((x,y,z), [z,y,x]) >>> f(1,2,3) [3, 2, 1] >>> f = lambdify(x, sqrt(x)) >>> f(4) 2.0 >>> f = lambdify((x,y), sin(x*y)**2) >>> f(0, 5) 0.0 If not specified differently by the user, Sympy functions are replaced as far as possible by either python-math, numpy (if available) or mpmath functions - exactly in this order. To change this behavior, the "modules" argument can be used. It accepts: - the strings "math", "mpmath", "numpy", "sympy" - any modules (e.g. math) - dictionaries that map names of sympy functions to arbitrary functions - lists that contain a mix of the arguments above. (Entries that are first in the list have higher priority) Examples: (1) Use one of the provided modules: >> f = lambdify(x, sin(x), "math") Attention: Functions that are not in the math module will throw a name error when the lambda function is evaluated! So this would be better: >> f = lambdify(x, sin(x)*gamma(x), ("math", "mpmath", "sympy")) (2) Use some other module: >> import numpy >> f = lambdify((x,y), tan(x*y), numpy) Attention: There are naming differences between numpy and sympy. 
So if you simply take the numpy module, e.g. sympy.atan will not be translated to numpy.arctan. Use the modified module instead by passing the string "numpy": >> f = lambdify((x,y), tan(x*y), "numpy") >> f(1, 2) -2.18503986326 >> from numpy import array >> f(array([1, 2, 3]), array([2, 3, 5])) [-2.18503986 -0.29100619 -0.8559934 ] (3) Use own dictionaries: >> def my_cool_function(x): ... >> dic = {"sin" : my_cool_function} >> f = lambdify(x, sin(x), dic) Now f would look like: >> lambda x: my_cool_function(x) Functions present in `expr` can also carry their own numerical implementations, in a callable attached to the ``_imp_`` attribute. Usually you attach this using the ``implemented_function`` factory: >>> from sympy.abc import x, y, z >>> from nipy.fixes.sympy.utilities.lambdify import lambdify, implemented_function >>> from sympy import Function >>> f = implemented_function(Function('f'), lambda x : x+1) >>> func = lambdify(x, f(x)) >>> func(4) 5 ``lambdify`` always prefers ``_imp_`` implementations to implementations in other namespaces, unless the ``use_imps`` input parameter is False. """ from sympy.core.symbol import Symbol # If the user hasn't specified any modules, use what is available. if modules is None: # Use either numpy (if available) or python.math where possible. # XXX: This leads to different behaviour on different systems and # might be the reason for irreproducible errors. try: _import("numpy") modules = ("math", "numpy", "mpmath", "sympy") except ImportError: modules = ("math", "mpmath", "sympy") # Get the needed namespaces. namespaces = [] # First find any function implementations if use_imps: namespaces.append(_imp_namespace(expr)) # Check for dict before iterating if isinstance(modules, (dict, str)) or not hasattr(modules, '__iter__'): namespaces.append(modules) else: namespaces += list(modules) # fill namespace with first having highest priority namespace = {} for m in namespaces[::-1]: buf = _get_namespace(m) namespace.update(buf) if hasattr(expr, "atoms"): #Try if you can extract symbols from the expression. #Move on if expr.atoms in not implemented. syms = expr.atoms(Symbol) for term in syms: namespace.update({str(term): term}) # Create lambda function. lstr = lambdastr(args, expr, printer=printer) return eval(lstr, namespace) def _get_namespace(m): """ This is used by _lambdify to parse its arguments. """ if isinstance(m, str): _import(m) return MODULES[m][0] elif isinstance(m, dict): return m elif hasattr(m, "__dict__"): return m.__dict__ else: raise TypeError("Argument must be either a string, dict or module but it is: %s" % m) def lambdastr(args, expr, printer=None): """ Returns a string that can be evaluated to a lambda function. >>> from sympy.abc import x, y, z >>> from nipy.fixes.sympy.utilities.lambdify import lambdastr >>> lambdastr(x, x**2) 'lambda x: (x**2)' >>> lambdastr((x,y,z), [z,y,x]) 'lambda x,y,z: ([z, y, x])' """ if printer is not None: if inspect.isfunction(printer): lambdarepr = printer else: if inspect.isclass(printer): lambdarepr = lambda expr: printer().doprint(expr) else: lambdarepr = lambda expr: printer.doprint(expr) else: #XXX: This has to be done here because of circular imports from sympy.printing.lambdarepr import lambdarepr # Transform everything to strings. 
expr = lambdarepr(expr) if isinstance(args, str): pass elif hasattr(args, "__iter__"): args = ",".join(str(a) for a in args) else: args = str(args) return "lambda %s: (%s)" % (args, expr) def _imp_namespace(expr, namespace=None): """ Return namespace dict with function implementations We need to search for functions in anything that can be thrown at us - that is - anything that could be passed as `expr`. Examples include sympy expressions, as well as tuples, lists and dicts that may contain sympy expressions. Parameters ---------- expr : object Something passed to lambdify, that will generate valid code from ``str(expr)``. namespace : None or mapping Namespace to fill. None results in new empty dict Returns ------- namespace : dict dict with keys of implemented function names within `expr` and corresponding values being the numerical implementation of function Examples -------- >>> from sympy.abc import x, y, z >>> from nipy.fixes.sympy.utilities.lambdify import implemented_function, _imp_namespace >>> from sympy import Function >>> f = implemented_function(Function('f'), lambda x : x+1) >>> g = implemented_function(Function('g'), lambda x : x*10) >>> namespace = _imp_namespace(f(g(x))) >>> sorted(namespace.keys()) ['f', 'g'] """ # Delayed import to avoid circular imports from sympy.core.function import FunctionClass if namespace is None: namespace = {} # tuples, lists, dicts are valid expressions if is_sequence(expr): for arg in expr: _imp_namespace(arg, namespace) return namespace elif isinstance(expr, dict): for key, val in expr.items(): # functions can be in dictionary keys _imp_namespace(key, namespace) _imp_namespace(val, namespace) return namespace # sympy expressions may be Functions themselves func = getattr(expr, 'func', None) if isinstance(func, FunctionClass): imp = getattr(func, '_imp_', None) if not imp is None: name = expr.func.__name__ if name in namespace and namespace[name] != imp: raise ValueError('We found more than one ' 'implementation with name ' '"%s"' % name) namespace[name] = imp # and / or they may take Functions as arguments if hasattr(expr, 'args'): for arg in expr.args: _imp_namespace(arg, namespace) return namespace def implemented_function(symfunc, implementation): """ Add numerical `implementation` to function `symfunc` `symfunc` can by a Function, or a name, in which case we make an anonymous function with this name. The function is anonymous in the sense that the name is not unique in the sympy namespace. Parameters ---------- symfunc : str or ``sympy.FunctionClass`` instance If str, then create new anonymous sympy function with this as name. If `symfunc` is a sympy function, attach implementation to function implementation : callable numerical implementation of function for use in ``lambdify`` Returns ------- afunc : sympy.FunctionClass instance function with attached implementation Examples -------- >>> from sympy.abc import x, y, z >>> from nipy.fixes.sympy.utilities.lambdify import lambdify, implemented_function >>> from sympy import Function >>> f = implemented_function(Function('f'), lambda x : x+1) >>> lam_f = lambdify(x, f(x)) >>> lam_f(4) 5 """ #NIPY-EDIT: compatibility between sympy 0.6.x and 0.7.0. 
UndefinedFunction # is, er, not defined in sympy 0.6.x try: # Delayed import to avoid circular imports from sympy.core.function import UndefinedFunction as funcmaker except ImportError: from sympy import Function, FunctionClass funcmaker = lambda name : FunctionClass(Function, name) # if name, create anonymous function to hold implementation if isinstance(symfunc, basestring): symfunc = funcmaker(symfunc) #NIPY-EDIT: ends # We need to attach as a method because symfunc will be a class symfunc._imp_ = staticmethod(implementation) return symfunc # This is a nipy compatability wrapper import numpy as np aliased_function = np.deprecate_with_doc( 'Please use sympy.utilities.implemented_function')(implemented_function) nipy-0.3.0/nipy/info.py000066400000000000000000000126311210344137400147570ustar00rootroot00000000000000""" This file contains defines parameters for nipy that we use to fill settings in setup.py, the nipy top-level docstring, and for building the docs. In setup.py in particular, we exec this file, so it cannot import nipy """ # nipy version information. An empty _version_extra corresponds to a # full release. '.dev' as a _version_extra string means this is a development # version _version_major = 0 _version_minor = 3 _version_micro = 0 #_version_extra = '.dev' # For development _version_extra = '' # For release # Format expected by setup.py and doc/source/conf.py: string of form "X.Y.Z" __version__ = "%s.%s.%s%s" % (_version_major, _version_minor, _version_micro, _version_extra) CLASSIFIERS = ["Development Status :: 3 - Alpha", "Environment :: Console", "Intended Audience :: Science/Research", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python", "Topic :: Scientific/Engineering"] description = 'A python package for analysis of neuroimaging data' # Note: this long_description is actually a copy/paste from the top-level # README.rst, so that it shows up nicely on PyPI. So please remember to edit # it only in one place and sync it correctly. long_description = \ """ ==== NIPY ==== Neuroimaging tools for Python. The aim of NIPY is to produce a platform-independent Python environment for the analysis of functional brain imaging data using an open development model. In NIPY we aim to: 1. Provide an open source, mixed language scientific programming environment suitable for rapid development. 2. Create sofware components in this environment to make it easy to develop tools for MRI, EEG, PET and other modalities. 3. Create and maintain a wide base of developers to contribute to this platform. 4. To maintain and develop this framework as a single, easily installable bundle. NIPY is the work of many people. We list the main authors in the file ``AUTHOR`` in the NIPY distribution, and other contributions in ``THANKS``. Website ======= Current information can always be found at the NIPY website:: http://nipy.org/nipy Mailing Lists ============= Please see the developer's list:: http://projects.scipy.org/mailman/listinfo/nipy-devel Code ==== You can find our sources and single-click downloads: * `Main repository`_ on Github. * Documentation_ for all releases and current development tree. * Download as a tar/zip file the `current trunk`_. * Downloads of all `available releases`_. .. _main repository: http://github.com/nipy/nipy .. _Documentation: http://nipy.org/nipy .. _current trunk: http://github.com/nipy/nipy/archives/master .. 
_available releases: http://pypi.python.org/pypi/nipy Dependencies ============ To run NIPY, you will need: * python_ >= 2.5 (tested with 2.5, 2.6, 2.7, 3.2, 3.3) * numpy_ >= 1.2 * scipy_ >= 0.7.0 * sympy_ >= 0.6.6 * nibabel_ >= 1.2 You will probably also like to have: * ipython_ for interactive work * matplotlib_ for 2D plotting * mayavi_ for 3D plotting .. _python: http://python.org .. _numpy: http://numpy.scipy.org .. _scipy: http://www.scipy.org .. _sympy: http://sympy.org .. _nibabel: http://nipy.org/nibabel .. _ipython: http://ipython.scipy.org .. _matplotlib: http://matplotlib.sourceforge.net .. _mayavi: http://code.enthought.com/projects/mayavi/ License ======= We use the 3-clause BSD license; the full license is in the file ``LICENSE`` in the nipy distribution. """ NAME = 'nipy' MAINTAINER = "nipy developers" MAINTAINER_EMAIL = "nipy-devel@neuroimaging.scipy.org" DESCRIPTION = description LONG_DESCRIPTION = long_description URL = "http://nipy.org/nipy" DOWNLOAD_URL = "http://github.com/nipy/nipy/archives/master" LICENSE = "BSD license" CLASSIFIERS = CLASSIFIERS AUTHOR = "nipy developmers" AUTHOR_EMAIL = "nipy-devel@neuroimaging.scipy.org" PLATFORMS = "OS Independent" MAJOR = _version_major MINOR = _version_minor MICRO = _version_micro ISRELEASE = _version_extra == '' VERSION = __version__ REQUIRES = ["numpy", "scipy", "sympy"] STATUS = 'beta' # versions NUMPY_MIN_VERSION='1.2' SCIPY_MIN_VERSION = '0.7' NIBABEL_MIN_VERSION = '1.2' SYMPY_MIN_VERSION = '0.6.6' MAYAVI_MIN_VERSION = '3.0' CYTHON_MIN_VERSION = '0.12.1' # Versions and locations of optional data packages NIPY_DATA_URL= 'http://nipy.sourceforge.net/data-packages/' DATA_PKGS = {'nipy-data': {'min version':'0.2', 'relpath':'nipy/data'}, 'nipy-templates': {'min version':'0.2', 'relpath':'nipy/templates'} } NIPY_INSTALL_HINT = \ """You can download and install the package from: %s Check the instructions in the ``doc/users/install_data.rst`` file in the nipy source tree, or online at http://nipy.org/nipy/stable/users/install_data.html If you have the package, have you set the path to the package correctly?""" for key, value in DATA_PKGS.items(): url = '%s%s-%s.tar.gz' % (NIPY_DATA_URL, key, value['min version']) value['name'] = key value['install hint'] = NIPY_INSTALL_HINT % url del key, value, url nipy-0.3.0/nipy/interfaces/000077500000000000000000000000001210344137400155725ustar00rootroot00000000000000nipy-0.3.0/nipy/interfaces/__init__.py000066400000000000000000000003521210344137400177030ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Interfaces to third party software """ from nipy.testing import Tester test = Tester().test bench = Tester().bench nipy-0.3.0/nipy/interfaces/matlab.py000066400000000000000000000047731210344137400174170ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ General matlab interface code This is for nipy convenience. 
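A minimal usage sketch (this assumes a working ``matlab`` executable on your path; the script text is arbitrary)::

    from nipy.interfaces.matlab import run_matlab_script

    # Writes the script to ``pyscript.m`` in the current directory and runs it
    run_matlab_script("disp('hello from nipy')")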
If you're doing heavy matlab interfacing, please use NiPype instead: http://nipy.org/nipype """ # Stdlib imports import os import re import tempfile import subprocess matlab_cmd = 'matlab -nojvm -nosplash' def run_matlab(cmd): subprocess.call('%s -r \"%s;exit\" ' % (matlab_cmd, cmd), shell=True) def run_matlab_script(script_lines, script_name='pyscript'): ''' Put multiline matlab script into script file and run ''' mfile = open(script_name + '.m', 'wt') mfile.write(script_lines) mfile.close() return run_matlab(script_name) # Functions, classes and other top-level code def mlab_tempfile(dir=None): """Returns a temporary file-like object with valid matlab name. The file name is accessible as the .name attribute of the returned object. The caller is responsible for closing the returned object, at which time the underlying file gets deleted from the filesystem. Parameters ---------- dir : str A path to use as the starting directory. Note that this directory must already exist, it is NOT created if it doesn't (in that case, OSError is raised instead). Returns ------- f : file-like object Examples -------- >>> f = mlab_tempfile() >>> pth, fname = os.path.split(f.name) >>> '-' not in fname True >>> f.close() """ valid_name = re.compile(r'^\w+$') # Make temp files until we get one whose name is a valid matlab identifier, # since matlab imposes that constraint. Since the temp file routines may # return names that aren't valid matlab names, but we can't control that # directly, we just keep trying until we get a valid name. To avoid an # infinite loop for some strange reason, we only try 100 times. for n in range(100): f = tempfile.NamedTemporaryFile(suffix='.m',prefix='tmp_matlab_', dir=dir) # Check the file name for matlab compliance fname = os.path.splitext(os.path.basename(f.name))[0] if valid_name.match(fname): break # Close the temp file we just made if its name is not valid; the # tempfile module then takes care of deleting the actual file on disk. 
f.close() else: raise ValueError("Could not make temp file after 100 tries") return f nipy-0.3.0/nipy/interfaces/setup.py000066400000000000000000000007211210344137400173040ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('interfaces', parent_package, top_path) config.add_subpackage('tests') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) nipy-0.3.0/nipy/interfaces/spm.py000066400000000000000000000040411210344137400167420ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: ''' Interfaces to SPM ''' from __future__ import with_statement import os import numpy as np from scipy.io import savemat from nibabel import load from nibabel.onetime import setattr_on_read from nibabel.tmpdirs import InTemporaryDirectory from .matlab import run_matlab_script class SpmInfo(object): @setattr_on_read def spm_path(self): with InTemporaryDirectory() as tmpdir: run_matlab_script(""" spm_path = spm('dir'); fid = fopen('spm_path.txt', 'wt'); fprintf(fid, '%s', spm_path); fclose(fid); """) spm_path = open('spm_path.txt', 'rt').read() return spm_path spm_info = SpmInfo() def make_job(jobtype, jobname, contents): return {'jobs':[{jobtype:[{jobname:contents}]}]} # XXX this should be moved into a matdict class or something def fltcols(vals): ''' Trivial little function to make 1xN float vector ''' return np.atleast_2d(np.array(vals, dtype=float)) def run_jobdef(jobdef): with InTemporaryDirectory(): savemat('pyjobs.mat', jobdef) run_matlab_script(""" load pyjobs; spm_jobman('run', jobs); """) def scans_for_fname(fname): img = load(fname) n_scans = img.shape[3] scans = np.zeros((n_scans, 1), dtype=object) for sno in range(n_scans): scans[sno] = '%s,%d' % (fname, sno+1) return scans def scans_for_fnames(fnames): n_sess = len(fnames) sess_scans = np.zeros((1,n_sess), dtype=object) for sess in range(n_sess): sess_scans[0,sess] = scans_for_fname(fnames[sess]) return sess_scans def fname_presuffix(fname, prefix='', suffix='', use_ext=True): pth, fname = os.path.split(fname) fname, ext = os.path.splitext(fname) if not use_ext: ext = '' return os.path.join(pth, prefix+fname+suffix+ext) def fnames_presuffix(fnames, prefix='', suffix=''): f2 = [] for fname in fnames: f2.append(fname_presuffix(fname, prefix, suffix)) return f2 nipy-0.3.0/nipy/interfaces/tests/000077500000000000000000000000001210344137400167345ustar00rootroot00000000000000nipy-0.3.0/nipy/interfaces/tests/__init__.py000066400000000000000000000000411210344137400210400ustar00rootroot00000000000000# Make interface tests a package nipy-0.3.0/nipy/interfaces/tests/test_mlabtemp.py000066400000000000000000000014621210344137400221510ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Tests for the temporary matlab file module.""" # Stdlib imports import os import tempfile # Our own imports from nipy.interfaces.matlab import mlab_tempfile # Functions, classes and other top-level code def check_mlab_tempfile(dir): """Helper function for testing the mlab temp file creation.""" try: f = mlab_tempfile(dir) except OSError,msg: if not os.path.isdir(dir) and 'No such file or directory' in msg: # This is 
OK, it's the expected error return True else: raise else: f.close() def test_mlab_tempfile(): for dir in [None,tempfile.tempdir,tempfile.mkdtemp()]: yield check_mlab_tempfile,dir nipy-0.3.0/nipy/io/000077500000000000000000000000001210344137400140565ustar00rootroot00000000000000nipy-0.3.0/nipy/io/__init__.py000066400000000000000000000004371210344137400161730ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Package contains generic functions for data input/output. This includes methods for accessing file systems and network resources. """ __docformat__ = 'restructuredtext' nipy-0.3.0/nipy/io/api.py000066400000000000000000000002661210344137400152050ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from .files import load as load_image, save as save_image, as_image nipy-0.3.0/nipy/io/files.py000066400000000000000000000145201210344137400155340ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ The io.files module provides basic functions for working with file-based images in nipy. * load : load an image from a file * save : save an image to a file Examples -------- See documentation for load and save functions for worked examples. """ import os import numpy as np import nibabel as nib from nibabel.spatialimages import HeaderDataError from ..core.image.image import is_image from .nifti_ref import (nipy2nifti, nifti2nipy) def load(filename): """Load an image from the given filename. Parameters ---------- filename : string Should resolve to a complete filename path. Returns ------- image : An `Image` object If successful, a new `Image` object is returned. See Also -------- save_image : function for saving images Image : image object Examples -------- >>> from nipy.io.api import load_image >>> from nipy.testing import anatfile >>> img = load_image(anatfile) >>> img.shape (33, 41, 25) """ img = nib.load(filename) ni_img = nib.Nifti1Image(img._data, img.get_affine(), img.get_header()) return nifti2nipy(ni_img) def save(img, filename, dtype_from='data'): """Write the image to a file. Parameters ---------- img : An `Image` object filename : string Should be a valid filename. dtype_from : {'data', 'header'} or dtype specifier, optional Method of setting dtype to save data to disk. Value of 'data' (default), means use data dtype to save. 'header' means use data dtype specified in header, if available, otherwise use data dtype. Can also be any valid specifier for a numpy dtype, e.g. 'i4', ``np.float32``. Not every format supports every dtype, so some values of this parameter or data dtypes will raise errors. Returns ------- image : An `Image` object Possibly modified by saving. 
See Also -------- load_image : function for loading images Image : image object Examples -------- Make a temporary directory to store files >>> import os >>> from tempfile import mkdtemp >>> tmpdir = mkdtemp() Make some some files and save them >>> import numpy as np >>> from nipy.core.api import Image, AffineTransform >>> from nipy.io.api import save_image >>> data = np.zeros((91,109,91), dtype=np.uint8) >>> cmap = AffineTransform('kji', 'zxy', np.eye(4)) >>> img = Image(data, cmap) >>> fname1 = os.path.join(tmpdir, 'img1.nii.gz') >>> saved_img1 = save_image(img, fname1) >>> saved_img1.shape (91, 109, 91) >>> fname2 = os.path.join(tmpdir, 'img2.img.gz') >>> saved_img2 = save_image(img, fname2) >>> saved_img2.shape (91, 109, 91) >>> fname = 'test.mnc' >>> saved_image3 = save_image(img, fname) Traceback (most recent call last): ... ValueError: Sorry, we cannot yet save as format "minc" Finally, we clear up our temporary files: >>> import shutil >>> shutil.rmtree(tmpdir) Notes ----- Filetype is determined by the file extension in 'filename'. Currently the following filetypes are supported: * Nifti single file : ['.nii', '.nii.gz'] * Nifti file pair : ['.hdr', '.hdr.gz'] * SPM Analyze : ['.img', '.img.gz'] """ # Try and get nifti dt_from_is_str = isinstance(dtype_from, basestring) if dt_from_is_str and dtype_from == 'header': # All done io_dtype = None elif dt_from_is_str and dtype_from == 'data': io_dtype = img.get_data().dtype else: io_dtype = np.dtype(dtype_from) # make new image ni_img = nipy2nifti(img, data_dtype = io_dtype) ftype = _type_from_filename(filename) if ftype.startswith('nifti1'): ni_img.to_filename(filename) elif ftype == 'analyze': try: ana_img = nib.Spm2AnalyzeImage.from_image(ni_img) except HeaderDataError: raise HeaderDataError('SPM analyze does not support datatype %s' % ni_img.get_header().get_data_dtype()) ana_img.to_filename(filename) else: raise ValueError('Sorry, we cannot yet save as format "%s"' % ftype) return img def _type_from_filename(filename): ''' Return image type determined from filename Filetype is determined by the file extension in 'filename'. Currently the following filetypes are supported: * Nifti single file : ['.nii', '.nii.gz'] * Nifti file pair : ['.hdr', '.hdr.gz'] * Analyze file pair : ['.img', '.img.gz'] >>> _type_from_filename('test.nii') 'nifti1single' >>> _type_from_filename('test') 'nifti1single' >>> _type_from_filename('test.hdr') 'nifti1pair' >>> _type_from_filename('test.hdr.gz') 'nifti1pair' >>> _type_from_filename('test.img.gz') 'analyze' >>> _type_from_filename('test.mnc') 'minc' ''' if filename.endswith('.gz'): filename = filename[:-3] elif filename.endswith('.bz2'): filename = filename[:-4] _, ext = os.path.splitext(filename) if ext in ('', '.nii'): return 'nifti1single' if ext == '.hdr': return 'nifti1pair' if ext == '.img': return 'analyze' if ext == '.mnc': return 'minc' raise ValueError('Strange file extension "%s"' % ext) def as_image(image_input): ''' Load image from filename or pass through image instance Parameters ---------- image_input : str or Image instance image or string filename of image. If a string, load image and return. If an image, pass through without modification Returns ------- img : Image or Image-like instance Input object if `image_input` seemed to be an image, loaded Image object if `image_input` was a string. 
Raises ------ TypeError : if neither string nor image-like passed Examples -------- >>> from nipy.testing import anatfile >>> from nipy.io.api import load_image >>> img = as_image(anatfile) >>> img2 = as_image(img) >>> img2 is img True ''' if is_image(image_input): return image_input if isinstance(image_input, basestring): return load(image_input) raise TypeError('Expecting an image-like object or filename string') nipy-0.3.0/nipy/io/imageformats/000077500000000000000000000000001210344137400165345ustar00rootroot00000000000000nipy-0.3.0/nipy/io/imageformats/__init__.py000066400000000000000000000010641210344137400206460ustar00rootroot00000000000000#emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*- #ex: set sts=4 ts=4 sw=4 et: ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## # # See COPYING file distributed along with the NiBabel package for the # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## import warnings warnings.warn('Please use nibabel instead of nipy.io.imageformats', DeprecationWarning, stacklevel=2) from nibabel import load, save, Nifti1Image nipy-0.3.0/nipy/io/nifti_ref.py000066400000000000000000000613771210344137400164130ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ An implementation of some of the NIFTI conventions as desribed in: http://nifti.nimh.nih.gov/pub/dist/src/niftilib/nifti1.h A version of the same file is in the nibabel repisitory at ``doc/source/external/nifti1.h``. Background ========== We (nipystas) make an explicit distinction between: * an input coordinate system of an image (the array == voxel coordinates) * output coordinate system (usually millimeters in some world for space, seconds for time) * the mapping between the two. The collection of these three is the ``coordmap`` attribute of a NIPY image. There is no constraint that the number of input and output coordinates should be the same. We don't specify the units of our output coordinate system, but assume spatial units are millimeters and time units are seconds. NIFTI is mostly less explicit, but more constrained. NIFTI input coordinate system ----------------------------- NIFTI files can have up to seven voxel dimensions (7 axes in the input coordinate system). The first 3 voxel dimensions of a NIFTI file must be spatial but can be in any order in relationship to directions in mm space (the output coordinate system) The 4th voxel dimension is assumed to be time. In particular, if you have some other meaning for a non-spatial dimension, the NIFTI standard suggests you set the length of the 4th dimension to be 1, and use the 5th dimension of the image instead, and set the NIFTI "intent" fields to state the meaning. If the ``intent`` field is set correctly then it should be possible to set meaningful input coordinate axis names for dimensions > (0, 1, 2). There's a wrinkle to the 4th axis is time story; the ``xyxt_units`` field in the NIFTI header can specify the 4th dimension units as Hz (frequency), PPM (concentration) or Radians / second. NIFTI also has a 'dim_info' header attribute that optionally specifies that 0 or more of the first three voxel axes are 'frequency', 'phase' or 'slice'. These terms refer to 2D MRI acquisition encoding, where 'slice's are collected sequentially, and the two remaining dimensions arose from frequency and phase encoding. 
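As a rough sketch of how these header fields can be read with nibabel (the filename ``func.nii`` is only illustrative)::

    import nibabel as nib

    hdr = nib.load('func.nii').get_header()
    hdr.get_dim_info()    # (freq, phase, slice) axis indices; each None if unset
    hdr.get_xyzt_units()  # e.g. ('mm', 'sec')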
The ``dim_info`` fields are often not set. 3D acquisitions don't have a 'slice' dimension. NIFTI output coordinate system ------------------------------ In the NIFTI specification, the order of the output coordinates (at least the first 3) are fixed to be what might be called RAS+, that is ('x=L->R', 'y=P->A', 'z=I->S'). This RAS+ output order is not allowed to change and there is no way of specifying such a change in the NIFTI header. The world in which these RAS+ X, Y, Z axes exist can be one of the recognized spaces, which are: scanner, aligned (to another file's world space), Talairach, MNI 152 (aligned to the MNI 152 atlas). By implication, the 4th output dimension is likely to be seconds (given the 4th input dimension is likely time), but there's a field ``xyzt_units`` (see above) that can be used to imply the 4th output dimension is actually frequency, concentration or angular velocity. NIFTI input / output mapping ---------------------------- NIFTI stores the relationship between the first 3 (spatial) voxel axes and the RAS+ coordinates in an *XYZ affine*. This is a homogenous coordinate affine, hence 4 by 4 for 3 (spatial) dimensions. NIFTI also stores "pixel dimensions" in a ``pixdim`` field. This can give you scaling for individual axes. We ignore the values of ``pixdim`` for the first 3 axes if we have a full ("sform") affine stored in the header, otherwise they form part of the affine above. ``pixdim``[3:] provide voxel to output scalings for later axes. The units for the 4th dimension can come from ``xyzt_units`` as above. We take the convention that the output coordinate names are ('x=L->R', 'y=P->A', 'z=I->S','t','u','v','w') unless there is no time axis (see below) in which case we just omit 't'. The first 3 axes are also named after the output space ('scanner-x=L->R', 'mni-x=L-R' etc). The input axes are 'ijktuvw' unless there is no time axis (see below), in which case they are 'ijkuvw' (remember, NIFTI only allows 7 dimensions, and one is used up by the time length 1 axis). Time-like axes -------------- A time-like axis is an axis that is any of time, Hz, PPM or radians / second. We recognize time in a NIPY coordinate map by an input or an output axis named 't' or 'time'. If it's an output axis we work out the corresponding input axis. A Hz axis can be called 'hz' or 'frequency-hz'. A PPM axis can be called 'ppm' or 'concentration-ppm'. A radians / second axis can be called 'rads' or 'radians/s'. Does this NIFTI image have a time-like axis? -------------------------------------------- We take there to be no time axis if there are only three NIFTI dimensions, or if: * the length of the fourth NIFTI dimension is 1 AND * There are more than four dimensions AND * The ``xyzt_units`` field does not indicate time or time-like units. What we do about all this ========================= For saving a NIPY image to NIFTI, see the docstring for :func:`nipy2nifti`. For loading a NIFTI image to NIPY, see the docstring for :func:`nifti2nipy`. 
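A minimal round-trip sketch (the array shape and voxel sizes below are chosen only for illustration)::

    import numpy as np

    from nipy.core.api import Image, vox2mni
    from nipy.io.nifti_ref import nipy2nifti, nifti2nipy

    data = np.zeros((5, 6, 7))
    # 3D image with 2 x 3 x 4 mm voxels in MNI space
    img = Image(data, vox2mni(np.diag([2., 3., 4., 1.])))
    ni_img = nipy2nifti(img)    # a nibabel Nifti1Image
    img2 = nifti2nipy(ni_img)   # back to a nipy Image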
""" import sys import warnings from copy import copy import numpy as np import nibabel as nib from nibabel.affines import to_matvec, from_matvec from ..core.reference.coordinate_system import CoordinateSystem as CS from ..core.reference.coordinate_map import (AffineTransform as AT, axmap, product as cm_product) from ..core.reference import spaces as ncrs from ..core.image.image import Image from ..core.image.image_spaces import as_xyz_image XFORM2SPACE = {'scanner': ncrs.scanner_space, 'aligned': ncrs.aligned_space, 'talairach': ncrs.talairach_space, 'mni': ncrs.mni_space} TIME_LIKE_AXES = dict( t = dict(aliases=('time',), units='sec'), hz = dict(aliases=('frequency-hz',), units='hz'), ppm = dict(aliases=('concentration-ppm',), units='ppm'), rads = dict(aliases=('radians/s',), units='rads')) TIME_LIKE_MAP = {} for _name, _info in TIME_LIKE_AXES.items(): for _alias in (_name,) + _info['aliases']: TIME_LIKE_MAP[_alias] = _name TIME_LIKE_ORDERED = ('t', 'hz', 'ppm', 'rads') # Threshold for near-zero affine values TINY = 1e-5 class NiftiError(Exception): pass def nipy2nifti(img, data_dtype=None, strict=None, fix0=True): """ Return NIFTI image from nipy image `img` Parameters ---------- img : object An object, usually a NIPY ``Image``, having attributes `coordmap` and `shape` data_dtype : None or dtype specifier None means try and use header dtype, otherwise try and use data dtype, otherwise use np.float32. A dtype specifier means set the header output data dtype using ``np.dtype(data_dtype)``. strict : bool, optional Whether to use strict checking of input image for creating NIFTI fix0: bool, optional Whether to fix potential 0 column / row in affine. This option only used when trying to find time etc axes in the coordmap output names. In order to find matching input names, we need to use the corresponding rows and columns in the affine. Sometimes time, in particular, has 0 scaling, and thus all 0 in the corresponding row / column. In that case it's hard to work out which input corresponds. If `fix0` is True, and there is only one all zero (matrix part of the) affine row, and only one all zero (matrix part of the) affine column, fix scaling for that combination to zero, assuming this a zero scaling for time. Returns ------- ni_img : ``nibabel.Nifti1Image`` NIFTI image Raises ------ NiftiError: if space axes not orthogonal to non-space axes NiftiError: if non-space axes not orthogonal to each other NiftiError: if `img` output space does not match named spaces in NIFTI NiftiError: if input image has more than 7 dimensions NiftiError: if input image has 7 dimensions, but no time dimension, because we need to add an extra 1 length axis at position 3 NiftiError: if we find a time-like input axis but the matching output axis is a different time-like. NiftiError: if we find a time-like output axis but the matching input axis is a different time-like. NiftiError: if we find a time output axis and there are non-zero non-spatial offsets in the affine, but we can't find a corresponding input axis. Notes ----- First, we need to create a valid XYZ Affine. We check if this can be done by checking if there are recognizable X, Y, Z output axes and corresponding input (voxel) axes. This requires the input image to be at least 3D. If we find these requirements, we reorder the image axes to have XYZ output axes and 3 spatial input axes first, and get the corresponding XYZ affine. If the spatial dimensions are not orthogonal to the non-spatial dimensions, raise a NiftiError. 
If the non-spatial dimensions are not orthogonal to each other, raise a NiftiError. We check if the XYZ output fits with the NIFTI named spaces of scanner, aligned, Talairach, MNI. If so, set the NIFTI code and qform, sform accordingly. If the space corresponds to 'unknown' then we must set the NIFTI transform codes to 0, and the affine must match the affine we will get from loading the NIFTI with no qform, sform. If not, we're going to lose information in the affine, and raise an error. If any of the first three input axes are named ('slice', 'freq', 'phase') set the ``dim_info`` field accordingly. Set the ``xyzt_units`` field to indicate millimeters and seconds, if there is a 't' axis, otherwise millimeters and 0 (unknown). We look to see if we have a time-like axis in the inputs or the outputs. A time-like axis has labels 't', 'hz', 'ppm', 'rads'. If we have an axis 't' in the inputs *and* the outputs, check they either correspond, or both inputs and output correspond with no other axis, otherwise raise NiftiError. Do the same check for 'hz', then 'ppm', then 'rads'. If we do have a time-like axis, roll that axis to be the 4th axis. If this axis is actually time, take the ``affine[3, -1]`` and put into the ``toffset`` field. If there's no time-like axis, but there are other non-spatial axes, make a length 1 4th array axis to indicate this. If the resulting NIFTI image has more than 7 dimensions, raise a NiftiError. Set ``pixdim`` for axes >= 3 using vector length of corresponding affine columns. We don't set the intent-related fields for now. """ strict_none = strict is None if strict_none: warnings.warn('Default `strict` currently False; this will change to ' 'True in a future version of nipy', FutureWarning, stacklevel = 2) strict = False known_names = ncrs.known_names if not strict: # add simple 'xyz' to acceptable spatial names known_names = copy(known_names) # copy module global dict for c in 'xyz': known_names[c] = c try: img = as_xyz_image(img, known_names) except (ncrs.AxesError, ncrs.AffineError): # Python 2.5 / 3 compatibility e = sys.exc_info()[1] raise NiftiError('Image cannot be reordered to XYZ because: "%s"' % e) coordmap = img.coordmap # Get useful information from old header in_hdr = img.metadata.get('header', None) hdr = nib.Nifti1Header.from_header(in_hdr) # Default behavior is to take datatype from old header, unless there was no # header, in which case we try to use the data dtype. data = None if data_dtype is None: if in_hdr is None: data = img.get_data() data_dtype = data.dtype else: data_dtype = in_hdr.get_data_dtype() else: data_dtype = np.dtype(data_dtype) hdr.set_data_dtype(data_dtype) # Remaining axes orthogonal? rzs, trans = to_matvec(coordmap.affine) if (not np.allclose(rzs[3:, :3], 0) or not np.allclose(rzs[:3, 3:], 0)): raise NiftiError('Non space axes not orthogonal to space') # And to each other? 
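    # Each non-space input axis may feed at most one non-space output axis
    # (and each non-space output may be fed by at most one input); we check
    # this via the non-zero pattern of the non-space block of the affine.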
nsp_affine = rzs[3:,3:] nsp_nzs = np.abs(nsp_affine) > TINY n_in_col = np.sum(nsp_nzs, axis=0) n_in_row = np.sum(nsp_nzs, axis=1) if np.any(n_in_col > 1) or np.any(n_in_row > 1): raise NiftiError('Non space axes not orthogonal to each other') # Affine seems OK, check for space xyz_affine = ncrs.xyz_affine(coordmap, known_names) spatial_output_names = coordmap.function_range.coord_names[:3] out_space = CS(spatial_output_names) for name, space in XFORM2SPACE.items(): if out_space in space: hdr.set_sform(xyz_affine, name) hdr.set_qform(xyz_affine, name) break else: if not strict and spatial_output_names == ('x', 'y', 'z'): warnings.warn('Default `strict` currently False; ' 'this will change to True in a future version of ' 'nipy; output names of "x", "y", "z" will raise ' 'an error. Please use canonical output names from ' 'nipy.core.reference.spaces', FutureWarning, stacklevel = 2) hdr.set_sform(xyz_affine, 'scanner') hdr.set_qform(xyz_affine, 'scanner') elif not out_space in ncrs.unknown_space: # no space we recognize raise NiftiError('Image world not a NIFTI world') else: # unknown space requires affine that matches if not np.allclose(xyz_affine, hdr.get_base_affine()): raise NiftiError("Image world is 'unknown' but affine not " "compatible; please reset image world or " "affine") hdr.set_qform(None) hdr.set_sform(None) # Use list() to get .index method for python < 2.6 input_names = list(coordmap.function_domain.coord_names) spatial_names = input_names[:3] dim_infos = [] for fps in 'freq', 'phase', 'slice': dim_infos.append( spatial_names.index(fps) if fps in spatial_names else None) hdr.set_dim_info(*dim_infos) # Set units without knowing time hdr.set_xyzt_units(xyz='mm') # Done if we only have 3 input dimensions n_ns = coordmap.ndims[0] - 3 if n_ns == 0: # No non-spatial dimensions return nib.Nifti1Image(img.get_data(), xyz_affine, hdr) elif n_ns > 4: raise NiftiError("Too many dimensions to convert") # Go now to data, pixdims if data is None: data = img.get_data() rzs, trans = to_matvec(img.coordmap.affine) ns_pixdims = list(np.sqrt(np.sum(rzs[3:, 3:] ** 2, axis=0))) in_ax, out_ax, tl_name = _find_time_like(coordmap, fix0) if in_ax is None: # No time-like axes # add new 1-length axis if n_ns == 4: raise NiftiError("Too many dimensions to convert") n_ns += 1 data = data[:, :, :, None, ...] # xyzt_units hdr.set_xyzt_units(xyz='mm') # shift pixdims ns_pixdims.insert(0, 0) else: # Time-like hdr.set_xyzt_units(xyz='mm', t=TIME_LIKE_AXES[tl_name]['units']) # If this is really time, set toffset if tl_name == 't' and np.any(trans[3:]): # Which output axis corresponds to time? if out_ax is None: raise NiftiError('Time input and output do not match') hdr['toffset'] = trans[out_ax] # Make sure this time-like axis is first non-space axis if in_ax != 3: data = np.rollaxis(data, in_ax, 3) order = range(n_ns) order.pop(in_ax - 3) order.insert(0, in_ax - 3) ns_pixdims = [ns_pixdims[i] for i in order] hdr['pixdim'][4:(4 + n_ns)] = ns_pixdims return nib.Nifti1Image(data, xyz_affine, hdr) def _find_time_like(coordmap, fix0): """ Return input axis corresponding to best time-like axis Parameters ---------- coordmap : AffineTransform fix0 : bool True if we use zero column, zero row heuristic to match time input and output axes. Returns ------- in_ax : int or None None if there was no time-like axis that could be mapped to an input, otherwise the input axis index. out_ax : int or None None if there was no time-like axis that could be mapped to an input, otherwise the input axis index. 
tl_name: str or None: None if there was no time-like axis that could be mapped to an input, otherwise the canonical name of this time-like. """ non_space_inames = list(coordmap.function_domain.coord_names[3:]) non_space_onames = list(coordmap.function_range.coord_names[3:]) # Make time-like names canonical, set to None elsewhere for ax_names in [non_space_inames, non_space_onames]: for ax_no, ax_name in enumerate(ax_names): ax_names[ax_no] = TIME_LIKE_MAP.get(ax_name) # Find best time in axis, check correspondence in2out, out2in = axmap(coordmap, 'both', fix0) in_ax, out_ax = None, None for name in TIME_LIKE_ORDERED: if name in non_space_inames: in_ax = non_space_inames.index(name) + 3 corr_out = in2out[in_ax] if name in non_space_onames: # in both - matching? same_time_out = non_space_onames.index(name) + 3 corr_in = out2in[same_time_out] if corr_out is None: if not corr_in is None: raise NiftiError("Axis type '%s' found in input and " "output but they do not appear to " "match" % name) return (in_ax, None, name) if corr_out != same_time_out: raise NiftiError("Axis type '%s' found in input and " "output but they do not appear to " "match" % name) return (in_ax, corr_out, name) # Name not in output, but is there another time-like name at this # output position? matching = non_space_onames[corr_out - 3] if matching is None: return (in_ax, corr_out, name) raise NiftiError("Axis type '%s' in input matches axis type '%s' " "in output" % (name, matching)) # Now check in output names elif name in non_space_onames: # Found name in output axes, corresponding input? out_ax = non_space_onames.index(name) + 3 in_ax = out2in[out_ax] if in_ax is None: # no corresponding axis continue matching = non_space_inames[in_ax - 3] if matching is None: return (in_ax, out_ax, name) raise NiftiError("Axis type '%s' in output matches axis type " "'%s' in input" % (name, matching)) return None, None, None TIME_LIKE_UNITS = dict( sec = dict(name='t', scaling = 1), msec = dict(name='t', scaling = 1 / 1000.), usec = dict(name='t', scaling = 1 / 1000000.), hz = dict(name='hz', scaling = 1), ppm = dict(name='ppm', scaling = 1), rads = dict(name='rads', scaling = 1)) def nifti2nipy(ni_img): """ Return NIPY image from NIFTI image `ni_image` Parameters ---------- ni_img : nibabel.Nifti1Image NIFTI image Returns ------- img : :class:`Image` nipy image Raises ------ NiftiError : if image is < 3D Notes ----- Lacking any other information, we take the input coordinate names for axes 0:7 to be ('i', 'j', 'k', 't', 'u', 'v', 'w'). If the image is 1D or 2D then we have a problem. If there's a defined (sform, qform) affine, this has 3 input dimensions, and we have to guess what the extra input dimensions are. If we don't have a defined affine, we don't know what the output dimensions are. For example, if the image is 2D, and we don't have an affine, are these X and Y or X and Z or Y and Z? In the presence of ambiguity, resist the temptation to guess - raise a NiftiError. If there is a time-like axis, name the input and corresponding output axis for the type of axis ('t', 'hz', 'ppm', 'rads'). Otherwise remove the 't' axis from both input and output names, and squeeze the length 1 dimension from the input data. If there's a 't' axis get ``toffset`` and put into affine at position [3, -1]. If ``dim_info`` is set coherently, set input axis names to 'slice', 'freq', 'phase' from ``dim_info``. Get the output spatial coordinate names from the 'scanner', 'aligned', 'talairach', 'mni' XYZ spaces (see :mod:`nipy.core.reference.spaces`). 
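As an illustrative sketch (``ni_img`` stands for any nibabel ``Nifti1Image``; the exact names depend on the sform / qform code in its header, and those shown are only indicative)::

    img = nifti2nipy(ni_img)
    img.coordmap.function_domain.coord_names  # e.g. ('i', 'j', 'k', 't')
    img.coordmap.function_range.coord_names   # e.g. ('mni-x=L->R', 'mni-y=P->A', 'mni-z=I->S', 't')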
We construct the N-D affine by taking the XYZ affine and adding scaling diagonal elements from ``pixdim``. If the space units in NIFTI ``xyzt_units`` are 'microns' or 'meters' we adjust the affine to mm units, but warn because this might be a mistake. If the time units in NIFTI `xyzt_units` are 'msec' or 'usec', scale the time axis ``pixdim`` values accordingly. Ignore the intent-related fields for now, but warn that we are doing so if there appears to be specific information in there. """ hdr = ni_img.get_header() affine = ni_img.get_affine() # Affine will not be None from a loaded image, but just in case if affine is None: affine = hdr.get_best_affine() else: affine = affine.copy() data = ni_img.get_data() shape = list(ni_img.shape) ndim = len(shape) if ndim < 3: raise NiftiError("With less than 3 dimensions we cannot be sure " "which input and output dimensions you intend for " "the coordinate map. Please fix this image with " "nibabel or some other tool") # For now we only warn if intent is set to an unexpected value intent, _, _ = hdr.get_intent() if intent != 'none': warnings.warn('Ignoring intent field meaning "%s"' % intent, UserWarning) # Which space? world_label = hdr.get_value_label('sform_code') if world_label == 'unknown': world_label = hdr.get_value_label('qform_code') world_space = XFORM2SPACE.get(world_label, ncrs.unknown_space) # Get information from dim_info input_names3 = list('ijk') freq, phase, slice = hdr.get_dim_info() if not freq is None: input_names3[freq] = 'freq' if not phase is None: input_names3[phase] = 'phase' if not slice is None: input_names3[slice] = 'slice' # Add to mm scaling, with warning space_units, time_like_units = hdr.get_xyzt_units() if space_units in ('micron', 'meter'): warnings.warn('"%s" space scaling in NIFTI ``xyt_units field; ' 'applying scaling to affine, but this may not be what ' 'you want' % space_units, UserWarning) if space_units == 'micron': affine[:3] /= 1000. elif space_units == 'meter': affine[:3] *= 1000. input_cs3 = CS(input_names3, name='voxels') output_cs3 = world_space.to_coordsys_maker()(3) cmap3 = AT(input_cs3, output_cs3, affine) if ndim == 3: return Image(data, cmap3, {'header': hdr}) space_units, time_like_units = hdr.get_xyzt_units() units_info = TIME_LIKE_UNITS.get(time_like_units, None) n_ns = ndim - 3 ns_zooms = list(hdr.get_zooms()[3:]) ns_trans = [0] * n_ns ns_names = tuple('uvw') # Have we got a time axis? 
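    # Per the NIFTI convention described in the module docstring: a length-1
    # 4th axis, with more trailing axes and no time-like units, means 'no time
    # axis', so squeeze it out; otherwise treat the 4th axis as time-like.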
if (shape[3] == 1 and ndim > 4 and units_info is None): # Squeeze length 1 no-time axis shape.pop(3) ns_zooms.pop(0) ns_trans.pop(0) data = data.reshape(shape) n_ns -= 1 else: # have time-like if units_info is None: units_info = TIME_LIKE_UNITS['sec'] time_name = units_info['name'] if units_info['scaling'] != 1: ns_zooms[0] *= units_info['scaling'] if time_name == 't': # Get time offset ns_trans[0] = hdr['toffset'] ns_names = (time_name,) + ns_names output_cs = CS(ns_names[:n_ns]) input_cs = CS(ns_names[:n_ns]) aff = from_matvec(np.diag(ns_zooms), ns_trans) ns_cmap = AT(input_cs, output_cs, aff) cmap = cm_product(cmap3, ns_cmap, input_name=cmap3.function_domain.name, output_name=cmap3.function_range.name) return Image(data, cmap, {'header': hdr}) nipy-0.3.0/nipy/io/setup.py000066400000000000000000000007631210344137400155760ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('io', parent_package, top_path) config.add_subpackage('tests') config.add_subpackage('imageformats') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) nipy-0.3.0/nipy/io/tests/000077500000000000000000000000001210344137400152205ustar00rootroot00000000000000nipy-0.3.0/nipy/io/tests/__init__.py000066400000000000000000000000341210344137400173260ustar00rootroot00000000000000# Make tests into a package nipy-0.3.0/nipy/io/tests/test_image_io.py000066400000000000000000000232071210344137400204060ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from __future__ import with_statement import numpy as np from nibabel.spatialimages import ImageFileError, HeaderDataError from nibabel import Nifti1Header from ..api import load_image, save_image, as_image from nipy.core.api import AffineTransform as AfT, Image, vox2mni from nipy.testing import (assert_true, assert_equal, assert_raises, assert_array_equal, assert_array_almost_equal, assert_almost_equal, funcfile, anatfile) from nibabel.tmpdirs import InTemporaryDirectory from nipy.testing.decorators import if_templates from nipy.utils import templates, DataError from nibabel.tests.test_round_trip import big_bad_ulp gimg = None def setup_module(): global gimg try: gimg = load_template_img() except DataError: pass def load_template_img(): return load_image( templates.get_filename( 'ICBM152', '2mm', 'T1.nii.gz')) def test_badfile(): filename = "bad_file.foo" assert_raises(ImageFileError, load_image, filename) @if_templates def test_maxminmean_values(): # loaded array values from SPM y = gimg.get_data() yield assert_equal, y.shape, tuple(gimg.shape) yield assert_array_almost_equal, y.max(), 1.000000059 yield assert_array_almost_equal, y.mean(), 0.273968048 yield assert_equal, y.min(), 0.0 @if_templates def test_nondiag(): gimg.affine[0,1] = 3.0 with InTemporaryDirectory(): save_image(gimg, 'img.nii') img2 = load_image('img.nii') assert_almost_equal(img2.affine, gimg.affine) def randimg_in2out(rng, in_dtype, out_dtype, name): in_dtype = np.dtype(in_dtype) out_dtype = np.dtype(out_dtype) shape = (2,3,4) if in_dtype.kind in 'iu': info = np.iinfo(in_dtype) dmin, dmax = info.min, info.max # Numpy bug for np < 1.6.0 allows overflow for range that does not fit # into C long int (int32 on 32-bit, int64 on 64-bit) try: 
data = rng.randint(dmin, dmax, size=shape) except ValueError: from random import randint vals = [randint(dmin, dmax) for v in range(np.prod(shape))] data = np.array(vals).astype(in_dtype).reshape(shape) elif in_dtype.kind == 'f': info = np.finfo(in_dtype) dmin, dmax = info.min, info.max # set some value for scaling our data scale = np.iinfo(np.uint16).max * 2.0 data = rng.normal(size=shape, scale=scale) data[0,0,0] = dmin data[1,0,0] = dmax data = data.astype(in_dtype) img = Image(data, vox2mni(np.eye(4))) # The dtype_from dtype won't be visible until the image is loaded newimg = save_image(img, name, dtype_from=out_dtype) return newimg.get_data(), data def test_scaling_io_dtype(): # Does data dtype get set? # Is scaling correctly applied? rng = np.random.RandomState(19660520) # VBD ulp1_f32 = np.finfo(np.float32).eps types = (np.uint8, np.uint16, np.int16, np.int32, np.float32) with InTemporaryDirectory(): for in_type in types: for out_type in types: data, _ = randimg_in2out(rng, in_type, out_type, 'img.nii') img = load_image('img.nii') # Check the output type is as expected hdr = img.metadata['header'] assert_equal(hdr.get_data_dtype().type, out_type) # Check the data is within reasonable bounds. The exact bounds # are a little annoying to calculate - see # nibabel/tests/test_round_trip for inspiration data_back = img.get_data().copy() # copy to detach from file del img top = np.abs(data - data_back) nzs = (top !=0) & (data !=0) abs_err = top[nzs] if abs_err.size != 0: # all exact, that's OK. continue rel_err = abs_err / data[nzs] if np.dtype(out_type).kind in 'iu': slope, inter = hdr.get_slope_inter() abs_err_thresh = slope / 2.0 rel_err_thresh = ulp1_f32 elif np.dtype(out_type).kind == 'f': abs_err_thresh = big_bad_ulp(data.astype(out_type))[nzs] rel_err_thresh = ulp1_f32 assert_true(np.all( (abs_err <= abs_err_thresh) | (rel_err <= rel_err_thresh))) def assert_dt_no_end_equal(a, b): """ Assert two numpy dtype specifiers are equal apart from byte order Avoids failed comparison between int32 / int64 and intp """ a = np.dtype(a).newbyteorder('=') b = np.dtype(b).newbyteorder('=') assert_equal(a.str, b.str) def test_output_dtypes(): shape = (4, 2, 3) rng = np.random.RandomState(19441217) # IN-S BD data = rng.normal(4, 20, size=shape) aff = np.diag([2.2, 3.3, 4.1, 1]) cmap = vox2mni(aff) img = Image(data, cmap) fname_root = 'my_file' with InTemporaryDirectory(): for ext in 'img', 'nii': out_fname = fname_root + '.' 
+ ext # Default is for data to come from data dtype save_image(img, out_fname) img_back = load_image(out_fname) hdr = img_back.metadata['header'] assert_dt_no_end_equal(hdr.get_data_dtype(), np.float) del img_back # lets window re-use the file # All these types are OK for both output formats for out_dt in 'i2', 'i4', np.int16, 'f8': # Specified output dtype save_image(img, out_fname, out_dt) img_back = load_image(out_fname) hdr = img_back.metadata['header'] assert_dt_no_end_equal(hdr.get_data_dtype(), out_dt) del img_back # windows file re-use # Output comes from data by default data_typed = data.astype(out_dt) img_again = Image(data_typed, cmap) save_image(img_again, out_fname) img_back = load_image(out_fname) hdr = img_back.metadata['header'] assert_dt_no_end_equal(hdr.get_data_dtype(), out_dt) del img_back # Even if header specifies otherwise in_hdr = Nifti1Header() in_hdr.set_data_dtype(np.dtype('c8')) img_more = Image(data_typed, cmap, metadata={'header': in_hdr}) save_image(img_more, out_fname) img_back = load_image(out_fname) hdr = img_back.metadata['header'] assert_dt_no_end_equal(hdr.get_data_dtype(), out_dt) del img_back # But can come from header if specified save_image(img_more, out_fname, dtype_from='header') img_back = load_image(out_fname) hdr = img_back.metadata['header'] assert_dt_no_end_equal(hdr.get_data_dtype(), 'c8') del img_back # u2 only OK for nifti save_image(img, 'my_file.nii', 'u2') img_back = load_image('my_file.nii') hdr = img_back.metadata['header'] assert_dt_no_end_equal(hdr.get_data_dtype(), 'u2') # Check analyze can't save u2 datatype assert_raises(HeaderDataError, save_image, img, 'my_file.img', 'u2') del img_back def test_header_roundtrip(): img = load_image(anatfile) hdr = img.metadata['header'] # Update some header values and make sure they're saved hdr['slice_duration'] = 0.200 hdr['intent_p1'] = 2.0 hdr['descrip'] = 'descrip for TestImage:test_header_roundtrip' hdr['slice_end'] = 12 with InTemporaryDirectory(): save_image(img, 'img.nii.gz') newimg = load_image('img.nii.gz') newhdr = newimg.metadata['header'] assert_array_almost_equal(newhdr['slice_duration'], hdr['slice_duration']) assert_equal(newhdr['intent_p1'], hdr['intent_p1']) assert_equal(newhdr['descrip'], hdr['descrip']) assert_equal(newhdr['slice_end'], hdr['slice_end']) def test_file_roundtrip(): img = load_image(anatfile) data = img.get_data() with InTemporaryDirectory(): save_image(img, 'img.nii.gz') img2 = load_image('img.nii.gz') data2 = img2.get_data() # verify data assert_almost_equal(data2, data) assert_almost_equal(data2.mean(), data.mean()) assert_almost_equal(data2.min(), data.min()) assert_almost_equal(data2.max(), data.max()) # verify shape and ndims assert_equal(img2.shape, img.shape) assert_equal(img2.ndim, img.ndim) # verify affine assert_almost_equal(img2.affine, img.affine) def test_roundtrip_from_array(): data = np.random.rand(10,20,30) img = Image(data, AfT('kji', 'xyz', np.eye(4))) with InTemporaryDirectory(): save_image(img, 'img.nii.gz') img2 = load_image('img.nii.gz') data2 = img2.get_data() # verify data assert_almost_equal(data2, data) assert_almost_equal(data2.mean(), data.mean()) assert_almost_equal(data2.min(), data.min()) assert_almost_equal(data2.max(), data.max()) # verify shape and ndims assert_equal(img2.shape, img.shape) assert_equal(img2.ndim, img.ndim) # verify affine assert_almost_equal(img2.affine, img.affine) def test_as_image(): # test image creation / pass through function img = as_image(funcfile) # string filename img1 = as_image(unicode(funcfile)) 
img2 = as_image(img) assert_equal(img.affine, img1.affine) assert_array_equal(img.get_data(), img1.get_data()) assert_true(img is img2) nipy-0.3.0/nipy/io/tests/test_nifti_ref.py000066400000000000000000000724451210344137400206120ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Test conversion between NIFTI and NIPY conventions. The algorithms are mostly written out in the :mod:`nipy.io.nifti_ref` docstrings. """ import warnings from copy import copy import numpy as np import nibabel as nib from nibabel.affines import from_matvec from nibabel.spatialimages import HeaderDataError from ...core.api import (Image, AffineTransform as AT, CoordinateSystem as CS) from ...core.reference.spaces import (unknown_csm, scanner_csm, aligned_csm, talairach_csm, mni_csm, unknown_space, vox2mni) from ..files import load from ..nifti_ref import (nipy2nifti, nifti2nipy, NiftiError) from nose.tools import assert_equal, assert_true, assert_false, assert_raises from numpy.testing import assert_almost_equal, assert_array_equal from ...testing import anatfile, funcfile def copy_of(fname): # Make a fresh copy of a image stored in a file img = load(fname) hdr = img.metadata['header'].copy() return Image(img.get_data().copy(), copy(img.coordmap), {'header': hdr}) def setup(): # Suppress warnings during tests warnings.simplefilter("ignore") def teardown(): # Clear list of warning filters warnings.resetwarnings() def test_basic_nipy2nifti(): # Go from nipy image to header and data for nifti fimg = copy_of(funcfile) hdr = fimg.metadata['header'] data = fimg.get_data() # Header is preserved # Put in some information to check header is preserved hdr['slice_duration'] = 0.25 ni_img = nipy2nifti(fimg) new_hdr = ni_img.get_header() # header copied on the way through assert_false(hdr is new_hdr) # Check information preserved assert_equal(hdr['slice_duration'], new_hdr['slice_duration']) assert_array_equal(data, ni_img.get_data()) # Shape obviously should be same assert_equal(ni_img.shape, fimg.shape) def test_xyz_affines(): fimg = copy_of(funcfile) data = fimg.get_data() # Check conversion to xyz affable # Roll time to front in array fimg_t0 = fimg.reordered_axes((3, 0, 1, 2)) # Nifti conversion rolls it back assert_array_equal(nipy2nifti(fimg_t0).get_data(), data) # Roll time to position 1 fimg_t0 = fimg.reordered_axes((0, 3, 1, 2)) assert_array_equal(nipy2nifti(fimg_t0).get_data(), data) # Check bad names cause NiftiError out_coords = fimg.reference.coord_names bad_img = fimg.renamed_reference(**{out_coords[0]: 'not a known axis'}) assert_raises(NiftiError, nipy2nifti, bad_img) # Check xyz works for not strict bad_img = fimg.renamed_reference(**dict(zip(out_coords, 'xyz'))) assert_array_equal(nipy2nifti(bad_img, strict=False).get_data(), data) # But fails for strict assert_raises(NiftiError, nipy2nifti, bad_img, strict=True) # 3D is OK aimg = copy_of(anatfile) adata = aimg.get_data() assert_array_equal(nipy2nifti(aimg).get_data(), adata) # For now, always error on 2D (this depends on as_xyz_image) assert_raises(NiftiError, nipy2nifti, aimg[:, :, 1]) assert_raises(NiftiError, nipy2nifti, aimg[:, 1, :]) assert_raises(NiftiError, nipy2nifti, aimg[1, :, :]) # Do not allow spaces not in the NIFTI canon for i in range(3): displaced_img = fimg.renamed_reference(**{out_coords[i]: 'obscure'}) assert_raises(NiftiError, nipy2nifti, displaced_img) def test_unknown(): # The 'unknown' coordinate space results from loading an image with no # 
affine set; allow setting into nifti iff the affine corresponds to the # default that would be created when there is no affine aimg = copy_of(anatfile) bare_affine = aimg.metadata['header'].get_base_affine() # The affine does not match the header-only affine assert_false(np.allclose(bare_affine, aimg.coordmap.affine)) unknown_cs = unknown_csm(3) out_coords = aimg.reference.coord_names # So nipy2nifti raises an error displaced_img = aimg.renamed_reference( **dict(zip(out_coords[:3], unknown_cs.coord_names))) assert_raises(NiftiError, nipy2nifti, displaced_img) # If the affine is the same, no error displaced_img.coordmap.affine[:] = bare_affine assert_true(np.allclose(bare_affine, displaced_img.coordmap.affine)) nimg = nipy2nifti(displaced_img) assert_array_equal(nimg.get_affine(), bare_affine) inimg = nifti2nipy(nimg) assert_true(inimg.coordmap.function_range in unknown_space) def test_orthogonal_dims(): # Test whether conversion to nifti raises an error for non-orthogonal # non-spatial dimensions # This affine is all nicely diagonal aff = from_matvec(np.diag([2., 3, 4, 5, 6]), [10, 11, 12, 13, 14]) data = np.random.normal(size=(3, 4, 5, 6, 7)) img = Image(data, vox2mni(aff)) def as3d(aff): return from_matvec(aff[:3, :3], aff[:3, -1]) assert_array_equal(nipy2nifti(img).get_affine(), as3d(aff)) # Non-orthogonal spatial dimensions OK aff[:3, :3] = np.random.normal(size=(3, 3)) img = Image(data, vox2mni(aff)) assert_array_equal(nipy2nifti(img).get_affine(), as3d(aff)) # Space must be orthogonal to time etc aff[0, 3] = 0.1 assert_raises(NiftiError, nipy2nifti, img) aff[0, 3] = 0 assert_array_equal(nipy2nifti(img).get_affine(), as3d(aff)) aff[3, 0] = 0.1 assert_raises(NiftiError, nipy2nifti, img) aff[3, 0] = 0 assert_array_equal(nipy2nifti(img).get_affine(), as3d(aff)) aff[4, 0] = 0.1 assert_raises(NiftiError, nipy2nifti, img) def test_dim_info(): # Test slice, freq, phase get set OK fimg = copy_of(funcfile) hdr = fimg.metadata['header'] assert_equal(hdr.get_dim_info(), (None, None, None)) ni_img = nipy2nifti(fimg) assert_equal(ni_img.get_header().get_dim_info(), (None, None, None)) data = fimg.get_data() cmap = fimg.coordmap for i in range(3): for order, name in enumerate(('freq', 'phase', 'slice')): ncmap = cmap.renamed_domain({i: name}) ni_img = nipy2nifti(Image(data, ncmap, {'header': hdr})) exp_info = [None, None, None] exp_info[order] = i assert_equal(ni_img.get_header().get_dim_info(), tuple(exp_info)) ncmap = cmap.renamed_domain( dict(zip(range(3), ('phase', 'slice', 'freq')))) ni_img = nipy2nifti(Image(data, ncmap, {'header': hdr})) assert_equal(ni_img.get_header().get_dim_info(), (2, 0, 1)) def test_time_like_matching(): # Check checks for matching time-like axes shape = (2, 3, 4, 5, 6) shape_shifted = (2, 3, 4, 6, 5) data = np.random.normal(size=shape) aff = np.diag([3, 4, 5, 6, 7, 1]) mni_names = mni_csm(3).coord_names time_cans = ('t', 'hz', 'ppm', 'rads') aliases = dict(t='time', hz='frequency-hz', ppm='concentration-ppm', rads='radians/s') all_names = set(time_cans + tuple(v for v in aliases.values())) for time_like in time_cans: alias = aliases[time_like] for name in (time_like, alias): # Names match cmap = AT(CS(('i', 'j', 'k', name, 'u')), CS(mni_names + (name, 'u')), aff) assert_equal(nipy2nifti(Image(data, cmap)).shape, shape) cmap = AT(CS(('i', 'j', 'k', 'u', name)), CS(mni_names + ('u', name)), aff) assert_equal(nipy2nifti(Image(data, cmap)).shape, shape_shifted) # No time-like in output is OK cmap = AT(CS(('i', 'j', 'k', 'u', name)), CS(mni_names + ('u', 'v')), aff) 
assert_equal(nipy2nifti(Image(data, cmap)).shape, shape_shifted) # No time-like in input is OK cmap = AT(CS(('i', 'j', 'k', 'u', 'v')), CS(mni_names + ('u', name)), aff) assert_equal(nipy2nifti(Image(data, cmap)).shape, shape_shifted) # Time-like in both, but not matching, not OK cmap = AT(CS(('i', 'j', 'k', 'u', name)), CS(mni_names + (name, 'u')), aff) assert_raises(NiftiError, nipy2nifti, Image(data, cmap)) # Time like in both with no match between but no match elsewhere # Actually this does cause a problem for non-zero time offset and # time axes, but we test that elsewhere. cmap = AT(CS(('i', 'j', 'k', 'u', name)), CS(mni_names + ('u', name)), np.diag([3, 4, 5, 6, 0, 1])) assert_equal(nipy2nifti(Image(data, cmap)).shape, shape_shifted) cmap = AT(CS(('i', 'j', 'k', 'u', name)), CS(mni_names + (name, 'u')), np.diag([3, 4, 5, 0, 0, 1])) assert_equal(nipy2nifti(Image(data, cmap)).shape, shape_shifted) # Matching to own alias is OK cmap = AT(CS(('i', 'j', 'k', time_like, 'u')), CS(mni_names + (alias, 'u')), aff) assert_equal(nipy2nifti(Image(data, cmap)).shape, shape) cmap = AT(CS(('i', 'j', 'k', alias, 'u')), CS(mni_names + (time_like, 'u')), aff) assert_equal(nipy2nifti(Image(data, cmap)).shape, shape) # But not to another time-like name others = all_names.difference((time_like, alias)) for name in others: cmap = AT(CS(('i', 'j', 'k', time_like, 'u')), CS(mni_names + (name, 'u')), aff) assert_raises(NiftiError, nipy2nifti, Image(data, cmap)) cmap = AT(CS(('i', 'j', 'k', name, 'u')), CS(mni_names + (time_like, 'u')), aff) assert_raises(NiftiError, nipy2nifti, Image(data, cmap)) # It's OK to have more than one time-like, but the order of recognition # is 't', 'hz', 'ppm', 'rads' for i, better in enumerate(time_cans[:-1]): for worse in time_cans[i+1:]: cmap = AT(CS(('i', 'j', 'k', better, worse)), CS(mni_names + (better, worse)), aff) assert_equal(nipy2nifti(Image(data, cmap)).shape, shape) cmap = AT(CS(('i', 'j', 'k', worse, better)), CS(mni_names + (worse, better)), aff) assert_equal(nipy2nifti(Image(data, cmap)).shape, shape_shifted) # Even if better is only in output cmap = AT(CS(('i', 'j', 'k', worse, 'u')), CS(mni_names + (worse, better)), aff) assert_equal(nipy2nifti(Image(data, cmap)).shape, shape_shifted) def test_time_pixdims(): # Pixdims get moved across when a no-time extra axis is added shape = (2, 3, 4, 5, 6, 7) data = np.random.normal(size=shape) aff = np.diag([3, 4, 5, 6, 7, 8, 1]) mni_names = mni_csm(3).coord_names in_cs = CS('ikjlmn') cmap = AT(in_cs, CS(mni_names + tuple('tuv')), aff) hdr = nipy2nifti(Image(data, cmap)).get_header() assert_equal(hdr.get_zooms(), (3, 4, 5, 6, 7, 8)) cmap = AT(in_cs, CS(mni_names + tuple('quv')), aff) hdr = nipy2nifti(Image(data, cmap)).get_header() assert_equal(hdr.get_zooms(), (3, 4, 5, 0, 6, 7, 8)) def test_xyzt_units(): # Whether xyzt_unit field gets set correctly fimg_orig = copy_of(funcfile) # Put time in output, input and both data = fimg_orig.get_data() hdr = fimg_orig.metadata['header'] aff = fimg_orig.coordmap.affine out_names = fimg_orig.reference.coord_names # Time in input only cmap_t_in = AT('ijkt', out_names[:3] + ('q',), aff) img_t_in = Image(data, cmap_t_in, {'header': hdr.copy()}) # Time in output only cmap_t_out = AT('ijkl', out_names[:3] + ('t',), aff) img_t_out = Image(data, cmap_t_out, {'header': hdr.copy()}) # Time in both cmap_t_b = AT('ijkt', out_names[:3] + ('t',), aff) img_t_b = Image(data, cmap_t_b, {'header': hdr.copy()}) # In neither cmap_t_no = AT('ijkl', out_names[:3] + ('q',), aff) img_t_no = Image(data, 
cmap_t_no, {'header': hdr.copy()}) # Check the default assert_equal(hdr.get_xyzt_units(), ('mm', 'sec')) # That default survives nifti conversion for img in (img_t_in, img_t_out, img_t_b): ni_img = nipy2nifti(img) assert_equal(ni_img.get_header().get_xyzt_units(), ('mm', 'sec')) # Now with no time for img in (img_t_no, img_t_b[...,0]): ni_img = nipy2nifti(img) assert_equal(ni_img.get_header().get_xyzt_units(), ('mm', 'unknown')) # Change to other time-like for units, name0, name1 in (('sec', 't', 'time'), ('hz', 'hz', 'frequency-hz'), ('ppm', 'ppm', 'concentration-ppm'), ('rads', 'rads', 'radians/s')): for name in (name0, name1): new_img = img_t_out.renamed_reference(t=name) ni_img = nipy2nifti(new_img) assert_equal(ni_img.get_header().get_xyzt_units(), ('mm', units)) new_img = img_t_in.renamed_axes(t=name) ni_img = nipy2nifti(new_img) assert_equal(ni_img.get_header().get_xyzt_units(), ('mm', units)) new_img = img_t_b.renamed_axes(t=name).renamed_reference(t=name) ni_img = nipy2nifti(new_img) assert_equal(ni_img.get_header().get_xyzt_units(), ('mm', units)) def test_time_axes_4th(): # Check time-like axes rolled to be 4th, and pixdims match data = np.random.normal(size=(2, 3, 4, 5, 6, 7)) aff = np.diag([2., 3, 4, 5, 6, 7, 1]) xyz_names = talairach_csm(3).coord_names in_cs = CS('ijklmn') for time_like in ('t', 'hz', 'ppm', 'rads'): cmap = AT(in_cs, CS(xyz_names + (time_like, 'q', 'r')), aff) img = Image(data, cmap) # Time-like in correct position ni_img = nipy2nifti(img) assert_array_equal(ni_img.get_data(), data) assert_array_equal(ni_img.get_header().get_zooms(), (2, 3, 4, 5, 6, 7)) # Time-like needs reordering cmap = AT(in_cs, CS(xyz_names + ('q', time_like, 'r')), aff) ni_img = nipy2nifti(Image(data, cmap)) assert_array_equal(ni_img.get_data(), np.rollaxis(data, 4, 3)) assert_array_equal(ni_img.get_header().get_zooms(), (2, 3, 4, 6, 5, 7)) # And again cmap = AT(in_cs, CS(xyz_names + ('q', 'r', time_like)), aff) ni_img = nipy2nifti(Image(data, cmap)) assert_array_equal(ni_img.get_data(), np.rollaxis(data, 5, 3)) assert_array_equal(ni_img.get_header().get_zooms(), (2, 3, 4, 7, 5, 6)) def test_save_toffset(): # Check toffset only gets set for time shape = (2, 3, 4, 5, 6, 7) data = np.random.normal(size = shape) aff = from_matvec(np.diag([2., 3, 4, 5, 6, 7]), [11, 12, 13, 14, 15, 16]) xyz_names = talairach_csm(3).coord_names in_cs = CS('ijklmn') for t_name in 't', 'time': cmap = AT(in_cs, CS(xyz_names + (t_name, 'q', 'r')), aff) ni_img = nipy2nifti(Image(data, cmap)) assert_equal(ni_img.get_header()['toffset'], 14) for time_like in ('hz', 'ppm', 'rads'): cmap = AT(in_cs, CS(xyz_names + (time_like, 'q', 'r')), aff) ni_img = nipy2nifti(Image(data, cmap)) assert_equal(ni_img.get_header()['toffset'], 0) # Check that non-matching time causes a nifti error when toffset !=0 shape_shifted = (2, 3, 4, 6, 5, 7) for t_name in 't', 'time': # No toffset, this is OK cmap = AT(CS(('i', 'j', 'k', 'u', t_name, 'v')), CS(xyz_names + ('u', t_name, 'v')), np.diag([3, 4, 5, 6, 0, 7, 1])) assert_equal(nipy2nifti(Image(data, cmap)).shape, shape_shifted) # toffset with 0 on TR (time) diagonal aff_z1 = from_matvec(np.diag([2., 3, 4, 5, 0, 7]), [11, 12, 13, 14, 15, 16]) cmap = AT(CS(('i', 'j', 'k', 'u', t_name, 'v')), CS(xyz_names + ('u', t_name, 'v')), aff_z1) # Default is to fix the zero assert_equal(nipy2nifti(Image(data, cmap)).shape, shape_shifted) assert_equal(nipy2nifti(Image(data, cmap), fix0=True).shape, shape_shifted) # Unless fix0 is False assert_raises(NiftiError, nipy2nifti, Image(data, cmap), 
fix0=False) # Fix doesn't work if there is more than one zero row and column aff_z2 = from_matvec(np.diag([2., 3, 4, 0, 0, 7]), [11, 12, 13, 14, 15, 16]) cmap = AT(CS(('i', 'j', 'k', 'u', t_name, 'v')), CS(xyz_names + ('u', t_name, 'v')), aff_z2) assert_raises(NiftiError, nipy2nifti, Image(data, cmap), fix0=True) # zeros on the diagonal are not a problem for non-time, with toffset, # because we don't need to set the 'time' part of the translation vector, # and therefore we don't need to know which *output axis* is time-like for t_name in 'hz', 'ppm', 'rads': cmap = AT(CS(('i', 'j', 'k', 'u', t_name, 'v')), CS(xyz_names + ('u', t_name, 'v')), aff_z1) assert_equal(nipy2nifti(Image(data, cmap), fix0=False).shape, shape_shifted) cmap = AT(CS(('i', 'j', 'k', 'u', t_name, 'v')), CS(xyz_names + ('u', t_name, 'v')), aff_z2) assert_equal(nipy2nifti(Image(data, cmap), fix0=False).shape, shape_shifted) def test_too_many_dims(): data0 = np.zeros(range(2, 9)) xyz_names = talairach_csm(3).coord_names cmap = AT(CS('ijktuvw'), CS(xyz_names + tuple('tuvw')), np.eye(8)) assert_equal(nipy2nifti(Image(data0, cmap)).shape, tuple(range(2, 9))) # Too many dimensions data1 = np.zeros(range(2, 10)) cmap = AT(CS('ijktuvwq'), CS(xyz_names + tuple('tuvwq')), np.eye(9)) assert_raises(NiftiError, nipy2nifti, Image(data1, cmap)) # No time adds a dimension cmap = AT(CS('ijkpuvw'), CS(xyz_names + tuple('puvw')), np.eye(8)) assert_raises(NiftiError, nipy2nifti, Image(data0, cmap)) def test_no_time(): # Check that no time axis results in extra length 1 dimension data = np.random.normal(size=(2, 3, 4, 5, 6, 7)) aff = np.diag([2., 3, 4, 5, 6, 7, 1]) xyz_names = talairach_csm(3).coord_names in_cs = CS('ijklmn') # No change in shape if there's a time-like axis for time_like in ('t', 'hz', 'ppm', 'rads'): cmap = AT(in_cs, CS(xyz_names + (time_like, 'q', 'r')), aff) ni_img = nipy2nifti(Image(data, cmap)) assert_array_equal(ni_img.get_data(), data) # But there is if no time-like for no_time in ('random', 'words', 'I', 'thought', 'of'): cmap = AT(in_cs, CS(xyz_names + (no_time, 'q', 'r')), aff) ni_img = nipy2nifti(Image(data, cmap)) assert_array_equal(ni_img.get_data(), data[:, :, :, None, :, :]) def test_save_spaces(): # Test that intended output spaces get set into nifti data = np.random.normal(size=(2, 3, 4)) aff = np.diag([2., 3, 4, 1]) in_cs = CS('ijk') for label, csm in (('scanner', scanner_csm), ('aligned', aligned_csm), ('talairach', talairach_csm), ('mni', mni_csm)): img = Image(data, AT(in_cs, csm(3), aff)) ni_img = nipy2nifti(img) assert_equal(ni_img.get_header().get_value_label('sform_code'), label) def test_save_dtype(): # Test we can specify the dtype on conversion data = np.random.normal(size=(2, 3, 4)) cmap = vox2mni(np.diag([2., 3, 4, 1])) for dt_code in ('i1', 'u1', 'i2', 'u2', 'i4', 'u4', 'i8', 'u8', 'f4', 'f8', 'c8', 'c16'): dt = np.dtype(dt_code) img = Image(data.astype(dt_code), cmap) ni_img = nipy2nifti(img, data_dtype=dt_code) assert_equal(ni_img.get_header().get_data_dtype(), dt) ni_img = nipy2nifti(img, data_dtype=dt) assert_equal(ni_img.get_header().get_data_dtype(), dt) # None results in trying to get the code from the input header, then from the # data. 
# From data, when there's nothing in the header img = Image(data.astype(np.int16), cmap) ni_img = nipy2nifti(img, data_dtype=None) assert_equal(ni_img.get_header().get_data_dtype(), np.dtype(np.int16)) # From the header hdr = nib.Nifti1Header() hdr.set_data_dtype(np.int32) img = Image(data.astype(np.int16), cmap, metadata={'header': hdr}) ni_img = nipy2nifti(img, data_dtype=None) assert_equal(ni_img.get_header().get_data_dtype(), np.dtype(np.int32)) # Bad dtype assert_raises(TypeError, nipy2nifti, img, data_dtype='foo') # Fancy dtype data = np.zeros((2, 3, 4), dtype=[('f0', 'i2'), ('f1', 'f4')]) img = Image(data, cmap) assert_raises(HeaderDataError, nipy2nifti, img, data_dtype=None) def test_basic_load(): # Just basic load data = np.random.normal(size=(2, 3, 4, 5)) aff = np.diag([2., 3, 4, 1]) ni_img = nib.Nifti1Image(data, aff) img = nifti2nipy(ni_img) assert_array_equal(img.get_data(), data) def test_expand_to_3d(): # Test 1D and 2D niftis # 1D and 2D with full sform or qform affines raise a NiftiError, because we # can't be sure which axes the affine refers to. Should the image have 1 # length axes prepended? Or appended? xyz_aff = np.diag([2, 3, 4, 1]) for size in (10,), (10, 2): data = np.random.normal(size=size) ni_img = nib.Nifti1Image(data, xyz_aff) # Default is aligned assert_raises(NiftiError, nifti2nipy, ni_img) hdr = ni_img.get_header() # The pixdim affine for label in 'scanner', 'aligned', 'talairach', 'mni': hdr.set_sform(xyz_aff, label) assert_raises(NiftiError, nifti2nipy, ni_img) hdr.set_sform(None) assert_raises(NiftiError, nifti2nipy, ni_img) hdr.set_sform(xyz_aff, label) assert_raises(NiftiError, nifti2nipy, ni_img) hdr.set_qform(None) def test_load_cmaps(): data = np.random.normal(size=range(7)) xyz_aff = np.diag([2, 3, 4, 1]) # Default with time-like ni_img = nib.Nifti1Image(data, xyz_aff) img = nifti2nipy(ni_img) exp_cmap = AT(CS('ijktuvw', name='voxels'), aligned_csm(7), np.diag([2, 3, 4, 1, 1, 1, 1, 1])) assert_equal(img.coordmap, exp_cmap) # xyzt_units sets time axis name hdr = ni_img.get_header() xyz_names = aligned_csm(3).coord_names full_aff = exp_cmap.affine reduced_data = data[:, :, :, 1:2, ...] 
for t_like, units, scaling in ( ('t', 'sec', 1), ('t', 'msec', 1/1000.), ('t', 'usec', 1/1000000.), ('hz', 'hz', 1), ('ppm', 'ppm', 1), ('rads', 'rads', 1)): hdr.set_xyzt_units('mm', units) img = nifti2nipy(ni_img) in_cs = CS(('i', 'j', 'k', t_like, 'u', 'v', 'w'), name='voxels') out_cs = CS(xyz_names + (t_like, 'u', 'v', 'w'), name='aligned') if scaling == 1: exp_aff = full_aff else: diag = np.ones((8,)) diag[3] = scaling exp_aff = np.dot(np.diag(diag), full_aff) exp_cmap = AT(in_cs, out_cs, exp_aff) assert_equal(img.coordmap, exp_cmap) assert_array_equal(img.get_data(), data) # Even if the image axis length is 1, we keep out time dimension, if # there is specific scaling implying time-like ni_img_t = nib.Nifti1Image(reduced_data, xyz_aff, hdr) img = nifti2nipy(ni_img_t) assert_equal(img.coordmap, exp_cmap) assert_array_equal(img.get_data(), reduced_data) def test_load_no_time(): # Without setting anything else, length 1 at position 3 makes time go away ns_dims = (5, 6, 7) xyz_aff = np.diag([2, 3, 4, 1]) xyz_names = aligned_csm(3).coord_names[:3] in_names = tuple('ijkuvw') out_names = xyz_names + tuple('uvw') for n_ns in 1, 2, 3: ndim = 3 + n_ns data = np.random.normal(size=(2, 3, 4, 1) + ns_dims[:n_ns]) ni_img_no_t = nib.Nifti1Image(data, xyz_aff) cmap_no_t = AT(CS(in_names[:ndim], name='voxels'), CS(out_names[:ndim], name='aligned'), np.diag([2, 3, 4] + [1] * n_ns + [1])) img = nifti2nipy(ni_img_no_t) assert_equal(img.coordmap, cmap_no_t) # We add do time if 4th axis of length 1 is the last axis data41 = np.zeros((3, 4, 5, 1)) ni_img_41 = nib.Nifti1Image(data41, xyz_aff) cmap_41 = AT(CS('ijkt', name='voxels'), CS(xyz_names + ('t',), name='aligned'), np.diag([2, 3, 4, 1, 1])) img = nifti2nipy(ni_img_41) assert_equal(img.coordmap, cmap_41) def test_load_toffset(): # Test toffset gets set into affine only for time data = np.random.normal(size=range(5)) xyz_aff = np.diag([2, 3, 4, 1]) # Default with time-like and no toffset ni_img = nib.Nifti1Image(data, xyz_aff) hdr = ni_img.get_header() img = nifti2nipy(ni_img) exp_aff = np.diag([2., 3, 4, 1, 1, 1]) in_cs = CS('ijktu', name='voxels') xyz_names = aligned_csm(3).coord_names out_cs = CS(xyz_names + tuple('tu'), name='aligned') assert_equal(hdr['toffset'], 0) assert_equal(img.coordmap, AT(in_cs, out_cs, exp_aff)) # Set toffset and expect in affine hdr['toffset'] = 42 exp_aff[3, -1] = 42 assert_equal(nifti2nipy(ni_img).coordmap, AT(in_cs, out_cs, exp_aff)) # Make time axis into hz and expect not to see toffset hdr.set_xyzt_units('mm', 'hz') in_cs_hz = CS(('i', 'j', 'k', 'hz', 'u'), name='voxels') out_cs_hz = CS(xyz_names + ('hz', 'u'), name='aligned') exp_aff[3, -1] = 0 assert_equal(nifti2nipy(ni_img).coordmap, AT(in_cs_hz, out_cs_hz, exp_aff)) def test_load_spaces(): # Test spaces get read correctly shape = np.array((6, 5, 4, 3, 2)) zooms = np.array((2, 3, 4, 5, 6)) data = np.random.normal(size=shape) # Default with no affine in header, or in image ni_img = nib.Nifti1Image(data, None) hdr = ni_img.get_header() hdr.set_zooms(zooms) # Expected affine is from the pixdims and the center of the image. Default # is also flipped X. offsets = (1 - shape[:3]) / 2. 
* zooms[:3] * (-1, 1, 1) exp_aff = from_matvec(np.diag([-2, 3, 4, 5, 6]), list(offsets) + [0, 0]) in_cs = CS('ijktu', name='voxels') exp_cmap = AT(in_cs, unknown_csm(5), exp_aff) assert_equal(nifti2nipy(ni_img).coordmap, exp_cmap) an_aff = from_matvec(np.diag([1.1, 2.2, 3.3]), [10, 11, 12]) exp_aff = from_matvec(np.diag([1.1, 2.2, 3.3, 5, 6]), [10, 11, 12, 0, 0]) for label, csm in (('scanner', scanner_csm), ('aligned', aligned_csm), ('talairach', talairach_csm), ('mni', mni_csm)): hdr.set_sform(an_aff, label) assert_equal(nifti2nipy(ni_img).coordmap, AT(in_cs, csm(5), exp_aff)) def test_mm_scaling(): # Test the micron and meter scale the affine right data = np.random.normal(size=range(4)) xyz_aff = from_matvec(np.diag([2, 3, 4]), [11, 12, 13]) exp_aff = from_matvec(np.diag([2, 3, 4, 1]), [11, 12, 13, 0]) in_cs = CS('ijkt', name='voxels') out_cs = aligned_csm(4) # No space scaling ni_img = nib.Nifti1Image(data, xyz_aff) hdr = ni_img.get_header() assert_equal(hdr.get_xyzt_units(), ('unknown', 'unknown')) assert_equal(nifti2nipy(ni_img).coordmap, AT(in_cs, out_cs, exp_aff)) # mm is assumed hdr.set_xyzt_units('mm') assert_equal(nifti2nipy(ni_img).coordmap, AT(in_cs, out_cs, exp_aff)) # microns ! hdr.set_xyzt_units('micron') scaler = np.diag([1 / 1000., 1 / 1000., 1 / 1000., 1, 1]) assert_equal(nifti2nipy(ni_img).coordmap, AT(in_cs, out_cs, np.dot(scaler, exp_aff))) # mm again ! This test implicitly asserts that the nifti image affine is # not being changed by the conversion routine, otherwise we'd pick up the # microns scaling above. hdr.set_xyzt_units('mm') assert_equal(nifti2nipy(ni_img).coordmap, AT(in_cs, out_cs, exp_aff)) # meters ! hdr.set_xyzt_units('meter') scaler = np.diag([1000., 1000., 1000., 1, 1]) assert_equal(nifti2nipy(ni_img).coordmap, AT(in_cs, out_cs, np.dot(scaler, exp_aff))) def test_load_dim_info(): # Test freq, phase, slice get set correctly on load data = np.random.normal(size=range(3)) xyz_aff = from_matvec(np.diag([2, 3, 4]), [11, 12, 13]) in_cs = CS('ijk', name='voxels') out_cs = aligned_csm(3) # Just confirm that the default leads to no axis renaming ni_img = nib.Nifti1Image(data, xyz_aff) hdr = ni_img.get_header() assert_equal(hdr.get_dim_info(), (None, None, None)) assert_equal(nifti2nipy(ni_img).coordmap, AT(in_cs, out_cs, xyz_aff)) # But now... 
hdr.set_dim_info(freq=1) assert_equal(nifti2nipy(ni_img).coordmap, AT(CS(('i', 'freq', 'k'), "voxels"), out_cs, xyz_aff)) hdr.set_dim_info(freq=2) assert_equal(nifti2nipy(ni_img).coordmap, AT(CS(('i', 'j', 'freq'), "voxels"), out_cs, xyz_aff)) hdr.set_dim_info(phase=1) assert_equal(nifti2nipy(ni_img).coordmap, AT(CS(('i', 'phase', 'k'), "voxels"), out_cs, xyz_aff)) hdr.set_dim_info(slice=0) assert_equal(nifti2nipy(ni_img).coordmap, AT(CS(('slice', 'j', 'k'), "voxels"), out_cs, xyz_aff)) hdr.set_dim_info(freq=1, phase=0, slice=2) assert_equal(nifti2nipy(ni_img).coordmap, AT(CS(('phase', 'freq', 'slice'), "voxels"), out_cs, xyz_aff)) nipy-0.3.0/nipy/io/tests/test_save.py000066400000000000000000000134541210344137400175760ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from __future__ import with_statement import numpy as np from nibabel.affines import from_matvec from nipy.io.api import load_image, save_image from nipy.core import api from nipy.core.reference.coordinate_map import ( AffineTransform as AT) from nipy.core.reference.coordinate_system import ( CoordinateSystem as CS) from nipy.core.reference.spaces import mni_csm from nibabel.tmpdirs import InTemporaryDirectory from nose.tools import (assert_true, assert_false, assert_equal, assert_not_equal) from numpy.testing import assert_array_almost_equal from nipy.testing import funcfile TMP_FNAME = 'afile.nii' def test_save1(): # A test to ensure that when a file is saved, the affine and the # data agree. This image comes from a NIFTI file img = load_image(funcfile) with InTemporaryDirectory(): save_image(img, TMP_FNAME) img2 = load_image(TMP_FNAME) assert_array_almost_equal(img.affine, img2.affine) assert_equal(img.shape, img2.shape) assert_array_almost_equal(img2.get_data(), img.get_data()) del img2 def test_save2(): # A test to ensure that when a file is saved, the affine and the # data agree. This image comes from a NIFTI file shape = (13,5,7,3) step = np.array([3.45,2.3,4.5,6.93]) cmap = api.AffineTransform.from_start_step('ijkt', 'xyzt', [1,3,5,0], step) data = np.random.standard_normal(shape) img = api.Image(data, cmap) with InTemporaryDirectory(): save_image(img, TMP_FNAME) img2 = load_image(TMP_FNAME) assert_array_almost_equal(img.affine, img2.affine) assert_equal(img.shape, img2.shape) assert_array_almost_equal(img2.get_data(), img.get_data()) del img2 def test_save2b(): # A test to ensure that when a file is saved, the affine and the # data agree. This image comes from a NIFTI file. This example has a # non-diagonal affine matrix for the spatial part, but is 'diagonal' for the # space part. # # make a 5x5 transformation (for 4d image) step = np.array([3.45, 2.3, 4.5, 6.9]) A = np.random.standard_normal((3,3)) B = np.diag(list(step)+[1]) B[:3, :3] = A shape = (13,5,7,3) cmap = api.vox2mni(B) data = np.random.standard_normal(shape) img = api.Image(data, cmap) with InTemporaryDirectory(): save_image(img, TMP_FNAME) img2 = load_image(TMP_FNAME) assert_array_almost_equal(img.affine, img2.affine) assert_equal(img.shape, img2.shape) assert_array_almost_equal(img2.get_data(), img.get_data()) del img2 def test_save3(): # A test to ensure that when a file is saved, the affine # and the data agree. 
In this case, things don't agree: # i) the pixdim is off # ii) makes the affine off step = np.array([3.45,2.3,4.5,6.9]) shape = (13,5,7,3) mni_xyz = mni_csm(3).coord_names cmap = AT(CS('jkli'), CS(('t',) + mni_xyz[::-1]), from_matvec(np.diag([0,3,5,1]), step)) data = np.random.standard_normal(shape) img = api.Image(data, cmap) # with InTemporaryDirectory(): with InTemporaryDirectory(): save_image(img, TMP_FNAME) tmp = load_image(TMP_FNAME) # Detach image from file so we can delete it data = tmp.get_data().copy() img2 = api.Image(data, tmp.coordmap, tmp.metadata) del tmp assert_equal(tuple([img.shape[l] for l in [3,2,1,0]]), img2.shape) a = np.transpose(img.get_data(), [3,2,1,0]) assert_false(np.allclose(img.affine, img2.affine)) assert_true(np.allclose(a, img2.get_data())) def test_save4(): # Same as test_save3 except we have reordered the 'ijk' input axes. shape = (13,5,7,3) step = np.array([3.45,2.3,4.5,6.9]) # When the input coords are in the 'ljki' order, the affines get # rearranged. Note that the 'start' below, must be 0 for # non-spatial dimensions, because we have no way to store them in # most cases. For example, a 'start' of [1,5,3,1] would be lost on # reload mni_xyz = mni_csm(3).coord_names cmap = AT(CS('tkji'), CS((('t',) + mni_xyz[::-1])), from_matvec(np.diag([2., 3, 5, 1]), step)) data = np.random.standard_normal(shape) img = api.Image(data, cmap) with InTemporaryDirectory(): save_image(img, TMP_FNAME) tmp = load_image(TMP_FNAME) data = tmp.get_data().copy() # Detach image from file so we can delete it img2 = api.Image(data, tmp.coordmap, tmp.metadata) del tmp P = np.array([[0,0,0,1,0], [0,0,1,0,0], [0,1,0,0,0], [1,0,0,0,0], [0,0,0,0,1]]) res = np.dot(P, np.dot(img.affine, P.T)) # the step part of the affine should be set correctly assert_array_almost_equal(res[:4,:4], img2.affine[:4,:4]) # start in the spatial dimensions should be set correctly assert_array_almost_equal(res[:3,-1], img2.affine[:3,-1]) # start in the time dimension should be 3.45 as in img, because NIFTI stores # the time offset in hdr[``toffset``] assert_not_equal(res[3,-1], img2.affine[3,-1]) assert_equal(res[3,-1], 3.45) # shapes should be reversed because img has coordinates reversed assert_equal(img.shape[::-1], img2.shape) # data should be transposed because coordinates are reversed assert_array_almost_equal( np.transpose(img2.get_data(),[3,2,1,0]), img.get_data()) # coordinate names should be reversed as well assert_equal(img2.coordmap.function_domain.coord_names, img.coordmap.function_domain.coord_names[::-1]) assert_equal(img2.coordmap.function_domain.coord_names, ('i', 'j', 'k', 't')) nipy-0.3.0/nipy/labs/000077500000000000000000000000001210344137400143705ustar00rootroot00000000000000nipy-0.3.0/nipy/labs/README.txt000066400000000000000000000016561210344137400160760ustar00rootroot00000000000000================================== FFF2 - "neurospin" porting notes ================================== This module was ported from the old ``fff2`` module as ``nipy.neurospin``. Keep here notes on the porting work, including tips on how to update existing codes that used ``fff2`` to work with the new system. Replacements: the following are the most common text replacements that typically will update an existing ``fff2`` code to use the new module: * import fff2. -> import nipy.neurospin. 
* import fff2 -> import nipy.neurospin as fff2 * from fff2 -> from nipy.neurospin * fff2 -> nipy.neurospin ==================================== nipy.neuropsin module organization ==================================== In the directory root are modules (*.py files) that expose high-level APIs and may know about nipy classes. Each of these modules calls one or several lower-level subpackages corresponding to the various subdirectories. nipy-0.3.0/nipy/labs/__init__.py000066400000000000000000000016211210344137400165010ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """\ Neurospin functions and classes for nipy. (c) Copyright CEA-INRIA-INSERM, 2003-2009. Distributed under the terms of the BSD License. http://www.lnao.fr functions for fMRI This module contains several objects and functions for fMRI processing. """ from nipy.testing import Tester # No subpackage should be imported here to avoid run-time errors # related to missing dependencies or binary incompatibilities test = Tester().test bench = Tester().bench # Import here only files that don't draw in compiled code: that way the # basic functionality is still usable even if the compiled # code is messed up (32/64 bit issues, or binary incompatibilities) from .mask import compute_mask_files, compute_mask_sessions, \ series_from_mask from .datasets import as_volume_img, save nipy-0.3.0/nipy/labs/bindings/000077500000000000000000000000001210344137400161655ustar00rootroot00000000000000nipy-0.3.0/nipy/labs/bindings/__init__.py000066400000000000000000000015551210344137400203040ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from .linalg import (blas_dnrm2, blas_dasum, blas_ddot, blas_daxpy, blas_dscal, blas_dgemm, blas_dsymm, blas_dtrmm, blas_dtrsm, blas_dsyrk, blas_dsyr2k, matrix_add, matrix_get, matrix_transpose, vector_get, vector_set, vector_add, vector_sub, vector_mul, vector_div, vector_sum) from .array import (array_get, array_get_block, array_add, array_sub, array_mul, array_div) from .wrapper import (c_types, fff_type, npy_type, copy_vector, pass_matrix, pass_vector, pass_array, pass_vector_via_iterator, sum_via_iterators, copy_via_iterators) from nipy.testing import Tester test = Tester().test bench = Tester().bench nipy-0.3.0/nipy/labs/bindings/array.c000066400000000000000000007676041210344137400174730ustar00rootroot00000000000000/* Generated by Cython 0.17.4 on Sat Jan 12 17:27:32 2013 */ #define PY_SSIZE_T_CLEAN #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02040000 #error Cython requires Python 2.4+. 
#else #include /* For offsetof */ #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #endif #if PY_VERSION_HEX < 0x02050000 typedef int Py_ssize_t; #define PY_SSIZE_T_MAX INT_MAX #define PY_SSIZE_T_MIN INT_MIN #define PY_FORMAT_SIZE_T "" #define CYTHON_FORMAT_SSIZE_T "" #define PyInt_FromSsize_t(z) PyInt_FromLong(z) #define PyInt_AsSsize_t(o) __Pyx_PyInt_AsInt(o) #define PyNumber_Index(o) ((PyNumber_Check(o) && !PyFloat_Check(o)) ? PyNumber_Int(o) : \ (PyErr_Format(PyExc_TypeError, \ "expected index value, got %.200s", Py_TYPE(o)->tp_name), \ (PyObject*)0)) #define __Pyx_PyIndex_Check(o) (PyNumber_Check(o) && !PyFloat_Check(o) && \ !PyComplex_Check(o)) #define PyIndex_Check __Pyx_PyIndex_Check #define PyErr_WarnEx(category, message, stacklevel) PyErr_Warn(category, message) #define __PYX_BUILD_PY_SSIZE_T "i" #else #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #define __Pyx_PyIndex_Check PyIndex_Check #endif #if PY_VERSION_HEX < 0x02060000 #define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt) #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type) #define Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size) #define PyVarObject_HEAD_INIT(type, size) \ PyObject_HEAD_INIT(type) size, #define PyType_Modified(t) typedef struct { void *buf; PyObject *obj; Py_ssize_t len; Py_ssize_t itemsize; int readonly; int ndim; char *format; Py_ssize_t *shape; Py_ssize_t *strides; Py_ssize_t *suboffsets; void *internal; } Py_buffer; #define PyBUF_SIMPLE 0 #define PyBUF_WRITABLE 0x0001 #define PyBUF_FORMAT 0x0004 #define PyBUF_ND 0x0008 #define PyBUF_STRIDES (0x0010 | PyBUF_ND) #define PyBUF_C_CONTIGUOUS (0x0020 | PyBUF_STRIDES) #define PyBUF_F_CONTIGUOUS (0x0040 | PyBUF_STRIDES) #define PyBUF_ANY_CONTIGUOUS (0x0080 | PyBUF_STRIDES) #define PyBUF_INDIRECT (0x0100 | PyBUF_STRIDES) #define PyBUF_RECORDS (PyBUF_STRIDES | PyBUF_FORMAT | PyBUF_WRITABLE) #define PyBUF_FULL (PyBUF_INDIRECT | PyBUF_FORMAT | PyBUF_WRITABLE) typedef int (*getbufferproc)(PyObject *, Py_buffer *, int); typedef void (*releasebufferproc)(PyObject *, Py_buffer *); #endif #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \ PyCode_New(a, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #endif #if PY_MAJOR_VERSION < 3 && PY_MINOR_VERSION < 6 #define PyUnicode_FromString(s) PyUnicode_Decode(s, strlen(s), "UTF-8", "strict") #endif #if PY_MAJOR_VERSION >= 3 #define Py_TPFLAGS_CHECKTYPES 0 #define Py_TPFLAGS_HAVE_INDEX 0 #endif #if (PY_VERSION_HEX < 0x02060000) || (PY_MAJOR_VERSION >= 3) #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 
#define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ? \ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #else #define CYTHON_PEP393_ENABLED 0 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_READ(k, d, i) ((k=k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #endif #if PY_VERSION_HEX < 0x02060000 #define PyBytesObject PyStringObject #define PyBytes_Type PyString_Type #define PyBytes_Check PyString_Check #define PyBytes_CheckExact PyString_CheckExact #define PyBytes_FromString PyString_FromString #define PyBytes_FromStringAndSize PyString_FromStringAndSize #define PyBytes_FromFormat PyString_FromFormat #define PyBytes_DecodeEscape PyString_DecodeEscape #define PyBytes_AsString PyString_AsString #define PyBytes_AsStringAndSize PyString_AsStringAndSize #define PyBytes_Size PyString_Size #define PyBytes_AS_STRING PyString_AS_STRING #define PyBytes_GET_SIZE PyString_GET_SIZE #define PyBytes_Repr PyString_Repr #define PyBytes_Concat PyString_Concat #define PyBytes_ConcatAndDel PyString_ConcatAndDel #endif #if PY_VERSION_HEX < 0x02060000 #define PySet_Check(obj) PyObject_TypeCheck(obj, &PySet_Type) #define PyFrozenSet_Check(obj) PyObject_TypeCheck(obj, &PyFrozenSet_Type) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_VERSION_HEX < 0x03020000 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if (PY_MAJOR_VERSION < 3) || (PY_VERSION_HEX >= 0x03010300) #define __Pyx_PySequence_GetSlice(obj, a, b) PySequence_GetSlice(obj, a, b) #define __Pyx_PySequence_SetSlice(obj, a, b, value) PySequence_SetSlice(obj, a, b, value) #define __Pyx_PySequence_DelSlice(obj, a, b) PySequence_DelSlice(obj, a, b) #else #define __Pyx_PySequence_GetSlice(obj, a, b) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), (PyObject*)0) : \ (likely((obj)->ob_type->tp_as_mapping) ? 
(PySequence_GetSlice(obj, a, b)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object is unsliceable", (obj)->ob_type->tp_name), (PyObject*)0))) #define __Pyx_PySequence_SetSlice(obj, a, b, value) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_SetSlice(obj, a, b, value)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice assignment", (obj)->ob_type->tp_name), -1))) #define __Pyx_PySequence_DelSlice(obj, a, b) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_DelSlice(obj, a, b)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice deletion", (obj)->ob_type->tp_name), -1))) #endif #if PY_MAJOR_VERSION >= 3 #define PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func)) #endif #if PY_VERSION_HEX < 0x02050000 #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),((char *)(n))) #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),((char *)(n)),(a)) #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),((char *)(n))) #else #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),(n)) #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),(n),(a)) #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),(n)) #endif #if PY_VERSION_HEX < 0x02050000 #define __Pyx_NAMESTR(n) ((char *)(n)) #define __Pyx_DOCSTR(n) ((char *)(n)) #else #define __Pyx_NAMESTR(n) (n) #define __Pyx_DOCSTR(n) (n) #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include #define __PYX_HAVE__nipy__labs__bindings__array #define __PYX_HAVE_API__nipy__labs__bindings__array #include "stdio.h" #include "stdlib.h" #include "numpy/arrayobject.h" #include "numpy/ufuncobject.h" #include "fff_base.h" #include "fff_vector.h" #include "fff_matrix.h" #include "fff_array.h" #include "fffpy.h" #ifdef _OPENMP #include #endif /* _OPENMP */ #ifdef PYREX_WITHOUT_ASSERTIONS #define CYTHON_WITHOUT_ASSERTIONS #endif /* inline attribute */ #ifndef CYTHON_INLINE #if defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif /* unused attribute */ #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif typedef struct {PyObject **p; char *s; const long n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; /*proto*/ /* Type Conversion Predeclarations */ #define __Pyx_PyBytes_FromUString(s) 
PyBytes_FromString((char*)s) #define __Pyx_PyBytes_AsUString(s) ((unsigned char*) PyBytes_AsString(s)) #define __Pyx_Owned_Py_None(b) (Py_INCREF(Py_None), Py_None) #define __Pyx_PyBool_FromLong(b) ((b) ? (Py_INCREF(Py_True), Py_True) : (Py_INCREF(Py_False), Py_False)) static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x); static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject*); #if CYTHON_COMPILING_IN_CPYTHON #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #ifdef __GNUC__ /* Test for GCC > 2.95 */ #if __GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* __GNUC__ > 2 ... */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ > 2 ... */ #else /* __GNUC__ */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static PyObject *__pyx_m; static PyObject *__pyx_b; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= __FILE__; static const char *__pyx_filename; #if !defined(CYTHON_CCOMPLEX) #if defined(__cplusplus) #define CYTHON_CCOMPLEX 1 #elif defined(_Complex_I) #define CYTHON_CCOMPLEX 1 #else #define CYTHON_CCOMPLEX 0 #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus #include #else #include #endif #endif #if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__) #undef _Complex_I #define _Complex_I 1.0fj #endif static const char *__pyx_f[] = { "array.pyx", "numpy.pxd", "type.pxd", }; /* "numpy.pxd":723 * # in Cython to enable them only on the right systems. 
* * ctypedef npy_int8 int8_t # <<<<<<<<<<<<<< * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t */ typedef npy_int8 __pyx_t_5numpy_int8_t; /* "numpy.pxd":724 * * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t # <<<<<<<<<<<<<< * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t */ typedef npy_int16 __pyx_t_5numpy_int16_t; /* "numpy.pxd":725 * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t # <<<<<<<<<<<<<< * ctypedef npy_int64 int64_t * #ctypedef npy_int96 int96_t */ typedef npy_int32 __pyx_t_5numpy_int32_t; /* "numpy.pxd":726 * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t # <<<<<<<<<<<<<< * #ctypedef npy_int96 int96_t * #ctypedef npy_int128 int128_t */ typedef npy_int64 __pyx_t_5numpy_int64_t; /* "numpy.pxd":730 * #ctypedef npy_int128 int128_t * * ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<< * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t */ typedef npy_uint8 __pyx_t_5numpy_uint8_t; /* "numpy.pxd":731 * * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<< * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t */ typedef npy_uint16 __pyx_t_5numpy_uint16_t; /* "numpy.pxd":732 * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<< * ctypedef npy_uint64 uint64_t * #ctypedef npy_uint96 uint96_t */ typedef npy_uint32 __pyx_t_5numpy_uint32_t; /* "numpy.pxd":733 * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<< * #ctypedef npy_uint96 uint96_t * #ctypedef npy_uint128 uint128_t */ typedef npy_uint64 __pyx_t_5numpy_uint64_t; /* "numpy.pxd":737 * #ctypedef npy_uint128 uint128_t * * ctypedef npy_float32 float32_t # <<<<<<<<<<<<<< * ctypedef npy_float64 float64_t * #ctypedef npy_float80 float80_t */ typedef npy_float32 __pyx_t_5numpy_float32_t; /* "numpy.pxd":738 * * ctypedef npy_float32 float32_t * ctypedef npy_float64 float64_t # <<<<<<<<<<<<<< * #ctypedef npy_float80 float80_t * #ctypedef npy_float128 float128_t */ typedef npy_float64 __pyx_t_5numpy_float64_t; /* "numpy.pxd":747 * # The int types are mapped a bit surprising -- * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t # <<<<<<<<<<<<<< * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t */ typedef npy_long __pyx_t_5numpy_int_t; /* "numpy.pxd":748 * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t * ctypedef npy_longlong long_t # <<<<<<<<<<<<<< * ctypedef npy_longlong longlong_t * */ typedef npy_longlong __pyx_t_5numpy_long_t; /* "numpy.pxd":749 * ctypedef npy_long int_t * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<< * * ctypedef npy_ulong uint_t */ typedef npy_longlong __pyx_t_5numpy_longlong_t; /* "numpy.pxd":751 * ctypedef npy_longlong longlong_t * * ctypedef npy_ulong uint_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t */ typedef npy_ulong __pyx_t_5numpy_uint_t; /* "numpy.pxd":752 * * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulonglong_t * */ typedef npy_ulonglong __pyx_t_5numpy_ulong_t; /* "numpy.pxd":753 * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<< * * ctypedef npy_intp intp_t */ typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t; /* "numpy.pxd":755 * ctypedef npy_ulonglong ulonglong_t * * ctypedef npy_intp intp_t # 
<<<<<<<<<<<<<< * ctypedef npy_uintp uintp_t * */ typedef npy_intp __pyx_t_5numpy_intp_t; /* "numpy.pxd":756 * * ctypedef npy_intp intp_t * ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<< * * ctypedef npy_double float_t */ typedef npy_uintp __pyx_t_5numpy_uintp_t; /* "numpy.pxd":758 * ctypedef npy_uintp uintp_t * * ctypedef npy_double float_t # <<<<<<<<<<<<<< * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t */ typedef npy_double __pyx_t_5numpy_float_t; /* "numpy.pxd":759 * * ctypedef npy_double float_t * ctypedef npy_double double_t # <<<<<<<<<<<<<< * ctypedef npy_longdouble longdouble_t * */ typedef npy_double __pyx_t_5numpy_double_t; /* "numpy.pxd":760 * ctypedef npy_double float_t * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cfloat cfloat_t */ typedef npy_longdouble __pyx_t_5numpy_longdouble_t; /* "fff.pxd":9 * * # Redefine size_t * ctypedef unsigned long int size_t # <<<<<<<<<<<<<< * * */ typedef unsigned long __pyx_t_3fff_size_t; #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< float > __pyx_t_float_complex; #else typedef float _Complex __pyx_t_float_complex; #endif #else typedef struct { float real, imag; } __pyx_t_float_complex; #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< double > __pyx_t_double_complex; #else typedef double _Complex __pyx_t_double_complex; #endif #else typedef struct { double real, imag; } __pyx_t_double_complex; #endif /*--- Type declarations ---*/ /* "numpy.pxd":762 * ctypedef npy_longdouble longdouble_t * * ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<< * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t */ typedef npy_cfloat __pyx_t_5numpy_cfloat_t; /* "numpy.pxd":763 * * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<< * ctypedef npy_clongdouble clongdouble_t * */ typedef npy_cdouble __pyx_t_5numpy_cdouble_t; /* "numpy.pxd":764 * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cdouble complex_t */ typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t; /* "numpy.pxd":766 * ctypedef npy_clongdouble clongdouble_t * * ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew1(a): */ typedef npy_cdouble __pyx_t_5numpy_complex_t; #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); /*proto*/ #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define __Pyx_RefNannySetupContext(name, acquire_gil) \ if (acquire_gil) { \ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \ PyGILState_Release(__pyx_gilstate_save); \ } else { \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil) \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext() \ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define 
__Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif /* CYTHON_REFNANNY */ #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /*proto*/ static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /*proto*/ static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[], \ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, \ const char* function_name); /*proto*/ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /*proto*/ static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name); /*proto*/ static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb); /*proto*/ static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb); /*proto*/ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /*proto*/ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); static CYTHON_INLINE int __Pyx_IterFinish(void); /*proto*/ static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected); /*proto*/ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level); /*proto*/ #if CYTHON_CCOMPLEX #ifdef __cplusplus #define __Pyx_CREAL(z) ((z).real()) #define __Pyx_CIMAG(z) ((z).imag()) #else #define __Pyx_CREAL(z) (__real__(z)) #define __Pyx_CIMAG(z) (__imag__(z)) #endif #else #define __Pyx_CREAL(z) ((z).real) #define __Pyx_CIMAG(z) ((z).imag) #endif #if defined(_WIN32) && defined(__cplusplus) && CYTHON_CCOMPLEX #define __Pyx_SET_CREAL(z,x) ((z).real(x)) #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) #else #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) #endif static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float); #if CYTHON_CCOMPLEX #define __Pyx_c_eqf(a, b) ((a)==(b)) #define __Pyx_c_sumf(a, b) ((a)+(b)) #define __Pyx_c_difff(a, b) ((a)-(b)) #define __Pyx_c_prodf(a, b) ((a)*(b)) #define __Pyx_c_quotf(a, b) ((a)/(b)) #define __Pyx_c_negf(a) (-(a)) #ifdef __cplusplus #define 
__Pyx_c_is_zerof(z) ((z)==(float)0) #define __Pyx_c_conjf(z) (::std::conj(z)) #if 1 #define __Pyx_c_absf(z) (::std::abs(z)) #define __Pyx_c_powf(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zerof(z) ((z)==0) #define __Pyx_c_conjf(z) (conjf(z)) #if 1 #define __Pyx_c_absf(z) (cabsf(z)) #define __Pyx_c_powf(a, b) (cpowf(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex); static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex); #if 1 static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex, __pyx_t_float_complex); #endif #endif static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double); #if CYTHON_CCOMPLEX #define __Pyx_c_eq(a, b) ((a)==(b)) #define __Pyx_c_sum(a, b) ((a)+(b)) #define __Pyx_c_diff(a, b) ((a)-(b)) #define __Pyx_c_prod(a, b) ((a)*(b)) #define __Pyx_c_quot(a, b) ((a)/(b)) #define __Pyx_c_neg(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zero(z) ((z)==(double)0) #define __Pyx_c_conj(z) (::std::conj(z)) #if 1 #define __Pyx_c_abs(z) (::std::abs(z)) #define __Pyx_c_pow(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zero(z) ((z)==0) #define __Pyx_c_conj(z) (conj(z)) #if 1 #define __Pyx_c_abs(z) (cabs(z)) #define __Pyx_c_pow(a, b) (cpow(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex); static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex); #if 1 static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex, __pyx_t_double_complex); #endif #endif static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject *); static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject *); static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject *); static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject *); static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject *); static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject *); static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject *); static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject *); static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject *); static CYTHON_INLINE int 
__Pyx_PyInt_AsLongDouble(PyObject *); static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject *); static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject *); static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject *); static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject *); static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject *); static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject *); static int __Pyx_check_binary_version(void); #if !defined(__Pyx_PyIdentifier_FromString) #if PY_MAJOR_VERSION < 3 #define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s) #else #define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s) #endif #endif static PyObject *__Pyx_ImportModule(const char *name); /*proto*/ static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict); /*proto*/ typedef struct { int code_line; PyCodeObject* code_object; } __Pyx_CodeObjectCacheEntry; struct __Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); /*proto*/ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/ /* Module declarations from 'cpython.buffer' */ /* Module declarations from 'cpython.ref' */ /* Module declarations from 'libc.stdio' */ /* Module declarations from 'cpython.object' */ /* Module declarations from '__builtin__' */ /* Module declarations from 'cpython.type' */ static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0; /* Module declarations from 'libc.stdlib' */ /* Module declarations from 'numpy' */ /* Module declarations from 'numpy' */ static PyTypeObject *__pyx_ptype_5numpy_dtype = 0; static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0; static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0; static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0; static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0; static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/ /* Module declarations from 'fff' */ /* Module declarations from 'nipy.labs.bindings.array' */ #define __Pyx_MODULE_NAME "nipy.labs.bindings.array" int __pyx_module_is_main_nipy__labs__bindings__array = 0; /* Implementation of 'nipy.labs.bindings.array' */ static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_range; static PyObject *__pyx_builtin_RuntimeError; static PyObject *__pyx_pf_4nipy_4labs_8bindings_5array_array_get(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_A, size_t __pyx_v_x, size_t __pyx_v_y, size_t __pyx_v_z, size_t __pyx_v_t); /* proto */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_5array_2array_get_block(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_A, size_t __pyx_v_x0, size_t __pyx_v_x1, size_t __pyx_v_fX, size_t __pyx_v_y0, size_t __pyx_v_y1, size_t __pyx_v_fY, size_t __pyx_v_z0, size_t __pyx_v_z1, size_t __pyx_v_fZ, size_t __pyx_v_t0, size_t __pyx_v_t1, size_t __pyx_v_fT); /* proto */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_5array_4array_add(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_A, PyObject *__pyx_v_B); /* proto */ 
static PyObject *__pyx_pf_4nipy_4labs_8bindings_5array_6array_mul(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_A, PyObject *__pyx_v_B); /* proto */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_5array_8array_sub(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_A, PyObject *__pyx_v_B); /* proto */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_5array_10array_div(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_A, PyObject *__pyx_v_B); /* proto */ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */ static char __pyx_k_1[] = "ndarray is not C contiguous"; static char __pyx_k_3[] = "ndarray is not Fortran contiguous"; static char __pyx_k_5[] = "Non-native byte order not supported"; static char __pyx_k_7[] = "unknown dtype code in numpy.pxd (%d)"; static char __pyx_k_8[] = "Format string allocated too short, see comment in numpy.pxd"; static char __pyx_k_11[] = "Format string allocated too short."; static char __pyx_k_13[] = "\nPython access to core fff functions written in C. This module is\nmainly used for unitary tests.\n\nAuthor: Alexis Roche, 2008.\n"; static char __pyx_k_14[] = "0.1"; static char __pyx_k_17[] = "/Users/mb312/dev_trees/nipy/nipy/labs/bindings/array.pyx"; static char __pyx_k_18[] = "nipy.labs.bindings.array"; static char __pyx_k__A[] = "A"; static char __pyx_k__B[] = "B"; static char __pyx_k__C[] = "C"; static char __pyx_k__H[] = "H"; static char __pyx_k__I[] = "I"; static char __pyx_k__L[] = "L"; static char __pyx_k__O[] = "O"; static char __pyx_k__Q[] = "Q"; static char __pyx_k__a[] = "a"; static char __pyx_k__b[] = "b"; static char __pyx_k__c[] = "c"; static char __pyx_k__d[] = "d"; static char __pyx_k__f[] = "f"; static char __pyx_k__g[] = "g"; static char __pyx_k__h[] = "h"; static char __pyx_k__i[] = "i"; static char __pyx_k__l[] = "l"; static char __pyx_k__q[] = "q"; static char __pyx_k__t[] = "t"; static char __pyx_k__x[] = "x"; static char __pyx_k__y[] = "y"; static char __pyx_k__z[] = "z"; static char __pyx_k__Zd[] = "Zd"; static char __pyx_k__Zf[] = "Zf"; static char __pyx_k__Zg[] = "Zg"; static char __pyx_k__fT[] = "fT"; static char __pyx_k__fX[] = "fX"; static char __pyx_k__fY[] = "fY"; static char __pyx_k__fZ[] = "fZ"; static char __pyx_k__np[] = "np"; static char __pyx_k__t0[] = "t0"; static char __pyx_k__t1[] = "t1"; static char __pyx_k__va[] = "va"; static char __pyx_k__x0[] = "x0"; static char __pyx_k__x1[] = "x1"; static char __pyx_k__y0[] = "y0"; static char __pyx_k__y1[] = "y1"; static char __pyx_k__z0[] = "z0"; static char __pyx_k__z1[] = "z1"; static char __pyx_k__asub[] = "asub"; static char __pyx_k__numpy[] = "numpy"; static char __pyx_k__range[] = "range"; static char __pyx_k____main__[] = "__main__"; static char __pyx_k____test__[] = "__test__"; static char __pyx_k__array_add[] = "array_add"; static char __pyx_k__array_div[] = "array_div"; static char __pyx_k__array_get[] = "array_get"; static char __pyx_k__array_mul[] = "array_mul"; static char __pyx_k__array_sub[] = "array_sub"; static char __pyx_k__ValueError[] = "ValueError"; static char __pyx_k____version__[] = "__version__"; static char __pyx_k__RuntimeError[] = "RuntimeError"; static char __pyx_k__array_get_block[] = "array_get_block"; static PyObject *__pyx_kp_u_1; static PyObject *__pyx_kp_u_11; static PyObject *__pyx_kp_s_14; static PyObject *__pyx_kp_s_17; 
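/* The wrappers that follow are the Cython-generated glue for
 * nipy/labs/bindings/array.pyx, which exposes a handful of fff_array test
 * routines (array_get, array_get_block, array_add, array_sub, array_mul,
 * array_div) to Python.  A minimal usage sketch, assuming the extension
 * builds and imports as nipy.labs.bindings.array; the call signatures and
 * return values are taken from the docstrings and conversion calls embedded
 * below, the double dtype is only an illustrative assumption:
 *
 *     import numpy as np
 *     from nipy.labs.bindings.array import array_get, array_add
 *
 *     A = np.arange(2 * 3 * 4 * 5, dtype=np.double).reshape((2, 3, 4, 5))
 *     va = array_get(A, 1, 2, 3, 4)   # element A[1, 2, 3, 4], returned as a Python float
 *     C = array_add(A, A)             # elementwise A + B, returned as a new ndarray
 */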
static PyObject *__pyx_n_s_18; static PyObject *__pyx_kp_u_3; static PyObject *__pyx_kp_u_5; static PyObject *__pyx_kp_u_7; static PyObject *__pyx_kp_u_8; static PyObject *__pyx_n_s__A; static PyObject *__pyx_n_s__B; static PyObject *__pyx_n_s__C; static PyObject *__pyx_n_s__RuntimeError; static PyObject *__pyx_n_s__ValueError; static PyObject *__pyx_n_s____main__; static PyObject *__pyx_n_s____test__; static PyObject *__pyx_n_s____version__; static PyObject *__pyx_n_s__a; static PyObject *__pyx_n_s__array_add; static PyObject *__pyx_n_s__array_div; static PyObject *__pyx_n_s__array_get; static PyObject *__pyx_n_s__array_get_block; static PyObject *__pyx_n_s__array_mul; static PyObject *__pyx_n_s__array_sub; static PyObject *__pyx_n_s__asub; static PyObject *__pyx_n_s__b; static PyObject *__pyx_n_s__c; static PyObject *__pyx_n_s__fT; static PyObject *__pyx_n_s__fX; static PyObject *__pyx_n_s__fY; static PyObject *__pyx_n_s__fZ; static PyObject *__pyx_n_s__np; static PyObject *__pyx_n_s__numpy; static PyObject *__pyx_n_s__range; static PyObject *__pyx_n_s__t; static PyObject *__pyx_n_s__t0; static PyObject *__pyx_n_s__t1; static PyObject *__pyx_n_s__va; static PyObject *__pyx_n_s__x; static PyObject *__pyx_n_s__x0; static PyObject *__pyx_n_s__x1; static PyObject *__pyx_n_s__y; static PyObject *__pyx_n_s__y0; static PyObject *__pyx_n_s__y1; static PyObject *__pyx_n_s__z; static PyObject *__pyx_n_s__z0; static PyObject *__pyx_n_s__z1; static PyObject *__pyx_int_15; static PyObject *__pyx_k_tuple_2; static PyObject *__pyx_k_tuple_4; static PyObject *__pyx_k_tuple_6; static PyObject *__pyx_k_tuple_9; static PyObject *__pyx_k_tuple_10; static PyObject *__pyx_k_tuple_12; static PyObject *__pyx_k_tuple_15; static PyObject *__pyx_k_tuple_19; static PyObject *__pyx_k_tuple_21; static PyObject *__pyx_k_tuple_23; static PyObject *__pyx_k_tuple_25; static PyObject *__pyx_k_tuple_27; static PyObject *__pyx_k_codeobj_16; static PyObject *__pyx_k_codeobj_20; static PyObject *__pyx_k_codeobj_22; static PyObject *__pyx_k_codeobj_24; static PyObject *__pyx_k_codeobj_26; static PyObject *__pyx_k_codeobj_28; /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_8bindings_5array_1array_get(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_4labs_8bindings_5array_array_get[] = "\n Get array element.\n va = array_get(A, size_t x, size_t y=0, size_t z=0, size_t t=0):\n "; static PyMethodDef __pyx_mdef_4nipy_4labs_8bindings_5array_1array_get = {__Pyx_NAMESTR("array_get"), (PyCFunction)__pyx_pw_4nipy_4labs_8bindings_5array_1array_get, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4nipy_4labs_8bindings_5array_array_get)}; static PyObject *__pyx_pw_4nipy_4labs_8bindings_5array_1array_get(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_A = 0; size_t __pyx_v_x; size_t __pyx_v_y; size_t __pyx_v_z; size_t __pyx_v_t; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("array_get (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__A,&__pyx_n_s__x,&__pyx_n_s__y,&__pyx_n_s__z,&__pyx_n_s__t,0}; PyObject* values[5] = {0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = 
PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__A)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__x)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("array_get", 0, 2, 5, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__y); if (value) { values[2] = value; kw_args--; } } case 3: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__z); if (value) { values[3] = value; kw_args--; } } case 4: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__t); if (value) { values[4] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "array_get") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_A = values[0]; __pyx_v_x = __Pyx_PyInt_AsSize_t(values[1]); if (unlikely((__pyx_v_x == (size_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L3_error;} if (values[2]) { __pyx_v_y = __Pyx_PyInt_AsSize_t(values[2]); if (unlikely((__pyx_v_y == (size_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else { __pyx_v_y = ((size_t)0); } if (values[3]) { __pyx_v_z = __Pyx_PyInt_AsSize_t(values[3]); if (unlikely((__pyx_v_z == (size_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else { __pyx_v_z = ((size_t)0); } if (values[4]) { __pyx_v_t = __Pyx_PyInt_AsSize_t(values[4]); if (unlikely((__pyx_v_t == (size_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else { __pyx_v_t = ((size_t)0); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("array_get", 0, 2, 5, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.labs.bindings.array.array_get", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4nipy_4labs_8bindings_5array_array_get(__pyx_self, __pyx_v_A, __pyx_v_x, __pyx_v_y, __pyx_v_z, __pyx_v_t); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/labs/bindings/array.pyx":21 * * # Binded routines * def array_get(A, size_t x, size_t y=0, size_t z=0, size_t t=0): # <<<<<<<<<<<<<< * """ * Get array element. 
*/ static PyObject *__pyx_pf_4nipy_4labs_8bindings_5array_array_get(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_A, size_t __pyx_v_x, size_t __pyx_v_y, size_t __pyx_v_z, size_t __pyx_v_t) { fff_array *__pyx_v_a; double __pyx_v_va; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("array_get", 0); /* "nipy/labs/bindings/array.pyx":28 * cdef fff_array* a * cdef double va * a = fff_array_fromPyArray(A) # <<<<<<<<<<<<<< * va = fff_array_get(a, x, y, z, t) * fff_array_delete(a) */ if (!(likely(((__pyx_v_A) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_A, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 28; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = __pyx_v_A; __Pyx_INCREF(__pyx_t_1); __pyx_v_a = fff_array_fromPyArray(((PyArrayObject *)__pyx_t_1)); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/array.pyx":29 * cdef double va * a = fff_array_fromPyArray(A) * va = fff_array_get(a, x, y, z, t) # <<<<<<<<<<<<<< * fff_array_delete(a) * return va */ __pyx_v_va = fff_array_get(__pyx_v_a, __pyx_v_x, __pyx_v_y, __pyx_v_z, __pyx_v_t); /* "nipy/labs/bindings/array.pyx":30 * a = fff_array_fromPyArray(A) * va = fff_array_get(a, x, y, z, t) * fff_array_delete(a) # <<<<<<<<<<<<<< * return va * */ fff_array_delete(__pyx_v_a); /* "nipy/labs/bindings/array.pyx":31 * va = fff_array_get(a, x, y, z, t) * fff_array_delete(a) * return va # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyFloat_FromDouble(__pyx_v_va); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 31; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("nipy.labs.bindings.array.array_get", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_8bindings_5array_3array_get_block(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_4labs_8bindings_5array_2array_get_block[] = "\n Get block\n Asub = array_get_block( A, size_t x0, size_t x1, size_t fX=1,\n size_t y0=0, size_t y1=0, size_t fY=1,\n size_t z0=0, size_t z1=0, size_t fZ=1,\n size_t t0=0, size_t t1=0, size_t fT=1 )\n "; static PyMethodDef __pyx_mdef_4nipy_4labs_8bindings_5array_3array_get_block = {__Pyx_NAMESTR("array_get_block"), (PyCFunction)__pyx_pw_4nipy_4labs_8bindings_5array_3array_get_block, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4nipy_4labs_8bindings_5array_2array_get_block)}; static PyObject *__pyx_pw_4nipy_4labs_8bindings_5array_3array_get_block(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_A = 0; size_t __pyx_v_x0; size_t __pyx_v_x1; size_t __pyx_v_fX; size_t __pyx_v_y0; size_t __pyx_v_y1; size_t __pyx_v_fY; size_t __pyx_v_z0; size_t __pyx_v_z1; size_t __pyx_v_fZ; size_t __pyx_v_t0; size_t __pyx_v_t1; size_t __pyx_v_fT; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("array_get_block (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = 
{&__pyx_n_s__A,&__pyx_n_s__x0,&__pyx_n_s__x1,&__pyx_n_s__fX,&__pyx_n_s__y0,&__pyx_n_s__y1,&__pyx_n_s__fY,&__pyx_n_s__z0,&__pyx_n_s__z1,&__pyx_n_s__fZ,&__pyx_n_s__t0,&__pyx_n_s__t1,&__pyx_n_s__fT,0}; PyObject* values[13] = {0,0,0,0,0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 13: values[12] = PyTuple_GET_ITEM(__pyx_args, 12); case 12: values[11] = PyTuple_GET_ITEM(__pyx_args, 11); case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10); case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9); case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__A)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__x0)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("array_get_block", 0, 3, 13, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 34; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__x1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("array_get_block", 0, 3, 13, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 34; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__fX); if (value) { values[3] = value; kw_args--; } } case 4: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__y0); if (value) { values[4] = value; kw_args--; } } case 5: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__y1); if (value) { values[5] = value; kw_args--; } } case 6: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__fY); if (value) { values[6] = value; kw_args--; } } case 7: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__z0); if (value) { values[7] = value; kw_args--; } } case 8: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__z1); if (value) { values[8] = value; kw_args--; } } case 9: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__fZ); if (value) { values[9] = value; kw_args--; } } case 10: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__t0); if (value) { values[10] = value; kw_args--; } } case 11: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__t1); if (value) { values[11] = value; kw_args--; } } case 12: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__fT); if (value) { values[12] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "array_get_block") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 34; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 13: values[12] = PyTuple_GET_ITEM(__pyx_args, 12); case 12: 
values[11] = PyTuple_GET_ITEM(__pyx_args, 11); case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10); case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9); case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_A = values[0]; __pyx_v_x0 = __Pyx_PyInt_AsSize_t(values[1]); if (unlikely((__pyx_v_x0 == (size_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 34; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_x1 = __Pyx_PyInt_AsSize_t(values[2]); if (unlikely((__pyx_v_x1 == (size_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 34; __pyx_clineno = __LINE__; goto __pyx_L3_error;} if (values[3]) { __pyx_v_fX = __Pyx_PyInt_AsSize_t(values[3]); if (unlikely((__pyx_v_fX == (size_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 34; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else { __pyx_v_fX = ((size_t)1); } if (values[4]) { __pyx_v_y0 = __Pyx_PyInt_AsSize_t(values[4]); if (unlikely((__pyx_v_y0 == (size_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 35; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else { __pyx_v_y0 = ((size_t)0); } if (values[5]) { __pyx_v_y1 = __Pyx_PyInt_AsSize_t(values[5]); if (unlikely((__pyx_v_y1 == (size_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 35; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else { __pyx_v_y1 = ((size_t)0); } if (values[6]) { __pyx_v_fY = __Pyx_PyInt_AsSize_t(values[6]); if (unlikely((__pyx_v_fY == (size_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 35; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else { __pyx_v_fY = ((size_t)1); } if (values[7]) { __pyx_v_z0 = __Pyx_PyInt_AsSize_t(values[7]); if (unlikely((__pyx_v_z0 == (size_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else { __pyx_v_z0 = ((size_t)0); } if (values[8]) { __pyx_v_z1 = __Pyx_PyInt_AsSize_t(values[8]); if (unlikely((__pyx_v_z1 == (size_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else { __pyx_v_z1 = ((size_t)0); } if (values[9]) { __pyx_v_fZ = __Pyx_PyInt_AsSize_t(values[9]); if (unlikely((__pyx_v_fZ == (size_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else { __pyx_v_fZ = ((size_t)1); } if (values[10]) { __pyx_v_t0 = __Pyx_PyInt_AsSize_t(values[10]); if (unlikely((__pyx_v_t0 == (size_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 37; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else { __pyx_v_t0 = ((size_t)0); } if (values[11]) { __pyx_v_t1 = __Pyx_PyInt_AsSize_t(values[11]); if (unlikely((__pyx_v_t1 == (size_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 37; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else { __pyx_v_t1 = ((size_t)0); } if (values[12]) { __pyx_v_fT = __Pyx_PyInt_AsSize_t(values[12]); if (unlikely((__pyx_v_fT == (size_t)-1) && 
PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 37; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else { __pyx_v_fT = ((size_t)1); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("array_get_block", 0, 3, 13, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 34; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.labs.bindings.array.array_get_block", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4nipy_4labs_8bindings_5array_2array_get_block(__pyx_self, __pyx_v_A, __pyx_v_x0, __pyx_v_x1, __pyx_v_fX, __pyx_v_y0, __pyx_v_y1, __pyx_v_fY, __pyx_v_z0, __pyx_v_z1, __pyx_v_fZ, __pyx_v_t0, __pyx_v_t1, __pyx_v_fT); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/labs/bindings/array.pyx":34 * * * def array_get_block( A, size_t x0, size_t x1, size_t fX=1, # <<<<<<<<<<<<<< * size_t y0=0, size_t y1=0, size_t fY=1, * size_t z0=0, size_t z1=0, size_t fZ=1, */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_5array_2array_get_block(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_A, size_t __pyx_v_x0, size_t __pyx_v_x1, size_t __pyx_v_fX, size_t __pyx_v_y0, size_t __pyx_v_y1, size_t __pyx_v_fY, size_t __pyx_v_z0, size_t __pyx_v_z1, size_t __pyx_v_fZ, size_t __pyx_v_t0, size_t __pyx_v_t1, size_t __pyx_v_fT) { fff_array *__pyx_v_a; fff_array *__pyx_v_b; fff_array __pyx_v_asub; PyArrayObject *__pyx_v_B = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("array_get_block", 0); /* "nipy/labs/bindings/array.pyx":47 * cdef fff_array *a, *b * cdef fff_array asub * a = fff_array_fromPyArray(A) # <<<<<<<<<<<<<< * asub = fff_array_get_block(a, x0, x1, fX, y0, y1, fY, z0, z1, fZ, t0, t1, fT) * b = fff_array_new(asub.datatype, asub.dimX, asub.dimY, asub.dimZ, asub.dimT) */ if (!(likely(((__pyx_v_A) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_A, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 47; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = __pyx_v_A; __Pyx_INCREF(__pyx_t_1); __pyx_v_a = fff_array_fromPyArray(((PyArrayObject *)__pyx_t_1)); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/array.pyx":48 * cdef fff_array asub * a = fff_array_fromPyArray(A) * asub = fff_array_get_block(a, x0, x1, fX, y0, y1, fY, z0, z1, fZ, t0, t1, fT) # <<<<<<<<<<<<<< * b = fff_array_new(asub.datatype, asub.dimX, asub.dimY, asub.dimZ, asub.dimT) * fff_array_copy(b, &asub) */ __pyx_v_asub = fff_array_get_block(__pyx_v_a, __pyx_v_x0, __pyx_v_x1, __pyx_v_fX, __pyx_v_y0, __pyx_v_y1, __pyx_v_fY, __pyx_v_z0, __pyx_v_z1, __pyx_v_fZ, __pyx_v_t0, __pyx_v_t1, __pyx_v_fT); /* "nipy/labs/bindings/array.pyx":49 * a = fff_array_fromPyArray(A) * asub = fff_array_get_block(a, x0, x1, fX, y0, y1, fY, z0, z1, fZ, t0, t1, fT) * b = fff_array_new(asub.datatype, asub.dimX, asub.dimY, asub.dimZ, asub.dimT) # <<<<<<<<<<<<<< * fff_array_copy(b, &asub) * B = fff_array_toPyArray(b) */ __pyx_v_b = fff_array_new(__pyx_v_asub.datatype, __pyx_v_asub.dimX, __pyx_v_asub.dimY, __pyx_v_asub.dimZ, __pyx_v_asub.dimT); /* "nipy/labs/bindings/array.pyx":50 * asub = fff_array_get_block(a, x0, x1, fX, y0, y1, fY, z0, z1, fZ, t0, t1, fT) * b = fff_array_new(asub.datatype, asub.dimX, asub.dimY, asub.dimZ, asub.dimT) * fff_array_copy(b, &asub) 
# <<<<<<<<<<<<<< * B = fff_array_toPyArray(b) * fff_array_delete(a) */ fff_array_copy(__pyx_v_b, (&__pyx_v_asub)); /* "nipy/labs/bindings/array.pyx":51 * b = fff_array_new(asub.datatype, asub.dimX, asub.dimY, asub.dimZ, asub.dimT) * fff_array_copy(b, &asub) * B = fff_array_toPyArray(b) # <<<<<<<<<<<<<< * fff_array_delete(a) * return B */ __pyx_t_1 = ((PyObject *)fff_array_toPyArray(__pyx_v_b)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 51; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_v_B = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/array.pyx":52 * fff_array_copy(b, &asub) * B = fff_array_toPyArray(b) * fff_array_delete(a) # <<<<<<<<<<<<<< * return B * */ fff_array_delete(__pyx_v_a); /* "nipy/labs/bindings/array.pyx":53 * B = fff_array_toPyArray(b) * fff_array_delete(a) * return B # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_B)); __pyx_r = ((PyObject *)__pyx_v_B); goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("nipy.labs.bindings.array.array_get_block", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_B); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_8bindings_5array_5array_add(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_4labs_8bindings_5array_4array_add[] = "\n C = A + B \n "; static PyMethodDef __pyx_mdef_4nipy_4labs_8bindings_5array_5array_add = {__Pyx_NAMESTR("array_add"), (PyCFunction)__pyx_pw_4nipy_4labs_8bindings_5array_5array_add, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4nipy_4labs_8bindings_5array_4array_add)}; static PyObject *__pyx_pw_4nipy_4labs_8bindings_5array_5array_add(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_A = 0; PyObject *__pyx_v_B = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("array_add (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__A,&__pyx_n_s__B,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__A)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__B)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("array_add", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "array_add") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_A = values[0]; __pyx_v_B = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; 
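/* array_add, array_mul, array_sub and array_div all share the same generated
 * pattern visible below: wrap A and B as fff_array views, allocate c with A's
 * datatype and dimensions, copy A into c, apply the in-place fff operation
 * with b, convert c back to an ndarray and delete the temporaries.  A rough
 * Python reference for that pattern (a sketch of the intended semantics only,
 * not part of the module; it assumes A and B have matching shapes and that
 * the result keeps A's dtype, as the C code does):
 *
 *     import numpy as np
 *     def array_add_reference(A, B):
 *         C = np.array(A, copy=True)      # c = fff_array_new(...); fff_array_copy(c, a)
 *         C += np.asarray(B).astype(C.dtype)  # fff_array_add(c, b)
 *         return C
 */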
__Pyx_RaiseArgtupleInvalid("array_add", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.labs.bindings.array.array_add", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4nipy_4labs_8bindings_5array_4array_add(__pyx_self, __pyx_v_A, __pyx_v_B); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/labs/bindings/array.pyx":56 * * * def array_add(A, B): # <<<<<<<<<<<<<< * """ * C = A + B */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_5array_4array_add(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_A, PyObject *__pyx_v_B) { fff_array *__pyx_v_a; fff_array *__pyx_v_b; fff_array *__pyx_v_c; PyArrayObject *__pyx_v_C = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("array_add", 0); /* "nipy/labs/bindings/array.pyx":62 * cdef fff_array *a, *b, *c * * a = fff_array_fromPyArray(A) # <<<<<<<<<<<<<< * b = fff_array_fromPyArray(B) * c = fff_array_new(a.datatype, a.dimX, a.dimY, a.dimZ, a.dimT) */ if (!(likely(((__pyx_v_A) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_A, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 62; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = __pyx_v_A; __Pyx_INCREF(__pyx_t_1); __pyx_v_a = fff_array_fromPyArray(((PyArrayObject *)__pyx_t_1)); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/array.pyx":63 * * a = fff_array_fromPyArray(A) * b = fff_array_fromPyArray(B) # <<<<<<<<<<<<<< * c = fff_array_new(a.datatype, a.dimX, a.dimY, a.dimZ, a.dimT) * fff_array_copy(c, a) */ if (!(likely(((__pyx_v_B) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_B, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 63; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = __pyx_v_B; __Pyx_INCREF(__pyx_t_1); __pyx_v_b = fff_array_fromPyArray(((PyArrayObject *)__pyx_t_1)); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/array.pyx":64 * a = fff_array_fromPyArray(A) * b = fff_array_fromPyArray(B) * c = fff_array_new(a.datatype, a.dimX, a.dimY, a.dimZ, a.dimT) # <<<<<<<<<<<<<< * fff_array_copy(c, a) * fff_array_add(c, b) */ __pyx_v_c = fff_array_new(__pyx_v_a->datatype, __pyx_v_a->dimX, __pyx_v_a->dimY, __pyx_v_a->dimZ, __pyx_v_a->dimT); /* "nipy/labs/bindings/array.pyx":65 * b = fff_array_fromPyArray(B) * c = fff_array_new(a.datatype, a.dimX, a.dimY, a.dimZ, a.dimT) * fff_array_copy(c, a) # <<<<<<<<<<<<<< * fff_array_add(c, b) * C = fff_array_toPyArray(c) */ fff_array_copy(__pyx_v_c, __pyx_v_a); /* "nipy/labs/bindings/array.pyx":66 * c = fff_array_new(a.datatype, a.dimX, a.dimY, a.dimZ, a.dimT) * fff_array_copy(c, a) * fff_array_add(c, b) # <<<<<<<<<<<<<< * C = fff_array_toPyArray(c) * fff_array_delete(a) */ fff_array_add(__pyx_v_c, __pyx_v_b); /* "nipy/labs/bindings/array.pyx":67 * fff_array_copy(c, a) * fff_array_add(c, b) * C = fff_array_toPyArray(c) # <<<<<<<<<<<<<< * fff_array_delete(a) * fff_array_delete(b) */ __pyx_t_1 = ((PyObject *)fff_array_toPyArray(__pyx_v_c)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 67; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_v_C = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/array.pyx":68 * 
fff_array_add(c, b) * C = fff_array_toPyArray(c) * fff_array_delete(a) # <<<<<<<<<<<<<< * fff_array_delete(b) * return C */ fff_array_delete(__pyx_v_a); /* "nipy/labs/bindings/array.pyx":69 * C = fff_array_toPyArray(c) * fff_array_delete(a) * fff_array_delete(b) # <<<<<<<<<<<<<< * return C * */ fff_array_delete(__pyx_v_b); /* "nipy/labs/bindings/array.pyx":70 * fff_array_delete(a) * fff_array_delete(b) * return C # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_C)); __pyx_r = ((PyObject *)__pyx_v_C); goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("nipy.labs.bindings.array.array_add", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_C); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_8bindings_5array_7array_mul(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_4labs_8bindings_5array_6array_mul[] = "\n C = A * B \n "; static PyMethodDef __pyx_mdef_4nipy_4labs_8bindings_5array_7array_mul = {__Pyx_NAMESTR("array_mul"), (PyCFunction)__pyx_pw_4nipy_4labs_8bindings_5array_7array_mul, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4nipy_4labs_8bindings_5array_6array_mul)}; static PyObject *__pyx_pw_4nipy_4labs_8bindings_5array_7array_mul(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_A = 0; PyObject *__pyx_v_B = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("array_mul (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__A,&__pyx_n_s__B,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__A)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__B)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("array_mul", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "array_mul") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_A = values[0]; __pyx_v_B = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("array_mul", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.labs.bindings.array.array_mul", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4nipy_4labs_8bindings_5array_6array_mul(__pyx_self, __pyx_v_A, __pyx_v_B); __Pyx_RefNannyFinishContext(); 
return __pyx_r; } /* "nipy/labs/bindings/array.pyx":73 * * * def array_mul(A, B): # <<<<<<<<<<<<<< * """ * C = A * B */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_5array_6array_mul(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_A, PyObject *__pyx_v_B) { fff_array *__pyx_v_a; fff_array *__pyx_v_b; fff_array *__pyx_v_c; PyArrayObject *__pyx_v_C = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("array_mul", 0); /* "nipy/labs/bindings/array.pyx":79 * cdef fff_array *a, *b, *c * * a = fff_array_fromPyArray(A) # <<<<<<<<<<<<<< * b = fff_array_fromPyArray(B) * c = fff_array_new(a.datatype, a.dimX, a.dimY, a.dimZ, a.dimT) */ if (!(likely(((__pyx_v_A) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_A, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 79; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = __pyx_v_A; __Pyx_INCREF(__pyx_t_1); __pyx_v_a = fff_array_fromPyArray(((PyArrayObject *)__pyx_t_1)); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/array.pyx":80 * * a = fff_array_fromPyArray(A) * b = fff_array_fromPyArray(B) # <<<<<<<<<<<<<< * c = fff_array_new(a.datatype, a.dimX, a.dimY, a.dimZ, a.dimT) * fff_array_copy(c, a) */ if (!(likely(((__pyx_v_B) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_B, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 80; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = __pyx_v_B; __Pyx_INCREF(__pyx_t_1); __pyx_v_b = fff_array_fromPyArray(((PyArrayObject *)__pyx_t_1)); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/array.pyx":81 * a = fff_array_fromPyArray(A) * b = fff_array_fromPyArray(B) * c = fff_array_new(a.datatype, a.dimX, a.dimY, a.dimZ, a.dimT) # <<<<<<<<<<<<<< * fff_array_copy(c, a) * fff_array_mul(c, b) */ __pyx_v_c = fff_array_new(__pyx_v_a->datatype, __pyx_v_a->dimX, __pyx_v_a->dimY, __pyx_v_a->dimZ, __pyx_v_a->dimT); /* "nipy/labs/bindings/array.pyx":82 * b = fff_array_fromPyArray(B) * c = fff_array_new(a.datatype, a.dimX, a.dimY, a.dimZ, a.dimT) * fff_array_copy(c, a) # <<<<<<<<<<<<<< * fff_array_mul(c, b) * C = fff_array_toPyArray(c) */ fff_array_copy(__pyx_v_c, __pyx_v_a); /* "nipy/labs/bindings/array.pyx":83 * c = fff_array_new(a.datatype, a.dimX, a.dimY, a.dimZ, a.dimT) * fff_array_copy(c, a) * fff_array_mul(c, b) # <<<<<<<<<<<<<< * C = fff_array_toPyArray(c) * fff_array_delete(a) */ fff_array_mul(__pyx_v_c, __pyx_v_b); /* "nipy/labs/bindings/array.pyx":84 * fff_array_copy(c, a) * fff_array_mul(c, b) * C = fff_array_toPyArray(c) # <<<<<<<<<<<<<< * fff_array_delete(a) * fff_array_delete(b) */ __pyx_t_1 = ((PyObject *)fff_array_toPyArray(__pyx_v_c)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_v_C = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/array.pyx":85 * fff_array_mul(c, b) * C = fff_array_toPyArray(c) * fff_array_delete(a) # <<<<<<<<<<<<<< * fff_array_delete(b) * return C */ fff_array_delete(__pyx_v_a); /* "nipy/labs/bindings/array.pyx":86 * C = fff_array_toPyArray(c) * fff_array_delete(a) * fff_array_delete(b) # <<<<<<<<<<<<<< * return C * */ fff_array_delete(__pyx_v_b); /* "nipy/labs/bindings/array.pyx":87 * fff_array_delete(a) * fff_array_delete(b) * return C # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_C)); __pyx_r 
= ((PyObject *)__pyx_v_C); goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("nipy.labs.bindings.array.array_mul", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_C); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_8bindings_5array_9array_sub(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_4labs_8bindings_5array_8array_sub[] = "\n C = A - B \n "; static PyMethodDef __pyx_mdef_4nipy_4labs_8bindings_5array_9array_sub = {__Pyx_NAMESTR("array_sub"), (PyCFunction)__pyx_pw_4nipy_4labs_8bindings_5array_9array_sub, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4nipy_4labs_8bindings_5array_8array_sub)}; static PyObject *__pyx_pw_4nipy_4labs_8bindings_5array_9array_sub(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_A = 0; PyObject *__pyx_v_B = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("array_sub (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__A,&__pyx_n_s__B,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__A)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__B)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("array_sub", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 90; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "array_sub") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 90; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_A = values[0]; __pyx_v_B = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("array_sub", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 90; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.labs.bindings.array.array_sub", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4nipy_4labs_8bindings_5array_8array_sub(__pyx_self, __pyx_v_A, __pyx_v_B); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/labs/bindings/array.pyx":90 * * * def array_sub(A, B): # <<<<<<<<<<<<<< * """ * C = A - B */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_5array_8array_sub(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_A, PyObject *__pyx_v_B) { fff_array *__pyx_v_a; fff_array *__pyx_v_b; fff_array *__pyx_v_c; PyArrayObject *__pyx_v_C = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int 
__pyx_clineno = 0; __Pyx_RefNannySetupContext("array_sub", 0); /* "nipy/labs/bindings/array.pyx":96 * cdef fff_array *a, *b, *c * * a = fff_array_fromPyArray(A) # <<<<<<<<<<<<<< * b = fff_array_fromPyArray(B) * c = fff_array_new(a.datatype, a.dimX, a.dimY, a.dimZ, a.dimT) */ if (!(likely(((__pyx_v_A) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_A, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 96; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = __pyx_v_A; __Pyx_INCREF(__pyx_t_1); __pyx_v_a = fff_array_fromPyArray(((PyArrayObject *)__pyx_t_1)); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/array.pyx":97 * * a = fff_array_fromPyArray(A) * b = fff_array_fromPyArray(B) # <<<<<<<<<<<<<< * c = fff_array_new(a.datatype, a.dimX, a.dimY, a.dimZ, a.dimT) * fff_array_copy(c, a) */ if (!(likely(((__pyx_v_B) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_B, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 97; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = __pyx_v_B; __Pyx_INCREF(__pyx_t_1); __pyx_v_b = fff_array_fromPyArray(((PyArrayObject *)__pyx_t_1)); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/array.pyx":98 * a = fff_array_fromPyArray(A) * b = fff_array_fromPyArray(B) * c = fff_array_new(a.datatype, a.dimX, a.dimY, a.dimZ, a.dimT) # <<<<<<<<<<<<<< * fff_array_copy(c, a) * fff_array_sub(c, b) */ __pyx_v_c = fff_array_new(__pyx_v_a->datatype, __pyx_v_a->dimX, __pyx_v_a->dimY, __pyx_v_a->dimZ, __pyx_v_a->dimT); /* "nipy/labs/bindings/array.pyx":99 * b = fff_array_fromPyArray(B) * c = fff_array_new(a.datatype, a.dimX, a.dimY, a.dimZ, a.dimT) * fff_array_copy(c, a) # <<<<<<<<<<<<<< * fff_array_sub(c, b) * C = fff_array_toPyArray(c) */ fff_array_copy(__pyx_v_c, __pyx_v_a); /* "nipy/labs/bindings/array.pyx":100 * c = fff_array_new(a.datatype, a.dimX, a.dimY, a.dimZ, a.dimT) * fff_array_copy(c, a) * fff_array_sub(c, b) # <<<<<<<<<<<<<< * C = fff_array_toPyArray(c) * fff_array_delete(a) */ fff_array_sub(__pyx_v_c, __pyx_v_b); /* "nipy/labs/bindings/array.pyx":101 * fff_array_copy(c, a) * fff_array_sub(c, b) * C = fff_array_toPyArray(c) # <<<<<<<<<<<<<< * fff_array_delete(a) * fff_array_delete(b) */ __pyx_t_1 = ((PyObject *)fff_array_toPyArray(__pyx_v_c)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_v_C = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/array.pyx":102 * fff_array_sub(c, b) * C = fff_array_toPyArray(c) * fff_array_delete(a) # <<<<<<<<<<<<<< * fff_array_delete(b) * return C */ fff_array_delete(__pyx_v_a); /* "nipy/labs/bindings/array.pyx":103 * C = fff_array_toPyArray(c) * fff_array_delete(a) * fff_array_delete(b) # <<<<<<<<<<<<<< * return C * */ fff_array_delete(__pyx_v_b); /* "nipy/labs/bindings/array.pyx":104 * fff_array_delete(a) * fff_array_delete(b) * return C # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_C)); __pyx_r = ((PyObject *)__pyx_v_C); goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("nipy.labs.bindings.array.array_sub", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_C); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_8bindings_5array_11array_div(PyObject *__pyx_self, 
PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_4labs_8bindings_5array_10array_div[] = "\n C = A / B \n "; static PyMethodDef __pyx_mdef_4nipy_4labs_8bindings_5array_11array_div = {__Pyx_NAMESTR("array_div"), (PyCFunction)__pyx_pw_4nipy_4labs_8bindings_5array_11array_div, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4nipy_4labs_8bindings_5array_10array_div)}; static PyObject *__pyx_pw_4nipy_4labs_8bindings_5array_11array_div(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_A = 0; PyObject *__pyx_v_B = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("array_div (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__A,&__pyx_n_s__B,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__A)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__B)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("array_div", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 107; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "array_div") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 107; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_A = values[0]; __pyx_v_B = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("array_div", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 107; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.labs.bindings.array.array_div", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4nipy_4labs_8bindings_5array_10array_div(__pyx_self, __pyx_v_A, __pyx_v_B); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/labs/bindings/array.pyx":107 * * * def array_div(A, B): # <<<<<<<<<<<<<< * """ * C = A / B */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_5array_10array_div(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_A, PyObject *__pyx_v_B) { fff_array *__pyx_v_a; fff_array *__pyx_v_b; fff_array *__pyx_v_c; PyArrayObject *__pyx_v_C = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("array_div", 0); /* "nipy/labs/bindings/array.pyx":113 * cdef fff_array *a, *b, *c * * a = fff_array_fromPyArray(A) # <<<<<<<<<<<<<< * b = fff_array_fromPyArray(B) * c = fff_array_new(a.datatype, a.dimX, a.dimY, a.dimZ, a.dimT) */ if (!(likely(((__pyx_v_A) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_A, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 113; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 
= __pyx_v_A; __Pyx_INCREF(__pyx_t_1); __pyx_v_a = fff_array_fromPyArray(((PyArrayObject *)__pyx_t_1)); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/array.pyx":114 * * a = fff_array_fromPyArray(A) * b = fff_array_fromPyArray(B) # <<<<<<<<<<<<<< * c = fff_array_new(a.datatype, a.dimX, a.dimY, a.dimZ, a.dimT) * fff_array_copy(c, a) */ if (!(likely(((__pyx_v_B) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_B, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 114; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = __pyx_v_B; __Pyx_INCREF(__pyx_t_1); __pyx_v_b = fff_array_fromPyArray(((PyArrayObject *)__pyx_t_1)); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/array.pyx":115 * a = fff_array_fromPyArray(A) * b = fff_array_fromPyArray(B) * c = fff_array_new(a.datatype, a.dimX, a.dimY, a.dimZ, a.dimT) # <<<<<<<<<<<<<< * fff_array_copy(c, a) * fff_array_div(c, b) */ __pyx_v_c = fff_array_new(__pyx_v_a->datatype, __pyx_v_a->dimX, __pyx_v_a->dimY, __pyx_v_a->dimZ, __pyx_v_a->dimT); /* "nipy/labs/bindings/array.pyx":116 * b = fff_array_fromPyArray(B) * c = fff_array_new(a.datatype, a.dimX, a.dimY, a.dimZ, a.dimT) * fff_array_copy(c, a) # <<<<<<<<<<<<<< * fff_array_div(c, b) * C = fff_array_toPyArray(c) */ fff_array_copy(__pyx_v_c, __pyx_v_a); /* "nipy/labs/bindings/array.pyx":117 * c = fff_array_new(a.datatype, a.dimX, a.dimY, a.dimZ, a.dimT) * fff_array_copy(c, a) * fff_array_div(c, b) # <<<<<<<<<<<<<< * C = fff_array_toPyArray(c) * fff_array_delete(a) */ fff_array_div(__pyx_v_c, __pyx_v_b); /* "nipy/labs/bindings/array.pyx":118 * fff_array_copy(c, a) * fff_array_div(c, b) * C = fff_array_toPyArray(c) # <<<<<<<<<<<<<< * fff_array_delete(a) * fff_array_delete(b) */ __pyx_t_1 = ((PyObject *)fff_array_toPyArray(__pyx_v_c)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 118; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_v_C = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/array.pyx":119 * fff_array_div(c, b) * C = fff_array_toPyArray(c) * fff_array_delete(a) # <<<<<<<<<<<<<< * fff_array_delete(b) * return C */ fff_array_delete(__pyx_v_a); /* "nipy/labs/bindings/array.pyx":120 * C = fff_array_toPyArray(c) * fff_array_delete(a) * fff_array_delete(b) # <<<<<<<<<<<<<< * return C */ fff_array_delete(__pyx_v_b); /* "nipy/labs/bindings/array.pyx":121 * fff_array_delete(a) * fff_array_delete(b) * return C # <<<<<<<<<<<<<< */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_C)); __pyx_r = ((PyObject *)__pyx_v_C); goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("nipy.labs.bindings.array.array_div", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_C); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_pf_5numpy_7ndarray___getbuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":194 * # 
experimental exception made for __getbuffer__ and __releasebuffer__ * # -- the details of this may change. * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< * # This implementation of getbuffer is geared towards Cython * # requirements, and does not yet fullfill the PEP. */ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_v_copy_shape; int __pyx_v_i; int __pyx_v_ndim; int __pyx_v_endian_detector; int __pyx_v_little_endian; int __pyx_v_t; char *__pyx_v_f; PyArray_Descr *__pyx_v_descr = 0; int __pyx_v_offset; int __pyx_v_hasfields; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; char *__pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getbuffer__", 0); if (__pyx_v_info != NULL) { __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); } /* "numpy.pxd":200 * # of flags * * if info == NULL: return # <<<<<<<<<<<<<< * * cdef int copy_shape, i, ndim */ __pyx_t_1 = (__pyx_v_info == NULL); if (__pyx_t_1) { __pyx_r = 0; goto __pyx_L0; goto __pyx_L3; } __pyx_L3:; /* "numpy.pxd":203 * * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((&endian_detector)[0] != 0) * */ __pyx_v_endian_detector = 1; /* "numpy.pxd":204 * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * * ndim = PyArray_NDIM(self) */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "numpy.pxd":206 * cdef bint little_endian = ((&endian_detector)[0] != 0) * * ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<< * * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_v_ndim = PyArray_NDIM(__pyx_v_self); /* "numpy.pxd":208 * ndim = PyArray_NDIM(self) * * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * copy_shape = 1 * else: */ __pyx_t_1 = ((sizeof(npy_intp)) != (sizeof(Py_ssize_t))); if (__pyx_t_1) { /* "numpy.pxd":209 * * if sizeof(npy_intp) != sizeof(Py_ssize_t): * copy_shape = 1 # <<<<<<<<<<<<<< * else: * copy_shape = 0 */ __pyx_v_copy_shape = 1; goto __pyx_L4; } /*else*/ { /* "numpy.pxd":211 * copy_shape = 1 * else: * copy_shape = 0 # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) */ __pyx_v_copy_shape = 0; } __pyx_L4:; /* "numpy.pxd":213 * copy_shape = 0 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS); if (__pyx_t_1) { /* "numpy.pxd":214 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not C contiguous") * */ __pyx_t_2 = (!PyArray_CHKFLAGS(__pyx_v_self, NPY_C_CONTIGUOUS)); __pyx_t_3 = __pyx_t_2; } else { __pyx_t_3 = __pyx_t_1; } if (__pyx_t_3) { /* "numpy.pxd":215 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_t_4 = 
PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_2), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L5; } __pyx_L5:; /* "numpy.pxd":217 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ __pyx_t_3 = ((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS); if (__pyx_t_3) { /* "numpy.pxd":218 * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not Fortran contiguous") * */ __pyx_t_1 = (!PyArray_CHKFLAGS(__pyx_v_self, NPY_F_CONTIGUOUS)); __pyx_t_2 = __pyx_t_1; } else { __pyx_t_2 = __pyx_t_3; } if (__pyx_t_2) { /* "numpy.pxd":219 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_4), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L6; } __pyx_L6:; /* "numpy.pxd":221 * raise ValueError(u"ndarray is not Fortran contiguous") * * info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<< * info.ndim = ndim * if copy_shape: */ __pyx_v_info->buf = PyArray_DATA(__pyx_v_self); /* "numpy.pxd":222 * * info.buf = PyArray_DATA(self) * info.ndim = ndim # <<<<<<<<<<<<<< * if copy_shape: * # Allocate new buffer for strides and shape info. */ __pyx_v_info->ndim = __pyx_v_ndim; /* "numpy.pxd":223 * info.buf = PyArray_DATA(self) * info.ndim = ndim * if copy_shape: # <<<<<<<<<<<<<< * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. */ if (__pyx_v_copy_shape) { /* "numpy.pxd":226 * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. * info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) # <<<<<<<<<<<<<< * info.shape = info.strides + ndim * for i in range(ndim): */ __pyx_v_info->strides = ((Py_ssize_t *)malloc((((sizeof(Py_ssize_t)) * ((size_t)__pyx_v_ndim)) * 2))); /* "numpy.pxd":227 * # This is allocated as one block, strides first. 
* info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) * info.shape = info.strides + ndim # <<<<<<<<<<<<<< * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] */ __pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim); /* "numpy.pxd":228 * info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) * info.shape = info.strides + ndim * for i in range(ndim): # <<<<<<<<<<<<<< * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] */ __pyx_t_5 = __pyx_v_ndim; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "numpy.pxd":229 * info.shape = info.strides + ndim * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<< * info.shape[i] = PyArray_DIMS(self)[i] * else: */ (__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]); /* "numpy.pxd":230 * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<< * else: * info.strides = PyArray_STRIDES(self) */ (__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]); } goto __pyx_L7; } /*else*/ { /* "numpy.pxd":232 * info.shape[i] = PyArray_DIMS(self)[i] * else: * info.strides = PyArray_STRIDES(self) # <<<<<<<<<<<<<< * info.shape = PyArray_DIMS(self) * info.suboffsets = NULL */ __pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self)); /* "numpy.pxd":233 * else: * info.strides = PyArray_STRIDES(self) * info.shape = PyArray_DIMS(self) # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) */ __pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(__pyx_v_self)); } __pyx_L7:; /* "numpy.pxd":234 * info.strides = PyArray_STRIDES(self) * info.shape = PyArray_DIMS(self) * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) */ __pyx_v_info->suboffsets = NULL; /* "numpy.pxd":235 * info.shape = PyArray_DIMS(self) * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<< * info.readonly = not PyArray_ISWRITEABLE(self) * */ __pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self); /* "numpy.pxd":236 * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<< * * cdef int t */ __pyx_v_info->readonly = (!PyArray_ISWRITEABLE(__pyx_v_self)); /* "numpy.pxd":239 * * cdef int t * cdef char* f = NULL # <<<<<<<<<<<<<< * cdef dtype descr = self.descr * cdef list stack */ __pyx_v_f = NULL; /* "numpy.pxd":240 * cdef int t * cdef char* f = NULL * cdef dtype descr = self.descr # <<<<<<<<<<<<<< * cdef list stack * cdef int offset */ __pyx_t_4 = ((PyObject *)__pyx_v_self->descr); __Pyx_INCREF(__pyx_t_4); __pyx_v_descr = ((PyArray_Descr *)__pyx_t_4); __pyx_t_4 = 0; /* "numpy.pxd":244 * cdef int offset * * cdef bint hasfields = PyDataType_HASFIELDS(descr) # <<<<<<<<<<<<<< * * if not hasfields and not copy_shape: */ __pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr); /* "numpy.pxd":246 * cdef bint hasfields = PyDataType_HASFIELDS(descr) * * if not hasfields and not copy_shape: # <<<<<<<<<<<<<< * # do not call releasebuffer * info.obj = None */ __pyx_t_2 = (!__pyx_v_hasfields); if (__pyx_t_2) { __pyx_t_3 = (!__pyx_v_copy_shape); __pyx_t_1 = __pyx_t_3; } else { __pyx_t_1 = __pyx_t_2; } if (__pyx_t_1) { /* "numpy.pxd":248 * if not hasfields and not copy_shape: * # do not call releasebuffer * info.obj = None # <<<<<<<<<<<<<< * else: * # need to call 
releasebuffer */ __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = Py_None; goto __pyx_L10; } /*else*/ { /* "numpy.pxd":251 * else: * # need to call releasebuffer * info.obj = self # <<<<<<<<<<<<<< * * if not hasfields: */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); } __pyx_L10:; /* "numpy.pxd":253 * info.obj = self * * if not hasfields: # <<<<<<<<<<<<<< * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or */ __pyx_t_1 = (!__pyx_v_hasfields); if (__pyx_t_1) { /* "numpy.pxd":254 * * if not hasfields: * t = descr.type_num # <<<<<<<<<<<<<< * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): */ __pyx_t_5 = __pyx_v_descr->type_num; __pyx_v_t = __pyx_t_5; /* "numpy.pxd":255 * if not hasfields: * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_1 = (__pyx_v_descr->byteorder == '>'); if (__pyx_t_1) { __pyx_t_2 = __pyx_v_little_endian; } else { __pyx_t_2 = __pyx_t_1; } if (!__pyx_t_2) { /* "numpy.pxd":256 * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" */ __pyx_t_1 = (__pyx_v_descr->byteorder == '<'); if (__pyx_t_1) { __pyx_t_3 = (!__pyx_v_little_endian); __pyx_t_7 = __pyx_t_3; } else { __pyx_t_7 = __pyx_t_1; } __pyx_t_1 = __pyx_t_7; } else { __pyx_t_1 = __pyx_t_2; } if (__pyx_t_1) { /* "numpy.pxd":257 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_6), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L12; } __pyx_L12:; /* "numpy.pxd":258 * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" */ __pyx_t_1 = (__pyx_v_t == NPY_BYTE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__b; goto __pyx_L13; } /* "numpy.pxd":259 * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" */ __pyx_t_1 = (__pyx_v_t == NPY_UBYTE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__B; goto __pyx_L13; } /* "numpy.pxd":260 * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" */ __pyx_t_1 = (__pyx_v_t == NPY_SHORT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__h; goto __pyx_L13; } /* "numpy.pxd":261 * elif t == NPY_UBYTE: f = "B" * elif t == 
NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" */ __pyx_t_1 = (__pyx_v_t == NPY_USHORT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__H; goto __pyx_L13; } /* "numpy.pxd":262 * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" */ __pyx_t_1 = (__pyx_v_t == NPY_INT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__i; goto __pyx_L13; } /* "numpy.pxd":263 * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" */ __pyx_t_1 = (__pyx_v_t == NPY_UINT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__I; goto __pyx_L13; } /* "numpy.pxd":264 * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" */ __pyx_t_1 = (__pyx_v_t == NPY_LONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__l; goto __pyx_L13; } /* "numpy.pxd":265 * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" */ __pyx_t_1 = (__pyx_v_t == NPY_ULONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__L; goto __pyx_L13; } /* "numpy.pxd":266 * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" */ __pyx_t_1 = (__pyx_v_t == NPY_LONGLONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__q; goto __pyx_L13; } /* "numpy.pxd":267 * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" */ __pyx_t_1 = (__pyx_v_t == NPY_ULONGLONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Q; goto __pyx_L13; } /* "numpy.pxd":268 * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" */ __pyx_t_1 = (__pyx_v_t == NPY_FLOAT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__f; goto __pyx_L13; } /* "numpy.pxd":269 * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" */ __pyx_t_1 = (__pyx_v_t == NPY_DOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__d; goto __pyx_L13; } /* "numpy.pxd":270 * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" */ __pyx_t_1 = (__pyx_v_t == NPY_LONGDOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__g; goto __pyx_L13; } /* "numpy.pxd":271 * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" */ __pyx_t_1 = (__pyx_v_t == NPY_CFLOAT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Zf; goto __pyx_L13; } /* "numpy.pxd":272 * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" */ __pyx_t_1 = (__pyx_v_t == NPY_CDOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Zd; goto __pyx_L13; } /* "numpy.pxd":273 * elif t 
== NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f = "O" * else: */ __pyx_t_1 = (__pyx_v_t == NPY_CLONGDOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Zg; goto __pyx_L13; } /* "numpy.pxd":274 * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ __pyx_t_1 = (__pyx_v_t == NPY_OBJECT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__O; goto __pyx_L13; } /*else*/ { /* "numpy.pxd":276 * elif t == NPY_OBJECT: f = "O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * info.format = f * return */ __pyx_t_4 = PyInt_FromLong(__pyx_v_t); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_8 = PyNumber_Remainder(((PyObject *)__pyx_kp_u_7), __pyx_t_4); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_8)); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_t_8)); __Pyx_GIVEREF(((PyObject *)__pyx_t_8)); __pyx_t_8 = 0; __pyx_t_8 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_8, 0, 0, 0); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L13:; /* "numpy.pxd":277 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f # <<<<<<<<<<<<<< * return * else: */ __pyx_v_info->format = __pyx_v_f; /* "numpy.pxd":278 * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f * return # <<<<<<<<<<<<<< * else: * info.format = stdlib.malloc(_buffer_format_string_len) */ __pyx_r = 0; goto __pyx_L0; goto __pyx_L11; } /*else*/ { /* "numpy.pxd":280 * return * else: * info.format = stdlib.malloc(_buffer_format_string_len) # <<<<<<<<<<<<<< * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 */ __pyx_v_info->format = ((char *)malloc(255)); /* "numpy.pxd":281 * else: * info.format = stdlib.malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment # <<<<<<<<<<<<<< * offset = 0 * f = _util_dtypestring(descr, info.format + 1, */ (__pyx_v_info->format[0]) = '^'; /* "numpy.pxd":282 * info.format = stdlib.malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 # <<<<<<<<<<<<<< * f = _util_dtypestring(descr, info.format + 1, * info.format + _buffer_format_string_len, */ __pyx_v_offset = 0; /* "numpy.pxd":285 * f = _util_dtypestring(descr, info.format + 1, * info.format + _buffer_format_string_len, * &offset) # <<<<<<<<<<<<<< * f[0] = c'\0' # Terminate format string * */ __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 255), (&__pyx_v_offset)); if (unlikely(__pyx_t_9 == NULL)) 
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 283; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_f = __pyx_t_9; /* "numpy.pxd":286 * info.format + _buffer_format_string_len, * &offset) * f[0] = c'\0' # Terminate format string # <<<<<<<<<<<<<< * * def __releasebuffer__(ndarray self, Py_buffer* info): */ (__pyx_v_f[0]) = '\x00'; } __pyx_L11:; __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { __Pyx_GOTREF(Py_None); __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; } __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_descr); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__releasebuffer__ (wrapper)", 0); __pyx_pf_5numpy_7ndarray_2__releasebuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info)); __Pyx_RefNannyFinishContext(); } /* "numpy.pxd":288 * f[0] = c'\0' # Terminate format string * * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< * if PyArray_HASFIELDS(self): * stdlib.free(info.format) */ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__releasebuffer__", 0); /* "numpy.pxd":289 * * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_t_1 = PyArray_HASFIELDS(__pyx_v_self); if (__pyx_t_1) { /* "numpy.pxd":290 * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): * stdlib.free(info.format) # <<<<<<<<<<<<<< * if sizeof(npy_intp) != sizeof(Py_ssize_t): * stdlib.free(info.strides) */ free(__pyx_v_info->format); goto __pyx_L3; } __pyx_L3:; /* "numpy.pxd":291 * if PyArray_HASFIELDS(self): * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * stdlib.free(info.strides) * # info.shape was stored after info.strides in the same block */ __pyx_t_1 = ((sizeof(npy_intp)) != (sizeof(Py_ssize_t))); if (__pyx_t_1) { /* "numpy.pxd":292 * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): * stdlib.free(info.strides) # <<<<<<<<<<<<<< * # info.shape was stored after info.strides in the same block * */ free(__pyx_v_info->strides); goto __pyx_L4; } __pyx_L4:; __Pyx_RefNannyFinishContext(); } /* "numpy.pxd":768 * ctypedef npy_cdouble complex_t * * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(1, a) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0); /* "numpy.pxd":769 * * cdef inline object PyArray_MultiIterNew1(a): * 
return PyArray_MultiIterNew(1, a) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew2(a, b): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 769; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":771 * return PyArray_MultiIterNew(1, a) * * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(2, a, b) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0); /* "numpy.pxd":772 * * cdef inline object PyArray_MultiIterNew2(a, b): * return PyArray_MultiIterNew(2, a, b) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew3(a, b, c): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 772; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":774 * return PyArray_MultiIterNew(2, a, b) * * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(3, a, b, c) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0); /* "numpy.pxd":775 * * cdef inline object PyArray_MultiIterNew3(a, b, c): * return PyArray_MultiIterNew(3, a, b, c) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 775; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":777 * return PyArray_MultiIterNew(3, a, b, c) * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(4, a, b, c, d) * */ static CYTHON_INLINE PyObject 
*__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0); /* "numpy.pxd":778 * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): * return PyArray_MultiIterNew(4, a, b, c, d) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 778; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":780 * return PyArray_MultiIterNew(4, a, b, c, d) * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(5, a, b, c, d, e) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0); /* "numpy.pxd":781 * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): * return PyArray_MultiIterNew(5, a, b, c, d, e) # <<<<<<<<<<<<<< * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 781; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":783 * return PyArray_MultiIterNew(5, a, b, c, d, e) * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< * # Recursive utility function used in __getbuffer__ to get format * # string. The new location in the format string is returned. 
*/ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) { PyArray_Descr *__pyx_v_child = 0; int __pyx_v_endian_detector; int __pyx_v_little_endian; PyObject *__pyx_v_fields = 0; PyObject *__pyx_v_childname = NULL; PyObject *__pyx_v_new_offset = NULL; PyObject *__pyx_v_t = NULL; char *__pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *(*__pyx_t_6)(PyObject *); int __pyx_t_7; int __pyx_t_8; int __pyx_t_9; int __pyx_t_10; long __pyx_t_11; char *__pyx_t_12; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_util_dtypestring", 0); /* "numpy.pxd":790 * cdef int delta_offset * cdef tuple i * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((&endian_detector)[0] != 0) * cdef tuple fields */ __pyx_v_endian_detector = 1; /* "numpy.pxd":791 * cdef tuple i * cdef int endian_detector = 1 * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * cdef tuple fields * */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "numpy.pxd":794 * cdef tuple fields * * for childname in descr.names: # <<<<<<<<<<<<<< * fields = descr.fields[childname] * child, new_offset = fields */ if (unlikely(((PyObject *)__pyx_v_descr->names) == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_1 = ((PyObject *)__pyx_v_descr->names); __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; for (;;) { if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif __Pyx_XDECREF(__pyx_v_childname); __pyx_v_childname = __pyx_t_3; __pyx_t_3 = 0; /* "numpy.pxd":795 * * for childname in descr.names: * fields = descr.fields[childname] # <<<<<<<<<<<<<< * child, new_offset = fields * */ __pyx_t_3 = PyObject_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (!__pyx_t_3) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected tuple, got %.200s", Py_TYPE(__pyx_t_3)->tp_name), 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_XDECREF(((PyObject *)__pyx_v_fields)); __pyx_v_fields = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "numpy.pxd":796 * for childname in descr.names: * fields = descr.fields[childname] * child, new_offset = fields # <<<<<<<<<<<<<< * * if (end - f) - (new_offset - offset[0]) < 15: */ if (likely(PyTuple_CheckExact(((PyObject *)__pyx_v_fields)))) { PyObject* sequence = ((PyObject *)__pyx_v_fields); #if CYTHON_COMPILING_IN_CPYTHON Py_ssize_t size = Py_SIZE(sequence); #else Py_ssize_t size = PySequence_Size(sequence); #endif if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else 
if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); #else __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else if (1) { __Pyx_RaiseNoneNotIterableError(); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } else { Py_ssize_t index = -1; __pyx_t_5 = PyObject_GetIter(((PyObject *)__pyx_v_fields)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = Py_TYPE(__pyx_t_5)->tp_iternext; index = 0; __pyx_t_3 = __pyx_t_6(__pyx_t_5); if (unlikely(!__pyx_t_3)) goto __pyx_L5_unpacking_failed; __Pyx_GOTREF(__pyx_t_3); index = 1; __pyx_t_4 = __pyx_t_6(__pyx_t_5); if (unlikely(!__pyx_t_4)) goto __pyx_L5_unpacking_failed; __Pyx_GOTREF(__pyx_t_4); if (__Pyx_IternextUnpackEndCheck(__pyx_t_6(__pyx_t_5), 2) < 0) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_6 = NULL; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L6_unpacking_done; __pyx_L5_unpacking_failed:; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_6 = NULL; if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_L6_unpacking_done:; } if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_XDECREF(((PyObject *)__pyx_v_child)); __pyx_v_child = ((PyArray_Descr *)__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_v_new_offset); __pyx_v_new_offset = __pyx_t_4; __pyx_t_4 = 0; /* "numpy.pxd":798 * child, new_offset = fields * * if (end - f) - (new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * */ __pyx_t_4 = PyInt_FromLong((__pyx_v_end - __pyx_v_f)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyInt_FromLong((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_3); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyNumber_Subtract(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyObject_RichCompare(__pyx_t_3, __pyx_int_15, Py_LT); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = 
__pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { /* "numpy.pxd":799 * * if (end - f) - (new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_t_5 = PyObject_Call(__pyx_builtin_RuntimeError, ((PyObject *)__pyx_k_tuple_9), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L7; } __pyx_L7:; /* "numpy.pxd":801 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_7 = (__pyx_v_child->byteorder == '>'); if (__pyx_t_7) { __pyx_t_8 = __pyx_v_little_endian; } else { __pyx_t_8 = __pyx_t_7; } if (!__pyx_t_8) { /* "numpy.pxd":802 * * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * # One could encode it in the format string and have Cython */ __pyx_t_7 = (__pyx_v_child->byteorder == '<'); if (__pyx_t_7) { __pyx_t_9 = (!__pyx_v_little_endian); __pyx_t_10 = __pyx_t_9; } else { __pyx_t_10 = __pyx_t_7; } __pyx_t_7 = __pyx_t_10; } else { __pyx_t_7 = __pyx_t_8; } if (__pyx_t_7) { /* "numpy.pxd":803 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_10), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L8; } __pyx_L8:; /* "numpy.pxd":813 * * # Output padding bytes * while offset[0] < new_offset: # <<<<<<<<<<<<<< * f[0] = 120 # "x"; pad byte * f += 1 */ while (1) { __pyx_t_5 = PyInt_FromLong((__pyx_v_offset[0])); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_t_5, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if 
(!__pyx_t_7) break; /* "numpy.pxd":814 * # Output padding bytes * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<< * f += 1 * offset[0] += 1 */ (__pyx_v_f[0]) = 120; /* "numpy.pxd":815 * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte * f += 1 # <<<<<<<<<<<<<< * offset[0] += 1 * */ __pyx_v_f = (__pyx_v_f + 1); /* "numpy.pxd":816 * f[0] = 120 # "x"; pad byte * f += 1 * offset[0] += 1 # <<<<<<<<<<<<<< * * offset[0] += child.itemsize */ __pyx_t_11 = 0; (__pyx_v_offset[__pyx_t_11]) = ((__pyx_v_offset[__pyx_t_11]) + 1); } /* "numpy.pxd":818 * offset[0] += 1 * * offset[0] += child.itemsize # <<<<<<<<<<<<<< * * if not PyDataType_HASFIELDS(child): */ __pyx_t_11 = 0; (__pyx_v_offset[__pyx_t_11]) = ((__pyx_v_offset[__pyx_t_11]) + __pyx_v_child->elsize); /* "numpy.pxd":820 * offset[0] += child.itemsize * * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< * t = child.type_num * if end - f < 5: */ __pyx_t_7 = (!PyDataType_HASFIELDS(__pyx_v_child)); if (__pyx_t_7) { /* "numpy.pxd":821 * * if not PyDataType_HASFIELDS(child): * t = child.type_num # <<<<<<<<<<<<<< * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") */ __pyx_t_3 = PyInt_FromLong(__pyx_v_child->type_num); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 821; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_XDECREF(__pyx_v_t); __pyx_v_t = __pyx_t_3; __pyx_t_3 = 0; /* "numpy.pxd":822 * if not PyDataType_HASFIELDS(child): * t = child.type_num * if end - f < 5: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short.") * */ __pyx_t_7 = ((__pyx_v_end - __pyx_v_f) < 5); if (__pyx_t_7) { /* "numpy.pxd":823 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_t_3 = PyObject_Call(__pyx_builtin_RuntimeError, ((PyObject *)__pyx_k_tuple_12), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L12; } __pyx_L12:; /* "numpy.pxd":826 * * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" */ __pyx_t_3 = PyInt_FromLong(NPY_BYTE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 98; goto __pyx_L13; } /* "numpy.pxd":827 * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" */ __pyx_t_5 = PyInt_FromLong(NPY_UBYTE); 
if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 66; goto __pyx_L13; } /* "numpy.pxd":828 * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" */ __pyx_t_3 = PyInt_FromLong(NPY_SHORT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 104; goto __pyx_L13; } /* "numpy.pxd":829 * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" */ __pyx_t_5 = PyInt_FromLong(NPY_USHORT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 72; goto __pyx_L13; } /* "numpy.pxd":830 * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" */ __pyx_t_3 = PyInt_FromLong(NPY_INT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 105; goto __pyx_L13; } /* "numpy.pxd":831 * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t 
== NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" */ __pyx_t_5 = PyInt_FromLong(NPY_UINT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 73; goto __pyx_L13; } /* "numpy.pxd":832 * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" */ __pyx_t_3 = PyInt_FromLong(NPY_LONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 108; goto __pyx_L13; } /* "numpy.pxd":833 * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" */ __pyx_t_5 = PyInt_FromLong(NPY_ULONG); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 76; goto __pyx_L13; } /* "numpy.pxd":834 * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" */ __pyx_t_3 = PyInt_FromLong(NPY_LONGLONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; 
__pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 113; goto __pyx_L13; } /* "numpy.pxd":835 * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" */ __pyx_t_5 = PyInt_FromLong(NPY_ULONGLONG); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 81; goto __pyx_L13; } /* "numpy.pxd":836 * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" */ __pyx_t_3 = PyInt_FromLong(NPY_FLOAT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 102; goto __pyx_L13; } /* "numpy.pxd":837 * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf */ __pyx_t_5 = PyInt_FromLong(NPY_DOUBLE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 100; goto __pyx_L13; } /* "numpy.pxd":838 * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd */ __pyx_t_3 = PyInt_FromLong(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = 
PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 103; goto __pyx_L13; } /* "numpy.pxd":839 * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg */ __pyx_t_5 = PyInt_FromLong(NPY_CFLOAT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 102; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L13; } /* "numpy.pxd":840 * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" */ __pyx_t_3 = PyInt_FromLong(NPY_CDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 100; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L13; } /* "numpy.pxd":841 * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: */ __pyx_t_5 = PyInt_FromLong(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 103; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L13; } /* "numpy.pxd":842 * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ __pyx_t_3 = PyInt_FromLong(NPY_OBJECT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 79; goto __pyx_L13; } /*else*/ { /* "numpy.pxd":844 * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * f += 1 * else: */ __pyx_t_5 = PyNumber_Remainder(((PyObject *)__pyx_kp_u_7), __pyx_v_t); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_5)); __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_t_5)); __Pyx_GIVEREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L13:; /* "numpy.pxd":845 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * f += 1 # <<<<<<<<<<<<<< * else: * # Cython ignores struct boundary information ("T{...}"), */ __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L11; } /*else*/ { /* "numpy.pxd":849 * # Cython ignores struct boundary information ("T{...}"), * # so don't output it * f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<< * return f * */ __pyx_t_12 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_12 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 849; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_f = __pyx_t_12; } __pyx_L11:; } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "numpy.pxd":850 * # so don't output it * f = _util_dtypestring(child, f, end, offset) * return f # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_f; goto __pyx_L0; __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_child); __Pyx_XDECREF(__pyx_v_fields); 
__Pyx_XDECREF(__pyx_v_childname); __Pyx_XDECREF(__pyx_v_new_offset); __Pyx_XDECREF(__pyx_v_t); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":965 * * * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< * cdef PyObject* baseptr * if base is None: */ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) { PyObject *__pyx_v_baseptr; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("set_array_base", 0); /* "numpy.pxd":967 * cdef inline void set_array_base(ndarray arr, object base): * cdef PyObject* baseptr * if base is None: # <<<<<<<<<<<<<< * baseptr = NULL * else: */ __pyx_t_1 = (__pyx_v_base == Py_None); if (__pyx_t_1) { /* "numpy.pxd":968 * cdef PyObject* baseptr * if base is None: * baseptr = NULL # <<<<<<<<<<<<<< * else: * Py_INCREF(base) # important to do this before decref below! */ __pyx_v_baseptr = NULL; goto __pyx_L3; } /*else*/ { /* "numpy.pxd":970 * baseptr = NULL * else: * Py_INCREF(base) # important to do this before decref below! # <<<<<<<<<<<<<< * baseptr = base * Py_XDECREF(arr.base) */ Py_INCREF(__pyx_v_base); /* "numpy.pxd":971 * else: * Py_INCREF(base) # important to do this before decref below! * baseptr = base # <<<<<<<<<<<<<< * Py_XDECREF(arr.base) * arr.base = baseptr */ __pyx_v_baseptr = ((PyObject *)__pyx_v_base); } __pyx_L3:; /* "numpy.pxd":972 * Py_INCREF(base) # important to do this before decref below! * baseptr = base * Py_XDECREF(arr.base) # <<<<<<<<<<<<<< * arr.base = baseptr * */ Py_XDECREF(__pyx_v_arr->base); /* "numpy.pxd":973 * baseptr = base * Py_XDECREF(arr.base) * arr.base = baseptr # <<<<<<<<<<<<<< * * cdef inline object get_array_base(ndarray arr): */ __pyx_v_arr->base = __pyx_v_baseptr; __Pyx_RefNannyFinishContext(); } /* "numpy.pxd":975 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("get_array_base", 0); /* "numpy.pxd":976 * * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: # <<<<<<<<<<<<<< * return None * else: */ __pyx_t_1 = (__pyx_v_arr->base == NULL); if (__pyx_t_1) { /* "numpy.pxd":977 * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: * return None # <<<<<<<<<<<<<< * else: * return arr.base */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_None); __pyx_r = Py_None; goto __pyx_L0; goto __pyx_L3; } /*else*/ { /* "numpy.pxd":979 * return None * else: * return arr.base # <<<<<<<<<<<<<< */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_arr->base)); __pyx_r = ((PyObject *)__pyx_v_arr->base); goto __pyx_L0; } __pyx_L3:; __pyx_r = Py_None; __Pyx_INCREF(Py_None); __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyMethodDef __pyx_methods[] = { {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 static struct PyModuleDef __pyx_moduledef = { PyModuleDef_HEAD_INIT, __Pyx_NAMESTR("array"), __Pyx_DOCSTR(__pyx_k_13), /* m_doc */ -1, /* m_size */ __pyx_methods /* m_methods */, NULL, /* m_reload */ NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_kp_u_1, __pyx_k_1, sizeof(__pyx_k_1), 0, 1, 0, 0}, {&__pyx_kp_u_11, __pyx_k_11, sizeof(__pyx_k_11), 0, 1, 0, 0}, {&__pyx_kp_s_14, __pyx_k_14, sizeof(__pyx_k_14), 0, 0, 1, 0}, 
{&__pyx_kp_s_17, __pyx_k_17, sizeof(__pyx_k_17), 0, 0, 1, 0}, {&__pyx_n_s_18, __pyx_k_18, sizeof(__pyx_k_18), 0, 0, 1, 1}, {&__pyx_kp_u_3, __pyx_k_3, sizeof(__pyx_k_3), 0, 1, 0, 0}, {&__pyx_kp_u_5, __pyx_k_5, sizeof(__pyx_k_5), 0, 1, 0, 0}, {&__pyx_kp_u_7, __pyx_k_7, sizeof(__pyx_k_7), 0, 1, 0, 0}, {&__pyx_kp_u_8, __pyx_k_8, sizeof(__pyx_k_8), 0, 1, 0, 0}, {&__pyx_n_s__A, __pyx_k__A, sizeof(__pyx_k__A), 0, 0, 1, 1}, {&__pyx_n_s__B, __pyx_k__B, sizeof(__pyx_k__B), 0, 0, 1, 1}, {&__pyx_n_s__C, __pyx_k__C, sizeof(__pyx_k__C), 0, 0, 1, 1}, {&__pyx_n_s__RuntimeError, __pyx_k__RuntimeError, sizeof(__pyx_k__RuntimeError), 0, 0, 1, 1}, {&__pyx_n_s__ValueError, __pyx_k__ValueError, sizeof(__pyx_k__ValueError), 0, 0, 1, 1}, {&__pyx_n_s____main__, __pyx_k____main__, sizeof(__pyx_k____main__), 0, 0, 1, 1}, {&__pyx_n_s____test__, __pyx_k____test__, sizeof(__pyx_k____test__), 0, 0, 1, 1}, {&__pyx_n_s____version__, __pyx_k____version__, sizeof(__pyx_k____version__), 0, 0, 1, 1}, {&__pyx_n_s__a, __pyx_k__a, sizeof(__pyx_k__a), 0, 0, 1, 1}, {&__pyx_n_s__array_add, __pyx_k__array_add, sizeof(__pyx_k__array_add), 0, 0, 1, 1}, {&__pyx_n_s__array_div, __pyx_k__array_div, sizeof(__pyx_k__array_div), 0, 0, 1, 1}, {&__pyx_n_s__array_get, __pyx_k__array_get, sizeof(__pyx_k__array_get), 0, 0, 1, 1}, {&__pyx_n_s__array_get_block, __pyx_k__array_get_block, sizeof(__pyx_k__array_get_block), 0, 0, 1, 1}, {&__pyx_n_s__array_mul, __pyx_k__array_mul, sizeof(__pyx_k__array_mul), 0, 0, 1, 1}, {&__pyx_n_s__array_sub, __pyx_k__array_sub, sizeof(__pyx_k__array_sub), 0, 0, 1, 1}, {&__pyx_n_s__asub, __pyx_k__asub, sizeof(__pyx_k__asub), 0, 0, 1, 1}, {&__pyx_n_s__b, __pyx_k__b, sizeof(__pyx_k__b), 0, 0, 1, 1}, {&__pyx_n_s__c, __pyx_k__c, sizeof(__pyx_k__c), 0, 0, 1, 1}, {&__pyx_n_s__fT, __pyx_k__fT, sizeof(__pyx_k__fT), 0, 0, 1, 1}, {&__pyx_n_s__fX, __pyx_k__fX, sizeof(__pyx_k__fX), 0, 0, 1, 1}, {&__pyx_n_s__fY, __pyx_k__fY, sizeof(__pyx_k__fY), 0, 0, 1, 1}, {&__pyx_n_s__fZ, __pyx_k__fZ, sizeof(__pyx_k__fZ), 0, 0, 1, 1}, {&__pyx_n_s__np, __pyx_k__np, sizeof(__pyx_k__np), 0, 0, 1, 1}, {&__pyx_n_s__numpy, __pyx_k__numpy, sizeof(__pyx_k__numpy), 0, 0, 1, 1}, {&__pyx_n_s__range, __pyx_k__range, sizeof(__pyx_k__range), 0, 0, 1, 1}, {&__pyx_n_s__t, __pyx_k__t, sizeof(__pyx_k__t), 0, 0, 1, 1}, {&__pyx_n_s__t0, __pyx_k__t0, sizeof(__pyx_k__t0), 0, 0, 1, 1}, {&__pyx_n_s__t1, __pyx_k__t1, sizeof(__pyx_k__t1), 0, 0, 1, 1}, {&__pyx_n_s__va, __pyx_k__va, sizeof(__pyx_k__va), 0, 0, 1, 1}, {&__pyx_n_s__x, __pyx_k__x, sizeof(__pyx_k__x), 0, 0, 1, 1}, {&__pyx_n_s__x0, __pyx_k__x0, sizeof(__pyx_k__x0), 0, 0, 1, 1}, {&__pyx_n_s__x1, __pyx_k__x1, sizeof(__pyx_k__x1), 0, 0, 1, 1}, {&__pyx_n_s__y, __pyx_k__y, sizeof(__pyx_k__y), 0, 0, 1, 1}, {&__pyx_n_s__y0, __pyx_k__y0, sizeof(__pyx_k__y0), 0, 0, 1, 1}, {&__pyx_n_s__y1, __pyx_k__y1, sizeof(__pyx_k__y1), 0, 0, 1, 1}, {&__pyx_n_s__z, __pyx_k__z, sizeof(__pyx_k__z), 0, 0, 1, 1}, {&__pyx_n_s__z0, __pyx_k__z0, sizeof(__pyx_k__z0), 0, 0, 1, 1}, {&__pyx_n_s__z1, __pyx_k__z1, sizeof(__pyx_k__z1), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_ValueError = __Pyx_GetName(__pyx_b, __pyx_n_s__ValueError); if (!__pyx_builtin_ValueError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_range = __Pyx_GetName(__pyx_b, __pyx_n_s__range); if (!__pyx_builtin_range) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 228; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_RuntimeError = 
__Pyx_GetName(__pyx_b, __pyx_n_s__RuntimeError); if (!__pyx_builtin_RuntimeError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} return 0; __pyx_L1_error:; return -1; } static int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "numpy.pxd":215 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_k_tuple_2 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_2); __Pyx_INCREF(((PyObject *)__pyx_kp_u_1)); PyTuple_SET_ITEM(__pyx_k_tuple_2, 0, ((PyObject *)__pyx_kp_u_1)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_1)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_2)); /* "numpy.pxd":219 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_k_tuple_4 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_4); __Pyx_INCREF(((PyObject *)__pyx_kp_u_3)); PyTuple_SET_ITEM(__pyx_k_tuple_4, 0, ((PyObject *)__pyx_kp_u_3)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_3)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_4)); /* "numpy.pxd":257 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_k_tuple_6 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_6); __Pyx_INCREF(((PyObject *)__pyx_kp_u_5)); PyTuple_SET_ITEM(__pyx_k_tuple_6, 0, ((PyObject *)__pyx_kp_u_5)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_5)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_6)); /* "numpy.pxd":799 * * if (end - f) - (new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_k_tuple_9 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_9)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_9); __Pyx_INCREF(((PyObject *)__pyx_kp_u_8)); PyTuple_SET_ITEM(__pyx_k_tuple_9, 0, ((PyObject *)__pyx_kp_u_8)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_8)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_9)); /* "numpy.pxd":803 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_k_tuple_10 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_10)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_10); __Pyx_INCREF(((PyObject *)__pyx_kp_u_5)); PyTuple_SET_ITEM(__pyx_k_tuple_10, 0, 
((PyObject *)__pyx_kp_u_5)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_5)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_10)); /* "numpy.pxd":823 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_k_tuple_12 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_12)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_12); __Pyx_INCREF(((PyObject *)__pyx_kp_u_11)); PyTuple_SET_ITEM(__pyx_k_tuple_12, 0, ((PyObject *)__pyx_kp_u_11)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_11)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_12)); /* "nipy/labs/bindings/array.pyx":21 * * # Binded routines * def array_get(A, size_t x, size_t y=0, size_t z=0, size_t t=0): # <<<<<<<<<<<<<< * """ * Get array element. */ __pyx_k_tuple_15 = PyTuple_New(7); if (unlikely(!__pyx_k_tuple_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_15); __Pyx_INCREF(((PyObject *)__pyx_n_s__A)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 0, ((PyObject *)__pyx_n_s__A)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__A)); __Pyx_INCREF(((PyObject *)__pyx_n_s__x)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 1, ((PyObject *)__pyx_n_s__x)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__x)); __Pyx_INCREF(((PyObject *)__pyx_n_s__y)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 2, ((PyObject *)__pyx_n_s__y)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__y)); __Pyx_INCREF(((PyObject *)__pyx_n_s__z)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 3, ((PyObject *)__pyx_n_s__z)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__z)); __Pyx_INCREF(((PyObject *)__pyx_n_s__t)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 4, ((PyObject *)__pyx_n_s__t)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__t)); __Pyx_INCREF(((PyObject *)__pyx_n_s__a)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 5, ((PyObject *)__pyx_n_s__a)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__a)); __Pyx_INCREF(((PyObject *)__pyx_n_s__va)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 6, ((PyObject *)__pyx_n_s__va)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__va)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_15)); __pyx_k_codeobj_16 = (PyObject*)__Pyx_PyCode_New(5, 0, 7, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_15, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_17, __pyx_n_s__array_get, 21, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_16)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/bindings/array.pyx":34 * * * def array_get_block( A, size_t x0, size_t x1, size_t fX=1, # <<<<<<<<<<<<<< * size_t y0=0, size_t y1=0, size_t fY=1, * size_t z0=0, size_t z1=0, size_t fZ=1, */ __pyx_k_tuple_19 = PyTuple_New(17); if (unlikely(!__pyx_k_tuple_19)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 34; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_19); __Pyx_INCREF(((PyObject *)__pyx_n_s__A)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 0, ((PyObject *)__pyx_n_s__A)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__A)); __Pyx_INCREF(((PyObject *)__pyx_n_s__x0)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 1, ((PyObject *)__pyx_n_s__x0)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__x0)); __Pyx_INCREF(((PyObject *)__pyx_n_s__x1)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 2, ((PyObject *)__pyx_n_s__x1)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__x1)); __Pyx_INCREF(((PyObject *)__pyx_n_s__fX)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 3, ((PyObject *)__pyx_n_s__fX)); 
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__fX)); __Pyx_INCREF(((PyObject *)__pyx_n_s__y0)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 4, ((PyObject *)__pyx_n_s__y0)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__y0)); __Pyx_INCREF(((PyObject *)__pyx_n_s__y1)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 5, ((PyObject *)__pyx_n_s__y1)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__y1)); __Pyx_INCREF(((PyObject *)__pyx_n_s__fY)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 6, ((PyObject *)__pyx_n_s__fY)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__fY)); __Pyx_INCREF(((PyObject *)__pyx_n_s__z0)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 7, ((PyObject *)__pyx_n_s__z0)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__z0)); __Pyx_INCREF(((PyObject *)__pyx_n_s__z1)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 8, ((PyObject *)__pyx_n_s__z1)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__z1)); __Pyx_INCREF(((PyObject *)__pyx_n_s__fZ)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 9, ((PyObject *)__pyx_n_s__fZ)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__fZ)); __Pyx_INCREF(((PyObject *)__pyx_n_s__t0)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 10, ((PyObject *)__pyx_n_s__t0)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__t0)); __Pyx_INCREF(((PyObject *)__pyx_n_s__t1)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 11, ((PyObject *)__pyx_n_s__t1)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__t1)); __Pyx_INCREF(((PyObject *)__pyx_n_s__fT)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 12, ((PyObject *)__pyx_n_s__fT)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__fT)); __Pyx_INCREF(((PyObject *)__pyx_n_s__a)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 13, ((PyObject *)__pyx_n_s__a)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__a)); __Pyx_INCREF(((PyObject *)__pyx_n_s__b)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 14, ((PyObject *)__pyx_n_s__b)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__b)); __Pyx_INCREF(((PyObject *)__pyx_n_s__asub)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 15, ((PyObject *)__pyx_n_s__asub)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__asub)); __Pyx_INCREF(((PyObject *)__pyx_n_s__B)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 16, ((PyObject *)__pyx_n_s__B)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__B)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_19)); __pyx_k_codeobj_20 = (PyObject*)__Pyx_PyCode_New(13, 0, 17, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_19, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_17, __pyx_n_s__array_get_block, 34, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_20)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 34; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/bindings/array.pyx":56 * * * def array_add(A, B): # <<<<<<<<<<<<<< * """ * C = A + B */ __pyx_k_tuple_21 = PyTuple_New(6); if (unlikely(!__pyx_k_tuple_21)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_21); __Pyx_INCREF(((PyObject *)__pyx_n_s__A)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 0, ((PyObject *)__pyx_n_s__A)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__A)); __Pyx_INCREF(((PyObject *)__pyx_n_s__B)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 1, ((PyObject *)__pyx_n_s__B)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__B)); __Pyx_INCREF(((PyObject *)__pyx_n_s__a)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 2, ((PyObject *)__pyx_n_s__a)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__a)); __Pyx_INCREF(((PyObject *)__pyx_n_s__b)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 3, ((PyObject *)__pyx_n_s__b)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__b)); __Pyx_INCREF(((PyObject *)__pyx_n_s__c)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 4, ((PyObject *)__pyx_n_s__c)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__c)); 
__Pyx_INCREF(((PyObject *)__pyx_n_s__C)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 5, ((PyObject *)__pyx_n_s__C)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__C)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_21)); __pyx_k_codeobj_22 = (PyObject*)__Pyx_PyCode_New(2, 0, 6, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_21, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_17, __pyx_n_s__array_add, 56, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_22)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/bindings/array.pyx":73 * * * def array_mul(A, B): # <<<<<<<<<<<<<< * """ * C = A * B */ __pyx_k_tuple_23 = PyTuple_New(6); if (unlikely(!__pyx_k_tuple_23)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_23); __Pyx_INCREF(((PyObject *)__pyx_n_s__A)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 0, ((PyObject *)__pyx_n_s__A)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__A)); __Pyx_INCREF(((PyObject *)__pyx_n_s__B)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 1, ((PyObject *)__pyx_n_s__B)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__B)); __Pyx_INCREF(((PyObject *)__pyx_n_s__a)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 2, ((PyObject *)__pyx_n_s__a)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__a)); __Pyx_INCREF(((PyObject *)__pyx_n_s__b)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 3, ((PyObject *)__pyx_n_s__b)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__b)); __Pyx_INCREF(((PyObject *)__pyx_n_s__c)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 4, ((PyObject *)__pyx_n_s__c)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__c)); __Pyx_INCREF(((PyObject *)__pyx_n_s__C)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 5, ((PyObject *)__pyx_n_s__C)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__C)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_23)); __pyx_k_codeobj_24 = (PyObject*)__Pyx_PyCode_New(2, 0, 6, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_23, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_17, __pyx_n_s__array_mul, 73, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_24)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/bindings/array.pyx":90 * * * def array_sub(A, B): # <<<<<<<<<<<<<< * """ * C = A - B */ __pyx_k_tuple_25 = PyTuple_New(6); if (unlikely(!__pyx_k_tuple_25)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 90; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_25); __Pyx_INCREF(((PyObject *)__pyx_n_s__A)); PyTuple_SET_ITEM(__pyx_k_tuple_25, 0, ((PyObject *)__pyx_n_s__A)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__A)); __Pyx_INCREF(((PyObject *)__pyx_n_s__B)); PyTuple_SET_ITEM(__pyx_k_tuple_25, 1, ((PyObject *)__pyx_n_s__B)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__B)); __Pyx_INCREF(((PyObject *)__pyx_n_s__a)); PyTuple_SET_ITEM(__pyx_k_tuple_25, 2, ((PyObject *)__pyx_n_s__a)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__a)); __Pyx_INCREF(((PyObject *)__pyx_n_s__b)); PyTuple_SET_ITEM(__pyx_k_tuple_25, 3, ((PyObject *)__pyx_n_s__b)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__b)); __Pyx_INCREF(((PyObject *)__pyx_n_s__c)); PyTuple_SET_ITEM(__pyx_k_tuple_25, 4, ((PyObject *)__pyx_n_s__c)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__c)); __Pyx_INCREF(((PyObject *)__pyx_n_s__C)); PyTuple_SET_ITEM(__pyx_k_tuple_25, 5, ((PyObject *)__pyx_n_s__C)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__C)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_25)); __pyx_k_codeobj_26 = (PyObject*)__Pyx_PyCode_New(2, 0, 6, 0, 0, __pyx_empty_bytes, 
__pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_25, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_17, __pyx_n_s__array_sub, 90, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_26)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 90; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/bindings/array.pyx":107 * * * def array_div(A, B): # <<<<<<<<<<<<<< * """ * C = A / B */ __pyx_k_tuple_27 = PyTuple_New(6); if (unlikely(!__pyx_k_tuple_27)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 107; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_27); __Pyx_INCREF(((PyObject *)__pyx_n_s__A)); PyTuple_SET_ITEM(__pyx_k_tuple_27, 0, ((PyObject *)__pyx_n_s__A)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__A)); __Pyx_INCREF(((PyObject *)__pyx_n_s__B)); PyTuple_SET_ITEM(__pyx_k_tuple_27, 1, ((PyObject *)__pyx_n_s__B)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__B)); __Pyx_INCREF(((PyObject *)__pyx_n_s__a)); PyTuple_SET_ITEM(__pyx_k_tuple_27, 2, ((PyObject *)__pyx_n_s__a)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__a)); __Pyx_INCREF(((PyObject *)__pyx_n_s__b)); PyTuple_SET_ITEM(__pyx_k_tuple_27, 3, ((PyObject *)__pyx_n_s__b)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__b)); __Pyx_INCREF(((PyObject *)__pyx_n_s__c)); PyTuple_SET_ITEM(__pyx_k_tuple_27, 4, ((PyObject *)__pyx_n_s__c)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__c)); __Pyx_INCREF(((PyObject *)__pyx_n_s__C)); PyTuple_SET_ITEM(__pyx_k_tuple_27, 5, ((PyObject *)__pyx_n_s__C)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__C)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_27)); __pyx_k_codeobj_28 = (PyObject*)__Pyx_PyCode_New(2, 0, 6, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_27, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_17, __pyx_n_s__array_div, 107, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_28)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 107; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_InitGlobals(void) { if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __pyx_int_15 = PyInt_FromLong(15); if (unlikely(!__pyx_int_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; return 0; __pyx_L1_error:; return -1; } #if PY_MAJOR_VERSION < 3 PyMODINIT_FUNC initarray(void); /*proto*/ PyMODINIT_FUNC initarray(void) #else PyMODINIT_FUNC PyInit_array(void); /*proto*/ PyMODINIT_FUNC PyInit_array(void) #endif { PyObject *__pyx_t_1 = NULL; __Pyx_RefNannyDeclarations #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit_array(void)", 0); if ( __Pyx_check_binary_version() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #ifdef __Pyx_CyFunction_USED if (__Pyx_CyFunction_init() < 0) {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? */ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4(__Pyx_NAMESTR("array"), __pyx_methods, __Pyx_DOCSTR(__pyx_k_13), 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (!PyDict_GetItemString(modules, "nipy.labs.bindings.array")) { if (unlikely(PyDict_SetItemString(modules, "nipy.labs.bindings.array", __pyx_m) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } } #endif __pyx_b = PyImport_AddModule(__Pyx_NAMESTR(__Pyx_BUILTIN_MODULE_NAME)); if (unlikely(!__pyx_b)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #if CYTHON_COMPILING_IN_PYPY Py_INCREF(__pyx_b); #endif if (__Pyx_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; /*--- Initialize various global constants etc. 
---*/ if (unlikely(__Pyx_InitGlobals() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (__pyx_module_is_main_nipy__labs__bindings__array) { if (__Pyx_SetAttrString(__pyx_m, "__name__", __pyx_n_s____main__) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; } /*--- Builtin init code ---*/ if (unlikely(__Pyx_InitCachedBuiltins() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Constants init code ---*/ if (unlikely(__Pyx_InitCachedConstants() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Global init code ---*/ /*--- Variable export code ---*/ /*--- Function export code ---*/ /*--- Type init code ---*/ /*--- Type import code ---*/ __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "type", #if CYTHON_COMPILING_IN_PYPY sizeof(PyTypeObject), #else sizeof(PyHeapTypeObject), #endif 0); if (unlikely(!__pyx_ptype_7cpython_4type_type)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_5numpy_dtype)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 155; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 169; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 178; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 861; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Variable import code ---*/ /*--- Function import code ---*/ /*--- Execution code ---*/ /* "nipy/labs/bindings/array.pyx":10 * """ * * __version__ = '0.1' # <<<<<<<<<<<<<< * * # Includes */ if (PyObject_SetAttr(__pyx_m, __pyx_n_s____version__, ((PyObject *)__pyx_kp_s_14)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 10; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/bindings/array.pyx":16 * * # Initialize numpy * fffpy_import_array() # <<<<<<<<<<<<<< * import_array() * import numpy as np */ fffpy_import_array(); /* "nipy/labs/bindings/array.pyx":17 * # Initialize numpy * fffpy_import_array() * import_array() # <<<<<<<<<<<<<< * import numpy as np * */ import_array(); /* "nipy/labs/bindings/array.pyx":18 * fffpy_import_array() * import_array() * import numpy as np # <<<<<<<<<<<<<< * * # Binded routines */ __pyx_t_1 = __Pyx_Import(((PyObject *)__pyx_n_s__numpy), 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 18; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__np, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 18; __pyx_clineno = 
__LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/array.pyx":21 * * # Binded routines * def array_get(A, size_t x, size_t y=0, size_t z=0, size_t t=0): # <<<<<<<<<<<<<< * """ * Get array element. */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_8bindings_5array_1array_get, NULL, __pyx_n_s_18); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__array_get, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/array.pyx":34 * * * def array_get_block( A, size_t x0, size_t x1, size_t fX=1, # <<<<<<<<<<<<<< * size_t y0=0, size_t y1=0, size_t fY=1, * size_t z0=0, size_t z1=0, size_t fZ=1, */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_8bindings_5array_3array_get_block, NULL, __pyx_n_s_18); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 34; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__array_get_block, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 34; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/array.pyx":56 * * * def array_add(A, B): # <<<<<<<<<<<<<< * """ * C = A + B */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_8bindings_5array_5array_add, NULL, __pyx_n_s_18); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__array_add, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/array.pyx":73 * * * def array_mul(A, B): # <<<<<<<<<<<<<< * """ * C = A * B */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_8bindings_5array_7array_mul, NULL, __pyx_n_s_18); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__array_mul, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/array.pyx":90 * * * def array_sub(A, B): # <<<<<<<<<<<<<< * """ * C = A - B */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_8bindings_5array_9array_sub, NULL, __pyx_n_s_18); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 90; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__array_sub, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 90; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/array.pyx":107 * * * def array_div(A, B): # <<<<<<<<<<<<<< * """ * C = A / B */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_8bindings_5array_11array_div, NULL, __pyx_n_s_18); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 107; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__array_div, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 107; __pyx_clineno = __LINE__; 
goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/array.pyx":1 * # -*- Mode: Python -*- Not really, but the syntax is close enough # <<<<<<<<<<<<<< * * """ */ __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_1)); if (PyObject_SetAttr(__pyx_m, __pyx_n_s____test__, ((PyObject *)__pyx_t_1)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; /* "numpy.pxd":975 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); if (__pyx_m) { __Pyx_AddTraceback("init nipy.labs.bindings.array", __pyx_clineno, __pyx_lineno, __pyx_filename); Py_DECREF(__pyx_m); __pyx_m = 0; } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init nipy.labs.bindings.array"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if PY_MAJOR_VERSION < 3 return; #else return __pyx_m; #endif } /* Runtime support code */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule((char *)modname); if (!m) goto end; p = PyObject_GetAttrString(m, (char *)"RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* CYTHON_REFNANNY */ static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%s() takes %s %" CYTHON_FORMAT_SSIZE_T "d positional argument%s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? 
"" : "s", num_found); } static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 
1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%s() got an unexpected keyword argument '%s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { if (unlikely(!type)) { PyErr_Format(PyExc_SystemError, "Missing type object"); return 0; } if (likely(PyObject_TypeCheck(obj, type))) return 1; PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", Py_TYPE(obj)->tp_name, type->tp_name); return 0; } static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name) { PyObject *result; result = PyObject_GetAttr(dict, name); if (!result) { if (dict != __pyx_b) { PyErr_Clear(); result = PyObject_GetAttr(__pyx_b, name); } if (!result) { PyErr_SetObject(PyExc_NameError, name); } } return result; } static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb) { #if CYTHON_COMPILING_IN_CPYTHON PyObject *tmp_type, *tmp_value, *tmp_tb; PyThreadState *tstate = PyThreadState_GET(); tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_Restore(type, value, tb); #endif } static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb) { #if CYTHON_COMPILING_IN_CPYTHON PyThreadState *tstate = PyThreadState_GET(); *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(type, value, tb); #endif } #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } #if PY_VERSION_HEX < 0x02050000 if (PyClass_Check(type)) { #else if (PyType_Check(type)) { #endif #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; #if PY_VERSION_HEX < 0x02050000 if (PyInstance_Check(type)) { type = (PyObject*) ((PyInstanceObject*)type)->in_class; Py_INCREF(type); } else { type = 0; PyErr_SetString(PyExc_TypeError, "raise: exception must be an old-style class or instance"); goto raise_error; } #else type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of 
BaseException"); goto raise_error; } #endif } __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else /* Python 3+ */ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyEval_CallObject(type, args); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } if (cause && cause != Py_None) { PyObject *fixed_cause; if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { PyThreadState *tstate = PyThreadState_GET(); PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } } bad: Py_XDECREF(owned_instance); return; } #endif static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); } static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more than %" CYTHON_FORMAT_SSIZE_T "d value%s to unpack", index, (index == 1) ? 
"" : "s"); } static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); } static CYTHON_INLINE int __Pyx_IterFinish(void) { #if CYTHON_COMPILING_IN_CPYTHON PyThreadState *tstate = PyThreadState_GET(); PyObject* exc_type = tstate->curexc_type; if (unlikely(exc_type)) { if (likely(exc_type == PyExc_StopIteration) || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration)) { PyObject *exc_value, *exc_tb; exc_value = tstate->curexc_value; exc_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; Py_DECREF(exc_type); Py_XDECREF(exc_value); Py_XDECREF(exc_tb); return 0; } else { return -1; } } return 0; #else if (unlikely(PyErr_Occurred())) { if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) { PyErr_Clear(); return 0; } else { return -1; } } return 0; #endif } static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected) { if (unlikely(retval)) { Py_DECREF(retval); __Pyx_RaiseTooManyValuesError(expected); return -1; } else { return __Pyx_IterFinish(); } return 0; } static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level) { PyObject *py_import = 0; PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; py_import = __Pyx_GetAttrString(__pyx_b, "__import__"); if (!py_import) goto bad; if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; #if PY_VERSION_HEX >= 0x02050000 { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if (strchr(__Pyx_MODULE_NAME, '.')) { /* try package relative import first */ PyObject *py_level = PyInt_FromLong(1); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; /* try absolute import on failure */ } #endif if (!module) { PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); } } #else if (level>0) { PyErr_SetString(PyExc_RuntimeError, "Relative import is not supported for Python <=2.4."); goto bad; } module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, NULL); #endif bad: Py_XDECREF(empty_list); Py_XDECREF(py_import); Py_XDECREF(empty_dict); return module; } #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return ::std::complex< float >(x, y); } #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return x + y*(__pyx_t_float_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { __pyx_t_float_complex z; z.real = x; z.imag = y; return z; } #endif #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex a, __pyx_t_float_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real + b.real; z.imag = a.imag + 
b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float denom = b.real * b.real + b.imag * b.imag; z.real = (a.real * b.real + a.imag * b.imag) / denom; z.imag = (a.imag * b.real - a.real * b.imag) / denom; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrtf(z.real*z.real + z.imag*z.imag); #else return hypotf(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { float denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(a, a); case 3: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(z, a); case 4: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } r = a.real; theta = 0; } else { r = __Pyx_c_absf(a); theta = atan2f(a.imag, a.real); } lnr = logf(r); z_r = expf(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cosf(z_theta); z.imag = z_r * sinf(z_theta); return z; } #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return ::std::complex< double >(x, y); } #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return x + y*(__pyx_t_double_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { __pyx_t_double_complex z; z.real = x; z.imag = y; return z; } #endif #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex a, __pyx_t_double_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; 
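/* Editor's note (illustrative sketch added for exposition, not part of the Cython-generated
 * module): the double-precision helpers here mirror the float versions above and implement
 * complex arithmetic componentwise; the product computed next is
 * (a.real*b.real - a.imag*b.imag) + (a.real*b.imag + a.imag*b.real)i.
 * A tiny standalone check of that formula; the demo_* names are hypothetical stand-ins and
 * the block is excluded from compilation:
 */
#if 0
#include <stdio.h>
typedef struct { double real, imag; } demo_complex;   /* hypothetical stand-in for __pyx_t_double_complex */
static demo_complex demo_c_prod(demo_complex a, demo_complex b)
{
    demo_complex z;
    z.real = a.real * b.real - a.imag * b.imag;
    z.imag = a.real * b.imag + a.imag * b.real;
    return z;
}
int main(void)
{
    demo_complex a = {1.0, 2.0};
    demo_complex b = {3.0, -1.0};
    demo_complex z = demo_c_prod(a, b);
    printf("%g%+gi\n", z.real, z.imag);   /* (1+2i)(3-1i) == 5+5i */
    return 0;
}
#endif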
z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double denom = b.real * b.real + b.imag * b.imag; z.real = (a.real * b.real + a.imag * b.imag) / denom; z.imag = (a.imag * b.real - a.real * b.imag) / denom; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrt(z.real*z.real + z.imag*z.imag); #else return hypot(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { double denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(a, a); case 3: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(z, a); case 4: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } r = a.real; theta = 0; } else { r = __Pyx_c_abs(a); theta = atan2(a.imag, a.real); } lnr = log(r); z_r = exp(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cos(z_theta); z.imag = z_r * sin(z_theta); return z; } #endif #endif static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject* x) { const unsigned char neg_one = (unsigned char)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(unsigned char) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(unsigned char)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to unsigned char" : "value too large to convert to unsigned char"); } return (unsigned char)-1; } return (unsigned char)val; } return (unsigned char)__Pyx_PyInt_AsUnsignedLong(x); } static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject* x) { const unsigned short neg_one = (unsigned short)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(unsigned short) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(unsigned short)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? 
"can't convert negative value to unsigned short" : "value too large to convert to unsigned short"); } return (unsigned short)-1; } return (unsigned short)val; } return (unsigned short)__Pyx_PyInt_AsUnsignedLong(x); } static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject* x) { const unsigned int neg_one = (unsigned int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(unsigned int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(unsigned int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to unsigned int" : "value too large to convert to unsigned int"); } return (unsigned int)-1; } return (unsigned int)val; } return (unsigned int)__Pyx_PyInt_AsUnsignedLong(x); } static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject* x) { const char neg_one = (char)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(char) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(char)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to char" : "value too large to convert to char"); } return (char)-1; } return (char)val; } return (char)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject* x) { const short neg_one = (short)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(short) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(short)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to short" : "value too large to convert to short"); } return (short)-1; } return (short)val; } return (short)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject* x) { const int neg_one = (int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to int" : "value too large to convert to int"); } return (int)-1; } return (int)val; } return (int)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject* x) { const signed char neg_one = (signed char)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(signed char) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(signed char)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to signed char" : "value too large to convert to signed char"); } return (signed char)-1; } return (signed char)val; } return (signed char)__Pyx_PyInt_AsSignedLong(x); } static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject* x) { const signed short neg_one = (signed short)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(signed short) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(signed short)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? 
"can't convert negative value to signed short" : "value too large to convert to signed short"); } return (signed short)-1; } return (signed short)val; } return (signed short)__Pyx_PyInt_AsSignedLong(x); } static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject* x) { const signed int neg_one = (signed int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(signed int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(signed int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to signed int" : "value too large to convert to signed int"); } return (signed int)-1; } return (signed int)val; } return (signed int)__Pyx_PyInt_AsSignedLong(x); } static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject* x) { const int neg_one = (int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to int" : "value too large to convert to int"); } return (int)-1; } return (int)val; } return (int)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject* x) { const unsigned long neg_one = (unsigned long)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned long"); return (unsigned long)-1; } return (unsigned long)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned long"); return (unsigned long)-1; } return (unsigned long)PyLong_AsUnsignedLong(x); } else { return (unsigned long)PyLong_AsLong(x); } } else { unsigned long val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (unsigned long)-1; val = __Pyx_PyInt_AsUnsignedLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject* x) { const unsigned PY_LONG_LONG neg_one = (unsigned PY_LONG_LONG)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned PY_LONG_LONG"); return (unsigned PY_LONG_LONG)-1; } return (unsigned PY_LONG_LONG)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned PY_LONG_LONG"); return (unsigned PY_LONG_LONG)-1; } return (unsigned PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); } else { return (unsigned PY_LONG_LONG)PyLong_AsLongLong(x); } } else { unsigned PY_LONG_LONG val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (unsigned PY_LONG_LONG)-1; val = __Pyx_PyInt_AsUnsignedLongLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject* x) { const long neg_one = (long)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if 
(likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long)-1; } return (long)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long)-1; } return (long)PyLong_AsUnsignedLong(x); } else { return (long)PyLong_AsLong(x); } } else { long val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (long)-1; val = __Pyx_PyInt_AsLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject* x) { const PY_LONG_LONG neg_one = (PY_LONG_LONG)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to PY_LONG_LONG"); return (PY_LONG_LONG)-1; } return (PY_LONG_LONG)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to PY_LONG_LONG"); return (PY_LONG_LONG)-1; } return (PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); } else { return (PY_LONG_LONG)PyLong_AsLongLong(x); } } else { PY_LONG_LONG val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (PY_LONG_LONG)-1; val = __Pyx_PyInt_AsLongLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject* x) { const signed long neg_one = (signed long)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed long"); return (signed long)-1; } return (signed long)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed long"); return (signed long)-1; } return (signed long)PyLong_AsUnsignedLong(x); } else { return (signed long)PyLong_AsLong(x); } } else { signed long val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (signed long)-1; val = __Pyx_PyInt_AsSignedLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject* x) { const signed PY_LONG_LONG neg_one = (signed PY_LONG_LONG)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed PY_LONG_LONG"); return (signed PY_LONG_LONG)-1; } return (signed PY_LONG_LONG)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed PY_LONG_LONG"); return (signed PY_LONG_LONG)-1; } return (signed PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); } else { return (signed PY_LONG_LONG)PyLong_AsLongLong(x); } } else { signed PY_LONG_LONG val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (signed PY_LONG_LONG)-1; val = __Pyx_PyInt_AsSignedLongLong(tmp); Py_DECREF(tmp); return val; } } static int 
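/* Compares the Python major.minor version this module was compiled against
   (PY_MAJOR_VERSION / PY_MINOR_VERSION) with the running interpreter's
   Py_GetVersion() string, and warns (rather than failing) on mismatch. */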
__Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); #if PY_VERSION_HEX < 0x02050000 return PyErr_Warn(NULL, message); #else return PyErr_WarnEx(NULL, message, 1); #endif } return 0; } #ifndef __PYX_HAVE_RT_ImportModule #define __PYX_HAVE_RT_ImportModule static PyObject *__Pyx_ImportModule(const char *name) { PyObject *py_name = 0; PyObject *py_module = 0; py_name = __Pyx_PyIdentifier_FromString(name); if (!py_name) goto bad; py_module = PyImport_Import(py_name); Py_DECREF(py_name); return py_module; bad: Py_XDECREF(py_name); return 0; } #endif #ifndef __PYX_HAVE_RT_ImportType #define __PYX_HAVE_RT_ImportType static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict) { PyObject *py_module = 0; PyObject *result = 0; PyObject *py_name = 0; char warning[200]; py_module = __Pyx_ImportModule(module_name); if (!py_module) goto bad; py_name = __Pyx_PyIdentifier_FromString(class_name); if (!py_name) goto bad; result = PyObject_GetAttr(py_module, py_name); Py_DECREF(py_name); py_name = 0; Py_DECREF(py_module); py_module = 0; if (!result) goto bad; if (!PyType_Check(result)) { PyErr_Format(PyExc_TypeError, "%s.%s is not a type object", module_name, class_name); goto bad; } if (!strict && (size_t)((PyTypeObject *)result)->tp_basicsize > size) { PyOS_snprintf(warning, sizeof(warning), "%s.%s size changed, may indicate binary incompatibility", module_name, class_name); #if PY_VERSION_HEX < 0x02050000 if (PyErr_Warn(NULL, warning) < 0) goto bad; #else if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; #endif } else if ((size_t)((PyTypeObject *)result)->tp_basicsize != size) { PyErr_Format(PyExc_ValueError, "%s.%s has the wrong size, try recompiling", module_name, class_name); goto bad; } return (PyTypeObject *)result; bad: Py_XDECREF(py_module); Py_XDECREF(result); return NULL; } #endif static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = (start + end) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); 
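/* First insertion into the code-object cache: lazily allocate a block of 64
   entries.  If the allocation fails, the branch below is skipped and the
   cache simply stays empty; __Pyx_AddTraceback will just recreate the code
   object the next time it is needed. */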
if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, new_max*sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, /*int argcount,*/ 0, /*int kwonlyargcount,*/ 0, /*int nlocals,*/ 0, /*int stacksize,*/ 0, /*int flags,*/ __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, /*int firstlineno,*/ __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_globals = 0; PyFrameObject *py_frame = 0; py_code = __pyx_find_code_object(c_line ? c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? 
c_line : py_line, py_code); } py_globals = PyModule_GetDict(__pyx_m); if (!py_globals) goto bad; py_frame = PyFrame_New( PyThreadState_GET(), /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ py_globals, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; py_frame->f_lineno = py_line; PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else /* Python 3+ has unicode identifiers */ if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; ++t; } return 0; } /* Type Conversion Functions */ static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x) { PyNumberMethods *m; const char *name = NULL; PyObject *res = NULL; #if PY_VERSION_HEX < 0x03000000 if (PyInt_Check(x) || PyLong_Check(x)) #else if (PyLong_Check(x)) #endif return Py_INCREF(x), x; m = Py_TYPE(x)->tp_as_number; #if PY_VERSION_HEX < 0x03000000 if (m && m->nb_int) { name = "int"; res = PyNumber_Int(x); } else if (m && m->nb_long) { name = "long"; res = PyNumber_Long(x); } #else if (m && m->nb_int) { name = "int"; res = PyNumber_Long(x); } #endif if (res) { #if PY_VERSION_HEX < 0x03000000 if (!PyInt_Check(res) && !PyLong_Check(res)) { #else if (!PyLong_Check(res)) { #endif PyErr_Format(PyExc_TypeError, "__%s__ returned non-%s (type %.200s)", name, name, Py_TYPE(res)->tp_name); Py_DECREF(res); return NULL; } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject* x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { #if PY_VERSION_HEX < 0x02050000 if (ival <= LONG_MAX) return PyInt_FromLong((long)ival); else { unsigned char *bytes = (unsigned char *) &ival; int one = 1; int little = (int)*(unsigned char*)&one; return _PyLong_FromByteArray(bytes, sizeof(size_t), little, 0); } #else return PyInt_FromSize_t(ival); #endif } static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject* x) { unsigned PY_LONG_LONG val = __Pyx_PyInt_AsUnsignedLongLong(x); if (unlikely(val == (unsigned PY_LONG_LONG)-1 && PyErr_Occurred())) { return (size_t)-1; } else if (unlikely(val != (unsigned PY_LONG_LONG)(size_t)val)) { PyErr_SetString(PyExc_OverflowError, "value too large to convert to size_t"); return (size_t)-1; } return (size_t)val; } #endif /* Py_PYTHON_H */ nipy-0.3.0/nipy/labs/bindings/array.pyx000066400000000000000000000057221210344137400200530ustar00rootroot00000000000000# -*- Mode: Python -*- Not really, but the syntax is close enough """ Python access to core fff functions written in C. This module is mainly used for unitary tests. Author: Alexis Roche, 2008. 
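A minimal usage sketch (assuming the compiled extension is importable as
nipy.labs.bindings.array and the fff C library is built; the array shapes
and values below are illustrative only):

    import numpy as np
    from nipy.labs.bindings import array as fff_array

    A = np.arange(12.).reshape(3, 4)
    B = np.ones((3, 4))
    C = fff_array.array_add(A, B)        # elementwise A + B via fff_array_add
    a12 = fff_array.array_get(A, 1, 2)   # element at (x=1, y=2) in fff's
                                         # (x, y, z, t) indexing convention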
""" __version__ = '0.1' # Includes from fff cimport * # Initialize numpy fffpy_import_array() import_array() import numpy as np # Binded routines def array_get(A, size_t x, size_t y=0, size_t z=0, size_t t=0): """ Get array element. va = array_get(A, size_t x, size_t y=0, size_t z=0, size_t t=0): """ cdef fff_array* a cdef double va a = fff_array_fromPyArray(A) va = fff_array_get(a, x, y, z, t) fff_array_delete(a) return va def array_get_block( A, size_t x0, size_t x1, size_t fX=1, size_t y0=0, size_t y1=0, size_t fY=1, size_t z0=0, size_t z1=0, size_t fZ=1, size_t t0=0, size_t t1=0, size_t fT=1 ): """ Get block Asub = array_get_block( A, size_t x0, size_t x1, size_t fX=1, size_t y0=0, size_t y1=0, size_t fY=1, size_t z0=0, size_t z1=0, size_t fZ=1, size_t t0=0, size_t t1=0, size_t fT=1 ) """ cdef fff_array *a, *b cdef fff_array asub a = fff_array_fromPyArray(A) asub = fff_array_get_block(a, x0, x1, fX, y0, y1, fY, z0, z1, fZ, t0, t1, fT) b = fff_array_new(asub.datatype, asub.dimX, asub.dimY, asub.dimZ, asub.dimT) fff_array_copy(b, &asub) B = fff_array_toPyArray(b) fff_array_delete(a) return B def array_add(A, B): """ C = A + B """ cdef fff_array *a, *b, *c a = fff_array_fromPyArray(A) b = fff_array_fromPyArray(B) c = fff_array_new(a.datatype, a.dimX, a.dimY, a.dimZ, a.dimT) fff_array_copy(c, a) fff_array_add(c, b) C = fff_array_toPyArray(c) fff_array_delete(a) fff_array_delete(b) return C def array_mul(A, B): """ C = A * B """ cdef fff_array *a, *b, *c a = fff_array_fromPyArray(A) b = fff_array_fromPyArray(B) c = fff_array_new(a.datatype, a.dimX, a.dimY, a.dimZ, a.dimT) fff_array_copy(c, a) fff_array_mul(c, b) C = fff_array_toPyArray(c) fff_array_delete(a) fff_array_delete(b) return C def array_sub(A, B): """ C = A - B """ cdef fff_array *a, *b, *c a = fff_array_fromPyArray(A) b = fff_array_fromPyArray(B) c = fff_array_new(a.datatype, a.dimX, a.dimY, a.dimZ, a.dimT) fff_array_copy(c, a) fff_array_sub(c, b) C = fff_array_toPyArray(c) fff_array_delete(a) fff_array_delete(b) return C def array_div(A, B): """ C = A / B """ cdef fff_array *a, *b, *c a = fff_array_fromPyArray(A) b = fff_array_fromPyArray(B) c = fff_array_new(a.datatype, a.dimX, a.dimY, a.dimZ, a.dimT) fff_array_copy(c, a) fff_array_div(c, b) C = fff_array_toPyArray(c) fff_array_delete(a) fff_array_delete(b) return C nipy-0.3.0/nipy/labs/bindings/benchmarks/000077500000000000000000000000001210344137400203025ustar00rootroot00000000000000nipy-0.3.0/nipy/labs/bindings/benchmarks/__init__.py000066400000000000000000000000501210344137400224060ustar00rootroot00000000000000# Init to make test directory a package nipy-0.3.0/nipy/labs/bindings/benchmarks/bench_numpy.py000066400000000000000000000016701210344137400231670ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import time import numpy as np from .. 
import copy_vector def time_ratio(t0,t1): if t1==0: return np.inf else: return t0/t1 def time_copy_vector(x): t0 = time.clock() y0 = copy_vector(x, 0) dt0 = time.clock()-t0 t1 = time.clock() y1 = copy_vector(x, 1) dt1 = time.clock()-t1 ratio = time_ratio(dt0,dt1) print(' using fff_array: %f sec' % dt0) print(' using numpy C API: %f sec' % dt1) print(' ratio: %f' % ratio) def bench_copy_vector_contiguous(): x = (1000*np.random.rand(1e6)).astype('int32') print('Contiguous buffer copy (int32-->double)') time_copy_vector(x) def bench_copy_vector_strided(): x0 = (1000*np.random.rand(2e6)).astype('int32') x = x0[::2] print('Non-contiguous buffer copy (int32-->double)') time_copy_vector(x) nipy-0.3.0/nipy/labs/bindings/linalg.c000066400000000000000000016452601210344137400176150ustar00rootroot00000000000000/* Generated by Cython 0.17.4 on Sat Jan 12 17:27:33 2013 */ #define PY_SSIZE_T_CLEAN #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02040000 #error Cython requires Python 2.4+. #else #include /* For offsetof */ #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #endif #if PY_VERSION_HEX < 0x02050000 typedef int Py_ssize_t; #define PY_SSIZE_T_MAX INT_MAX #define PY_SSIZE_T_MIN INT_MIN #define PY_FORMAT_SIZE_T "" #define CYTHON_FORMAT_SSIZE_T "" #define PyInt_FromSsize_t(z) PyInt_FromLong(z) #define PyInt_AsSsize_t(o) __Pyx_PyInt_AsInt(o) #define PyNumber_Index(o) ((PyNumber_Check(o) && !PyFloat_Check(o)) ? 
PyNumber_Int(o) : \ (PyErr_Format(PyExc_TypeError, \ "expected index value, got %.200s", Py_TYPE(o)->tp_name), \ (PyObject*)0)) #define __Pyx_PyIndex_Check(o) (PyNumber_Check(o) && !PyFloat_Check(o) && \ !PyComplex_Check(o)) #define PyIndex_Check __Pyx_PyIndex_Check #define PyErr_WarnEx(category, message, stacklevel) PyErr_Warn(category, message) #define __PYX_BUILD_PY_SSIZE_T "i" #else #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #define __Pyx_PyIndex_Check PyIndex_Check #endif #if PY_VERSION_HEX < 0x02060000 #define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt) #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type) #define Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size) #define PyVarObject_HEAD_INIT(type, size) \ PyObject_HEAD_INIT(type) size, #define PyType_Modified(t) typedef struct { void *buf; PyObject *obj; Py_ssize_t len; Py_ssize_t itemsize; int readonly; int ndim; char *format; Py_ssize_t *shape; Py_ssize_t *strides; Py_ssize_t *suboffsets; void *internal; } Py_buffer; #define PyBUF_SIMPLE 0 #define PyBUF_WRITABLE 0x0001 #define PyBUF_FORMAT 0x0004 #define PyBUF_ND 0x0008 #define PyBUF_STRIDES (0x0010 | PyBUF_ND) #define PyBUF_C_CONTIGUOUS (0x0020 | PyBUF_STRIDES) #define PyBUF_F_CONTIGUOUS (0x0040 | PyBUF_STRIDES) #define PyBUF_ANY_CONTIGUOUS (0x0080 | PyBUF_STRIDES) #define PyBUF_INDIRECT (0x0100 | PyBUF_STRIDES) #define PyBUF_RECORDS (PyBUF_STRIDES | PyBUF_FORMAT | PyBUF_WRITABLE) #define PyBUF_FULL (PyBUF_INDIRECT | PyBUF_FORMAT | PyBUF_WRITABLE) typedef int (*getbufferproc)(PyObject *, Py_buffer *, int); typedef void (*releasebufferproc)(PyObject *, Py_buffer *); #endif #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \ PyCode_New(a, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #endif #if PY_MAJOR_VERSION < 3 && PY_MINOR_VERSION < 6 #define PyUnicode_FromString(s) PyUnicode_Decode(s, strlen(s), "UTF-8", "strict") #endif #if PY_MAJOR_VERSION >= 3 #define Py_TPFLAGS_CHECKTYPES 0 #define Py_TPFLAGS_HAVE_INDEX 0 #endif #if (PY_VERSION_HEX < 0x02060000) || (PY_MAJOR_VERSION >= 3) #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ? 
\ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #else #define CYTHON_PEP393_ENABLED 0 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_READ(k, d, i) ((k=k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #endif #if PY_VERSION_HEX < 0x02060000 #define PyBytesObject PyStringObject #define PyBytes_Type PyString_Type #define PyBytes_Check PyString_Check #define PyBytes_CheckExact PyString_CheckExact #define PyBytes_FromString PyString_FromString #define PyBytes_FromStringAndSize PyString_FromStringAndSize #define PyBytes_FromFormat PyString_FromFormat #define PyBytes_DecodeEscape PyString_DecodeEscape #define PyBytes_AsString PyString_AsString #define PyBytes_AsStringAndSize PyString_AsStringAndSize #define PyBytes_Size PyString_Size #define PyBytes_AS_STRING PyString_AS_STRING #define PyBytes_GET_SIZE PyString_GET_SIZE #define PyBytes_Repr PyString_Repr #define PyBytes_Concat PyString_Concat #define PyBytes_ConcatAndDel PyString_ConcatAndDel #endif #if PY_VERSION_HEX < 0x02060000 #define PySet_Check(obj) PyObject_TypeCheck(obj, &PySet_Type) #define PyFrozenSet_Check(obj) PyObject_TypeCheck(obj, &PyFrozenSet_Type) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_VERSION_HEX < 0x03020000 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if (PY_MAJOR_VERSION < 3) || (PY_VERSION_HEX >= 0x03010300) #define __Pyx_PySequence_GetSlice(obj, a, b) PySequence_GetSlice(obj, a, b) #define __Pyx_PySequence_SetSlice(obj, a, b, value) PySequence_SetSlice(obj, a, b, value) #define __Pyx_PySequence_DelSlice(obj, a, b) PySequence_DelSlice(obj, a, b) #else #define __Pyx_PySequence_GetSlice(obj, a, b) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), (PyObject*)0) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_GetSlice(obj, a, b)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object is unsliceable", (obj)->ob_type->tp_name), (PyObject*)0))) #define __Pyx_PySequence_SetSlice(obj, a, b, value) (unlikely(!(obj)) ? 
\ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_SetSlice(obj, a, b, value)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice assignment", (obj)->ob_type->tp_name), -1))) #define __Pyx_PySequence_DelSlice(obj, a, b) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_DelSlice(obj, a, b)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice deletion", (obj)->ob_type->tp_name), -1))) #endif #if PY_MAJOR_VERSION >= 3 #define PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func)) #endif #if PY_VERSION_HEX < 0x02050000 #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),((char *)(n))) #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),((char *)(n)),(a)) #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),((char *)(n))) #else #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),(n)) #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),(n),(a)) #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),(n)) #endif #if PY_VERSION_HEX < 0x02050000 #define __Pyx_NAMESTR(n) ((char *)(n)) #define __Pyx_DOCSTR(n) ((char *)(n)) #else #define __Pyx_NAMESTR(n) (n) #define __Pyx_DOCSTR(n) (n) #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include #define __PYX_HAVE__nipy__labs__bindings__linalg #define __PYX_HAVE_API__nipy__labs__bindings__linalg #include "stdio.h" #include "stdlib.h" #include "numpy/arrayobject.h" #include "numpy/ufuncobject.h" #include "fff_base.h" #include "fff_vector.h" #include "fff_matrix.h" #include "fff_array.h" #include "fffpy.h" #include "fff_blas.h" #ifdef _OPENMP #include #endif /* _OPENMP */ #ifdef PYREX_WITHOUT_ASSERTIONS #define CYTHON_WITHOUT_ASSERTIONS #endif /* inline attribute */ #ifndef CYTHON_INLINE #if defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif /* unused attribute */ #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif typedef struct {PyObject **p; char *s; const long n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; /*proto*/ /* Type Conversion Predeclarations */ #define __Pyx_PyBytes_FromUString(s) PyBytes_FromString((char*)s) #define __Pyx_PyBytes_AsUString(s) ((unsigned char*) PyBytes_AsString(s)) #define __Pyx_Owned_Py_None(b) (Py_INCREF(Py_None), Py_None) #define __Pyx_PyBool_FromLong(b) ((b) ? 
(Py_INCREF(Py_True), Py_True) : (Py_INCREF(Py_False), Py_False)) static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x); static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject*); #if CYTHON_COMPILING_IN_CPYTHON #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #ifdef __GNUC__ /* Test for GCC > 2.95 */ #if __GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* __GNUC__ > 2 ... */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ > 2 ... */ #else /* __GNUC__ */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static PyObject *__pyx_m; static PyObject *__pyx_b; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= __FILE__; static const char *__pyx_filename; #if !defined(CYTHON_CCOMPLEX) #if defined(__cplusplus) #define CYTHON_CCOMPLEX 1 #elif defined(_Complex_I) #define CYTHON_CCOMPLEX 1 #else #define CYTHON_CCOMPLEX 0 #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus #include #else #include #endif #endif #if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__) #undef _Complex_I #define _Complex_I 1.0fj #endif static const char *__pyx_f[] = { "linalg.pyx", "numpy.pxd", "type.pxd", }; /* "numpy.pxd":723 * # in Cython to enable them only on the right systems. 
* * ctypedef npy_int8 int8_t # <<<<<<<<<<<<<< * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t */ typedef npy_int8 __pyx_t_5numpy_int8_t; /* "numpy.pxd":724 * * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t # <<<<<<<<<<<<<< * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t */ typedef npy_int16 __pyx_t_5numpy_int16_t; /* "numpy.pxd":725 * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t # <<<<<<<<<<<<<< * ctypedef npy_int64 int64_t * #ctypedef npy_int96 int96_t */ typedef npy_int32 __pyx_t_5numpy_int32_t; /* "numpy.pxd":726 * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t # <<<<<<<<<<<<<< * #ctypedef npy_int96 int96_t * #ctypedef npy_int128 int128_t */ typedef npy_int64 __pyx_t_5numpy_int64_t; /* "numpy.pxd":730 * #ctypedef npy_int128 int128_t * * ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<< * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t */ typedef npy_uint8 __pyx_t_5numpy_uint8_t; /* "numpy.pxd":731 * * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<< * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t */ typedef npy_uint16 __pyx_t_5numpy_uint16_t; /* "numpy.pxd":732 * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<< * ctypedef npy_uint64 uint64_t * #ctypedef npy_uint96 uint96_t */ typedef npy_uint32 __pyx_t_5numpy_uint32_t; /* "numpy.pxd":733 * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<< * #ctypedef npy_uint96 uint96_t * #ctypedef npy_uint128 uint128_t */ typedef npy_uint64 __pyx_t_5numpy_uint64_t; /* "numpy.pxd":737 * #ctypedef npy_uint128 uint128_t * * ctypedef npy_float32 float32_t # <<<<<<<<<<<<<< * ctypedef npy_float64 float64_t * #ctypedef npy_float80 float80_t */ typedef npy_float32 __pyx_t_5numpy_float32_t; /* "numpy.pxd":738 * * ctypedef npy_float32 float32_t * ctypedef npy_float64 float64_t # <<<<<<<<<<<<<< * #ctypedef npy_float80 float80_t * #ctypedef npy_float128 float128_t */ typedef npy_float64 __pyx_t_5numpy_float64_t; /* "numpy.pxd":747 * # The int types are mapped a bit surprising -- * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t # <<<<<<<<<<<<<< * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t */ typedef npy_long __pyx_t_5numpy_int_t; /* "numpy.pxd":748 * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t * ctypedef npy_longlong long_t # <<<<<<<<<<<<<< * ctypedef npy_longlong longlong_t * */ typedef npy_longlong __pyx_t_5numpy_long_t; /* "numpy.pxd":749 * ctypedef npy_long int_t * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<< * * ctypedef npy_ulong uint_t */ typedef npy_longlong __pyx_t_5numpy_longlong_t; /* "numpy.pxd":751 * ctypedef npy_longlong longlong_t * * ctypedef npy_ulong uint_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t */ typedef npy_ulong __pyx_t_5numpy_uint_t; /* "numpy.pxd":752 * * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulonglong_t * */ typedef npy_ulonglong __pyx_t_5numpy_ulong_t; /* "numpy.pxd":753 * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<< * * ctypedef npy_intp intp_t */ typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t; /* "numpy.pxd":755 * ctypedef npy_ulonglong ulonglong_t * * ctypedef npy_intp intp_t # 
<<<<<<<<<<<<<< * ctypedef npy_uintp uintp_t * */ typedef npy_intp __pyx_t_5numpy_intp_t; /* "numpy.pxd":756 * * ctypedef npy_intp intp_t * ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<< * * ctypedef npy_double float_t */ typedef npy_uintp __pyx_t_5numpy_uintp_t; /* "numpy.pxd":758 * ctypedef npy_uintp uintp_t * * ctypedef npy_double float_t # <<<<<<<<<<<<<< * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t */ typedef npy_double __pyx_t_5numpy_float_t; /* "numpy.pxd":759 * * ctypedef npy_double float_t * ctypedef npy_double double_t # <<<<<<<<<<<<<< * ctypedef npy_longdouble longdouble_t * */ typedef npy_double __pyx_t_5numpy_double_t; /* "numpy.pxd":760 * ctypedef npy_double float_t * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cfloat cfloat_t */ typedef npy_longdouble __pyx_t_5numpy_longdouble_t; /* "fff.pxd":9 * * # Redefine size_t * ctypedef unsigned long int size_t # <<<<<<<<<<<<<< * * */ typedef unsigned long __pyx_t_3fff_size_t; #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< float > __pyx_t_float_complex; #else typedef float _Complex __pyx_t_float_complex; #endif #else typedef struct { float real, imag; } __pyx_t_float_complex; #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< double > __pyx_t_double_complex; #else typedef double _Complex __pyx_t_double_complex; #endif #else typedef struct { double real, imag; } __pyx_t_double_complex; #endif /*--- Type declarations ---*/ /* "numpy.pxd":762 * ctypedef npy_longdouble longdouble_t * * ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<< * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t */ typedef npy_cfloat __pyx_t_5numpy_cfloat_t; /* "numpy.pxd":763 * * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<< * ctypedef npy_clongdouble clongdouble_t * */ typedef npy_cdouble __pyx_t_5numpy_cdouble_t; /* "numpy.pxd":764 * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cdouble complex_t */ typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t; /* "numpy.pxd":766 * ctypedef npy_clongdouble clongdouble_t * * ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew1(a): */ typedef npy_cdouble __pyx_t_5numpy_complex_t; #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); /*proto*/ #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define __Pyx_RefNannySetupContext(name, acquire_gil) \ if (acquire_gil) { \ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \ PyGILState_Release(__pyx_gilstate_save); \ } else { \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil) \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext() \ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define 
__Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif /* CYTHON_REFNANNY */ #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /*proto*/ static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /*proto*/ static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[], \ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, \ const char* function_name); /*proto*/ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /*proto*/ static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name); /*proto*/ static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb); /*proto*/ static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb); /*proto*/ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /*proto*/ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); static CYTHON_INLINE int __Pyx_IterFinish(void); /*proto*/ static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected); /*proto*/ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level); /*proto*/ #if CYTHON_CCOMPLEX #ifdef __cplusplus #define __Pyx_CREAL(z) ((z).real()) #define __Pyx_CIMAG(z) ((z).imag()) #else #define __Pyx_CREAL(z) (__real__(z)) #define __Pyx_CIMAG(z) (__imag__(z)) #endif #else #define __Pyx_CREAL(z) ((z).real) #define __Pyx_CIMAG(z) ((z).imag) #endif #if defined(_WIN32) && defined(__cplusplus) && CYTHON_CCOMPLEX #define __Pyx_SET_CREAL(z,x) ((z).real(x)) #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) #else #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) #endif static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float); #if CYTHON_CCOMPLEX #define __Pyx_c_eqf(a, b) ((a)==(b)) #define __Pyx_c_sumf(a, b) ((a)+(b)) #define __Pyx_c_difff(a, b) ((a)-(b)) #define __Pyx_c_prodf(a, b) ((a)*(b)) #define __Pyx_c_quotf(a, b) ((a)/(b)) #define __Pyx_c_negf(a) (-(a)) #ifdef __cplusplus #define 
__Pyx_c_is_zerof(z) ((z)==(float)0) #define __Pyx_c_conjf(z) (::std::conj(z)) #if 1 #define __Pyx_c_absf(z) (::std::abs(z)) #define __Pyx_c_powf(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zerof(z) ((z)==0) #define __Pyx_c_conjf(z) (conjf(z)) #if 1 #define __Pyx_c_absf(z) (cabsf(z)) #define __Pyx_c_powf(a, b) (cpowf(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex); static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex); #if 1 static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex, __pyx_t_float_complex); #endif #endif static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double); #if CYTHON_CCOMPLEX #define __Pyx_c_eq(a, b) ((a)==(b)) #define __Pyx_c_sum(a, b) ((a)+(b)) #define __Pyx_c_diff(a, b) ((a)-(b)) #define __Pyx_c_prod(a, b) ((a)*(b)) #define __Pyx_c_quot(a, b) ((a)/(b)) #define __Pyx_c_neg(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zero(z) ((z)==(double)0) #define __Pyx_c_conj(z) (::std::conj(z)) #if 1 #define __Pyx_c_abs(z) (::std::abs(z)) #define __Pyx_c_pow(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zero(z) ((z)==0) #define __Pyx_c_conj(z) (conj(z)) #if 1 #define __Pyx_c_abs(z) (cabs(z)) #define __Pyx_c_pow(a, b) (cpow(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex); static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex); #if 1 static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex, __pyx_t_double_complex); #endif #endif static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject *); static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject *); static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject *); static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject *); static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject *); static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject *); static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject *); static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject *); static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject *); static CYTHON_INLINE int 
__Pyx_PyInt_AsLongDouble(PyObject *); static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject *); static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject *); static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject *); static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject *); static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject *); static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject *); static int __Pyx_check_binary_version(void); #if !defined(__Pyx_PyIdentifier_FromString) #if PY_MAJOR_VERSION < 3 #define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s) #else #define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s) #endif #endif static PyObject *__Pyx_ImportModule(const char *name); /*proto*/ static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict); /*proto*/ typedef struct { int code_line; PyCodeObject* code_object; } __Pyx_CodeObjectCacheEntry; struct __Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); /*proto*/ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/ /* Module declarations from 'cpython.buffer' */ /* Module declarations from 'cpython.ref' */ /* Module declarations from 'libc.stdio' */ /* Module declarations from 'cpython.object' */ /* Module declarations from '__builtin__' */ /* Module declarations from 'cpython.type' */ static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0; /* Module declarations from 'libc.stdlib' */ /* Module declarations from 'numpy' */ /* Module declarations from 'numpy' */ static PyTypeObject *__pyx_ptype_5numpy_dtype = 0; static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0; static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0; static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0; static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0; static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/ /* Module declarations from 'fff' */ /* Module declarations from 'nipy.labs.bindings.linalg' */ static CBLAS_TRANSPOSE_t __pyx_f_4nipy_4labs_8bindings_6linalg_flag_transpose(int); /*proto*/ static CBLAS_UPLO_t __pyx_f_4nipy_4labs_8bindings_6linalg_flag_uplo(int); /*proto*/ static CBLAS_DIAG_t __pyx_f_4nipy_4labs_8bindings_6linalg_flag_diag(int); /*proto*/ static CBLAS_SIDE_t __pyx_f_4nipy_4labs_8bindings_6linalg_flag_side(int); /*proto*/ #define __Pyx_MODULE_NAME "nipy.labs.bindings.linalg" int __pyx_module_is_main_nipy__labs__bindings__linalg = 0; /* Implementation of 'nipy.labs.bindings.linalg' */ static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_range; static PyObject *__pyx_builtin_RuntimeError; static PyObject *__pyx_pf_4nipy_4labs_8bindings_6linalg_vector_get(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_X, size_t __pyx_v_i); /* proto */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_6linalg_2vector_set(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_X, size_t __pyx_v_i, double __pyx_v_a); /* proto */ static PyObject 
*__pyx_pf_4nipy_4labs_8bindings_6linalg_4vector_set_all(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_X, double __pyx_v_a); /* proto */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_6linalg_6vector_scale(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_X, double __pyx_v_a); /* proto */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_6linalg_8vector_add_constant(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_X, double __pyx_v_a); /* proto */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_6linalg_10vector_add(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_X, PyObject *__pyx_v_Y); /* proto */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_6linalg_12vector_sub(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_X, PyObject *__pyx_v_Y); /* proto */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_6linalg_14vector_mul(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_X, PyObject *__pyx_v_Y); /* proto */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_6linalg_16vector_div(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_X, PyObject *__pyx_v_Y); /* proto */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_6linalg_18vector_sum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_X); /* proto */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_6linalg_20vector_ssd(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_X, double __pyx_v_m, int __pyx_v_fixed); /* proto */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_6linalg_22vector_sad(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_X, double __pyx_v_m); /* proto */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_6linalg_24vector_median(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_X); /* proto */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_6linalg_26vector_quantile(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_X, double __pyx_v_r, int __pyx_v_interp); /* proto */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_6linalg_28matrix_get(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_A, size_t __pyx_v_i, size_t __pyx_v_j); /* proto */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_6linalg_30matrix_transpose(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_A); /* proto */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_6linalg_32matrix_add(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_A, PyObject *__pyx_v_B); /* proto */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_6linalg_34blas_dnrm2(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_X); /* proto */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_6linalg_36blas_dasum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_X); /* proto */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_6linalg_38blas_ddot(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_X, PyObject *__pyx_v_Y); /* proto */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_6linalg_40blas_daxpy(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_alpha, PyObject *__pyx_v_X, PyObject *__pyx_v_Y); /* proto */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_6linalg_42blas_dscal(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_alpha, PyObject *__pyx_v_X); /* proto */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_6linalg_44blas_dgemm(CYTHON_UNUSED PyObject *__pyx_self, int __pyx_v_TransA, int __pyx_v_TransB, double __pyx_v_alpha, PyObject *__pyx_v_A, PyObject *__pyx_v_B, double __pyx_v_beta, PyObject *__pyx_v_C); /* proto */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_6linalg_46blas_dsymm(CYTHON_UNUSED PyObject *__pyx_self, int __pyx_v_Side, int 
__pyx_v_Uplo, double __pyx_v_alpha, PyObject *__pyx_v_A, PyObject *__pyx_v_B, PyObject *__pyx_v_beta, PyObject *__pyx_v_C); /* proto */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_6linalg_48blas_dtrmm(CYTHON_UNUSED PyObject *__pyx_self, int __pyx_v_Side, int __pyx_v_Uplo, int __pyx_v_TransA, int __pyx_v_Diag, double __pyx_v_alpha, PyObject *__pyx_v_A, PyObject *__pyx_v_B); /* proto */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_6linalg_50blas_dtrsm(CYTHON_UNUSED PyObject *__pyx_self, int __pyx_v_Side, int __pyx_v_Uplo, int __pyx_v_TransA, int __pyx_v_Diag, double __pyx_v_alpha, PyObject *__pyx_v_A, PyObject *__pyx_v_B); /* proto */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_6linalg_52blas_dsyrk(CYTHON_UNUSED PyObject *__pyx_self, int __pyx_v_Uplo, int __pyx_v_Trans, double __pyx_v_alpha, PyObject *__pyx_v_A, double __pyx_v_beta, PyObject *__pyx_v_C); /* proto */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_6linalg_54blas_dsyr2k(CYTHON_UNUSED PyObject *__pyx_self, int __pyx_v_Uplo, int __pyx_v_Trans, double __pyx_v_alpha, PyObject *__pyx_v_A, PyObject *__pyx_v_B, double __pyx_v_beta, PyObject *__pyx_v_C); /* proto */ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */ static char __pyx_k_1[] = "ndarray is not C contiguous"; static char __pyx_k_3[] = "ndarray is not Fortran contiguous"; static char __pyx_k_5[] = "Non-native byte order not supported"; static char __pyx_k_7[] = "unknown dtype code in numpy.pxd (%d)"; static char __pyx_k_8[] = "Format string allocated too short, see comment in numpy.pxd"; static char __pyx_k_11[] = "Format string allocated too short."; static char __pyx_k_13[] = "\nPython access to core fff functions written in C. 
This module is\nmainly used for unitary tests.\n\nAuthor: Alexis Roche, 2008.\n"; static char __pyx_k_14[] = "0.1"; static char __pyx_k_17[] = "/Users/mb312/dev_trees/nipy/nipy/labs/bindings/linalg.pyx"; static char __pyx_k_18[] = "nipy.labs.bindings.linalg"; static char __pyx_k__A[] = "A"; static char __pyx_k__B[] = "B"; static char __pyx_k__C[] = "C"; static char __pyx_k__D[] = "D"; static char __pyx_k__H[] = "H"; static char __pyx_k__I[] = "I"; static char __pyx_k__L[] = "L"; static char __pyx_k__O[] = "O"; static char __pyx_k__Q[] = "Q"; static char __pyx_k__X[] = "X"; static char __pyx_k__Y[] = "Y"; static char __pyx_k__Z[] = "Z"; static char __pyx_k__a[] = "a"; static char __pyx_k__b[] = "b"; static char __pyx_k__c[] = "c"; static char __pyx_k__d[] = "d"; static char __pyx_k__f[] = "f"; static char __pyx_k__g[] = "g"; static char __pyx_k__h[] = "h"; static char __pyx_k__i[] = "i"; static char __pyx_k__j[] = "j"; static char __pyx_k__l[] = "l"; static char __pyx_k__m[] = "m"; static char __pyx_k__q[] = "q"; static char __pyx_k__r[] = "r"; static char __pyx_k__s[] = "s"; static char __pyx_k__x[] = "x"; static char __pyx_k__y[] = "y"; static char __pyx_k__z[] = "z"; static char __pyx_k__Zd[] = "Zd"; static char __pyx_k__Zf[] = "Zf"; static char __pyx_k__Zg[] = "Zg"; static char __pyx_k__np[] = "np"; static char __pyx_k__xi[] = "xi"; static char __pyx_k__aij[] = "aij"; static char __pyx_k__Diag[] = "Diag"; static char __pyx_k__Side[] = "Side"; static char __pyx_k__Uplo[] = "Uplo"; static char __pyx_k__beta[] = "beta"; static char __pyx_k__Trans[] = "Trans"; static char __pyx_k__alpha[] = "alpha"; static char __pyx_k__fixed[] = "fixed"; static char __pyx_k__numpy[] = "numpy"; static char __pyx_k__range[] = "range"; static char __pyx_k__TransA[] = "TransA"; static char __pyx_k__TransB[] = "TransB"; static char __pyx_k__interp[] = "interp"; static char __pyx_k____main__[] = "__main__"; static char __pyx_k____test__[] = "__test__"; static char __pyx_k__blas_ddot[] = "blas_ddot"; static char __pyx_k__ValueError[] = "ValueError"; static char __pyx_k__blas_dasum[] = "blas_dasum"; static char __pyx_k__blas_daxpy[] = "blas_daxpy"; static char __pyx_k__blas_dgemm[] = "blas_dgemm"; static char __pyx_k__blas_dnrm2[] = "blas_dnrm2"; static char __pyx_k__blas_dscal[] = "blas_dscal"; static char __pyx_k__blas_dsymm[] = "blas_dsymm"; static char __pyx_k__blas_dsyrk[] = "blas_dsyrk"; static char __pyx_k__blas_dtrmm[] = "blas_dtrmm"; static char __pyx_k__blas_dtrsm[] = "blas_dtrsm"; static char __pyx_k__matrix_add[] = "matrix_add"; static char __pyx_k__matrix_get[] = "matrix_get"; static char __pyx_k__vector_add[] = "vector_add"; static char __pyx_k__vector_div[] = "vector_div"; static char __pyx_k__vector_get[] = "vector_get"; static char __pyx_k__vector_mul[] = "vector_mul"; static char __pyx_k__vector_sad[] = "vector_sad"; static char __pyx_k__vector_set[] = "vector_set"; static char __pyx_k__vector_ssd[] = "vector_ssd"; static char __pyx_k__vector_sub[] = "vector_sub"; static char __pyx_k__vector_sum[] = "vector_sum"; static char __pyx_k____version__[] = "__version__"; static char __pyx_k__blas_dsyr2k[] = "blas_dsyr2k"; static char __pyx_k__RuntimeError[] = "RuntimeError"; static char __pyx_k__vector_scale[] = "vector_scale"; static char __pyx_k__vector_median[] = "vector_median"; static char __pyx_k__vector_set_all[] = "vector_set_all"; static char __pyx_k__vector_quantile[] = "vector_quantile"; static char __pyx_k__matrix_transpose[] = "matrix_transpose"; static char __pyx_k__vector_add_constant[] = 
"vector_add_constant"; static PyObject *__pyx_kp_u_1; static PyObject *__pyx_kp_u_11; static PyObject *__pyx_kp_s_14; static PyObject *__pyx_kp_s_17; static PyObject *__pyx_n_s_18; static PyObject *__pyx_kp_u_3; static PyObject *__pyx_kp_u_5; static PyObject *__pyx_kp_u_7; static PyObject *__pyx_kp_u_8; static PyObject *__pyx_n_s__A; static PyObject *__pyx_n_s__B; static PyObject *__pyx_n_s__C; static PyObject *__pyx_n_s__D; static PyObject *__pyx_n_s__Diag; static PyObject *__pyx_n_s__RuntimeError; static PyObject *__pyx_n_s__Side; static PyObject *__pyx_n_s__Trans; static PyObject *__pyx_n_s__TransA; static PyObject *__pyx_n_s__TransB; static PyObject *__pyx_n_s__Uplo; static PyObject *__pyx_n_s__ValueError; static PyObject *__pyx_n_s__X; static PyObject *__pyx_n_s__Y; static PyObject *__pyx_n_s__Z; static PyObject *__pyx_n_s____main__; static PyObject *__pyx_n_s____test__; static PyObject *__pyx_n_s____version__; static PyObject *__pyx_n_s__a; static PyObject *__pyx_n_s__aij; static PyObject *__pyx_n_s__alpha; static PyObject *__pyx_n_s__b; static PyObject *__pyx_n_s__beta; static PyObject *__pyx_n_s__blas_dasum; static PyObject *__pyx_n_s__blas_daxpy; static PyObject *__pyx_n_s__blas_ddot; static PyObject *__pyx_n_s__blas_dgemm; static PyObject *__pyx_n_s__blas_dnrm2; static PyObject *__pyx_n_s__blas_dscal; static PyObject *__pyx_n_s__blas_dsymm; static PyObject *__pyx_n_s__blas_dsyr2k; static PyObject *__pyx_n_s__blas_dsyrk; static PyObject *__pyx_n_s__blas_dtrmm; static PyObject *__pyx_n_s__blas_dtrsm; static PyObject *__pyx_n_s__c; static PyObject *__pyx_n_s__d; static PyObject *__pyx_n_s__fixed; static PyObject *__pyx_n_s__i; static PyObject *__pyx_n_s__interp; static PyObject *__pyx_n_s__j; static PyObject *__pyx_n_s__m; static PyObject *__pyx_n_s__matrix_add; static PyObject *__pyx_n_s__matrix_get; static PyObject *__pyx_n_s__matrix_transpose; static PyObject *__pyx_n_s__np; static PyObject *__pyx_n_s__numpy; static PyObject *__pyx_n_s__q; static PyObject *__pyx_n_s__r; static PyObject *__pyx_n_s__range; static PyObject *__pyx_n_s__s; static PyObject *__pyx_n_s__vector_add; static PyObject *__pyx_n_s__vector_add_constant; static PyObject *__pyx_n_s__vector_div; static PyObject *__pyx_n_s__vector_get; static PyObject *__pyx_n_s__vector_median; static PyObject *__pyx_n_s__vector_mul; static PyObject *__pyx_n_s__vector_quantile; static PyObject *__pyx_n_s__vector_sad; static PyObject *__pyx_n_s__vector_scale; static PyObject *__pyx_n_s__vector_set; static PyObject *__pyx_n_s__vector_set_all; static PyObject *__pyx_n_s__vector_ssd; static PyObject *__pyx_n_s__vector_sub; static PyObject *__pyx_n_s__vector_sum; static PyObject *__pyx_n_s__x; static PyObject *__pyx_n_s__xi; static PyObject *__pyx_n_s__y; static PyObject *__pyx_n_s__z; static PyObject *__pyx_int_15; static PyObject *__pyx_k_tuple_2; static PyObject *__pyx_k_tuple_4; static PyObject *__pyx_k_tuple_6; static PyObject *__pyx_k_tuple_9; static PyObject *__pyx_k_tuple_10; static PyObject *__pyx_k_tuple_12; static PyObject *__pyx_k_tuple_15; static PyObject *__pyx_k_tuple_19; static PyObject *__pyx_k_tuple_21; static PyObject *__pyx_k_tuple_23; static PyObject *__pyx_k_tuple_25; static PyObject *__pyx_k_tuple_27; static PyObject *__pyx_k_tuple_29; static PyObject *__pyx_k_tuple_31; static PyObject *__pyx_k_tuple_33; static PyObject *__pyx_k_tuple_35; static PyObject *__pyx_k_tuple_37; static PyObject *__pyx_k_tuple_39; static PyObject *__pyx_k_tuple_41; static PyObject *__pyx_k_tuple_43; static PyObject *__pyx_k_tuple_45; 
static PyObject *__pyx_k_tuple_47; static PyObject *__pyx_k_tuple_49; static PyObject *__pyx_k_tuple_51; static PyObject *__pyx_k_tuple_53; static PyObject *__pyx_k_tuple_55; static PyObject *__pyx_k_tuple_57; static PyObject *__pyx_k_tuple_59; static PyObject *__pyx_k_tuple_61; static PyObject *__pyx_k_tuple_63; static PyObject *__pyx_k_tuple_65; static PyObject *__pyx_k_tuple_67; static PyObject *__pyx_k_tuple_69; static PyObject *__pyx_k_tuple_71; static PyObject *__pyx_k_codeobj_16; static PyObject *__pyx_k_codeobj_20; static PyObject *__pyx_k_codeobj_22; static PyObject *__pyx_k_codeobj_24; static PyObject *__pyx_k_codeobj_26; static PyObject *__pyx_k_codeobj_28; static PyObject *__pyx_k_codeobj_30; static PyObject *__pyx_k_codeobj_32; static PyObject *__pyx_k_codeobj_34; static PyObject *__pyx_k_codeobj_36; static PyObject *__pyx_k_codeobj_38; static PyObject *__pyx_k_codeobj_40; static PyObject *__pyx_k_codeobj_42; static PyObject *__pyx_k_codeobj_44; static PyObject *__pyx_k_codeobj_46; static PyObject *__pyx_k_codeobj_48; static PyObject *__pyx_k_codeobj_50; static PyObject *__pyx_k_codeobj_52; static PyObject *__pyx_k_codeobj_54; static PyObject *__pyx_k_codeobj_56; static PyObject *__pyx_k_codeobj_58; static PyObject *__pyx_k_codeobj_60; static PyObject *__pyx_k_codeobj_62; static PyObject *__pyx_k_codeobj_64; static PyObject *__pyx_k_codeobj_66; static PyObject *__pyx_k_codeobj_68; static PyObject *__pyx_k_codeobj_70; static PyObject *__pyx_k_codeobj_72; /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_8bindings_6linalg_1vector_get(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_4labs_8bindings_6linalg_vector_get[] = "\n Get i-th element.\n xi = vector_get(x, i)\n "; static PyMethodDef __pyx_mdef_4nipy_4labs_8bindings_6linalg_1vector_get = {__Pyx_NAMESTR("vector_get"), (PyCFunction)__pyx_pw_4nipy_4labs_8bindings_6linalg_1vector_get, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4nipy_4labs_8bindings_6linalg_vector_get)}; static PyObject *__pyx_pw_4nipy_4labs_8bindings_6linalg_1vector_get(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_X = 0; size_t __pyx_v_i; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("vector_get (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__X,&__pyx_n_s__i,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__X)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__i)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("vector_get", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "vector_get") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = 
PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_X = values[0]; __pyx_v_i = __Pyx_PyInt_AsSize_t(values[1]); if (unlikely((__pyx_v_i == (size_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("vector_get", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.labs.bindings.linalg.vector_get", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4nipy_4labs_8bindings_6linalg_vector_get(__pyx_self, __pyx_v_X, __pyx_v_i); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/labs/bindings/linalg.pyx":91 * * ## fff_vector.h * def vector_get(X, size_t i): # <<<<<<<<<<<<<< * """ * Get i-th element. */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_6linalg_vector_get(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_X, size_t __pyx_v_i) { fff_vector *__pyx_v_x; double __pyx_v_xi; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("vector_get", 0); /* "nipy/labs/bindings/linalg.pyx":98 * cdef fff_vector* x * cdef double xi * x = fff_vector_fromPyArray(X) # <<<<<<<<<<<<<< * xi = fff_vector_get(x, i) * fff_vector_delete(x) */ if (!(likely(((__pyx_v_X) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_X, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 98; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = __pyx_v_X; __Pyx_INCREF(__pyx_t_1); __pyx_v_x = fff_vector_fromPyArray(((PyArrayObject *)__pyx_t_1)); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":99 * cdef double xi * x = fff_vector_fromPyArray(X) * xi = fff_vector_get(x, i) # <<<<<<<<<<<<<< * fff_vector_delete(x) * return xi */ __pyx_v_xi = fff_vector_get(__pyx_v_x, __pyx_v_i); /* "nipy/labs/bindings/linalg.pyx":100 * x = fff_vector_fromPyArray(X) * xi = fff_vector_get(x, i) * fff_vector_delete(x) # <<<<<<<<<<<<<< * return xi * */ fff_vector_delete(__pyx_v_x); /* "nipy/labs/bindings/linalg.pyx":101 * xi = fff_vector_get(x, i) * fff_vector_delete(x) * return xi # <<<<<<<<<<<<<< * * def vector_set(X, size_t i, double a): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyFloat_FromDouble(__pyx_v_xi); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("nipy.labs.bindings.linalg.vector_get", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_8bindings_6linalg_3vector_set(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_4labs_8bindings_6linalg_2vector_set[] = "\n Set i-th element.\n vector_set(x, i, a)\n "; static PyMethodDef __pyx_mdef_4nipy_4labs_8bindings_6linalg_3vector_set = {__Pyx_NAMESTR("vector_set"), (PyCFunction)__pyx_pw_4nipy_4labs_8bindings_6linalg_3vector_set, METH_VARARGS|METH_KEYWORDS, 
__Pyx_DOCSTR(__pyx_doc_4nipy_4labs_8bindings_6linalg_2vector_set)}; static PyObject *__pyx_pw_4nipy_4labs_8bindings_6linalg_3vector_set(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_X = 0; size_t __pyx_v_i; double __pyx_v_a; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("vector_set (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__X,&__pyx_n_s__i,&__pyx_n_s__a,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__X)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__i)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("vector_set", 1, 3, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 103; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__a)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("vector_set", 1, 3, 3, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 103; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "vector_set") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 103; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v_X = values[0]; __pyx_v_i = __Pyx_PyInt_AsSize_t(values[1]); if (unlikely((__pyx_v_i == (size_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 103; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_a = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_a == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 103; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("vector_set", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 103; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.labs.bindings.linalg.vector_set", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4nipy_4labs_8bindings_6linalg_2vector_set(__pyx_self, __pyx_v_X, __pyx_v_i, __pyx_v_a); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/labs/bindings/linalg.pyx":103 * return xi * * def vector_set(X, size_t i, double a): # <<<<<<<<<<<<<< * """ * Set i-th element. 
*/ static PyObject *__pyx_pf_4nipy_4labs_8bindings_6linalg_2vector_set(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_X, size_t __pyx_v_i, double __pyx_v_a) { fff_vector *__pyx_v_x; fff_vector *__pyx_v_y; PyArrayObject *__pyx_v_Y = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("vector_set", 0); /* "nipy/labs/bindings/linalg.pyx":109 * """ * cdef fff_vector *x, *y * x = fff_vector_fromPyArray(X) # <<<<<<<<<<<<<< * y = fff_vector_new(x.size) * fff_vector_memcpy(y, x) */ if (!(likely(((__pyx_v_X) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_X, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = __pyx_v_X; __Pyx_INCREF(__pyx_t_1); __pyx_v_x = fff_vector_fromPyArray(((PyArrayObject *)__pyx_t_1)); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":110 * cdef fff_vector *x, *y * x = fff_vector_fromPyArray(X) * y = fff_vector_new(x.size) # <<<<<<<<<<<<<< * fff_vector_memcpy(y, x) * fff_vector_set(y, i, a) */ __pyx_v_y = fff_vector_new(__pyx_v_x->size); /* "nipy/labs/bindings/linalg.pyx":111 * x = fff_vector_fromPyArray(X) * y = fff_vector_new(x.size) * fff_vector_memcpy(y, x) # <<<<<<<<<<<<<< * fff_vector_set(y, i, a) * fff_vector_delete(x) */ fff_vector_memcpy(__pyx_v_y, __pyx_v_x); /* "nipy/labs/bindings/linalg.pyx":112 * y = fff_vector_new(x.size) * fff_vector_memcpy(y, x) * fff_vector_set(y, i, a) # <<<<<<<<<<<<<< * fff_vector_delete(x) * Y = fff_vector_toPyArray(y) */ fff_vector_set(__pyx_v_y, __pyx_v_i, __pyx_v_a); /* "nipy/labs/bindings/linalg.pyx":113 * fff_vector_memcpy(y, x) * fff_vector_set(y, i, a) * fff_vector_delete(x) # <<<<<<<<<<<<<< * Y = fff_vector_toPyArray(y) * return Y */ fff_vector_delete(__pyx_v_x); /* "nipy/labs/bindings/linalg.pyx":114 * fff_vector_set(y, i, a) * fff_vector_delete(x) * Y = fff_vector_toPyArray(y) # <<<<<<<<<<<<<< * return Y * */ __pyx_t_1 = ((PyObject *)fff_vector_toPyArray(__pyx_v_y)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 114; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_v_Y = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":115 * fff_vector_delete(x) * Y = fff_vector_toPyArray(y) * return Y # <<<<<<<<<<<<<< * * def vector_set_all(X, double a): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_Y)); __pyx_r = ((PyObject *)__pyx_v_Y); goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("nipy.labs.bindings.linalg.vector_set", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_Y); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_8bindings_6linalg_5vector_set_all(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_4labs_8bindings_6linalg_4vector_set_all[] = "\n Set to a constant value.\n vector_set_all(x, a)\n "; static PyMethodDef __pyx_mdef_4nipy_4labs_8bindings_6linalg_5vector_set_all = {__Pyx_NAMESTR("vector_set_all"), (PyCFunction)__pyx_pw_4nipy_4labs_8bindings_6linalg_5vector_set_all, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4nipy_4labs_8bindings_6linalg_4vector_set_all)}; static PyObject 
*__pyx_pw_4nipy_4labs_8bindings_6linalg_5vector_set_all(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_X = 0; double __pyx_v_a; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("vector_set_all (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__X,&__pyx_n_s__a,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__X)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__a)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("vector_set_all", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 117; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "vector_set_all") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 117; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_X = values[0]; __pyx_v_a = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_a == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 117; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("vector_set_all", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 117; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.labs.bindings.linalg.vector_set_all", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4nipy_4labs_8bindings_6linalg_4vector_set_all(__pyx_self, __pyx_v_X, __pyx_v_a); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/labs/bindings/linalg.pyx":117 * return Y * * def vector_set_all(X, double a): # <<<<<<<<<<<<<< * """ * Set to a constant value. 
*/ static PyObject *__pyx_pf_4nipy_4labs_8bindings_6linalg_4vector_set_all(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_X, double __pyx_v_a) { fff_vector *__pyx_v_x; fff_vector *__pyx_v_y; PyArrayObject *__pyx_v_Y = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("vector_set_all", 0); /* "nipy/labs/bindings/linalg.pyx":123 * """ * cdef fff_vector *x, *y * x = fff_vector_fromPyArray(X) # <<<<<<<<<<<<<< * y = fff_vector_new(x.size) * fff_vector_memcpy(y, x) */ if (!(likely(((__pyx_v_X) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_X, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = __pyx_v_X; __Pyx_INCREF(__pyx_t_1); __pyx_v_x = fff_vector_fromPyArray(((PyArrayObject *)__pyx_t_1)); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":124 * cdef fff_vector *x, *y * x = fff_vector_fromPyArray(X) * y = fff_vector_new(x.size) # <<<<<<<<<<<<<< * fff_vector_memcpy(y, x) * fff_vector_set_all(y, a) */ __pyx_v_y = fff_vector_new(__pyx_v_x->size); /* "nipy/labs/bindings/linalg.pyx":125 * x = fff_vector_fromPyArray(X) * y = fff_vector_new(x.size) * fff_vector_memcpy(y, x) # <<<<<<<<<<<<<< * fff_vector_set_all(y, a) * fff_vector_delete(x) */ fff_vector_memcpy(__pyx_v_y, __pyx_v_x); /* "nipy/labs/bindings/linalg.pyx":126 * y = fff_vector_new(x.size) * fff_vector_memcpy(y, x) * fff_vector_set_all(y, a) # <<<<<<<<<<<<<< * fff_vector_delete(x) * Y = fff_vector_toPyArray(y) */ fff_vector_set_all(__pyx_v_y, __pyx_v_a); /* "nipy/labs/bindings/linalg.pyx":127 * fff_vector_memcpy(y, x) * fff_vector_set_all(y, a) * fff_vector_delete(x) # <<<<<<<<<<<<<< * Y = fff_vector_toPyArray(y) * return Y */ fff_vector_delete(__pyx_v_x); /* "nipy/labs/bindings/linalg.pyx":128 * fff_vector_set_all(y, a) * fff_vector_delete(x) * Y = fff_vector_toPyArray(y) # <<<<<<<<<<<<<< * return Y * */ __pyx_t_1 = ((PyObject *)fff_vector_toPyArray(__pyx_v_y)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 128; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_v_Y = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":129 * fff_vector_delete(x) * Y = fff_vector_toPyArray(y) * return Y # <<<<<<<<<<<<<< * * def vector_scale(X, double a): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_Y)); __pyx_r = ((PyObject *)__pyx_v_Y); goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("nipy.labs.bindings.linalg.vector_set_all", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_Y); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_8bindings_6linalg_7vector_scale(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_4labs_8bindings_6linalg_6vector_scale[] = "\n Multiply by a constant value.\n y = vector_scale(x, a)\n "; static PyMethodDef __pyx_mdef_4nipy_4labs_8bindings_6linalg_7vector_scale = {__Pyx_NAMESTR("vector_scale"), (PyCFunction)__pyx_pw_4nipy_4labs_8bindings_6linalg_7vector_scale, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4nipy_4labs_8bindings_6linalg_6vector_scale)}; static PyObject 
*__pyx_pw_4nipy_4labs_8bindings_6linalg_7vector_scale(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_X = 0; double __pyx_v_a; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("vector_scale (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__X,&__pyx_n_s__a,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__X)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__a)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("vector_scale", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 131; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "vector_scale") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 131; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_X = values[0]; __pyx_v_a = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_a == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 131; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("vector_scale", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 131; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.labs.bindings.linalg.vector_scale", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4nipy_4labs_8bindings_6linalg_6vector_scale(__pyx_self, __pyx_v_X, __pyx_v_a); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/labs/bindings/linalg.pyx":131 * return Y * * def vector_scale(X, double a): # <<<<<<<<<<<<<< * """ * Multiply by a constant value. 
*/ static PyObject *__pyx_pf_4nipy_4labs_8bindings_6linalg_6vector_scale(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_X, double __pyx_v_a) { fff_vector *__pyx_v_x; fff_vector *__pyx_v_y; PyArrayObject *__pyx_v_Y = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("vector_scale", 0); /* "nipy/labs/bindings/linalg.pyx":137 * """ * cdef fff_vector *x, *y * x = fff_vector_fromPyArray(X) # <<<<<<<<<<<<<< * y = fff_vector_new(x.size) * fff_vector_memcpy(y, x) */ if (!(likely(((__pyx_v_X) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_X, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 137; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = __pyx_v_X; __Pyx_INCREF(__pyx_t_1); __pyx_v_x = fff_vector_fromPyArray(((PyArrayObject *)__pyx_t_1)); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":138 * cdef fff_vector *x, *y * x = fff_vector_fromPyArray(X) * y = fff_vector_new(x.size) # <<<<<<<<<<<<<< * fff_vector_memcpy(y, x) * fff_vector_scale(y, a) */ __pyx_v_y = fff_vector_new(__pyx_v_x->size); /* "nipy/labs/bindings/linalg.pyx":139 * x = fff_vector_fromPyArray(X) * y = fff_vector_new(x.size) * fff_vector_memcpy(y, x) # <<<<<<<<<<<<<< * fff_vector_scale(y, a) * fff_vector_delete(x) */ fff_vector_memcpy(__pyx_v_y, __pyx_v_x); /* "nipy/labs/bindings/linalg.pyx":140 * y = fff_vector_new(x.size) * fff_vector_memcpy(y, x) * fff_vector_scale(y, a) # <<<<<<<<<<<<<< * fff_vector_delete(x) * Y = fff_vector_toPyArray(y) */ fff_vector_scale(__pyx_v_y, __pyx_v_a); /* "nipy/labs/bindings/linalg.pyx":141 * fff_vector_memcpy(y, x) * fff_vector_scale(y, a) * fff_vector_delete(x) # <<<<<<<<<<<<<< * Y = fff_vector_toPyArray(y) * return Y */ fff_vector_delete(__pyx_v_x); /* "nipy/labs/bindings/linalg.pyx":142 * fff_vector_scale(y, a) * fff_vector_delete(x) * Y = fff_vector_toPyArray(y) # <<<<<<<<<<<<<< * return Y * */ __pyx_t_1 = ((PyObject *)fff_vector_toPyArray(__pyx_v_y)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_v_Y = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":143 * fff_vector_delete(x) * Y = fff_vector_toPyArray(y) * return Y # <<<<<<<<<<<<<< * * def vector_add_constant(X, double a): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_Y)); __pyx_r = ((PyObject *)__pyx_v_Y); goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("nipy.labs.bindings.linalg.vector_scale", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_Y); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_8bindings_6linalg_9vector_add_constant(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_4labs_8bindings_6linalg_8vector_add_constant[] = "\n Add a constant value.\n y = vector_add_constant(x, a)\n "; static PyMethodDef __pyx_mdef_4nipy_4labs_8bindings_6linalg_9vector_add_constant = {__Pyx_NAMESTR("vector_add_constant"), (PyCFunction)__pyx_pw_4nipy_4labs_8bindings_6linalg_9vector_add_constant, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4nipy_4labs_8bindings_6linalg_8vector_add_constant)}; static PyObject 
*__pyx_pw_4nipy_4labs_8bindings_6linalg_9vector_add_constant(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_X = 0; double __pyx_v_a; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("vector_add_constant (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__X,&__pyx_n_s__a,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__X)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__a)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("vector_add_constant", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 145; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "vector_add_constant") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 145; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_X = values[0]; __pyx_v_a = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_a == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 145; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("vector_add_constant", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 145; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.labs.bindings.linalg.vector_add_constant", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4nipy_4labs_8bindings_6linalg_8vector_add_constant(__pyx_self, __pyx_v_X, __pyx_v_a); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/labs/bindings/linalg.pyx":145 * return Y * * def vector_add_constant(X, double a): # <<<<<<<<<<<<<< * """ * Add a constant value. 
*/ static PyObject *__pyx_pf_4nipy_4labs_8bindings_6linalg_8vector_add_constant(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_X, double __pyx_v_a) { fff_vector *__pyx_v_x; fff_vector *__pyx_v_y; PyArrayObject *__pyx_v_Y = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("vector_add_constant", 0); /* "nipy/labs/bindings/linalg.pyx":151 * """ * cdef fff_vector *x, *y * x = fff_vector_fromPyArray(X) # <<<<<<<<<<<<<< * y = fff_vector_new(x.size) * fff_vector_memcpy(y, x) */ if (!(likely(((__pyx_v_X) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_X, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 151; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = __pyx_v_X; __Pyx_INCREF(__pyx_t_1); __pyx_v_x = fff_vector_fromPyArray(((PyArrayObject *)__pyx_t_1)); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":152 * cdef fff_vector *x, *y * x = fff_vector_fromPyArray(X) * y = fff_vector_new(x.size) # <<<<<<<<<<<<<< * fff_vector_memcpy(y, x) * fff_vector_add_constant(y, a) */ __pyx_v_y = fff_vector_new(__pyx_v_x->size); /* "nipy/labs/bindings/linalg.pyx":153 * x = fff_vector_fromPyArray(X) * y = fff_vector_new(x.size) * fff_vector_memcpy(y, x) # <<<<<<<<<<<<<< * fff_vector_add_constant(y, a) * fff_vector_delete(x) */ fff_vector_memcpy(__pyx_v_y, __pyx_v_x); /* "nipy/labs/bindings/linalg.pyx":154 * y = fff_vector_new(x.size) * fff_vector_memcpy(y, x) * fff_vector_add_constant(y, a) # <<<<<<<<<<<<<< * fff_vector_delete(x) * Y = fff_vector_toPyArray(y) */ fff_vector_add_constant(__pyx_v_y, __pyx_v_a); /* "nipy/labs/bindings/linalg.pyx":155 * fff_vector_memcpy(y, x) * fff_vector_add_constant(y, a) * fff_vector_delete(x) # <<<<<<<<<<<<<< * Y = fff_vector_toPyArray(y) * return Y */ fff_vector_delete(__pyx_v_x); /* "nipy/labs/bindings/linalg.pyx":156 * fff_vector_add_constant(y, a) * fff_vector_delete(x) * Y = fff_vector_toPyArray(y) # <<<<<<<<<<<<<< * return Y * */ __pyx_t_1 = ((PyObject *)fff_vector_toPyArray(__pyx_v_y)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 156; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_v_Y = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":157 * fff_vector_delete(x) * Y = fff_vector_toPyArray(y) * return Y # <<<<<<<<<<<<<< * * def vector_add(X, Y): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_Y)); __pyx_r = ((PyObject *)__pyx_v_Y); goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("nipy.labs.bindings.linalg.vector_add_constant", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_Y); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_8bindings_6linalg_11vector_add(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_4labs_8bindings_6linalg_10vector_add[] = "\n Add two vectors.\n z = vector_add(x, y)\n "; static PyMethodDef __pyx_mdef_4nipy_4labs_8bindings_6linalg_11vector_add = {__Pyx_NAMESTR("vector_add"), (PyCFunction)__pyx_pw_4nipy_4labs_8bindings_6linalg_11vector_add, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4nipy_4labs_8bindings_6linalg_10vector_add)}; static PyObject 
*__pyx_pw_4nipy_4labs_8bindings_6linalg_11vector_add(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_X = 0; PyObject *__pyx_v_Y = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("vector_add (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__X,&__pyx_n_s__Y,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__X)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__Y)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("vector_add", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 159; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "vector_add") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 159; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_X = values[0]; __pyx_v_Y = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("vector_add", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 159; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.labs.bindings.linalg.vector_add", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4nipy_4labs_8bindings_6linalg_10vector_add(__pyx_self, __pyx_v_X, __pyx_v_Y); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/labs/bindings/linalg.pyx":159 * return Y * * def vector_add(X, Y): # <<<<<<<<<<<<<< * """ * Add two vectors. 
*/ static PyObject *__pyx_pf_4nipy_4labs_8bindings_6linalg_10vector_add(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_X, PyObject *__pyx_v_Y) { fff_vector *__pyx_v_x; fff_vector *__pyx_v_y; fff_vector *__pyx_v_z; PyArrayObject *__pyx_v_Z = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("vector_add", 0); /* "nipy/labs/bindings/linalg.pyx":165 * """ * cdef fff_vector *x, *y, *z * x = fff_vector_fromPyArray(X) # <<<<<<<<<<<<<< * y = fff_vector_fromPyArray(Y) * z = fff_vector_new(x.size) */ if (!(likely(((__pyx_v_X) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_X, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = __pyx_v_X; __Pyx_INCREF(__pyx_t_1); __pyx_v_x = fff_vector_fromPyArray(((PyArrayObject *)__pyx_t_1)); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":166 * cdef fff_vector *x, *y, *z * x = fff_vector_fromPyArray(X) * y = fff_vector_fromPyArray(Y) # <<<<<<<<<<<<<< * z = fff_vector_new(x.size) * fff_vector_memcpy(z, x) */ if (!(likely(((__pyx_v_Y) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_Y, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 166; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = __pyx_v_Y; __Pyx_INCREF(__pyx_t_1); __pyx_v_y = fff_vector_fromPyArray(((PyArrayObject *)__pyx_t_1)); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":167 * x = fff_vector_fromPyArray(X) * y = fff_vector_fromPyArray(Y) * z = fff_vector_new(x.size) # <<<<<<<<<<<<<< * fff_vector_memcpy(z, x) * fff_vector_add(z, y) */ __pyx_v_z = fff_vector_new(__pyx_v_x->size); /* "nipy/labs/bindings/linalg.pyx":168 * y = fff_vector_fromPyArray(Y) * z = fff_vector_new(x.size) * fff_vector_memcpy(z, x) # <<<<<<<<<<<<<< * fff_vector_add(z, y) * fff_vector_delete(x) */ fff_vector_memcpy(__pyx_v_z, __pyx_v_x); /* "nipy/labs/bindings/linalg.pyx":169 * z = fff_vector_new(x.size) * fff_vector_memcpy(z, x) * fff_vector_add(z, y) # <<<<<<<<<<<<<< * fff_vector_delete(x) * fff_vector_delete(y) */ fff_vector_add(__pyx_v_z, __pyx_v_y); /* "nipy/labs/bindings/linalg.pyx":170 * fff_vector_memcpy(z, x) * fff_vector_add(z, y) * fff_vector_delete(x) # <<<<<<<<<<<<<< * fff_vector_delete(y) * Z = fff_vector_toPyArray(z) */ fff_vector_delete(__pyx_v_x); /* "nipy/labs/bindings/linalg.pyx":171 * fff_vector_add(z, y) * fff_vector_delete(x) * fff_vector_delete(y) # <<<<<<<<<<<<<< * Z = fff_vector_toPyArray(z) * return Z */ fff_vector_delete(__pyx_v_y); /* "nipy/labs/bindings/linalg.pyx":172 * fff_vector_delete(x) * fff_vector_delete(y) * Z = fff_vector_toPyArray(z) # <<<<<<<<<<<<<< * return Z * */ __pyx_t_1 = ((PyObject *)fff_vector_toPyArray(__pyx_v_z)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 172; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_v_Z = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":173 * fff_vector_delete(y) * Z = fff_vector_toPyArray(z) * return Z # <<<<<<<<<<<<<< * * def vector_sub(X, Y): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_Z)); __pyx_r = ((PyObject *)__pyx_v_Z); goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("nipy.labs.bindings.linalg.vector_add", __pyx_clineno, __pyx_lineno, 
__pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_Z); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_8bindings_6linalg_13vector_sub(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_4labs_8bindings_6linalg_12vector_sub[] = "\n Substract two vectors: x - y\n z = vector_sub(x, y)\n "; static PyMethodDef __pyx_mdef_4nipy_4labs_8bindings_6linalg_13vector_sub = {__Pyx_NAMESTR("vector_sub"), (PyCFunction)__pyx_pw_4nipy_4labs_8bindings_6linalg_13vector_sub, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4nipy_4labs_8bindings_6linalg_12vector_sub)}; static PyObject *__pyx_pw_4nipy_4labs_8bindings_6linalg_13vector_sub(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_X = 0; PyObject *__pyx_v_Y = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("vector_sub (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__X,&__pyx_n_s__Y,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__X)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__Y)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("vector_sub", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 175; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "vector_sub") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 175; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_X = values[0]; __pyx_v_Y = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("vector_sub", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 175; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.labs.bindings.linalg.vector_sub", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4nipy_4labs_8bindings_6linalg_12vector_sub(__pyx_self, __pyx_v_X, __pyx_v_Y); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/labs/bindings/linalg.pyx":175 * return Z * * def vector_sub(X, Y): # <<<<<<<<<<<<<< * """ * Substract two vectors: x - y */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_6linalg_12vector_sub(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_X, PyObject *__pyx_v_Y) { fff_vector *__pyx_v_x; fff_vector *__pyx_v_y; fff_vector *__pyx_v_z; PyArrayObject *__pyx_v_Z = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("vector_sub", 0); /* "nipy/labs/bindings/linalg.pyx":181 * """ * cdef fff_vector 
*x, *y, *z * x = fff_vector_fromPyArray(X) # <<<<<<<<<<<<<< * y = fff_vector_fromPyArray(Y) * z = fff_vector_new(x.size) */ if (!(likely(((__pyx_v_X) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_X, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 181; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = __pyx_v_X; __Pyx_INCREF(__pyx_t_1); __pyx_v_x = fff_vector_fromPyArray(((PyArrayObject *)__pyx_t_1)); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":182 * cdef fff_vector *x, *y, *z * x = fff_vector_fromPyArray(X) * y = fff_vector_fromPyArray(Y) # <<<<<<<<<<<<<< * z = fff_vector_new(x.size) * fff_vector_memcpy(z, x) */ if (!(likely(((__pyx_v_Y) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_Y, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 182; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = __pyx_v_Y; __Pyx_INCREF(__pyx_t_1); __pyx_v_y = fff_vector_fromPyArray(((PyArrayObject *)__pyx_t_1)); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":183 * x = fff_vector_fromPyArray(X) * y = fff_vector_fromPyArray(Y) * z = fff_vector_new(x.size) # <<<<<<<<<<<<<< * fff_vector_memcpy(z, x) * fff_vector_sub(z, y) */ __pyx_v_z = fff_vector_new(__pyx_v_x->size); /* "nipy/labs/bindings/linalg.pyx":184 * y = fff_vector_fromPyArray(Y) * z = fff_vector_new(x.size) * fff_vector_memcpy(z, x) # <<<<<<<<<<<<<< * fff_vector_sub(z, y) * fff_vector_delete(x) */ fff_vector_memcpy(__pyx_v_z, __pyx_v_x); /* "nipy/labs/bindings/linalg.pyx":185 * z = fff_vector_new(x.size) * fff_vector_memcpy(z, x) * fff_vector_sub(z, y) # <<<<<<<<<<<<<< * fff_vector_delete(x) * fff_vector_delete(y) */ fff_vector_sub(__pyx_v_z, __pyx_v_y); /* "nipy/labs/bindings/linalg.pyx":186 * fff_vector_memcpy(z, x) * fff_vector_sub(z, y) * fff_vector_delete(x) # <<<<<<<<<<<<<< * fff_vector_delete(y) * Z = fff_vector_toPyArray(z) */ fff_vector_delete(__pyx_v_x); /* "nipy/labs/bindings/linalg.pyx":187 * fff_vector_sub(z, y) * fff_vector_delete(x) * fff_vector_delete(y) # <<<<<<<<<<<<<< * Z = fff_vector_toPyArray(z) * return Z */ fff_vector_delete(__pyx_v_y); /* "nipy/labs/bindings/linalg.pyx":188 * fff_vector_delete(x) * fff_vector_delete(y) * Z = fff_vector_toPyArray(z) # <<<<<<<<<<<<<< * return Z * */ __pyx_t_1 = ((PyObject *)fff_vector_toPyArray(__pyx_v_z)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 188; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_v_Z = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":189 * fff_vector_delete(y) * Z = fff_vector_toPyArray(z) * return Z # <<<<<<<<<<<<<< * * def vector_mul(X, Y): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_Z)); __pyx_r = ((PyObject *)__pyx_v_Z); goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("nipy.labs.bindings.linalg.vector_sub", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_Z); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_8bindings_6linalg_15vector_mul(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_4labs_8bindings_6linalg_14vector_mul[] = "\n Element-wise multiplication.\n z = vector_mul(x, y)\n "; static PyMethodDef __pyx_mdef_4nipy_4labs_8bindings_6linalg_15vector_mul = 
{__Pyx_NAMESTR("vector_mul"), (PyCFunction)__pyx_pw_4nipy_4labs_8bindings_6linalg_15vector_mul, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4nipy_4labs_8bindings_6linalg_14vector_mul)}; static PyObject *__pyx_pw_4nipy_4labs_8bindings_6linalg_15vector_mul(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_X = 0; PyObject *__pyx_v_Y = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("vector_mul (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__X,&__pyx_n_s__Y,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__X)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__Y)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("vector_mul", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 191; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "vector_mul") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 191; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_X = values[0]; __pyx_v_Y = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("vector_mul", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 191; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.labs.bindings.linalg.vector_mul", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4nipy_4labs_8bindings_6linalg_14vector_mul(__pyx_self, __pyx_v_X, __pyx_v_Y); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/labs/bindings/linalg.pyx":191 * return Z * * def vector_mul(X, Y): # <<<<<<<<<<<<<< * """ * Element-wise multiplication. 
*/ static PyObject *__pyx_pf_4nipy_4labs_8bindings_6linalg_14vector_mul(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_X, PyObject *__pyx_v_Y) { fff_vector *__pyx_v_x; fff_vector *__pyx_v_y; fff_vector *__pyx_v_z; PyArrayObject *__pyx_v_Z = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("vector_mul", 0); /* "nipy/labs/bindings/linalg.pyx":197 * """ * cdef fff_vector *x, *y, *z * x = fff_vector_fromPyArray(X) # <<<<<<<<<<<<<< * y = fff_vector_fromPyArray(Y) * z = fff_vector_new(x.size) */ if (!(likely(((__pyx_v_X) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_X, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 197; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = __pyx_v_X; __Pyx_INCREF(__pyx_t_1); __pyx_v_x = fff_vector_fromPyArray(((PyArrayObject *)__pyx_t_1)); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":198 * cdef fff_vector *x, *y, *z * x = fff_vector_fromPyArray(X) * y = fff_vector_fromPyArray(Y) # <<<<<<<<<<<<<< * z = fff_vector_new(x.size) * fff_vector_memcpy(z, x) */ if (!(likely(((__pyx_v_Y) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_Y, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 198; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = __pyx_v_Y; __Pyx_INCREF(__pyx_t_1); __pyx_v_y = fff_vector_fromPyArray(((PyArrayObject *)__pyx_t_1)); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":199 * x = fff_vector_fromPyArray(X) * y = fff_vector_fromPyArray(Y) * z = fff_vector_new(x.size) # <<<<<<<<<<<<<< * fff_vector_memcpy(z, x) * fff_vector_mul(z, y) */ __pyx_v_z = fff_vector_new(__pyx_v_x->size); /* "nipy/labs/bindings/linalg.pyx":200 * y = fff_vector_fromPyArray(Y) * z = fff_vector_new(x.size) * fff_vector_memcpy(z, x) # <<<<<<<<<<<<<< * fff_vector_mul(z, y) * fff_vector_delete(x) */ fff_vector_memcpy(__pyx_v_z, __pyx_v_x); /* "nipy/labs/bindings/linalg.pyx":201 * z = fff_vector_new(x.size) * fff_vector_memcpy(z, x) * fff_vector_mul(z, y) # <<<<<<<<<<<<<< * fff_vector_delete(x) * fff_vector_delete(y) */ fff_vector_mul(__pyx_v_z, __pyx_v_y); /* "nipy/labs/bindings/linalg.pyx":202 * fff_vector_memcpy(z, x) * fff_vector_mul(z, y) * fff_vector_delete(x) # <<<<<<<<<<<<<< * fff_vector_delete(y) * Z = fff_vector_toPyArray(z) */ fff_vector_delete(__pyx_v_x); /* "nipy/labs/bindings/linalg.pyx":203 * fff_vector_mul(z, y) * fff_vector_delete(x) * fff_vector_delete(y) # <<<<<<<<<<<<<< * Z = fff_vector_toPyArray(z) * return Z */ fff_vector_delete(__pyx_v_y); /* "nipy/labs/bindings/linalg.pyx":204 * fff_vector_delete(x) * fff_vector_delete(y) * Z = fff_vector_toPyArray(z) # <<<<<<<<<<<<<< * return Z * */ __pyx_t_1 = ((PyObject *)fff_vector_toPyArray(__pyx_v_z)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 204; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_v_Z = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":205 * fff_vector_delete(y) * Z = fff_vector_toPyArray(z) * return Z # <<<<<<<<<<<<<< * * def vector_div(X, Y): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_Z)); __pyx_r = ((PyObject *)__pyx_v_Z); goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("nipy.labs.bindings.linalg.vector_mul", __pyx_clineno, __pyx_lineno, 
__pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_Z); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_8bindings_6linalg_17vector_div(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_4labs_8bindings_6linalg_16vector_div[] = "\n Element-wise division.\n z = vector_div(x, y)\n "; static PyMethodDef __pyx_mdef_4nipy_4labs_8bindings_6linalg_17vector_div = {__Pyx_NAMESTR("vector_div"), (PyCFunction)__pyx_pw_4nipy_4labs_8bindings_6linalg_17vector_div, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4nipy_4labs_8bindings_6linalg_16vector_div)}; static PyObject *__pyx_pw_4nipy_4labs_8bindings_6linalg_17vector_div(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_X = 0; PyObject *__pyx_v_Y = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("vector_div (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__X,&__pyx_n_s__Y,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__X)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__Y)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("vector_div", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "vector_div") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_X = values[0]; __pyx_v_Y = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("vector_div", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.labs.bindings.linalg.vector_div", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4nipy_4labs_8bindings_6linalg_16vector_div(__pyx_self, __pyx_v_X, __pyx_v_Y); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/labs/bindings/linalg.pyx":207 * return Z * * def vector_div(X, Y): # <<<<<<<<<<<<<< * """ * Element-wise division. 
*/ static PyObject *__pyx_pf_4nipy_4labs_8bindings_6linalg_16vector_div(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_X, PyObject *__pyx_v_Y) { fff_vector *__pyx_v_x; fff_vector *__pyx_v_y; fff_vector *__pyx_v_z; PyArrayObject *__pyx_v_Z = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("vector_div", 0); /* "nipy/labs/bindings/linalg.pyx":213 * """ * cdef fff_vector *x, *y, *z * x = fff_vector_fromPyArray(X) # <<<<<<<<<<<<<< * y = fff_vector_fromPyArray(Y) * z = fff_vector_new(x.size) */ if (!(likely(((__pyx_v_X) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_X, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 213; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = __pyx_v_X; __Pyx_INCREF(__pyx_t_1); __pyx_v_x = fff_vector_fromPyArray(((PyArrayObject *)__pyx_t_1)); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":214 * cdef fff_vector *x, *y, *z * x = fff_vector_fromPyArray(X) * y = fff_vector_fromPyArray(Y) # <<<<<<<<<<<<<< * z = fff_vector_new(x.size) * fff_vector_memcpy(z, x) */ if (!(likely(((__pyx_v_Y) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_Y, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 214; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = __pyx_v_Y; __Pyx_INCREF(__pyx_t_1); __pyx_v_y = fff_vector_fromPyArray(((PyArrayObject *)__pyx_t_1)); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":215 * x = fff_vector_fromPyArray(X) * y = fff_vector_fromPyArray(Y) * z = fff_vector_new(x.size) # <<<<<<<<<<<<<< * fff_vector_memcpy(z, x) * fff_vector_mul(z, y) */ __pyx_v_z = fff_vector_new(__pyx_v_x->size); /* "nipy/labs/bindings/linalg.pyx":216 * y = fff_vector_fromPyArray(Y) * z = fff_vector_new(x.size) * fff_vector_memcpy(z, x) # <<<<<<<<<<<<<< * fff_vector_mul(z, y) * fff_vector_delete(x) */ fff_vector_memcpy(__pyx_v_z, __pyx_v_x); /* "nipy/labs/bindings/linalg.pyx":217 * z = fff_vector_new(x.size) * fff_vector_memcpy(z, x) * fff_vector_mul(z, y) # <<<<<<<<<<<<<< * fff_vector_delete(x) * fff_vector_delete(y) */ fff_vector_mul(__pyx_v_z, __pyx_v_y); /* "nipy/labs/bindings/linalg.pyx":218 * fff_vector_memcpy(z, x) * fff_vector_mul(z, y) * fff_vector_delete(x) # <<<<<<<<<<<<<< * fff_vector_delete(y) * Z = fff_vector_toPyArray(z) */ fff_vector_delete(__pyx_v_x); /* "nipy/labs/bindings/linalg.pyx":219 * fff_vector_mul(z, y) * fff_vector_delete(x) * fff_vector_delete(y) # <<<<<<<<<<<<<< * Z = fff_vector_toPyArray(z) * return Z */ fff_vector_delete(__pyx_v_y); /* "nipy/labs/bindings/linalg.pyx":220 * fff_vector_delete(x) * fff_vector_delete(y) * Z = fff_vector_toPyArray(z) # <<<<<<<<<<<<<< * return Z * */ __pyx_t_1 = ((PyObject *)fff_vector_toPyArray(__pyx_v_z)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 220; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_v_Z = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":221 * fff_vector_delete(y) * Z = fff_vector_toPyArray(z) * return Z # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_Z)); __pyx_r = ((PyObject *)__pyx_v_Z); goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("nipy.labs.bindings.linalg.vector_div", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 
NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_Z); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_8bindings_6linalg_19vector_sum(PyObject *__pyx_self, PyObject *__pyx_v_X); /*proto*/ static char __pyx_doc_4nipy_4labs_8bindings_6linalg_18vector_sum[] = "\n Sum up array elements.\n s = vector_sum(x)\n "; static PyMethodDef __pyx_mdef_4nipy_4labs_8bindings_6linalg_19vector_sum = {__Pyx_NAMESTR("vector_sum"), (PyCFunction)__pyx_pw_4nipy_4labs_8bindings_6linalg_19vector_sum, METH_O, __Pyx_DOCSTR(__pyx_doc_4nipy_4labs_8bindings_6linalg_18vector_sum)}; static PyObject *__pyx_pw_4nipy_4labs_8bindings_6linalg_19vector_sum(PyObject *__pyx_self, PyObject *__pyx_v_X) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("vector_sum (wrapper)", 0); __pyx_r = __pyx_pf_4nipy_4labs_8bindings_6linalg_18vector_sum(__pyx_self, ((PyObject *)__pyx_v_X)); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/labs/bindings/linalg.pyx":224 * * * def vector_sum(X): # <<<<<<<<<<<<<< * """ * Sum up array elements. */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_6linalg_18vector_sum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_X) { fff_vector *__pyx_v_x; long double __pyx_v_s; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("vector_sum", 0); /* "nipy/labs/bindings/linalg.pyx":231 * cdef fff_vector* x * cdef long double s * x = fff_vector_fromPyArray(X) # <<<<<<<<<<<<<< * s = fff_vector_sum(x) * fff_vector_delete(x) */ if (!(likely(((__pyx_v_X) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_X, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 231; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = __pyx_v_X; __Pyx_INCREF(__pyx_t_1); __pyx_v_x = fff_vector_fromPyArray(((PyArrayObject *)__pyx_t_1)); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":232 * cdef long double s * x = fff_vector_fromPyArray(X) * s = fff_vector_sum(x) # <<<<<<<<<<<<<< * fff_vector_delete(x) * return s */ __pyx_v_s = fff_vector_sum(__pyx_v_x); /* "nipy/labs/bindings/linalg.pyx":233 * x = fff_vector_fromPyArray(X) * s = fff_vector_sum(x) * fff_vector_delete(x) # <<<<<<<<<<<<<< * return s * */ fff_vector_delete(__pyx_v_x); /* "nipy/labs/bindings/linalg.pyx":234 * s = fff_vector_sum(x) * fff_vector_delete(x) * return s # <<<<<<<<<<<<<< * * def vector_ssd(X, double m=0, int fixed=1): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyFloat_FromDouble(__pyx_v_s); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 234; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("nipy.labs.bindings.linalg.vector_sum", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_8bindings_6linalg_21vector_ssd(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_4labs_8bindings_6linalg_20vector_ssd[] = "\n (Minimal) sum of squared differences.\n s = vector_ssd(x, m=0, fixed=1)\n "; static PyMethodDef 
__pyx_mdef_4nipy_4labs_8bindings_6linalg_21vector_ssd = {__Pyx_NAMESTR("vector_ssd"), (PyCFunction)__pyx_pw_4nipy_4labs_8bindings_6linalg_21vector_ssd, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4nipy_4labs_8bindings_6linalg_20vector_ssd)}; static PyObject *__pyx_pw_4nipy_4labs_8bindings_6linalg_21vector_ssd(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_X = 0; double __pyx_v_m; int __pyx_v_fixed; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("vector_ssd (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__X,&__pyx_n_s__m,&__pyx_n_s__fixed,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__X)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__m); if (value) { values[1] = value; kw_args--; } } case 2: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__fixed); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "vector_ssd") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 236; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_X = values[0]; if (values[1]) { __pyx_v_m = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_m == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 236; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else { /* "nipy/labs/bindings/linalg.pyx":236 * return s * * def vector_ssd(X, double m=0, int fixed=1): # <<<<<<<<<<<<<< * """ * (Minimal) sum of squared differences. 
*/ __pyx_v_m = ((double)0.0); } if (values[2]) { __pyx_v_fixed = __Pyx_PyInt_AsInt(values[2]); if (unlikely((__pyx_v_fixed == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 236; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else { __pyx_v_fixed = ((int)1); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("vector_ssd", 0, 1, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 236; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.labs.bindings.linalg.vector_ssd", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4nipy_4labs_8bindings_6linalg_20vector_ssd(__pyx_self, __pyx_v_X, __pyx_v_m, __pyx_v_fixed); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4nipy_4labs_8bindings_6linalg_20vector_ssd(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_X, double __pyx_v_m, int __pyx_v_fixed) { fff_vector *__pyx_v_x; long double __pyx_v_s; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("vector_ssd", 0); /* "nipy/labs/bindings/linalg.pyx":243 * cdef fff_vector* x * cdef long double s * x = fff_vector_fromPyArray(X) # <<<<<<<<<<<<<< * s = fff_vector_ssd(x, &m, fixed) * fff_vector_delete(x) */ if (!(likely(((__pyx_v_X) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_X, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 243; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = __pyx_v_X; __Pyx_INCREF(__pyx_t_1); __pyx_v_x = fff_vector_fromPyArray(((PyArrayObject *)__pyx_t_1)); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":244 * cdef long double s * x = fff_vector_fromPyArray(X) * s = fff_vector_ssd(x, &m, fixed) # <<<<<<<<<<<<<< * fff_vector_delete(x) * return s */ __pyx_v_s = fff_vector_ssd(__pyx_v_x, (&__pyx_v_m), __pyx_v_fixed); /* "nipy/labs/bindings/linalg.pyx":245 * x = fff_vector_fromPyArray(X) * s = fff_vector_ssd(x, &m, fixed) * fff_vector_delete(x) # <<<<<<<<<<<<<< * return s * */ fff_vector_delete(__pyx_v_x); /* "nipy/labs/bindings/linalg.pyx":246 * s = fff_vector_ssd(x, &m, fixed) * fff_vector_delete(x) * return s # <<<<<<<<<<<<<< * * def vector_sad(X, double m=0): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyFloat_FromDouble(__pyx_v_s); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 246; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("nipy.labs.bindings.linalg.vector_ssd", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_8bindings_6linalg_23vector_sad(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_4labs_8bindings_6linalg_22vector_sad[] = "\n Sum of absolute differences.\n s = vector_sad(x, m=0)\n "; static PyMethodDef __pyx_mdef_4nipy_4labs_8bindings_6linalg_23vector_sad = {__Pyx_NAMESTR("vector_sad"), (PyCFunction)__pyx_pw_4nipy_4labs_8bindings_6linalg_23vector_sad, METH_VARARGS|METH_KEYWORDS, 
__Pyx_DOCSTR(__pyx_doc_4nipy_4labs_8bindings_6linalg_22vector_sad)}; static PyObject *__pyx_pw_4nipy_4labs_8bindings_6linalg_23vector_sad(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_X = 0; double __pyx_v_m; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("vector_sad (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__X,&__pyx_n_s__m,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__X)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__m); if (value) { values[1] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "vector_sad") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 248; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_X = values[0]; if (values[1]) { __pyx_v_m = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_m == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 248; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else { /* "nipy/labs/bindings/linalg.pyx":248 * return s * * def vector_sad(X, double m=0): # <<<<<<<<<<<<<< * """ * Sum of absolute differences. 
*/ __pyx_v_m = ((double)0.0); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("vector_sad", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 248; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.labs.bindings.linalg.vector_sad", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4nipy_4labs_8bindings_6linalg_22vector_sad(__pyx_self, __pyx_v_X, __pyx_v_m); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4nipy_4labs_8bindings_6linalg_22vector_sad(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_X, double __pyx_v_m) { fff_vector *__pyx_v_x; long double __pyx_v_s; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("vector_sad", 0); /* "nipy/labs/bindings/linalg.pyx":255 * cdef fff_vector* x * cdef long double s * x = fff_vector_fromPyArray(X) # <<<<<<<<<<<<<< * s = fff_vector_sad(x, m) * fff_vector_delete(x) */ if (!(likely(((__pyx_v_X) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_X, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 255; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = __pyx_v_X; __Pyx_INCREF(__pyx_t_1); __pyx_v_x = fff_vector_fromPyArray(((PyArrayObject *)__pyx_t_1)); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":256 * cdef long double s * x = fff_vector_fromPyArray(X) * s = fff_vector_sad(x, m) # <<<<<<<<<<<<<< * fff_vector_delete(x) * return s */ __pyx_v_s = fff_vector_sad(__pyx_v_x, __pyx_v_m); /* "nipy/labs/bindings/linalg.pyx":257 * x = fff_vector_fromPyArray(X) * s = fff_vector_sad(x, m) * fff_vector_delete(x) # <<<<<<<<<<<<<< * return s * */ fff_vector_delete(__pyx_v_x); /* "nipy/labs/bindings/linalg.pyx":258 * s = fff_vector_sad(x, m) * fff_vector_delete(x) * return s # <<<<<<<<<<<<<< * * def vector_median(X): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyFloat_FromDouble(__pyx_v_s); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 258; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("nipy.labs.bindings.linalg.vector_sad", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_8bindings_6linalg_25vector_median(PyObject *__pyx_self, PyObject *__pyx_v_X); /*proto*/ static char __pyx_doc_4nipy_4labs_8bindings_6linalg_24vector_median[] = "\n Median.\n m = vector_median(x)\n "; static PyMethodDef __pyx_mdef_4nipy_4labs_8bindings_6linalg_25vector_median = {__Pyx_NAMESTR("vector_median"), (PyCFunction)__pyx_pw_4nipy_4labs_8bindings_6linalg_25vector_median, METH_O, __Pyx_DOCSTR(__pyx_doc_4nipy_4labs_8bindings_6linalg_24vector_median)}; static PyObject *__pyx_pw_4nipy_4labs_8bindings_6linalg_25vector_median(PyObject *__pyx_self, PyObject *__pyx_v_X) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("vector_median (wrapper)", 0); __pyx_r = __pyx_pf_4nipy_4labs_8bindings_6linalg_24vector_median(__pyx_self, ((PyObject *)__pyx_v_X)); 
__Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/labs/bindings/linalg.pyx":260 * return s * * def vector_median(X): # <<<<<<<<<<<<<< * """ * Median. */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_6linalg_24vector_median(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_X) { fff_vector *__pyx_v_x; double __pyx_v_m; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("vector_median", 0); /* "nipy/labs/bindings/linalg.pyx":267 * cdef fff_vector* x * cdef double m * x = fff_vector_fromPyArray(X) # <<<<<<<<<<<<<< * m = fff_vector_median(x) * fff_vector_delete(x) */ if (!(likely(((__pyx_v_X) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_X, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 267; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = __pyx_v_X; __Pyx_INCREF(__pyx_t_1); __pyx_v_x = fff_vector_fromPyArray(((PyArrayObject *)__pyx_t_1)); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":268 * cdef double m * x = fff_vector_fromPyArray(X) * m = fff_vector_median(x) # <<<<<<<<<<<<<< * fff_vector_delete(x) * return m */ __pyx_v_m = fff_vector_median(__pyx_v_x); /* "nipy/labs/bindings/linalg.pyx":269 * x = fff_vector_fromPyArray(X) * m = fff_vector_median(x) * fff_vector_delete(x) # <<<<<<<<<<<<<< * return m * */ fff_vector_delete(__pyx_v_x); /* "nipy/labs/bindings/linalg.pyx":270 * m = fff_vector_median(x) * fff_vector_delete(x) * return m # <<<<<<<<<<<<<< * * def vector_quantile(X, double r, int interp): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyFloat_FromDouble(__pyx_v_m); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 270; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("nipy.labs.bindings.linalg.vector_median", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_8bindings_6linalg_27vector_quantile(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_4labs_8bindings_6linalg_26vector_quantile[] = "\n Quantile.\n q = vector_quantile(x, r=0.5, interp=1)\n "; static PyMethodDef __pyx_mdef_4nipy_4labs_8bindings_6linalg_27vector_quantile = {__Pyx_NAMESTR("vector_quantile"), (PyCFunction)__pyx_pw_4nipy_4labs_8bindings_6linalg_27vector_quantile, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4nipy_4labs_8bindings_6linalg_26vector_quantile)}; static PyObject *__pyx_pw_4nipy_4labs_8bindings_6linalg_27vector_quantile(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_X = 0; double __pyx_v_r; int __pyx_v_interp; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("vector_quantile (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__X,&__pyx_n_s__r,&__pyx_n_s__interp,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; 
default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__X)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__r)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("vector_quantile", 1, 3, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__interp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("vector_quantile", 1, 3, 3, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "vector_quantile") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v_X = values[0]; __pyx_v_r = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_r == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_interp = __Pyx_PyInt_AsInt(values[2]); if (unlikely((__pyx_v_interp == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("vector_quantile", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.labs.bindings.linalg.vector_quantile", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4nipy_4labs_8bindings_6linalg_26vector_quantile(__pyx_self, __pyx_v_X, __pyx_v_r, __pyx_v_interp); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/labs/bindings/linalg.pyx":272 * return m * * def vector_quantile(X, double r, int interp): # <<<<<<<<<<<<<< * """ * Quantile. 
*/ static PyObject *__pyx_pf_4nipy_4labs_8bindings_6linalg_26vector_quantile(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_X, double __pyx_v_r, int __pyx_v_interp) { fff_vector *__pyx_v_x; double __pyx_v_q; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("vector_quantile", 0); /* "nipy/labs/bindings/linalg.pyx":279 * cdef fff_vector* x * cdef double q * x = fff_vector_fromPyArray(X) # <<<<<<<<<<<<<< * q = fff_vector_quantile(x, r, interp) * fff_vector_delete(x) */ if (!(likely(((__pyx_v_X) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_X, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = __pyx_v_X; __Pyx_INCREF(__pyx_t_1); __pyx_v_x = fff_vector_fromPyArray(((PyArrayObject *)__pyx_t_1)); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":280 * cdef double q * x = fff_vector_fromPyArray(X) * q = fff_vector_quantile(x, r, interp) # <<<<<<<<<<<<<< * fff_vector_delete(x) * return q */ __pyx_v_q = fff_vector_quantile(__pyx_v_x, __pyx_v_r, __pyx_v_interp); /* "nipy/labs/bindings/linalg.pyx":281 * x = fff_vector_fromPyArray(X) * q = fff_vector_quantile(x, r, interp) * fff_vector_delete(x) # <<<<<<<<<<<<<< * return q * */ fff_vector_delete(__pyx_v_x); /* "nipy/labs/bindings/linalg.pyx":282 * q = fff_vector_quantile(x, r, interp) * fff_vector_delete(x) * return q # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyFloat_FromDouble(__pyx_v_q); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 282; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("nipy.labs.bindings.linalg.vector_quantile", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_8bindings_6linalg_29matrix_get(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_4labs_8bindings_6linalg_28matrix_get[] = "\n Get (i,j) element.\n aij = matrix_get(A, i, j)\n "; static PyMethodDef __pyx_mdef_4nipy_4labs_8bindings_6linalg_29matrix_get = {__Pyx_NAMESTR("matrix_get"), (PyCFunction)__pyx_pw_4nipy_4labs_8bindings_6linalg_29matrix_get, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4nipy_4labs_8bindings_6linalg_28matrix_get)}; static PyObject *__pyx_pw_4nipy_4labs_8bindings_6linalg_29matrix_get(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_A = 0; size_t __pyx_v_i; size_t __pyx_v_j; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("matrix_get (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__A,&__pyx_n_s__i,&__pyx_n_s__j,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = 
PyDict_GetItem(__pyx_kwds, __pyx_n_s__A)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__i)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("matrix_get", 1, 3, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 286; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__j)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("matrix_get", 1, 3, 3, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 286; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "matrix_get") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 286; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v_A = values[0]; __pyx_v_i = __Pyx_PyInt_AsSize_t(values[1]); if (unlikely((__pyx_v_i == (size_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 286; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_j = __Pyx_PyInt_AsSize_t(values[2]); if (unlikely((__pyx_v_j == (size_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 286; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("matrix_get", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 286; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.labs.bindings.linalg.matrix_get", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4nipy_4labs_8bindings_6linalg_28matrix_get(__pyx_self, __pyx_v_A, __pyx_v_i, __pyx_v_j); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/labs/bindings/linalg.pyx":286 * * ## fff_matrix.h * def matrix_get(A, size_t i, size_t j): # <<<<<<<<<<<<<< * """ * Get (i,j) element. 
*/ static PyObject *__pyx_pf_4nipy_4labs_8bindings_6linalg_28matrix_get(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_A, size_t __pyx_v_i, size_t __pyx_v_j) { fff_matrix *__pyx_v_a; double __pyx_v_aij; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("matrix_get", 0); /* "nipy/labs/bindings/linalg.pyx":293 * cdef fff_matrix* a * cdef double aij * a = fff_matrix_fromPyArray(A) # <<<<<<<<<<<<<< * aij = fff_matrix_get(a, i, j) * fff_matrix_delete(a) */ if (!(likely(((__pyx_v_A) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_A, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 293; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = __pyx_v_A; __Pyx_INCREF(__pyx_t_1); __pyx_v_a = fff_matrix_fromPyArray(((PyArrayObject *)__pyx_t_1)); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":294 * cdef double aij * a = fff_matrix_fromPyArray(A) * aij = fff_matrix_get(a, i, j) # <<<<<<<<<<<<<< * fff_matrix_delete(a) * return aij */ __pyx_v_aij = fff_matrix_get(__pyx_v_a, __pyx_v_i, __pyx_v_j); /* "nipy/labs/bindings/linalg.pyx":295 * a = fff_matrix_fromPyArray(A) * aij = fff_matrix_get(a, i, j) * fff_matrix_delete(a) # <<<<<<<<<<<<<< * return aij * */ fff_matrix_delete(__pyx_v_a); /* "nipy/labs/bindings/linalg.pyx":296 * aij = fff_matrix_get(a, i, j) * fff_matrix_delete(a) * return aij # <<<<<<<<<<<<<< * * def matrix_transpose(A): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyFloat_FromDouble(__pyx_v_aij); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 296; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("nipy.labs.bindings.linalg.matrix_get", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_8bindings_6linalg_31matrix_transpose(PyObject *__pyx_self, PyObject *__pyx_v_A); /*proto*/ static char __pyx_doc_4nipy_4labs_8bindings_6linalg_30matrix_transpose[] = "\n Transpose a matrix.\n B = matrix_transpose(A)\n "; static PyMethodDef __pyx_mdef_4nipy_4labs_8bindings_6linalg_31matrix_transpose = {__Pyx_NAMESTR("matrix_transpose"), (PyCFunction)__pyx_pw_4nipy_4labs_8bindings_6linalg_31matrix_transpose, METH_O, __Pyx_DOCSTR(__pyx_doc_4nipy_4labs_8bindings_6linalg_30matrix_transpose)}; static PyObject *__pyx_pw_4nipy_4labs_8bindings_6linalg_31matrix_transpose(PyObject *__pyx_self, PyObject *__pyx_v_A) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("matrix_transpose (wrapper)", 0); __pyx_r = __pyx_pf_4nipy_4labs_8bindings_6linalg_30matrix_transpose(__pyx_self, ((PyObject *)__pyx_v_A)); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/labs/bindings/linalg.pyx":298 * return aij * * def matrix_transpose(A): # <<<<<<<<<<<<<< * """ * Transpose a matrix. 
*/ static PyObject *__pyx_pf_4nipy_4labs_8bindings_6linalg_30matrix_transpose(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_A) { fff_matrix *__pyx_v_a; fff_matrix *__pyx_v_b; PyArrayObject *__pyx_v_B = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("matrix_transpose", 0); /* "nipy/labs/bindings/linalg.pyx":304 * """ * cdef fff_matrix *a, *b * a = fff_matrix_fromPyArray(A) # <<<<<<<<<<<<<< * b = fff_matrix_new(a.size2, a.size1) * fff_matrix_transpose(b, a) */ if (!(likely(((__pyx_v_A) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_A, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 304; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = __pyx_v_A; __Pyx_INCREF(__pyx_t_1); __pyx_v_a = fff_matrix_fromPyArray(((PyArrayObject *)__pyx_t_1)); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":305 * cdef fff_matrix *a, *b * a = fff_matrix_fromPyArray(A) * b = fff_matrix_new(a.size2, a.size1) # <<<<<<<<<<<<<< * fff_matrix_transpose(b, a) * fff_matrix_delete(a) */ __pyx_v_b = fff_matrix_new(__pyx_v_a->size2, __pyx_v_a->size1); /* "nipy/labs/bindings/linalg.pyx":306 * a = fff_matrix_fromPyArray(A) * b = fff_matrix_new(a.size2, a.size1) * fff_matrix_transpose(b, a) # <<<<<<<<<<<<<< * fff_matrix_delete(a) * B = fff_matrix_toPyArray(b) */ fff_matrix_transpose(__pyx_v_b, __pyx_v_a); /* "nipy/labs/bindings/linalg.pyx":307 * b = fff_matrix_new(a.size2, a.size1) * fff_matrix_transpose(b, a) * fff_matrix_delete(a) # <<<<<<<<<<<<<< * B = fff_matrix_toPyArray(b) * return B */ fff_matrix_delete(__pyx_v_a); /* "nipy/labs/bindings/linalg.pyx":308 * fff_matrix_transpose(b, a) * fff_matrix_delete(a) * B = fff_matrix_toPyArray(b) # <<<<<<<<<<<<<< * return B * */ __pyx_t_1 = ((PyObject *)fff_matrix_toPyArray(__pyx_v_b)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 308; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_v_B = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":309 * fff_matrix_delete(a) * B = fff_matrix_toPyArray(b) * return B # <<<<<<<<<<<<<< * * def matrix_add(A, B): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_B)); __pyx_r = ((PyObject *)__pyx_v_B); goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("nipy.labs.bindings.linalg.matrix_transpose", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_B); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_8bindings_6linalg_33matrix_add(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_4labs_8bindings_6linalg_32matrix_add[] = "\n C = matrix_add(A, B)\n "; static PyMethodDef __pyx_mdef_4nipy_4labs_8bindings_6linalg_33matrix_add = {__Pyx_NAMESTR("matrix_add"), (PyCFunction)__pyx_pw_4nipy_4labs_8bindings_6linalg_33matrix_add, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4nipy_4labs_8bindings_6linalg_32matrix_add)}; static PyObject *__pyx_pw_4nipy_4labs_8bindings_6linalg_33matrix_add(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_A = 0; PyObject *__pyx_v_B = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations 
__Pyx_RefNannySetupContext("matrix_add (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__A,&__pyx_n_s__B,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__A)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__B)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("matrix_add", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 311; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "matrix_add") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 311; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_A = values[0]; __pyx_v_B = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("matrix_add", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 311; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.labs.bindings.linalg.matrix_add", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4nipy_4labs_8bindings_6linalg_32matrix_add(__pyx_self, __pyx_v_A, __pyx_v_B); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/labs/bindings/linalg.pyx":311 * return B * * def matrix_add(A, B): # <<<<<<<<<<<<<< * """ * C = matrix_add(A, B) */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_6linalg_32matrix_add(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_A, PyObject *__pyx_v_B) { fff_matrix *__pyx_v_a; fff_matrix *__pyx_v_b; fff_matrix *__pyx_v_c; PyArrayObject *__pyx_v_C = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("matrix_add", 0); /* "nipy/labs/bindings/linalg.pyx":316 * """ * cdef fff_matrix *a, *b, *c * a = fff_matrix_fromPyArray(A) # <<<<<<<<<<<<<< * b = fff_matrix_fromPyArray(B) * c = fff_matrix_new(a.size1, a.size2) */ if (!(likely(((__pyx_v_A) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_A, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 316; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = __pyx_v_A; __Pyx_INCREF(__pyx_t_1); __pyx_v_a = fff_matrix_fromPyArray(((PyArrayObject *)__pyx_t_1)); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":317 * cdef fff_matrix *a, *b, *c * a = fff_matrix_fromPyArray(A) * b = fff_matrix_fromPyArray(B) # <<<<<<<<<<<<<< * c = fff_matrix_new(a.size1, a.size2) * fff_matrix_memcpy(c, a) */ if (!(likely(((__pyx_v_B) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_B, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 317; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = __pyx_v_B; __Pyx_INCREF(__pyx_t_1); 
__pyx_v_b = fff_matrix_fromPyArray(((PyArrayObject *)__pyx_t_1)); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":318 * a = fff_matrix_fromPyArray(A) * b = fff_matrix_fromPyArray(B) * c = fff_matrix_new(a.size1, a.size2) # <<<<<<<<<<<<<< * fff_matrix_memcpy(c, a) * fff_matrix_add(c, b) */ __pyx_v_c = fff_matrix_new(__pyx_v_a->size1, __pyx_v_a->size2); /* "nipy/labs/bindings/linalg.pyx":319 * b = fff_matrix_fromPyArray(B) * c = fff_matrix_new(a.size1, a.size2) * fff_matrix_memcpy(c, a) # <<<<<<<<<<<<<< * fff_matrix_add(c, b) * C = fff_matrix_toPyArray(c) */ fff_matrix_memcpy(__pyx_v_c, __pyx_v_a); /* "nipy/labs/bindings/linalg.pyx":320 * c = fff_matrix_new(a.size1, a.size2) * fff_matrix_memcpy(c, a) * fff_matrix_add(c, b) # <<<<<<<<<<<<<< * C = fff_matrix_toPyArray(c) * return C */ fff_matrix_add(__pyx_v_c, __pyx_v_b); /* "nipy/labs/bindings/linalg.pyx":321 * fff_matrix_memcpy(c, a) * fff_matrix_add(c, b) * C = fff_matrix_toPyArray(c) # <<<<<<<<<<<<<< * return C * */ __pyx_t_1 = ((PyObject *)fff_matrix_toPyArray(__pyx_v_c)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 321; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_v_C = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":322 * fff_matrix_add(c, b) * C = fff_matrix_toPyArray(c) * return C # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_C)); __pyx_r = ((PyObject *)__pyx_v_C); goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("nipy.labs.bindings.linalg.matrix_add", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_C); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/labs/bindings/linalg.pyx":326 * * ## fff_blas.h * cdef CBLAS_TRANSPOSE_t flag_transpose( int flag ): # <<<<<<<<<<<<<< * cdef CBLAS_TRANSPOSE_t x * if flag <= 0: */ static CBLAS_TRANSPOSE_t __pyx_f_4nipy_4labs_8bindings_6linalg_flag_transpose(int __pyx_v_flag) { CBLAS_TRANSPOSE_t __pyx_v_x; CBLAS_TRANSPOSE_t __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("flag_transpose", 0); /* "nipy/labs/bindings/linalg.pyx":328 * cdef CBLAS_TRANSPOSE_t flag_transpose( int flag ): * cdef CBLAS_TRANSPOSE_t x * if flag <= 0: # <<<<<<<<<<<<<< * x = CblasNoTrans * else: */ __pyx_t_1 = (__pyx_v_flag <= 0); if (__pyx_t_1) { /* "nipy/labs/bindings/linalg.pyx":329 * cdef CBLAS_TRANSPOSE_t x * if flag <= 0: * x = CblasNoTrans # <<<<<<<<<<<<<< * else: * x = CblasTrans */ __pyx_v_x = CblasNoTrans; goto __pyx_L3; } /*else*/ { /* "nipy/labs/bindings/linalg.pyx":331 * x = CblasNoTrans * else: * x = CblasTrans # <<<<<<<<<<<<<< * return x * */ __pyx_v_x = CblasTrans; } __pyx_L3:; /* "nipy/labs/bindings/linalg.pyx":332 * else: * x = CblasTrans * return x # <<<<<<<<<<<<<< * * cdef CBLAS_UPLO_t flag_uplo( int flag ): */ __pyx_r = __pyx_v_x; goto __pyx_L0; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/labs/bindings/linalg.pyx":334 * return x * * cdef CBLAS_UPLO_t flag_uplo( int flag ): # <<<<<<<<<<<<<< * cdef CBLAS_UPLO_t x * if flag <= 0: */ static CBLAS_UPLO_t __pyx_f_4nipy_4labs_8bindings_6linalg_flag_uplo(int __pyx_v_flag) { CBLAS_UPLO_t __pyx_v_x; CBLAS_UPLO_t __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("flag_uplo", 0); /* "nipy/labs/bindings/linalg.pyx":336 * cdef CBLAS_UPLO_t flag_uplo( int flag ): * 
cdef CBLAS_UPLO_t x * if flag <= 0: # <<<<<<<<<<<<<< * x = CblasUpper * else: */ __pyx_t_1 = (__pyx_v_flag <= 0); if (__pyx_t_1) { /* "nipy/labs/bindings/linalg.pyx":337 * cdef CBLAS_UPLO_t x * if flag <= 0: * x = CblasUpper # <<<<<<<<<<<<<< * else: * x = CblasLower */ __pyx_v_x = CblasUpper; goto __pyx_L3; } /*else*/ { /* "nipy/labs/bindings/linalg.pyx":339 * x = CblasUpper * else: * x = CblasLower # <<<<<<<<<<<<<< * return x * */ __pyx_v_x = CblasLower; } __pyx_L3:; /* "nipy/labs/bindings/linalg.pyx":340 * else: * x = CblasLower * return x # <<<<<<<<<<<<<< * * cdef CBLAS_DIAG_t flag_diag( int flag ): */ __pyx_r = __pyx_v_x; goto __pyx_L0; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/labs/bindings/linalg.pyx":342 * return x * * cdef CBLAS_DIAG_t flag_diag( int flag ): # <<<<<<<<<<<<<< * cdef CBLAS_DIAG_t x * if flag <= 0: */ static CBLAS_DIAG_t __pyx_f_4nipy_4labs_8bindings_6linalg_flag_diag(int __pyx_v_flag) { CBLAS_DIAG_t __pyx_v_x; CBLAS_DIAG_t __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("flag_diag", 0); /* "nipy/labs/bindings/linalg.pyx":344 * cdef CBLAS_DIAG_t flag_diag( int flag ): * cdef CBLAS_DIAG_t x * if flag <= 0: # <<<<<<<<<<<<<< * x = CblasNonUnit * else: */ __pyx_t_1 = (__pyx_v_flag <= 0); if (__pyx_t_1) { /* "nipy/labs/bindings/linalg.pyx":345 * cdef CBLAS_DIAG_t x * if flag <= 0: * x = CblasNonUnit # <<<<<<<<<<<<<< * else: * x = CblasUnit */ __pyx_v_x = CblasNonUnit; goto __pyx_L3; } /*else*/ { /* "nipy/labs/bindings/linalg.pyx":347 * x = CblasNonUnit * else: * x = CblasUnit # <<<<<<<<<<<<<< * return x * */ __pyx_v_x = CblasUnit; } __pyx_L3:; /* "nipy/labs/bindings/linalg.pyx":348 * else: * x = CblasUnit * return x # <<<<<<<<<<<<<< * * cdef CBLAS_SIDE_t flag_side( int flag ): */ __pyx_r = __pyx_v_x; goto __pyx_L0; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/labs/bindings/linalg.pyx":350 * return x * * cdef CBLAS_SIDE_t flag_side( int flag ): # <<<<<<<<<<<<<< * cdef CBLAS_SIDE_t x * if flag <= 0: */ static CBLAS_SIDE_t __pyx_f_4nipy_4labs_8bindings_6linalg_flag_side(int __pyx_v_flag) { CBLAS_SIDE_t __pyx_v_x; CBLAS_SIDE_t __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("flag_side", 0); /* "nipy/labs/bindings/linalg.pyx":352 * cdef CBLAS_SIDE_t flag_side( int flag ): * cdef CBLAS_SIDE_t x * if flag <= 0: # <<<<<<<<<<<<<< * x = CblasLeft * else: */ __pyx_t_1 = (__pyx_v_flag <= 0); if (__pyx_t_1) { /* "nipy/labs/bindings/linalg.pyx":353 * cdef CBLAS_SIDE_t x * if flag <= 0: * x = CblasLeft # <<<<<<<<<<<<<< * else: * x = CblasRight */ __pyx_v_x = CblasLeft; goto __pyx_L3; } /*else*/ { /* "nipy/labs/bindings/linalg.pyx":355 * x = CblasLeft * else: * x = CblasRight # <<<<<<<<<<<<<< * return x * */ __pyx_v_x = CblasRight; } __pyx_L3:; /* "nipy/labs/bindings/linalg.pyx":356 * else: * x = CblasRight * return x # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_x; goto __pyx_L0; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_8bindings_6linalg_35blas_dnrm2(PyObject *__pyx_self, PyObject *__pyx_v_X); /*proto*/ static PyMethodDef __pyx_mdef_4nipy_4labs_8bindings_6linalg_35blas_dnrm2 = {__Pyx_NAMESTR("blas_dnrm2"), (PyCFunction)__pyx_pw_4nipy_4labs_8bindings_6linalg_35blas_dnrm2, METH_O, __Pyx_DOCSTR(0)}; static PyObject *__pyx_pw_4nipy_4labs_8bindings_6linalg_35blas_dnrm2(PyObject *__pyx_self, PyObject *__pyx_v_X) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("blas_dnrm2 
(wrapper)", 0); __pyx_r = __pyx_pf_4nipy_4labs_8bindings_6linalg_34blas_dnrm2(__pyx_self, ((PyObject *)__pyx_v_X)); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/labs/bindings/linalg.pyx":360 * * ### BLAS 1 * def blas_dnrm2(X): # <<<<<<<<<<<<<< * cdef fff_vector *x * x = fff_vector_fromPyArray(X) */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_6linalg_34blas_dnrm2(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_X) { fff_vector *__pyx_v_x; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("blas_dnrm2", 0); /* "nipy/labs/bindings/linalg.pyx":362 * def blas_dnrm2(X): * cdef fff_vector *x * x = fff_vector_fromPyArray(X) # <<<<<<<<<<<<<< * return fff_blas_dnrm2(x) * */ if (!(likely(((__pyx_v_X) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_X, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 362; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = __pyx_v_X; __Pyx_INCREF(__pyx_t_1); __pyx_v_x = fff_vector_fromPyArray(((PyArrayObject *)__pyx_t_1)); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":363 * cdef fff_vector *x * x = fff_vector_fromPyArray(X) * return fff_blas_dnrm2(x) # <<<<<<<<<<<<<< * * def blas_dasum(X): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyFloat_FromDouble(fff_blas_dnrm2(__pyx_v_x)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 363; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("nipy.labs.bindings.linalg.blas_dnrm2", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_8bindings_6linalg_37blas_dasum(PyObject *__pyx_self, PyObject *__pyx_v_X); /*proto*/ static PyMethodDef __pyx_mdef_4nipy_4labs_8bindings_6linalg_37blas_dasum = {__Pyx_NAMESTR("blas_dasum"), (PyCFunction)__pyx_pw_4nipy_4labs_8bindings_6linalg_37blas_dasum, METH_O, __Pyx_DOCSTR(0)}; static PyObject *__pyx_pw_4nipy_4labs_8bindings_6linalg_37blas_dasum(PyObject *__pyx_self, PyObject *__pyx_v_X) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("blas_dasum (wrapper)", 0); __pyx_r = __pyx_pf_4nipy_4labs_8bindings_6linalg_36blas_dasum(__pyx_self, ((PyObject *)__pyx_v_X)); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/labs/bindings/linalg.pyx":365 * return fff_blas_dnrm2(x) * * def blas_dasum(X): # <<<<<<<<<<<<<< * cdef fff_vector *x * x = fff_vector_fromPyArray(X) */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_6linalg_36blas_dasum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_X) { fff_vector *__pyx_v_x; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("blas_dasum", 0); /* "nipy/labs/bindings/linalg.pyx":367 * def blas_dasum(X): * cdef fff_vector *x * x = fff_vector_fromPyArray(X) # <<<<<<<<<<<<<< * return fff_blas_dasum(x) * */ if (!(likely(((__pyx_v_X) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_X, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 367; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = 
__pyx_v_X; __Pyx_INCREF(__pyx_t_1); __pyx_v_x = fff_vector_fromPyArray(((PyArrayObject *)__pyx_t_1)); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":368 * cdef fff_vector *x * x = fff_vector_fromPyArray(X) * return fff_blas_dasum(x) # <<<<<<<<<<<<<< * * def blas_ddot(X, Y): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyFloat_FromDouble(fff_blas_dasum(__pyx_v_x)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 368; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("nipy.labs.bindings.linalg.blas_dasum", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_8bindings_6linalg_39blas_ddot(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_4nipy_4labs_8bindings_6linalg_39blas_ddot = {__Pyx_NAMESTR("blas_ddot"), (PyCFunction)__pyx_pw_4nipy_4labs_8bindings_6linalg_39blas_ddot, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(0)}; static PyObject *__pyx_pw_4nipy_4labs_8bindings_6linalg_39blas_ddot(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_X = 0; PyObject *__pyx_v_Y = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("blas_ddot (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__X,&__pyx_n_s__Y,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__X)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__Y)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("blas_ddot", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 370; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "blas_ddot") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 370; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_X = values[0]; __pyx_v_Y = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("blas_ddot", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 370; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.labs.bindings.linalg.blas_ddot", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4nipy_4labs_8bindings_6linalg_38blas_ddot(__pyx_self, __pyx_v_X, __pyx_v_Y); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/labs/bindings/linalg.pyx":370 * return fff_blas_dasum(x) * * def blas_ddot(X, Y): # <<<<<<<<<<<<<< 
* cdef fff_vector *x, *y * x = fff_vector_fromPyArray(X) */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_6linalg_38blas_ddot(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_X, PyObject *__pyx_v_Y) { fff_vector *__pyx_v_x; fff_vector *__pyx_v_y; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("blas_ddot", 0); /* "nipy/labs/bindings/linalg.pyx":372 * def blas_ddot(X, Y): * cdef fff_vector *x, *y * x = fff_vector_fromPyArray(X) # <<<<<<<<<<<<<< * y = fff_vector_fromPyArray(Y) * return fff_blas_ddot(x, y) */ if (!(likely(((__pyx_v_X) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_X, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 372; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = __pyx_v_X; __Pyx_INCREF(__pyx_t_1); __pyx_v_x = fff_vector_fromPyArray(((PyArrayObject *)__pyx_t_1)); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":373 * cdef fff_vector *x, *y * x = fff_vector_fromPyArray(X) * y = fff_vector_fromPyArray(Y) # <<<<<<<<<<<<<< * return fff_blas_ddot(x, y) * */ if (!(likely(((__pyx_v_Y) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_Y, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 373; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = __pyx_v_Y; __Pyx_INCREF(__pyx_t_1); __pyx_v_y = fff_vector_fromPyArray(((PyArrayObject *)__pyx_t_1)); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":374 * x = fff_vector_fromPyArray(X) * y = fff_vector_fromPyArray(Y) * return fff_blas_ddot(x, y) # <<<<<<<<<<<<<< * * def blas_daxpy(double alpha, X, Y): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyFloat_FromDouble(fff_blas_ddot(__pyx_v_x, __pyx_v_y)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 374; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("nipy.labs.bindings.linalg.blas_ddot", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_8bindings_6linalg_41blas_daxpy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_4nipy_4labs_8bindings_6linalg_41blas_daxpy = {__Pyx_NAMESTR("blas_daxpy"), (PyCFunction)__pyx_pw_4nipy_4labs_8bindings_6linalg_41blas_daxpy, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(0)}; static PyObject *__pyx_pw_4nipy_4labs_8bindings_6linalg_41blas_daxpy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { double __pyx_v_alpha; PyObject *__pyx_v_X = 0; PyObject *__pyx_v_Y = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("blas_daxpy (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__alpha,&__pyx_n_s__X,&__pyx_n_s__Y,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch 
(pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__alpha)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__X)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("blas_daxpy", 1, 3, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 376; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__Y)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("blas_daxpy", 1, 3, 3, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 376; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "blas_daxpy") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 376; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v_alpha = __pyx_PyFloat_AsDouble(values[0]); if (unlikely((__pyx_v_alpha == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 376; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_X = values[1]; __pyx_v_Y = values[2]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("blas_daxpy", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 376; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.labs.bindings.linalg.blas_daxpy", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4nipy_4labs_8bindings_6linalg_40blas_daxpy(__pyx_self, __pyx_v_alpha, __pyx_v_X, __pyx_v_Y); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/labs/bindings/linalg.pyx":376 * return fff_blas_ddot(x, y) * * def blas_daxpy(double alpha, X, Y): # <<<<<<<<<<<<<< * cdef fff_vector *x, *y, *z * x = fff_vector_fromPyArray(X) */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_6linalg_40blas_daxpy(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_alpha, PyObject *__pyx_v_X, PyObject *__pyx_v_Y) { fff_vector *__pyx_v_x; fff_vector *__pyx_v_y; fff_vector *__pyx_v_z; PyArrayObject *__pyx_v_Z = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("blas_daxpy", 0); /* "nipy/labs/bindings/linalg.pyx":378 * def blas_daxpy(double alpha, X, Y): * cdef fff_vector *x, *y, *z * x = fff_vector_fromPyArray(X) # <<<<<<<<<<<<<< * y = fff_vector_fromPyArray(Y) * z = fff_vector_new(y.size) */ if (!(likely(((__pyx_v_X) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_X, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 378; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = __pyx_v_X; __Pyx_INCREF(__pyx_t_1); __pyx_v_x = fff_vector_fromPyArray(((PyArrayObject *)__pyx_t_1)); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":379 * cdef fff_vector *x, *y, *z * x = fff_vector_fromPyArray(X) * y = fff_vector_fromPyArray(Y) # <<<<<<<<<<<<<< * z = fff_vector_new(y.size) * fff_vector_memcpy(z, y) */ if (!(likely(((__pyx_v_Y) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_Y, __pyx_ptype_5numpy_ndarray))))) 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 379; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = __pyx_v_Y; __Pyx_INCREF(__pyx_t_1); __pyx_v_y = fff_vector_fromPyArray(((PyArrayObject *)__pyx_t_1)); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":380 * x = fff_vector_fromPyArray(X) * y = fff_vector_fromPyArray(Y) * z = fff_vector_new(y.size) # <<<<<<<<<<<<<< * fff_vector_memcpy(z, y) * fff_blas_daxpy(alpha, x, z) */ __pyx_v_z = fff_vector_new(__pyx_v_y->size); /* "nipy/labs/bindings/linalg.pyx":381 * y = fff_vector_fromPyArray(Y) * z = fff_vector_new(y.size) * fff_vector_memcpy(z, y) # <<<<<<<<<<<<<< * fff_blas_daxpy(alpha, x, z) * Z = fff_vector_toPyArray(z) */ fff_vector_memcpy(__pyx_v_z, __pyx_v_y); /* "nipy/labs/bindings/linalg.pyx":382 * z = fff_vector_new(y.size) * fff_vector_memcpy(z, y) * fff_blas_daxpy(alpha, x, z) # <<<<<<<<<<<<<< * Z = fff_vector_toPyArray(z) * return Z */ fff_blas_daxpy(__pyx_v_alpha, __pyx_v_x, __pyx_v_z); /* "nipy/labs/bindings/linalg.pyx":383 * fff_vector_memcpy(z, y) * fff_blas_daxpy(alpha, x, z) * Z = fff_vector_toPyArray(z) # <<<<<<<<<<<<<< * return Z * */ __pyx_t_1 = ((PyObject *)fff_vector_toPyArray(__pyx_v_z)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 383; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_v_Z = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":384 * fff_blas_daxpy(alpha, x, z) * Z = fff_vector_toPyArray(z) * return Z # <<<<<<<<<<<<<< * * def blas_dscal(double alpha, X): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_Z)); __pyx_r = ((PyObject *)__pyx_v_Z); goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("nipy.labs.bindings.linalg.blas_daxpy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_Z); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_8bindings_6linalg_43blas_dscal(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_4nipy_4labs_8bindings_6linalg_43blas_dscal = {__Pyx_NAMESTR("blas_dscal"), (PyCFunction)__pyx_pw_4nipy_4labs_8bindings_6linalg_43blas_dscal, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(0)}; static PyObject *__pyx_pw_4nipy_4labs_8bindings_6linalg_43blas_dscal(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { double __pyx_v_alpha; PyObject *__pyx_v_X = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("blas_dscal (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__alpha,&__pyx_n_s__X,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__alpha)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__X)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("blas_dscal", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 386; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if 
(unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "blas_dscal") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 386; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_alpha = __pyx_PyFloat_AsDouble(values[0]); if (unlikely((__pyx_v_alpha == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 386; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_X = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("blas_dscal", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 386; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.labs.bindings.linalg.blas_dscal", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4nipy_4labs_8bindings_6linalg_42blas_dscal(__pyx_self, __pyx_v_alpha, __pyx_v_X); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/labs/bindings/linalg.pyx":386 * return Z * * def blas_dscal(double alpha, X): # <<<<<<<<<<<<<< * cdef fff_vector *x, *y * x = fff_vector_fromPyArray(X) */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_6linalg_42blas_dscal(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_alpha, PyObject *__pyx_v_X) { fff_vector *__pyx_v_x; fff_vector *__pyx_v_y; PyArrayObject *__pyx_v_Y = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("blas_dscal", 0); /* "nipy/labs/bindings/linalg.pyx":388 * def blas_dscal(double alpha, X): * cdef fff_vector *x, *y * x = fff_vector_fromPyArray(X) # <<<<<<<<<<<<<< * y = fff_vector_new(x.size) * fff_vector_memcpy(y, x) */ if (!(likely(((__pyx_v_X) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_X, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 388; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = __pyx_v_X; __Pyx_INCREF(__pyx_t_1); __pyx_v_x = fff_vector_fromPyArray(((PyArrayObject *)__pyx_t_1)); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":389 * cdef fff_vector *x, *y * x = fff_vector_fromPyArray(X) * y = fff_vector_new(x.size) # <<<<<<<<<<<<<< * fff_vector_memcpy(y, x) * fff_blas_dscal(alpha, y) */ __pyx_v_y = fff_vector_new(__pyx_v_x->size); /* "nipy/labs/bindings/linalg.pyx":390 * x = fff_vector_fromPyArray(X) * y = fff_vector_new(x.size) * fff_vector_memcpy(y, x) # <<<<<<<<<<<<<< * fff_blas_dscal(alpha, y) * Y = fff_vector_toPyArray(y) */ fff_vector_memcpy(__pyx_v_y, __pyx_v_x); /* "nipy/labs/bindings/linalg.pyx":391 * y = fff_vector_new(x.size) * fff_vector_memcpy(y, x) * fff_blas_dscal(alpha, y) # <<<<<<<<<<<<<< * Y = fff_vector_toPyArray(y) * return Y */ fff_blas_dscal(__pyx_v_alpha, __pyx_v_y); /* "nipy/labs/bindings/linalg.pyx":392 * fff_vector_memcpy(y, x) * fff_blas_dscal(alpha, y) * Y = fff_vector_toPyArray(y) # <<<<<<<<<<<<<< * return Y * */ __pyx_t_1 = ((PyObject *)fff_vector_toPyArray(__pyx_v_y)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 392; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_v_Y = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* 
"nipy/labs/bindings/linalg.pyx":393 * fff_blas_dscal(alpha, y) * Y = fff_vector_toPyArray(y) * return Y # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_Y)); __pyx_r = ((PyObject *)__pyx_v_Y); goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("nipy.labs.bindings.linalg.blas_dscal", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_Y); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_8bindings_6linalg_45blas_dgemm(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_4labs_8bindings_6linalg_44blas_dgemm[] = "\n D = blas_dgemm(int TransA, int TransB, double alpha, A, B, double beta, C).\n \n Compute the matrix-matrix product and sum D = alpha op(A) op(B) +\n beta C where op(A) = A, A^T, A^H for TransA = CblasNoTrans,\n CblasTrans, CblasConjTrans and similarly for the parameter TransB.\n "; static PyMethodDef __pyx_mdef_4nipy_4labs_8bindings_6linalg_45blas_dgemm = {__Pyx_NAMESTR("blas_dgemm"), (PyCFunction)__pyx_pw_4nipy_4labs_8bindings_6linalg_45blas_dgemm, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4nipy_4labs_8bindings_6linalg_44blas_dgemm)}; static PyObject *__pyx_pw_4nipy_4labs_8bindings_6linalg_45blas_dgemm(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { int __pyx_v_TransA; int __pyx_v_TransB; double __pyx_v_alpha; PyObject *__pyx_v_A = 0; PyObject *__pyx_v_B = 0; double __pyx_v_beta; PyObject *__pyx_v_C = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("blas_dgemm (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__TransA,&__pyx_n_s__TransB,&__pyx_n_s__alpha,&__pyx_n_s__A,&__pyx_n_s__B,&__pyx_n_s__beta,&__pyx_n_s__C,0}; PyObject* values[7] = {0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__TransA)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__TransB)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("blas_dgemm", 1, 7, 7, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 398; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__alpha)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("blas_dgemm", 1, 7, 7, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 398; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__A)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("blas_dgemm", 1, 7, 7, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 398; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, 
__pyx_n_s__B)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("blas_dgemm", 1, 7, 7, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 398; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__beta)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("blas_dgemm", 1, 7, 7, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 398; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__C)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("blas_dgemm", 1, 7, 7, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 398; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "blas_dgemm") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 398; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 7) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); } __pyx_v_TransA = __Pyx_PyInt_AsInt(values[0]); if (unlikely((__pyx_v_TransA == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 398; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_TransB = __Pyx_PyInt_AsInt(values[1]); if (unlikely((__pyx_v_TransB == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 398; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_alpha = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_alpha == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 398; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_A = values[3]; __pyx_v_B = values[4]; __pyx_v_beta = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_beta == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 398; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_C = values[6]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("blas_dgemm", 1, 7, 7, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 398; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.labs.bindings.linalg.blas_dgemm", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4nipy_4labs_8bindings_6linalg_44blas_dgemm(__pyx_self, __pyx_v_TransA, __pyx_v_TransB, __pyx_v_alpha, __pyx_v_A, __pyx_v_B, __pyx_v_beta, __pyx_v_C); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/labs/bindings/linalg.pyx":398 * * ### BLAS 3 * def blas_dgemm(int TransA, int TransB, double alpha, A, B, double beta, C): # <<<<<<<<<<<<<< * """ * D = blas_dgemm(int TransA, int TransB, double alpha, A, B, double beta, C). 
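 *
 * Editor's note (addendum to the annotation above, not produced by Cython): the
 * docstring defines D = alpha op(A) op(B) + beta C. A minimal, hedged Python
 * usage sketch follows; the integer flag value 0 is assumed to be mapped to
 * CblasNoTrans by flag_transpose() (the actual mapping lives in linalg.pyx and
 * is not shown here), and the arrays are assumed to be 2-D float64:
 *
 *     import numpy as np
 *     from nipy.labs.bindings.linalg import blas_dgemm
 *     A = np.random.rand(3, 4)   # float64 by default
 *     B = np.random.rand(4, 5)
 *     C = np.random.rand(3, 5)
 *     D = blas_dgemm(0, 0, 1.0, A, B, 0.5, C)
 *     # Under the flag assumption above, D should be close to
 *     # 1.0 * A.dot(B) + 0.5 * C; the output is sized from C (c.size1, c.size2).
 *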
*/ static PyObject *__pyx_pf_4nipy_4labs_8bindings_6linalg_44blas_dgemm(CYTHON_UNUSED PyObject *__pyx_self, int __pyx_v_TransA, int __pyx_v_TransB, double __pyx_v_alpha, PyObject *__pyx_v_A, PyObject *__pyx_v_B, double __pyx_v_beta, PyObject *__pyx_v_C) { fff_matrix *__pyx_v_a; fff_matrix *__pyx_v_b; fff_matrix *__pyx_v_c; fff_matrix *__pyx_v_d; PyArrayObject *__pyx_v_D = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("blas_dgemm", 0); /* "nipy/labs/bindings/linalg.pyx":407 * """ * cdef fff_matrix *a, *b, *c, *d * a = fff_matrix_fromPyArray(A) # <<<<<<<<<<<<<< * b = fff_matrix_fromPyArray(B) * c = fff_matrix_fromPyArray(C) */ if (!(likely(((__pyx_v_A) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_A, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 407; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = __pyx_v_A; __Pyx_INCREF(__pyx_t_1); __pyx_v_a = fff_matrix_fromPyArray(((PyArrayObject *)__pyx_t_1)); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":408 * cdef fff_matrix *a, *b, *c, *d * a = fff_matrix_fromPyArray(A) * b = fff_matrix_fromPyArray(B) # <<<<<<<<<<<<<< * c = fff_matrix_fromPyArray(C) * d = fff_matrix_new(c.size1, c.size2) */ if (!(likely(((__pyx_v_B) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_B, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 408; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = __pyx_v_B; __Pyx_INCREF(__pyx_t_1); __pyx_v_b = fff_matrix_fromPyArray(((PyArrayObject *)__pyx_t_1)); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":409 * a = fff_matrix_fromPyArray(A) * b = fff_matrix_fromPyArray(B) * c = fff_matrix_fromPyArray(C) # <<<<<<<<<<<<<< * d = fff_matrix_new(c.size1, c.size2) * fff_matrix_memcpy(d, c) */ if (!(likely(((__pyx_v_C) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_C, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 409; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = __pyx_v_C; __Pyx_INCREF(__pyx_t_1); __pyx_v_c = fff_matrix_fromPyArray(((PyArrayObject *)__pyx_t_1)); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":410 * b = fff_matrix_fromPyArray(B) * c = fff_matrix_fromPyArray(C) * d = fff_matrix_new(c.size1, c.size2) # <<<<<<<<<<<<<< * fff_matrix_memcpy(d, c) * fff_blas_dgemm(flag_transpose(TransA), flag_transpose(TransB), alpha, a, b, beta, d) */ __pyx_v_d = fff_matrix_new(__pyx_v_c->size1, __pyx_v_c->size2); /* "nipy/labs/bindings/linalg.pyx":411 * c = fff_matrix_fromPyArray(C) * d = fff_matrix_new(c.size1, c.size2) * fff_matrix_memcpy(d, c) # <<<<<<<<<<<<<< * fff_blas_dgemm(flag_transpose(TransA), flag_transpose(TransB), alpha, a, b, beta, d) * fff_matrix_delete(a) */ fff_matrix_memcpy(__pyx_v_d, __pyx_v_c); /* "nipy/labs/bindings/linalg.pyx":412 * d = fff_matrix_new(c.size1, c.size2) * fff_matrix_memcpy(d, c) * fff_blas_dgemm(flag_transpose(TransA), flag_transpose(TransB), alpha, a, b, beta, d) # <<<<<<<<<<<<<< * fff_matrix_delete(a) * fff_matrix_delete(b) */ fff_blas_dgemm(__pyx_f_4nipy_4labs_8bindings_6linalg_flag_transpose(__pyx_v_TransA), __pyx_f_4nipy_4labs_8bindings_6linalg_flag_transpose(__pyx_v_TransB), __pyx_v_alpha, __pyx_v_a, __pyx_v_b, __pyx_v_beta, __pyx_v_d); /* "nipy/labs/bindings/linalg.pyx":413 * fff_matrix_memcpy(d, c) * fff_blas_dgemm(flag_transpose(TransA), flag_transpose(TransB), 
alpha, a, b, beta, d) * fff_matrix_delete(a) # <<<<<<<<<<<<<< * fff_matrix_delete(b) * fff_matrix_delete(c) */ fff_matrix_delete(__pyx_v_a); /* "nipy/labs/bindings/linalg.pyx":414 * fff_blas_dgemm(flag_transpose(TransA), flag_transpose(TransB), alpha, a, b, beta, d) * fff_matrix_delete(a) * fff_matrix_delete(b) # <<<<<<<<<<<<<< * fff_matrix_delete(c) * D = fff_matrix_toPyArray(d) */ fff_matrix_delete(__pyx_v_b); /* "nipy/labs/bindings/linalg.pyx":415 * fff_matrix_delete(a) * fff_matrix_delete(b) * fff_matrix_delete(c) # <<<<<<<<<<<<<< * D = fff_matrix_toPyArray(d) * return D */ fff_matrix_delete(__pyx_v_c); /* "nipy/labs/bindings/linalg.pyx":416 * fff_matrix_delete(b) * fff_matrix_delete(c) * D = fff_matrix_toPyArray(d) # <<<<<<<<<<<<<< * return D * */ __pyx_t_1 = ((PyObject *)fff_matrix_toPyArray(__pyx_v_d)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 416; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_v_D = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":417 * fff_matrix_delete(c) * D = fff_matrix_toPyArray(d) * return D # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_D)); __pyx_r = ((PyObject *)__pyx_v_D); goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("nipy.labs.bindings.linalg.blas_dgemm", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_D); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_8bindings_6linalg_47blas_dsymm(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_4labs_8bindings_6linalg_46blas_dsymm[] = "\n D = blas_dsymm(int Side, int Uplo, double alpha, A, B, beta, C).\n \n Compute the matrix-matrix product and sum C = \007lpha A B + \010eta C\n for Side is CblasLeft and C = \007lpha B A + \010eta C for Side is\n CblasRight, where the matrix A is symmetric. 
When Uplo is\n CblasUpper then the upper triangle and diagonal of A are used, and\n when Uplo is CblasLower then the lower triangle and diagonal of A\n are used.\n "; static PyMethodDef __pyx_mdef_4nipy_4labs_8bindings_6linalg_47blas_dsymm = {__Pyx_NAMESTR("blas_dsymm"), (PyCFunction)__pyx_pw_4nipy_4labs_8bindings_6linalg_47blas_dsymm, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4nipy_4labs_8bindings_6linalg_46blas_dsymm)}; static PyObject *__pyx_pw_4nipy_4labs_8bindings_6linalg_47blas_dsymm(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { int __pyx_v_Side; int __pyx_v_Uplo; double __pyx_v_alpha; PyObject *__pyx_v_A = 0; PyObject *__pyx_v_B = 0; PyObject *__pyx_v_beta = 0; PyObject *__pyx_v_C = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("blas_dsymm (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__Side,&__pyx_n_s__Uplo,&__pyx_n_s__alpha,&__pyx_n_s__A,&__pyx_n_s__B,&__pyx_n_s__beta,&__pyx_n_s__C,0}; PyObject* values[7] = {0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__Side)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__Uplo)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("blas_dsymm", 1, 7, 7, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 420; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__alpha)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("blas_dsymm", 1, 7, 7, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 420; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__A)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("blas_dsymm", 1, 7, 7, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 420; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__B)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("blas_dsymm", 1, 7, 7, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 420; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__beta)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("blas_dsymm", 1, 7, 7, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 420; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__C)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("blas_dsymm", 1, 7, 7, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 420; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "blas_dsymm") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 420; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if 
(PyTuple_GET_SIZE(__pyx_args) != 7) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); } __pyx_v_Side = __Pyx_PyInt_AsInt(values[0]); if (unlikely((__pyx_v_Side == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 420; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_Uplo = __Pyx_PyInt_AsInt(values[1]); if (unlikely((__pyx_v_Uplo == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 420; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_alpha = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_alpha == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 420; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_A = values[3]; __pyx_v_B = values[4]; __pyx_v_beta = values[5]; __pyx_v_C = values[6]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("blas_dsymm", 1, 7, 7, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 420; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.labs.bindings.linalg.blas_dsymm", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4nipy_4labs_8bindings_6linalg_46blas_dsymm(__pyx_self, __pyx_v_Side, __pyx_v_Uplo, __pyx_v_alpha, __pyx_v_A, __pyx_v_B, __pyx_v_beta, __pyx_v_C); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/labs/bindings/linalg.pyx":420 * * * def blas_dsymm(int Side, int Uplo, double alpha, A, B, beta, C): # <<<<<<<<<<<<<< * """ * D = blas_dsymm(int Side, int Uplo, double alpha, A, B, beta, C). 
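 *
 * Editor's note (addendum, not produced by Cython): the docstring defines
 * D = alpha A B + beta C for Side = CblasLeft, with A symmetric and only the
 * Uplo-selected triangle of A referenced. A hedged Python sketch; the flag
 * values 0 are assumed to select CblasLeft and CblasUpper via flag_side() and
 * flag_uplo() (the real mapping is in linalg.pyx), inputs assumed 2-D float64:
 *
 *     import numpy as np
 *     from nipy.labs.bindings.linalg import blas_dsymm
 *     A = np.random.rand(4, 4)
 *     A = 0.5 * (A + A.T)          # make A symmetric
 *     B = np.random.rand(4, 3)
 *     C = np.random.rand(4, 3)
 *     D = blas_dsymm(0, 0, 1.0, A, B, 2.0, C)
 *     # Under the flag assumptions, D should be close to A.dot(B) + 2.0 * C.
 *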
*/ static PyObject *__pyx_pf_4nipy_4labs_8bindings_6linalg_46blas_dsymm(CYTHON_UNUSED PyObject *__pyx_self, int __pyx_v_Side, int __pyx_v_Uplo, double __pyx_v_alpha, PyObject *__pyx_v_A, PyObject *__pyx_v_B, PyObject *__pyx_v_beta, PyObject *__pyx_v_C) { fff_matrix *__pyx_v_a; fff_matrix *__pyx_v_b; fff_matrix *__pyx_v_c; fff_matrix *__pyx_v_d; PyArrayObject *__pyx_v_D = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; double __pyx_t_2; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("blas_dsymm", 0); /* "nipy/labs/bindings/linalg.pyx":432 * """ * cdef fff_matrix *a, *b, *c, *d * a = fff_matrix_fromPyArray(A) # <<<<<<<<<<<<<< * b = fff_matrix_fromPyArray(B) * c = fff_matrix_fromPyArray(C) */ if (!(likely(((__pyx_v_A) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_A, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 432; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = __pyx_v_A; __Pyx_INCREF(__pyx_t_1); __pyx_v_a = fff_matrix_fromPyArray(((PyArrayObject *)__pyx_t_1)); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":433 * cdef fff_matrix *a, *b, *c, *d * a = fff_matrix_fromPyArray(A) * b = fff_matrix_fromPyArray(B) # <<<<<<<<<<<<<< * c = fff_matrix_fromPyArray(C) * d = fff_matrix_new(c.size1, c.size2) */ if (!(likely(((__pyx_v_B) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_B, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 433; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = __pyx_v_B; __Pyx_INCREF(__pyx_t_1); __pyx_v_b = fff_matrix_fromPyArray(((PyArrayObject *)__pyx_t_1)); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":434 * a = fff_matrix_fromPyArray(A) * b = fff_matrix_fromPyArray(B) * c = fff_matrix_fromPyArray(C) # <<<<<<<<<<<<<< * d = fff_matrix_new(c.size1, c.size2) * fff_matrix_memcpy(d, c) */ if (!(likely(((__pyx_v_C) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_C, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 434; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = __pyx_v_C; __Pyx_INCREF(__pyx_t_1); __pyx_v_c = fff_matrix_fromPyArray(((PyArrayObject *)__pyx_t_1)); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":435 * b = fff_matrix_fromPyArray(B) * c = fff_matrix_fromPyArray(C) * d = fff_matrix_new(c.size1, c.size2) # <<<<<<<<<<<<<< * fff_matrix_memcpy(d, c) * fff_blas_dsymm(flag_side(Side), flag_uplo(Uplo), alpha, a, b, beta, d) */ __pyx_v_d = fff_matrix_new(__pyx_v_c->size1, __pyx_v_c->size2); /* "nipy/labs/bindings/linalg.pyx":436 * c = fff_matrix_fromPyArray(C) * d = fff_matrix_new(c.size1, c.size2) * fff_matrix_memcpy(d, c) # <<<<<<<<<<<<<< * fff_blas_dsymm(flag_side(Side), flag_uplo(Uplo), alpha, a, b, beta, d) * fff_matrix_delete(a) */ fff_matrix_memcpy(__pyx_v_d, __pyx_v_c); /* "nipy/labs/bindings/linalg.pyx":437 * d = fff_matrix_new(c.size1, c.size2) * fff_matrix_memcpy(d, c) * fff_blas_dsymm(flag_side(Side), flag_uplo(Uplo), alpha, a, b, beta, d) # <<<<<<<<<<<<<< * fff_matrix_delete(a) * fff_matrix_delete(b) */ __pyx_t_2 = __pyx_PyFloat_AsDouble(__pyx_v_beta); if (unlikely((__pyx_t_2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 437; __pyx_clineno = __LINE__; goto __pyx_L1_error;} fff_blas_dsymm(__pyx_f_4nipy_4labs_8bindings_6linalg_flag_side(__pyx_v_Side), __pyx_f_4nipy_4labs_8bindings_6linalg_flag_uplo(__pyx_v_Uplo), __pyx_v_alpha, 
__pyx_v_a, __pyx_v_b, __pyx_t_2, __pyx_v_d); /* "nipy/labs/bindings/linalg.pyx":438 * fff_matrix_memcpy(d, c) * fff_blas_dsymm(flag_side(Side), flag_uplo(Uplo), alpha, a, b, beta, d) * fff_matrix_delete(a) # <<<<<<<<<<<<<< * fff_matrix_delete(b) * fff_matrix_delete(c) */ fff_matrix_delete(__pyx_v_a); /* "nipy/labs/bindings/linalg.pyx":439 * fff_blas_dsymm(flag_side(Side), flag_uplo(Uplo), alpha, a, b, beta, d) * fff_matrix_delete(a) * fff_matrix_delete(b) # <<<<<<<<<<<<<< * fff_matrix_delete(c) * D = fff_matrix_toPyArray(d) */ fff_matrix_delete(__pyx_v_b); /* "nipy/labs/bindings/linalg.pyx":440 * fff_matrix_delete(a) * fff_matrix_delete(b) * fff_matrix_delete(c) # <<<<<<<<<<<<<< * D = fff_matrix_toPyArray(d) * return D */ fff_matrix_delete(__pyx_v_c); /* "nipy/labs/bindings/linalg.pyx":441 * fff_matrix_delete(b) * fff_matrix_delete(c) * D = fff_matrix_toPyArray(d) # <<<<<<<<<<<<<< * return D * */ __pyx_t_1 = ((PyObject *)fff_matrix_toPyArray(__pyx_v_d)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 441; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_v_D = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":442 * fff_matrix_delete(c) * D = fff_matrix_toPyArray(d) * return D # <<<<<<<<<<<<<< * * def blas_dtrmm(int Side, int Uplo, int TransA, int Diag, double alpha, A, B): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_D)); __pyx_r = ((PyObject *)__pyx_v_D); goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("nipy.labs.bindings.linalg.blas_dsymm", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_D); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_8bindings_6linalg_49blas_dtrmm(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_4labs_8bindings_6linalg_48blas_dtrmm[] = "\n C = blas_dtrmm(int Side, int Uplo, int TransA, int Diag, double alpha, A, B).\n \n Compute the matrix-matrix product B = \007lpha op(A) B for Side\n is CblasLeft and B = \007lpha B op(A) for Side is CblasRight. The\n matrix A is triangular and op(A) = A, A^T, A^H for TransA =\n CblasNoTrans, CblasTrans, CblasConjTrans. When Uplo is CblasUpper\n then the upper triangle of A is used, and when Uplo is CblasLower\n then the lower triangle of A is used. 
If Diag is CblasNonUnit then\n the diagonal of A is used, but if Diag is CblasUnit then the\n diagonal elements of the matrix A are taken as unity and are not\n referenced.\n "; static PyMethodDef __pyx_mdef_4nipy_4labs_8bindings_6linalg_49blas_dtrmm = {__Pyx_NAMESTR("blas_dtrmm"), (PyCFunction)__pyx_pw_4nipy_4labs_8bindings_6linalg_49blas_dtrmm, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4nipy_4labs_8bindings_6linalg_48blas_dtrmm)}; static PyObject *__pyx_pw_4nipy_4labs_8bindings_6linalg_49blas_dtrmm(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { int __pyx_v_Side; int __pyx_v_Uplo; int __pyx_v_TransA; int __pyx_v_Diag; double __pyx_v_alpha; PyObject *__pyx_v_A = 0; PyObject *__pyx_v_B = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("blas_dtrmm (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__Side,&__pyx_n_s__Uplo,&__pyx_n_s__TransA,&__pyx_n_s__Diag,&__pyx_n_s__alpha,&__pyx_n_s__A,&__pyx_n_s__B,0}; PyObject* values[7] = {0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__Side)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__Uplo)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("blas_dtrmm", 1, 7, 7, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 444; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__TransA)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("blas_dtrmm", 1, 7, 7, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 444; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__Diag)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("blas_dtrmm", 1, 7, 7, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 444; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__alpha)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("blas_dtrmm", 1, 7, 7, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 444; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__A)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("blas_dtrmm", 1, 7, 7, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 444; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__B)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("blas_dtrmm", 1, 7, 7, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 444; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "blas_dtrmm") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 444; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } 
else if (PyTuple_GET_SIZE(__pyx_args) != 7) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); } __pyx_v_Side = __Pyx_PyInt_AsInt(values[0]); if (unlikely((__pyx_v_Side == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 444; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_Uplo = __Pyx_PyInt_AsInt(values[1]); if (unlikely((__pyx_v_Uplo == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 444; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_TransA = __Pyx_PyInt_AsInt(values[2]); if (unlikely((__pyx_v_TransA == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 444; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_Diag = __Pyx_PyInt_AsInt(values[3]); if (unlikely((__pyx_v_Diag == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 444; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_alpha = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_alpha == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 444; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_A = values[5]; __pyx_v_B = values[6]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("blas_dtrmm", 1, 7, 7, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 444; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.labs.bindings.linalg.blas_dtrmm", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4nipy_4labs_8bindings_6linalg_48blas_dtrmm(__pyx_self, __pyx_v_Side, __pyx_v_Uplo, __pyx_v_TransA, __pyx_v_Diag, __pyx_v_alpha, __pyx_v_A, __pyx_v_B); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/labs/bindings/linalg.pyx":444 * return D * * def blas_dtrmm(int Side, int Uplo, int TransA, int Diag, double alpha, A, B): # <<<<<<<<<<<<<< * """ * C = blas_dtrmm(int Side, int Uplo, int TransA, int Diag, double alpha, A, B). 
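 *
 * Editor's note (addendum, not produced by Cython): the docstring describes the
 * triangular product C = alpha op(A) B for Side = CblasLeft. A hedged Python
 * sketch; the flag values 0 are assumed to select CblasLeft, CblasUpper,
 * CblasNoTrans and CblasNonUnit through the flag_* helpers defined in
 * linalg.pyx (an assumption). The generated code sizes the result from A
 * (a.size1, a.size2), so the sketch keeps A and B the same shape:
 *
 *     import numpy as np
 *     from nipy.labs.bindings.linalg import blas_dtrmm
 *     A = np.triu(np.random.rand(4, 4))   # upper-triangular
 *     B = np.random.rand(4, 4)
 *     C = blas_dtrmm(0, 0, 0, 0, 1.0, A, B)
 *     # Under the flag assumptions, C should be close to A.dot(B).
 *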
*/ static PyObject *__pyx_pf_4nipy_4labs_8bindings_6linalg_48blas_dtrmm(CYTHON_UNUSED PyObject *__pyx_self, int __pyx_v_Side, int __pyx_v_Uplo, int __pyx_v_TransA, int __pyx_v_Diag, double __pyx_v_alpha, PyObject *__pyx_v_A, PyObject *__pyx_v_B) { fff_matrix *__pyx_v_a; fff_matrix *__pyx_v_b; fff_matrix *__pyx_v_c; PyArrayObject *__pyx_v_C = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("blas_dtrmm", 0); /* "nipy/labs/bindings/linalg.pyx":459 * """ * cdef fff_matrix *a, *b, *c * a = fff_matrix_fromPyArray(A) # <<<<<<<<<<<<<< * b = fff_matrix_fromPyArray(B) * c = fff_matrix_new(a.size1, a.size2) */ if (!(likely(((__pyx_v_A) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_A, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 459; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = __pyx_v_A; __Pyx_INCREF(__pyx_t_1); __pyx_v_a = fff_matrix_fromPyArray(((PyArrayObject *)__pyx_t_1)); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":460 * cdef fff_matrix *a, *b, *c * a = fff_matrix_fromPyArray(A) * b = fff_matrix_fromPyArray(B) # <<<<<<<<<<<<<< * c = fff_matrix_new(a.size1, a.size2) * fff_matrix_memcpy(c, b) */ if (!(likely(((__pyx_v_B) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_B, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 460; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = __pyx_v_B; __Pyx_INCREF(__pyx_t_1); __pyx_v_b = fff_matrix_fromPyArray(((PyArrayObject *)__pyx_t_1)); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":461 * a = fff_matrix_fromPyArray(A) * b = fff_matrix_fromPyArray(B) * c = fff_matrix_new(a.size1, a.size2) # <<<<<<<<<<<<<< * fff_matrix_memcpy(c, b) * fff_blas_dtrmm(flag_side(Side), flag_uplo(Uplo), flag_transpose(TransA), flag_diag(Diag), */ __pyx_v_c = fff_matrix_new(__pyx_v_a->size1, __pyx_v_a->size2); /* "nipy/labs/bindings/linalg.pyx":462 * b = fff_matrix_fromPyArray(B) * c = fff_matrix_new(a.size1, a.size2) * fff_matrix_memcpy(c, b) # <<<<<<<<<<<<<< * fff_blas_dtrmm(flag_side(Side), flag_uplo(Uplo), flag_transpose(TransA), flag_diag(Diag), * alpha, a, c) */ fff_matrix_memcpy(__pyx_v_c, __pyx_v_b); /* "nipy/labs/bindings/linalg.pyx":464 * fff_matrix_memcpy(c, b) * fff_blas_dtrmm(flag_side(Side), flag_uplo(Uplo), flag_transpose(TransA), flag_diag(Diag), * alpha, a, c) # <<<<<<<<<<<<<< * fff_matrix_delete(a) * fff_matrix_delete(b) */ fff_blas_dtrmm(__pyx_f_4nipy_4labs_8bindings_6linalg_flag_side(__pyx_v_Side), __pyx_f_4nipy_4labs_8bindings_6linalg_flag_uplo(__pyx_v_Uplo), __pyx_f_4nipy_4labs_8bindings_6linalg_flag_transpose(__pyx_v_TransA), __pyx_f_4nipy_4labs_8bindings_6linalg_flag_diag(__pyx_v_Diag), __pyx_v_alpha, __pyx_v_a, __pyx_v_c); /* "nipy/labs/bindings/linalg.pyx":465 * fff_blas_dtrmm(flag_side(Side), flag_uplo(Uplo), flag_transpose(TransA), flag_diag(Diag), * alpha, a, c) * fff_matrix_delete(a) # <<<<<<<<<<<<<< * fff_matrix_delete(b) * C = fff_matrix_toPyArray(c) */ fff_matrix_delete(__pyx_v_a); /* "nipy/labs/bindings/linalg.pyx":466 * alpha, a, c) * fff_matrix_delete(a) * fff_matrix_delete(b) # <<<<<<<<<<<<<< * C = fff_matrix_toPyArray(c) * return C */ fff_matrix_delete(__pyx_v_b); /* "nipy/labs/bindings/linalg.pyx":467 * fff_matrix_delete(a) * fff_matrix_delete(b) * C = fff_matrix_toPyArray(c) # <<<<<<<<<<<<<< * return C * */ __pyx_t_1 = ((PyObject *)fff_matrix_toPyArray(__pyx_v_c)); 
if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 467; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_v_C = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":468 * fff_matrix_delete(b) * C = fff_matrix_toPyArray(c) * return C # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_C)); __pyx_r = ((PyObject *)__pyx_v_C); goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("nipy.labs.bindings.linalg.blas_dtrmm", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_C); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_8bindings_6linalg_51blas_dtrsm(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_4labs_8bindings_6linalg_50blas_dtrsm[] = "\n blas_dtrsm(int Side, int Uplo, int TransA, int Diag, double alpha, A, B).\n \n Compute the inverse-matrix matrix product B = \007lpha\n op(inv(A))B for Side is CblasLeft and B = \007lpha B op(inv(A)) for\n Side is CblasRight. The matrix A is triangular and op(A) = A, A^T,\n A^H for TransA = CblasNoTrans, CblasTrans, CblasConjTrans. When\n Uplo is CblasUpper then the upper triangle of A is used, and when\n Uplo is CblasLower then the lower triangle of A is used. If Diag\n is CblasNonUnit then the diagonal of A is used, but if Diag is\n CblasUnit then the diagonal elements of the matrix A are taken as\n unity and are not referenced.\n "; static PyMethodDef __pyx_mdef_4nipy_4labs_8bindings_6linalg_51blas_dtrsm = {__Pyx_NAMESTR("blas_dtrsm"), (PyCFunction)__pyx_pw_4nipy_4labs_8bindings_6linalg_51blas_dtrsm, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4nipy_4labs_8bindings_6linalg_50blas_dtrsm)}; static PyObject *__pyx_pw_4nipy_4labs_8bindings_6linalg_51blas_dtrsm(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { int __pyx_v_Side; int __pyx_v_Uplo; int __pyx_v_TransA; int __pyx_v_Diag; double __pyx_v_alpha; PyObject *__pyx_v_A = 0; PyObject *__pyx_v_B = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("blas_dtrsm (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__Side,&__pyx_n_s__Uplo,&__pyx_n_s__TransA,&__pyx_n_s__Diag,&__pyx_n_s__alpha,&__pyx_n_s__A,&__pyx_n_s__B,0}; PyObject* values[7] = {0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__Side)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__Uplo)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("blas_dtrsm", 1, 7, 7, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 471; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] 
= PyDict_GetItem(__pyx_kwds, __pyx_n_s__TransA)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("blas_dtrsm", 1, 7, 7, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 471; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__Diag)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("blas_dtrsm", 1, 7, 7, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 471; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__alpha)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("blas_dtrsm", 1, 7, 7, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 471; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__A)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("blas_dtrsm", 1, 7, 7, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 471; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__B)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("blas_dtrsm", 1, 7, 7, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 471; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "blas_dtrsm") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 471; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 7) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); } __pyx_v_Side = __Pyx_PyInt_AsInt(values[0]); if (unlikely((__pyx_v_Side == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 471; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_Uplo = __Pyx_PyInt_AsInt(values[1]); if (unlikely((__pyx_v_Uplo == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 471; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_TransA = __Pyx_PyInt_AsInt(values[2]); if (unlikely((__pyx_v_TransA == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 471; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_Diag = __Pyx_PyInt_AsInt(values[3]); if (unlikely((__pyx_v_Diag == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 471; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_alpha = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_alpha == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 471; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_A = values[5]; __pyx_v_B = values[6]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("blas_dtrsm", 1, 7, 7, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 471; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.labs.bindings.linalg.blas_dtrsm", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4nipy_4labs_8bindings_6linalg_50blas_dtrsm(__pyx_self, __pyx_v_Side, __pyx_v_Uplo, __pyx_v_TransA, __pyx_v_Diag, 
__pyx_v_alpha, __pyx_v_A, __pyx_v_B); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/labs/bindings/linalg.pyx":471 * * * def blas_dtrsm(int Side, int Uplo, int TransA, int Diag, double alpha, A, B): # <<<<<<<<<<<<<< * """ * blas_dtrsm(int Side, int Uplo, int TransA, int Diag, double alpha, A, B). */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_6linalg_50blas_dtrsm(CYTHON_UNUSED PyObject *__pyx_self, int __pyx_v_Side, int __pyx_v_Uplo, int __pyx_v_TransA, int __pyx_v_Diag, double __pyx_v_alpha, PyObject *__pyx_v_A, PyObject *__pyx_v_B) { fff_matrix *__pyx_v_a; fff_matrix *__pyx_v_b; fff_matrix *__pyx_v_c; PyArrayObject *__pyx_v_C = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("blas_dtrsm", 0); /* "nipy/labs/bindings/linalg.pyx":486 * """ * cdef fff_matrix *a, *b, *c * a = fff_matrix_fromPyArray(A) # <<<<<<<<<<<<<< * b = fff_matrix_fromPyArray(B) * c = fff_matrix_new(a.size1, a.size2) */ if (!(likely(((__pyx_v_A) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_A, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 486; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = __pyx_v_A; __Pyx_INCREF(__pyx_t_1); __pyx_v_a = fff_matrix_fromPyArray(((PyArrayObject *)__pyx_t_1)); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":487 * cdef fff_matrix *a, *b, *c * a = fff_matrix_fromPyArray(A) * b = fff_matrix_fromPyArray(B) # <<<<<<<<<<<<<< * c = fff_matrix_new(a.size1, a.size2) * fff_matrix_memcpy(c, b) */ if (!(likely(((__pyx_v_B) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_B, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 487; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = __pyx_v_B; __Pyx_INCREF(__pyx_t_1); __pyx_v_b = fff_matrix_fromPyArray(((PyArrayObject *)__pyx_t_1)); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":488 * a = fff_matrix_fromPyArray(A) * b = fff_matrix_fromPyArray(B) * c = fff_matrix_new(a.size1, a.size2) # <<<<<<<<<<<<<< * fff_matrix_memcpy(c, b) * fff_blas_dtrsm(flag_side(Side), flag_uplo(Uplo), flag_transpose(TransA), flag_diag(Diag), */ __pyx_v_c = fff_matrix_new(__pyx_v_a->size1, __pyx_v_a->size2); /* "nipy/labs/bindings/linalg.pyx":489 * b = fff_matrix_fromPyArray(B) * c = fff_matrix_new(a.size1, a.size2) * fff_matrix_memcpy(c, b) # <<<<<<<<<<<<<< * fff_blas_dtrsm(flag_side(Side), flag_uplo(Uplo), flag_transpose(TransA), flag_diag(Diag), * alpha, a, c) */ fff_matrix_memcpy(__pyx_v_c, __pyx_v_b); /* "nipy/labs/bindings/linalg.pyx":491 * fff_matrix_memcpy(c, b) * fff_blas_dtrsm(flag_side(Side), flag_uplo(Uplo), flag_transpose(TransA), flag_diag(Diag), * alpha, a, c) # <<<<<<<<<<<<<< * fff_matrix_delete(a) * fff_matrix_delete(b) */ fff_blas_dtrsm(__pyx_f_4nipy_4labs_8bindings_6linalg_flag_side(__pyx_v_Side), __pyx_f_4nipy_4labs_8bindings_6linalg_flag_uplo(__pyx_v_Uplo), __pyx_f_4nipy_4labs_8bindings_6linalg_flag_transpose(__pyx_v_TransA), __pyx_f_4nipy_4labs_8bindings_6linalg_flag_diag(__pyx_v_Diag), __pyx_v_alpha, __pyx_v_a, __pyx_v_c); /* "nipy/labs/bindings/linalg.pyx":492 * fff_blas_dtrsm(flag_side(Side), flag_uplo(Uplo), flag_transpose(TransA), flag_diag(Diag), * alpha, a, c) * fff_matrix_delete(a) # <<<<<<<<<<<<<< * fff_matrix_delete(b) * C = fff_matrix_toPyArray(c) */ fff_matrix_delete(__pyx_v_a); /* "nipy/labs/bindings/linalg.pyx":493 * alpha, a, c) * fff_matrix_delete(a) * 
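 *
 * Editor's note (addendum, not produced by Cython): blas_dtrsm, whose body is
 * quoted in these annotations, computes B = alpha op(inv(A)) B for Side =
 * CblasLeft, i.e. it solves a triangular system. A hedged Python sketch; the
 * flag values 0 are assumed to select CblasLeft, CblasUpper, CblasNoTrans and
 * CblasNonUnit (the real mapping is in linalg.pyx), and the result is sized
 * from A (a.size1, a.size2), so A and B are kept the same shape here:
 *
 *     import numpy as np
 *     from nipy.labs.bindings.linalg import blas_dtrsm
 *     A = np.triu(np.random.rand(4, 4)) + 4.0 * np.eye(4)   # well-conditioned
 *     B = np.random.rand(4, 4)
 *     X = blas_dtrsm(0, 0, 0, 0, 1.0, A, B)
 *     # Under the flag assumptions, X should be close to np.linalg.solve(A, B).
 *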
fff_matrix_delete(b) # <<<<<<<<<<<<<< * C = fff_matrix_toPyArray(c) * return C */ fff_matrix_delete(__pyx_v_b); /* "nipy/labs/bindings/linalg.pyx":494 * fff_matrix_delete(a) * fff_matrix_delete(b) * C = fff_matrix_toPyArray(c) # <<<<<<<<<<<<<< * return C * */ __pyx_t_1 = ((PyObject *)fff_matrix_toPyArray(__pyx_v_c)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 494; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_v_C = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":495 * fff_matrix_delete(b) * C = fff_matrix_toPyArray(c) * return C # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_C)); __pyx_r = ((PyObject *)__pyx_v_C); goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("nipy.labs.bindings.linalg.blas_dtrsm", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_C); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_8bindings_6linalg_53blas_dsyrk(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_4labs_8bindings_6linalg_52blas_dsyrk[] = "\n D = blas_dsyrk(int Uplo, int Trans, double alpha, A, double beta, C).\n \n Compute a rank-k update of the symmetric matrix C, C = \007lpha A\n A^T + \010eta C when Trans is CblasNoTrans and C = \007lpha A^T A +\n \010eta C when Trans is CblasTrans. Since the matrix C is symmetric\n only its upper half or lower half need to be stored. When Uplo is\n CblasUpper then the upper triangle and diagonal of C are used, and\n when Uplo is CblasLower then the lower triangle and diagonal of C\n are used.\n "; static PyMethodDef __pyx_mdef_4nipy_4labs_8bindings_6linalg_53blas_dsyrk = {__Pyx_NAMESTR("blas_dsyrk"), (PyCFunction)__pyx_pw_4nipy_4labs_8bindings_6linalg_53blas_dsyrk, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4nipy_4labs_8bindings_6linalg_52blas_dsyrk)}; static PyObject *__pyx_pw_4nipy_4labs_8bindings_6linalg_53blas_dsyrk(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { int __pyx_v_Uplo; int __pyx_v_Trans; double __pyx_v_alpha; PyObject *__pyx_v_A = 0; double __pyx_v_beta; PyObject *__pyx_v_C = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("blas_dsyrk (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__Uplo,&__pyx_n_s__Trans,&__pyx_n_s__alpha,&__pyx_n_s__A,&__pyx_n_s__beta,&__pyx_n_s__C,0}; PyObject* values[6] = {0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__Uplo)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__Trans)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("blas_dsyrk", 1, 6, 6, 1); {__pyx_filename = __pyx_f[0]; 
__pyx_lineno = 498; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__alpha)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("blas_dsyrk", 1, 6, 6, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 498; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__A)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("blas_dsyrk", 1, 6, 6, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 498; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__beta)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("blas_dsyrk", 1, 6, 6, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 498; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__C)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("blas_dsyrk", 1, 6, 6, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 498; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "blas_dsyrk") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 498; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 6) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); } __pyx_v_Uplo = __Pyx_PyInt_AsInt(values[0]); if (unlikely((__pyx_v_Uplo == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 498; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_Trans = __Pyx_PyInt_AsInt(values[1]); if (unlikely((__pyx_v_Trans == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 498; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_alpha = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_alpha == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 498; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_A = values[3]; __pyx_v_beta = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_beta == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 498; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_C = values[5]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("blas_dsyrk", 1, 6, 6, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 498; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.labs.bindings.linalg.blas_dsyrk", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4nipy_4labs_8bindings_6linalg_52blas_dsyrk(__pyx_self, __pyx_v_Uplo, __pyx_v_Trans, __pyx_v_alpha, __pyx_v_A, __pyx_v_beta, __pyx_v_C); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/labs/bindings/linalg.pyx":498 * * * def blas_dsyrk(int Uplo, int Trans, double alpha, A, double beta, C): # <<<<<<<<<<<<<< * """ * D = blas_dsyrk(int Uplo, int Trans, double alpha, A, double beta, C). 
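 *
 * Editor's note (addendum, not produced by Cython): the docstring defines a
 * symmetric rank-k update, D = alpha A A^T + beta C for Trans = CblasNoTrans,
 * touching only the Uplo-selected triangle of the output. A hedged Python
 * sketch; the flag values 0 are assumed to map to CblasUpper and CblasNoTrans
 * (see flag_uplo/flag_transpose in linalg.pyx). The output is sized from A
 * (a.size1, a.size2) and pre-filled with a copy of C, so a square A is used:
 *
 *     import numpy as np
 *     from nipy.labs.bindings.linalg import blas_dsyrk
 *     A = np.random.rand(4, 4)
 *     C = np.random.rand(4, 4)
 *     D = blas_dsyrk(0, 0, 1.0, A, 0.0, C)
 *     # Under the assumptions, the Uplo-selected triangle of D should be close
 *     # to that triangle of A.dot(A.T); the other triangle keeps C's values.
 *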
*/ static PyObject *__pyx_pf_4nipy_4labs_8bindings_6linalg_52blas_dsyrk(CYTHON_UNUSED PyObject *__pyx_self, int __pyx_v_Uplo, int __pyx_v_Trans, double __pyx_v_alpha, PyObject *__pyx_v_A, double __pyx_v_beta, PyObject *__pyx_v_C) { fff_matrix *__pyx_v_a; fff_matrix *__pyx_v_c; fff_matrix *__pyx_v_d; PyArrayObject *__pyx_v_D = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("blas_dsyrk", 0); /* "nipy/labs/bindings/linalg.pyx":511 * """ * cdef fff_matrix *a, *c, *d * a = fff_matrix_fromPyArray(A) # <<<<<<<<<<<<<< * c = fff_matrix_fromPyArray(C) * d = fff_matrix_new(a.size1, a.size2) */ if (!(likely(((__pyx_v_A) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_A, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 511; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = __pyx_v_A; __Pyx_INCREF(__pyx_t_1); __pyx_v_a = fff_matrix_fromPyArray(((PyArrayObject *)__pyx_t_1)); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":512 * cdef fff_matrix *a, *c, *d * a = fff_matrix_fromPyArray(A) * c = fff_matrix_fromPyArray(C) # <<<<<<<<<<<<<< * d = fff_matrix_new(a.size1, a.size2) * fff_matrix_memcpy(d, c) */ if (!(likely(((__pyx_v_C) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_C, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 512; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = __pyx_v_C; __Pyx_INCREF(__pyx_t_1); __pyx_v_c = fff_matrix_fromPyArray(((PyArrayObject *)__pyx_t_1)); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":513 * a = fff_matrix_fromPyArray(A) * c = fff_matrix_fromPyArray(C) * d = fff_matrix_new(a.size1, a.size2) # <<<<<<<<<<<<<< * fff_matrix_memcpy(d, c) * fff_blas_dsyrk(flag_uplo(Uplo), flag_transpose(Trans), alpha, a, beta, d) */ __pyx_v_d = fff_matrix_new(__pyx_v_a->size1, __pyx_v_a->size2); /* "nipy/labs/bindings/linalg.pyx":514 * c = fff_matrix_fromPyArray(C) * d = fff_matrix_new(a.size1, a.size2) * fff_matrix_memcpy(d, c) # <<<<<<<<<<<<<< * fff_blas_dsyrk(flag_uplo(Uplo), flag_transpose(Trans), alpha, a, beta, d) * fff_matrix_delete(a) */ fff_matrix_memcpy(__pyx_v_d, __pyx_v_c); /* "nipy/labs/bindings/linalg.pyx":515 * d = fff_matrix_new(a.size1, a.size2) * fff_matrix_memcpy(d, c) * fff_blas_dsyrk(flag_uplo(Uplo), flag_transpose(Trans), alpha, a, beta, d) # <<<<<<<<<<<<<< * fff_matrix_delete(a) * fff_matrix_delete(c) */ fff_blas_dsyrk(__pyx_f_4nipy_4labs_8bindings_6linalg_flag_uplo(__pyx_v_Uplo), __pyx_f_4nipy_4labs_8bindings_6linalg_flag_transpose(__pyx_v_Trans), __pyx_v_alpha, __pyx_v_a, __pyx_v_beta, __pyx_v_d); /* "nipy/labs/bindings/linalg.pyx":516 * fff_matrix_memcpy(d, c) * fff_blas_dsyrk(flag_uplo(Uplo), flag_transpose(Trans), alpha, a, beta, d) * fff_matrix_delete(a) # <<<<<<<<<<<<<< * fff_matrix_delete(c) * D = fff_matrix_toPyArray(d) */ fff_matrix_delete(__pyx_v_a); /* "nipy/labs/bindings/linalg.pyx":517 * fff_blas_dsyrk(flag_uplo(Uplo), flag_transpose(Trans), alpha, a, beta, d) * fff_matrix_delete(a) * fff_matrix_delete(c) # <<<<<<<<<<<<<< * D = fff_matrix_toPyArray(d) * return D */ fff_matrix_delete(__pyx_v_c); /* "nipy/labs/bindings/linalg.pyx":518 * fff_matrix_delete(a) * fff_matrix_delete(c) * D = fff_matrix_toPyArray(d) # <<<<<<<<<<<<<< * return D * */ __pyx_t_1 = ((PyObject *)fff_matrix_toPyArray(__pyx_v_d)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 518; __pyx_clineno 
= __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_v_D = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":519 * fff_matrix_delete(c) * D = fff_matrix_toPyArray(d) * return D # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_D)); __pyx_r = ((PyObject *)__pyx_v_D); goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("nipy.labs.bindings.linalg.blas_dsyrk", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_D); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_8bindings_6linalg_55blas_dsyr2k(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_4labs_8bindings_6linalg_54blas_dsyr2k[] = "\n Compute a rank-2k update of the symmetric matrix C, C = \007lpha A B^T +\n \007lpha B A^T + \010eta C when Trans is CblasNoTrans and C = \007lpha A^T B\n + \007lpha B^T A + \010eta C when Trans is CblasTrans. Since the matrix C\n is symmetric only its upper half or lower half need to be stored. When\n Uplo is CblasUpper then the upper triangle and diagonal of C are used,\n and when Uplo is CblasLower then the lower triangle and diagonal of C\n are used.\n "; static PyMethodDef __pyx_mdef_4nipy_4labs_8bindings_6linalg_55blas_dsyr2k = {__Pyx_NAMESTR("blas_dsyr2k"), (PyCFunction)__pyx_pw_4nipy_4labs_8bindings_6linalg_55blas_dsyr2k, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4nipy_4labs_8bindings_6linalg_54blas_dsyr2k)}; static PyObject *__pyx_pw_4nipy_4labs_8bindings_6linalg_55blas_dsyr2k(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { int __pyx_v_Uplo; int __pyx_v_Trans; double __pyx_v_alpha; PyObject *__pyx_v_A = 0; PyObject *__pyx_v_B = 0; double __pyx_v_beta; PyObject *__pyx_v_C = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("blas_dsyr2k (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__Uplo,&__pyx_n_s__Trans,&__pyx_n_s__alpha,&__pyx_n_s__A,&__pyx_n_s__B,&__pyx_n_s__beta,&__pyx_n_s__C,0}; PyObject* values[7] = {0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__Uplo)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__Trans)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("blas_dsyr2k", 1, 7, 7, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 522; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__alpha)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("blas_dsyr2k", 1, 7, 7, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 522; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if 
(likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__A)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("blas_dsyr2k", 1, 7, 7, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 522; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__B)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("blas_dsyr2k", 1, 7, 7, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 522; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__beta)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("blas_dsyr2k", 1, 7, 7, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 522; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__C)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("blas_dsyr2k", 1, 7, 7, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 522; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "blas_dsyr2k") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 522; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 7) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); } __pyx_v_Uplo = __Pyx_PyInt_AsInt(values[0]); if (unlikely((__pyx_v_Uplo == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 522; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_Trans = __Pyx_PyInt_AsInt(values[1]); if (unlikely((__pyx_v_Trans == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 522; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_alpha = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_alpha == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 522; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_A = values[3]; __pyx_v_B = values[4]; __pyx_v_beta = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_beta == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 522; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_C = values[6]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("blas_dsyr2k", 1, 7, 7, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 522; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.labs.bindings.linalg.blas_dsyr2k", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4nipy_4labs_8bindings_6linalg_54blas_dsyr2k(__pyx_self, __pyx_v_Uplo, __pyx_v_Trans, __pyx_v_alpha, __pyx_v_A, __pyx_v_B, __pyx_v_beta, __pyx_v_C); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/labs/bindings/linalg.pyx":522 * * * def blas_dsyr2k(int Uplo, int Trans, double alpha, A, B, double beta, C): # <<<<<<<<<<<<<< * """ * Compute a rank-2k update of the symmetric matrix C, C = \alpha A B^T + */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_6linalg_54blas_dsyr2k(CYTHON_UNUSED PyObject *__pyx_self, int 
__pyx_v_Uplo, int __pyx_v_Trans, double __pyx_v_alpha, PyObject *__pyx_v_A, PyObject *__pyx_v_B, double __pyx_v_beta, PyObject *__pyx_v_C) { fff_matrix *__pyx_v_a; fff_matrix *__pyx_v_b; fff_matrix *__pyx_v_c; fff_matrix *__pyx_v_d; PyArrayObject *__pyx_v_D = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("blas_dsyr2k", 0); /* "nipy/labs/bindings/linalg.pyx":533 * """ * cdef fff_matrix *a, *b, *c, *d * a = fff_matrix_fromPyArray(A) # <<<<<<<<<<<<<< * b = fff_matrix_fromPyArray(B) * c = fff_matrix_fromPyArray(C) */ if (!(likely(((__pyx_v_A) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_A, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 533; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = __pyx_v_A; __Pyx_INCREF(__pyx_t_1); __pyx_v_a = fff_matrix_fromPyArray(((PyArrayObject *)__pyx_t_1)); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":534 * cdef fff_matrix *a, *b, *c, *d * a = fff_matrix_fromPyArray(A) * b = fff_matrix_fromPyArray(B) # <<<<<<<<<<<<<< * c = fff_matrix_fromPyArray(C) * d = fff_matrix_new(a.size1, a.size2) */ if (!(likely(((__pyx_v_B) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_B, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 534; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = __pyx_v_B; __Pyx_INCREF(__pyx_t_1); __pyx_v_b = fff_matrix_fromPyArray(((PyArrayObject *)__pyx_t_1)); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":535 * a = fff_matrix_fromPyArray(A) * b = fff_matrix_fromPyArray(B) * c = fff_matrix_fromPyArray(C) # <<<<<<<<<<<<<< * d = fff_matrix_new(a.size1, a.size2) * fff_matrix_memcpy(d, c) */ if (!(likely(((__pyx_v_C) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_C, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 535; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = __pyx_v_C; __Pyx_INCREF(__pyx_t_1); __pyx_v_c = fff_matrix_fromPyArray(((PyArrayObject *)__pyx_t_1)); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":536 * b = fff_matrix_fromPyArray(B) * c = fff_matrix_fromPyArray(C) * d = fff_matrix_new(a.size1, a.size2) # <<<<<<<<<<<<<< * fff_matrix_memcpy(d, c) * fff_blas_dsyr2k(flag_uplo(Uplo), flag_transpose(Trans), alpha, a, b, beta, d) */ __pyx_v_d = fff_matrix_new(__pyx_v_a->size1, __pyx_v_a->size2); /* "nipy/labs/bindings/linalg.pyx":537 * c = fff_matrix_fromPyArray(C) * d = fff_matrix_new(a.size1, a.size2) * fff_matrix_memcpy(d, c) # <<<<<<<<<<<<<< * fff_blas_dsyr2k(flag_uplo(Uplo), flag_transpose(Trans), alpha, a, b, beta, d) * fff_matrix_delete(a) */ fff_matrix_memcpy(__pyx_v_d, __pyx_v_c); /* "nipy/labs/bindings/linalg.pyx":538 * d = fff_matrix_new(a.size1, a.size2) * fff_matrix_memcpy(d, c) * fff_blas_dsyr2k(flag_uplo(Uplo), flag_transpose(Trans), alpha, a, b, beta, d) # <<<<<<<<<<<<<< * fff_matrix_delete(a) * fff_matrix_delete(b) */ fff_blas_dsyr2k(__pyx_f_4nipy_4labs_8bindings_6linalg_flag_uplo(__pyx_v_Uplo), __pyx_f_4nipy_4labs_8bindings_6linalg_flag_transpose(__pyx_v_Trans), __pyx_v_alpha, __pyx_v_a, __pyx_v_b, __pyx_v_beta, __pyx_v_d); /* "nipy/labs/bindings/linalg.pyx":539 * fff_matrix_memcpy(d, c) * fff_blas_dsyr2k(flag_uplo(Uplo), flag_transpose(Trans), alpha, a, b, beta, d) * fff_matrix_delete(a) # <<<<<<<<<<<<<< * fff_matrix_delete(b) * fff_matrix_delete(c) */ fff_matrix_delete(__pyx_v_a); /* 
"nipy/labs/bindings/linalg.pyx":540 * fff_blas_dsyr2k(flag_uplo(Uplo), flag_transpose(Trans), alpha, a, b, beta, d) * fff_matrix_delete(a) * fff_matrix_delete(b) # <<<<<<<<<<<<<< * fff_matrix_delete(c) * D = fff_matrix_toPyArray(d) */ fff_matrix_delete(__pyx_v_b); /* "nipy/labs/bindings/linalg.pyx":541 * fff_matrix_delete(a) * fff_matrix_delete(b) * fff_matrix_delete(c) # <<<<<<<<<<<<<< * D = fff_matrix_toPyArray(d) * return D */ fff_matrix_delete(__pyx_v_c); /* "nipy/labs/bindings/linalg.pyx":542 * fff_matrix_delete(b) * fff_matrix_delete(c) * D = fff_matrix_toPyArray(d) # <<<<<<<<<<<<<< * return D * */ __pyx_t_1 = ((PyObject *)fff_matrix_toPyArray(__pyx_v_d)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 542; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_v_D = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":543 * fff_matrix_delete(c) * D = fff_matrix_toPyArray(d) * return D # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_D)); __pyx_r = ((PyObject *)__pyx_v_D); goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("nipy.labs.bindings.linalg.blas_dsyr2k", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_D); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_pf_5numpy_7ndarray___getbuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":194 * # experimental exception made for __getbuffer__ and __releasebuffer__ * # -- the details of this may change. * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< * # This implementation of getbuffer is geared towards Cython * # requirements, and does not yet fullfill the PEP. 
*/ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_v_copy_shape; int __pyx_v_i; int __pyx_v_ndim; int __pyx_v_endian_detector; int __pyx_v_little_endian; int __pyx_v_t; char *__pyx_v_f; PyArray_Descr *__pyx_v_descr = 0; int __pyx_v_offset; int __pyx_v_hasfields; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; char *__pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getbuffer__", 0); if (__pyx_v_info != NULL) { __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); } /* "numpy.pxd":200 * # of flags * * if info == NULL: return # <<<<<<<<<<<<<< * * cdef int copy_shape, i, ndim */ __pyx_t_1 = (__pyx_v_info == NULL); if (__pyx_t_1) { __pyx_r = 0; goto __pyx_L0; goto __pyx_L3; } __pyx_L3:; /* "numpy.pxd":203 * * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((&endian_detector)[0] != 0) * */ __pyx_v_endian_detector = 1; /* "numpy.pxd":204 * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * * ndim = PyArray_NDIM(self) */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "numpy.pxd":206 * cdef bint little_endian = ((&endian_detector)[0] != 0) * * ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<< * * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_v_ndim = PyArray_NDIM(__pyx_v_self); /* "numpy.pxd":208 * ndim = PyArray_NDIM(self) * * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * copy_shape = 1 * else: */ __pyx_t_1 = ((sizeof(npy_intp)) != (sizeof(Py_ssize_t))); if (__pyx_t_1) { /* "numpy.pxd":209 * * if sizeof(npy_intp) != sizeof(Py_ssize_t): * copy_shape = 1 # <<<<<<<<<<<<<< * else: * copy_shape = 0 */ __pyx_v_copy_shape = 1; goto __pyx_L4; } /*else*/ { /* "numpy.pxd":211 * copy_shape = 1 * else: * copy_shape = 0 # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) */ __pyx_v_copy_shape = 0; } __pyx_L4:; /* "numpy.pxd":213 * copy_shape = 0 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS); if (__pyx_t_1) { /* "numpy.pxd":214 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not C contiguous") * */ __pyx_t_2 = (!PyArray_CHKFLAGS(__pyx_v_self, NPY_C_CONTIGUOUS)); __pyx_t_3 = __pyx_t_2; } else { __pyx_t_3 = __pyx_t_1; } if (__pyx_t_3) { /* "numpy.pxd":215 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_2), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; 
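/* Buffer-protocol flag checks from numpy.pxd: a request for a C-contiguous buffer (and, just below, for a Fortran-contiguous one) is rejected with ValueError when the underlying ndarray does not carry the matching contiguity flag. */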
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L5; } __pyx_L5:; /* "numpy.pxd":217 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ __pyx_t_3 = ((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS); if (__pyx_t_3) { /* "numpy.pxd":218 * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not Fortran contiguous") * */ __pyx_t_1 = (!PyArray_CHKFLAGS(__pyx_v_self, NPY_F_CONTIGUOUS)); __pyx_t_2 = __pyx_t_1; } else { __pyx_t_2 = __pyx_t_3; } if (__pyx_t_2) { /* "numpy.pxd":219 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_4), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L6; } __pyx_L6:; /* "numpy.pxd":221 * raise ValueError(u"ndarray is not Fortran contiguous") * * info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<< * info.ndim = ndim * if copy_shape: */ __pyx_v_info->buf = PyArray_DATA(__pyx_v_self); /* "numpy.pxd":222 * * info.buf = PyArray_DATA(self) * info.ndim = ndim # <<<<<<<<<<<<<< * if copy_shape: * # Allocate new buffer for strides and shape info. */ __pyx_v_info->ndim = __pyx_v_ndim; /* "numpy.pxd":223 * info.buf = PyArray_DATA(self) * info.ndim = ndim * if copy_shape: # <<<<<<<<<<<<<< * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. */ if (__pyx_v_copy_shape) { /* "numpy.pxd":226 * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. * info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) # <<<<<<<<<<<<<< * info.shape = info.strides + ndim * for i in range(ndim): */ __pyx_v_info->strides = ((Py_ssize_t *)malloc((((sizeof(Py_ssize_t)) * ((size_t)__pyx_v_ndim)) * 2))); /* "numpy.pxd":227 * # This is allocated as one block, strides first. 
* info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) * info.shape = info.strides + ndim # <<<<<<<<<<<<<< * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] */ __pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim); /* "numpy.pxd":228 * info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) * info.shape = info.strides + ndim * for i in range(ndim): # <<<<<<<<<<<<<< * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] */ __pyx_t_5 = __pyx_v_ndim; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "numpy.pxd":229 * info.shape = info.strides + ndim * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<< * info.shape[i] = PyArray_DIMS(self)[i] * else: */ (__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]); /* "numpy.pxd":230 * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<< * else: * info.strides = PyArray_STRIDES(self) */ (__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]); } goto __pyx_L7; } /*else*/ { /* "numpy.pxd":232 * info.shape[i] = PyArray_DIMS(self)[i] * else: * info.strides = PyArray_STRIDES(self) # <<<<<<<<<<<<<< * info.shape = PyArray_DIMS(self) * info.suboffsets = NULL */ __pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self)); /* "numpy.pxd":233 * else: * info.strides = PyArray_STRIDES(self) * info.shape = PyArray_DIMS(self) # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) */ __pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(__pyx_v_self)); } __pyx_L7:; /* "numpy.pxd":234 * info.strides = PyArray_STRIDES(self) * info.shape = PyArray_DIMS(self) * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) */ __pyx_v_info->suboffsets = NULL; /* "numpy.pxd":235 * info.shape = PyArray_DIMS(self) * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<< * info.readonly = not PyArray_ISWRITEABLE(self) * */ __pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self); /* "numpy.pxd":236 * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<< * * cdef int t */ __pyx_v_info->readonly = (!PyArray_ISWRITEABLE(__pyx_v_self)); /* "numpy.pxd":239 * * cdef int t * cdef char* f = NULL # <<<<<<<<<<<<<< * cdef dtype descr = self.descr * cdef list stack */ __pyx_v_f = NULL; /* "numpy.pxd":240 * cdef int t * cdef char* f = NULL * cdef dtype descr = self.descr # <<<<<<<<<<<<<< * cdef list stack * cdef int offset */ __pyx_t_4 = ((PyObject *)__pyx_v_self->descr); __Pyx_INCREF(__pyx_t_4); __pyx_v_descr = ((PyArray_Descr *)__pyx_t_4); __pyx_t_4 = 0; /* "numpy.pxd":244 * cdef int offset * * cdef bint hasfields = PyDataType_HASFIELDS(descr) # <<<<<<<<<<<<<< * * if not hasfields and not copy_shape: */ __pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr); /* "numpy.pxd":246 * cdef bint hasfields = PyDataType_HASFIELDS(descr) * * if not hasfields and not copy_shape: # <<<<<<<<<<<<<< * # do not call releasebuffer * info.obj = None */ __pyx_t_2 = (!__pyx_v_hasfields); if (__pyx_t_2) { __pyx_t_3 = (!__pyx_v_copy_shape); __pyx_t_1 = __pyx_t_3; } else { __pyx_t_1 = __pyx_t_2; } if (__pyx_t_1) { /* "numpy.pxd":248 * if not hasfields and not copy_shape: * # do not call releasebuffer * info.obj = None # <<<<<<<<<<<<<< * else: * # need to call 
releasebuffer */ __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = Py_None; goto __pyx_L10; } /*else*/ { /* "numpy.pxd":251 * else: * # need to call releasebuffer * info.obj = self # <<<<<<<<<<<<<< * * if not hasfields: */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); } __pyx_L10:; /* "numpy.pxd":253 * info.obj = self * * if not hasfields: # <<<<<<<<<<<<<< * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or */ __pyx_t_1 = (!__pyx_v_hasfields); if (__pyx_t_1) { /* "numpy.pxd":254 * * if not hasfields: * t = descr.type_num # <<<<<<<<<<<<<< * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): */ __pyx_t_5 = __pyx_v_descr->type_num; __pyx_v_t = __pyx_t_5; /* "numpy.pxd":255 * if not hasfields: * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_1 = (__pyx_v_descr->byteorder == '>'); if (__pyx_t_1) { __pyx_t_2 = __pyx_v_little_endian; } else { __pyx_t_2 = __pyx_t_1; } if (!__pyx_t_2) { /* "numpy.pxd":256 * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" */ __pyx_t_1 = (__pyx_v_descr->byteorder == '<'); if (__pyx_t_1) { __pyx_t_3 = (!__pyx_v_little_endian); __pyx_t_7 = __pyx_t_3; } else { __pyx_t_7 = __pyx_t_1; } __pyx_t_1 = __pyx_t_7; } else { __pyx_t_1 = __pyx_t_2; } if (__pyx_t_1) { /* "numpy.pxd":257 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_6), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L12; } __pyx_L12:; /* "numpy.pxd":258 * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" */ __pyx_t_1 = (__pyx_v_t == NPY_BYTE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__b; goto __pyx_L13; } /* "numpy.pxd":259 * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" */ __pyx_t_1 = (__pyx_v_t == NPY_UBYTE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__B; goto __pyx_L13; } /* "numpy.pxd":260 * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" */ __pyx_t_1 = (__pyx_v_t == NPY_SHORT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__h; goto __pyx_L13; } /* "numpy.pxd":261 * elif t == NPY_UBYTE: f = "B" * elif t == 
NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" */ __pyx_t_1 = (__pyx_v_t == NPY_USHORT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__H; goto __pyx_L13; } /* "numpy.pxd":262 * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" */ __pyx_t_1 = (__pyx_v_t == NPY_INT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__i; goto __pyx_L13; } /* "numpy.pxd":263 * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" */ __pyx_t_1 = (__pyx_v_t == NPY_UINT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__I; goto __pyx_L13; } /* "numpy.pxd":264 * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" */ __pyx_t_1 = (__pyx_v_t == NPY_LONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__l; goto __pyx_L13; } /* "numpy.pxd":265 * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" */ __pyx_t_1 = (__pyx_v_t == NPY_ULONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__L; goto __pyx_L13; } /* "numpy.pxd":266 * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" */ __pyx_t_1 = (__pyx_v_t == NPY_LONGLONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__q; goto __pyx_L13; } /* "numpy.pxd":267 * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" */ __pyx_t_1 = (__pyx_v_t == NPY_ULONGLONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Q; goto __pyx_L13; } /* "numpy.pxd":268 * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" */ __pyx_t_1 = (__pyx_v_t == NPY_FLOAT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__f; goto __pyx_L13; } /* "numpy.pxd":269 * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" */ __pyx_t_1 = (__pyx_v_t == NPY_DOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__d; goto __pyx_L13; } /* "numpy.pxd":270 * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" */ __pyx_t_1 = (__pyx_v_t == NPY_LONGDOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__g; goto __pyx_L13; } /* "numpy.pxd":271 * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" */ __pyx_t_1 = (__pyx_v_t == NPY_CFLOAT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Zf; goto __pyx_L13; } /* "numpy.pxd":272 * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" */ __pyx_t_1 = (__pyx_v_t == NPY_CDOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Zd; goto __pyx_L13; } /* "numpy.pxd":273 * elif t 
== NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f = "O" * else: */ __pyx_t_1 = (__pyx_v_t == NPY_CLONGDOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Zg; goto __pyx_L13; } /* "numpy.pxd":274 * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ __pyx_t_1 = (__pyx_v_t == NPY_OBJECT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__O; goto __pyx_L13; } /*else*/ { /* "numpy.pxd":276 * elif t == NPY_OBJECT: f = "O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * info.format = f * return */ __pyx_t_4 = PyInt_FromLong(__pyx_v_t); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_8 = PyNumber_Remainder(((PyObject *)__pyx_kp_u_7), __pyx_t_4); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_8)); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_t_8)); __Pyx_GIVEREF(((PyObject *)__pyx_t_8)); __pyx_t_8 = 0; __pyx_t_8 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_8, 0, 0, 0); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L13:; /* "numpy.pxd":277 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f # <<<<<<<<<<<<<< * return * else: */ __pyx_v_info->format = __pyx_v_f; /* "numpy.pxd":278 * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f * return # <<<<<<<<<<<<<< * else: * info.format = stdlib.malloc(_buffer_format_string_len) */ __pyx_r = 0; goto __pyx_L0; goto __pyx_L11; } /*else*/ { /* "numpy.pxd":280 * return * else: * info.format = stdlib.malloc(_buffer_format_string_len) # <<<<<<<<<<<<<< * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 */ __pyx_v_info->format = ((char *)malloc(255)); /* "numpy.pxd":281 * else: * info.format = stdlib.malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment # <<<<<<<<<<<<<< * offset = 0 * f = _util_dtypestring(descr, info.format + 1, */ (__pyx_v_info->format[0]) = '^'; /* "numpy.pxd":282 * info.format = stdlib.malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 # <<<<<<<<<<<<<< * f = _util_dtypestring(descr, info.format + 1, * info.format + _buffer_format_string_len, */ __pyx_v_offset = 0; /* "numpy.pxd":285 * f = _util_dtypestring(descr, info.format + 1, * info.format + _buffer_format_string_len, * &offset) # <<<<<<<<<<<<<< * f[0] = c'\0' # Terminate format string * */ __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 255), (&__pyx_v_offset)); if (unlikely(__pyx_t_9 == NULL)) 
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 283; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_f = __pyx_t_9; /* "numpy.pxd":286 * info.format + _buffer_format_string_len, * &offset) * f[0] = c'\0' # Terminate format string # <<<<<<<<<<<<<< * * def __releasebuffer__(ndarray self, Py_buffer* info): */ (__pyx_v_f[0]) = '\x00'; } __pyx_L11:; __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { __Pyx_GOTREF(Py_None); __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; } __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_descr); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__releasebuffer__ (wrapper)", 0); __pyx_pf_5numpy_7ndarray_2__releasebuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info)); __Pyx_RefNannyFinishContext(); } /* "numpy.pxd":288 * f[0] = c'\0' # Terminate format string * * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< * if PyArray_HASFIELDS(self): * stdlib.free(info.format) */ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__releasebuffer__", 0); /* "numpy.pxd":289 * * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_t_1 = PyArray_HASFIELDS(__pyx_v_self); if (__pyx_t_1) { /* "numpy.pxd":290 * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): * stdlib.free(info.format) # <<<<<<<<<<<<<< * if sizeof(npy_intp) != sizeof(Py_ssize_t): * stdlib.free(info.strides) */ free(__pyx_v_info->format); goto __pyx_L3; } __pyx_L3:; /* "numpy.pxd":291 * if PyArray_HASFIELDS(self): * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * stdlib.free(info.strides) * # info.shape was stored after info.strides in the same block */ __pyx_t_1 = ((sizeof(npy_intp)) != (sizeof(Py_ssize_t))); if (__pyx_t_1) { /* "numpy.pxd":292 * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): * stdlib.free(info.strides) # <<<<<<<<<<<<<< * # info.shape was stored after info.strides in the same block * */ free(__pyx_v_info->strides); goto __pyx_L4; } __pyx_L4:; __Pyx_RefNannyFinishContext(); } /* "numpy.pxd":768 * ctypedef npy_cdouble complex_t * * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(1, a) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0); /* "numpy.pxd":769 * * cdef inline object PyArray_MultiIterNew1(a): * 
return PyArray_MultiIterNew(1, a) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew2(a, b): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 769; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":771 * return PyArray_MultiIterNew(1, a) * * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(2, a, b) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0); /* "numpy.pxd":772 * * cdef inline object PyArray_MultiIterNew2(a, b): * return PyArray_MultiIterNew(2, a, b) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew3(a, b, c): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 772; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":774 * return PyArray_MultiIterNew(2, a, b) * * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(3, a, b, c) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0); /* "numpy.pxd":775 * * cdef inline object PyArray_MultiIterNew3(a, b, c): * return PyArray_MultiIterNew(3, a, b, c) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 775; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":777 * return PyArray_MultiIterNew(3, a, b, c) * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(4, a, b, c, d) * */ static CYTHON_INLINE PyObject 
*__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0); /* "numpy.pxd":778 * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): * return PyArray_MultiIterNew(4, a, b, c, d) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 778; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":780 * return PyArray_MultiIterNew(4, a, b, c, d) * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(5, a, b, c, d, e) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0); /* "numpy.pxd":781 * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): * return PyArray_MultiIterNew(5, a, b, c, d, e) # <<<<<<<<<<<<<< * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 781; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":783 * return PyArray_MultiIterNew(5, a, b, c, d, e) * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< * # Recursive utility function used in __getbuffer__ to get format * # string. The new location in the format string is returned. 
*/ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) { PyArray_Descr *__pyx_v_child = 0; int __pyx_v_endian_detector; int __pyx_v_little_endian; PyObject *__pyx_v_fields = 0; PyObject *__pyx_v_childname = NULL; PyObject *__pyx_v_new_offset = NULL; PyObject *__pyx_v_t = NULL; char *__pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *(*__pyx_t_6)(PyObject *); int __pyx_t_7; int __pyx_t_8; int __pyx_t_9; int __pyx_t_10; long __pyx_t_11; char *__pyx_t_12; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_util_dtypestring", 0); /* "numpy.pxd":790 * cdef int delta_offset * cdef tuple i * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((&endian_detector)[0] != 0) * cdef tuple fields */ __pyx_v_endian_detector = 1; /* "numpy.pxd":791 * cdef tuple i * cdef int endian_detector = 1 * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * cdef tuple fields * */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "numpy.pxd":794 * cdef tuple fields * * for childname in descr.names: # <<<<<<<<<<<<<< * fields = descr.fields[childname] * child, new_offset = fields */ if (unlikely(((PyObject *)__pyx_v_descr->names) == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_1 = ((PyObject *)__pyx_v_descr->names); __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; for (;;) { if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif __Pyx_XDECREF(__pyx_v_childname); __pyx_v_childname = __pyx_t_3; __pyx_t_3 = 0; /* "numpy.pxd":795 * * for childname in descr.names: * fields = descr.fields[childname] # <<<<<<<<<<<<<< * child, new_offset = fields * */ __pyx_t_3 = PyObject_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (!__pyx_t_3) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected tuple, got %.200s", Py_TYPE(__pyx_t_3)->tp_name), 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_XDECREF(((PyObject *)__pyx_v_fields)); __pyx_v_fields = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "numpy.pxd":796 * for childname in descr.names: * fields = descr.fields[childname] * child, new_offset = fields # <<<<<<<<<<<<<< * * if (end - f) - (new_offset - offset[0]) < 15: */ if (likely(PyTuple_CheckExact(((PyObject *)__pyx_v_fields)))) { PyObject* sequence = ((PyObject *)__pyx_v_fields); #if CYTHON_COMPILING_IN_CPYTHON Py_ssize_t size = Py_SIZE(sequence); #else Py_ssize_t size = PySequence_Size(sequence); #endif if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else 
if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); #else __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else if (1) { __Pyx_RaiseNoneNotIterableError(); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } else { Py_ssize_t index = -1; __pyx_t_5 = PyObject_GetIter(((PyObject *)__pyx_v_fields)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = Py_TYPE(__pyx_t_5)->tp_iternext; index = 0; __pyx_t_3 = __pyx_t_6(__pyx_t_5); if (unlikely(!__pyx_t_3)) goto __pyx_L5_unpacking_failed; __Pyx_GOTREF(__pyx_t_3); index = 1; __pyx_t_4 = __pyx_t_6(__pyx_t_5); if (unlikely(!__pyx_t_4)) goto __pyx_L5_unpacking_failed; __Pyx_GOTREF(__pyx_t_4); if (__Pyx_IternextUnpackEndCheck(__pyx_t_6(__pyx_t_5), 2) < 0) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_6 = NULL; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L6_unpacking_done; __pyx_L5_unpacking_failed:; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_6 = NULL; if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_L6_unpacking_done:; } if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_XDECREF(((PyObject *)__pyx_v_child)); __pyx_v_child = ((PyArray_Descr *)__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_v_new_offset); __pyx_v_new_offset = __pyx_t_4; __pyx_t_4 = 0; /* "numpy.pxd":798 * child, new_offset = fields * * if (end - f) - (new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * */ __pyx_t_4 = PyInt_FromLong((__pyx_v_end - __pyx_v_f)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyInt_FromLong((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_3); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyNumber_Subtract(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyObject_RichCompare(__pyx_t_3, __pyx_int_15, Py_LT); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = 
__pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { /* "numpy.pxd":799 * * if (end - f) - (new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_t_5 = PyObject_Call(__pyx_builtin_RuntimeError, ((PyObject *)__pyx_k_tuple_9), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L7; } __pyx_L7:; /* "numpy.pxd":801 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_7 = (__pyx_v_child->byteorder == '>'); if (__pyx_t_7) { __pyx_t_8 = __pyx_v_little_endian; } else { __pyx_t_8 = __pyx_t_7; } if (!__pyx_t_8) { /* "numpy.pxd":802 * * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * # One could encode it in the format string and have Cython */ __pyx_t_7 = (__pyx_v_child->byteorder == '<'); if (__pyx_t_7) { __pyx_t_9 = (!__pyx_v_little_endian); __pyx_t_10 = __pyx_t_9; } else { __pyx_t_10 = __pyx_t_7; } __pyx_t_7 = __pyx_t_10; } else { __pyx_t_7 = __pyx_t_8; } if (__pyx_t_7) { /* "numpy.pxd":803 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_10), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L8; } __pyx_L8:; /* "numpy.pxd":813 * * # Output padding bytes * while offset[0] < new_offset: # <<<<<<<<<<<<<< * f[0] = 120 # "x"; pad byte * f += 1 */ while (1) { __pyx_t_5 = PyInt_FromLong((__pyx_v_offset[0])); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_t_5, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if 
(!__pyx_t_7) break; /* "numpy.pxd":814 * # Output padding bytes * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<< * f += 1 * offset[0] += 1 */ (__pyx_v_f[0]) = 120; /* "numpy.pxd":815 * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte * f += 1 # <<<<<<<<<<<<<< * offset[0] += 1 * */ __pyx_v_f = (__pyx_v_f + 1); /* "numpy.pxd":816 * f[0] = 120 # "x"; pad byte * f += 1 * offset[0] += 1 # <<<<<<<<<<<<<< * * offset[0] += child.itemsize */ __pyx_t_11 = 0; (__pyx_v_offset[__pyx_t_11]) = ((__pyx_v_offset[__pyx_t_11]) + 1); } /* "numpy.pxd":818 * offset[0] += 1 * * offset[0] += child.itemsize # <<<<<<<<<<<<<< * * if not PyDataType_HASFIELDS(child): */ __pyx_t_11 = 0; (__pyx_v_offset[__pyx_t_11]) = ((__pyx_v_offset[__pyx_t_11]) + __pyx_v_child->elsize); /* "numpy.pxd":820 * offset[0] += child.itemsize * * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< * t = child.type_num * if end - f < 5: */ __pyx_t_7 = (!PyDataType_HASFIELDS(__pyx_v_child)); if (__pyx_t_7) { /* "numpy.pxd":821 * * if not PyDataType_HASFIELDS(child): * t = child.type_num # <<<<<<<<<<<<<< * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") */ __pyx_t_3 = PyInt_FromLong(__pyx_v_child->type_num); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 821; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_XDECREF(__pyx_v_t); __pyx_v_t = __pyx_t_3; __pyx_t_3 = 0; /* "numpy.pxd":822 * if not PyDataType_HASFIELDS(child): * t = child.type_num * if end - f < 5: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short.") * */ __pyx_t_7 = ((__pyx_v_end - __pyx_v_f) < 5); if (__pyx_t_7) { /* "numpy.pxd":823 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_t_3 = PyObject_Call(__pyx_builtin_RuntimeError, ((PyObject *)__pyx_k_tuple_12), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L12; } __pyx_L12:; /* "numpy.pxd":826 * * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" */ __pyx_t_3 = PyInt_FromLong(NPY_BYTE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 98; goto __pyx_L13; } /* "numpy.pxd":827 * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" */ __pyx_t_5 = PyInt_FromLong(NPY_UBYTE); 
if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 66; goto __pyx_L13; } /* "numpy.pxd":828 * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" */ __pyx_t_3 = PyInt_FromLong(NPY_SHORT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 104; goto __pyx_L13; } /* "numpy.pxd":829 * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" */ __pyx_t_5 = PyInt_FromLong(NPY_USHORT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 72; goto __pyx_L13; } /* "numpy.pxd":830 * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" */ __pyx_t_3 = PyInt_FromLong(NPY_INT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 105; goto __pyx_L13; } /* "numpy.pxd":831 * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t 
== NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" */ __pyx_t_5 = PyInt_FromLong(NPY_UINT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 73; goto __pyx_L13; } /* "numpy.pxd":832 * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" */ __pyx_t_3 = PyInt_FromLong(NPY_LONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 108; goto __pyx_L13; } /* "numpy.pxd":833 * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" */ __pyx_t_5 = PyInt_FromLong(NPY_ULONG); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 76; goto __pyx_L13; } /* "numpy.pxd":834 * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" */ __pyx_t_3 = PyInt_FromLong(NPY_LONGLONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; 
__pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 113; goto __pyx_L13; } /* "numpy.pxd":835 * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" */ __pyx_t_5 = PyInt_FromLong(NPY_ULONGLONG); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 81; goto __pyx_L13; } /* "numpy.pxd":836 * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" */ __pyx_t_3 = PyInt_FromLong(NPY_FLOAT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 102; goto __pyx_L13; } /* "numpy.pxd":837 * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf */ __pyx_t_5 = PyInt_FromLong(NPY_DOUBLE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 100; goto __pyx_L13; } /* "numpy.pxd":838 * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd */ __pyx_t_3 = PyInt_FromLong(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = 
PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 103; goto __pyx_L13; } /* "numpy.pxd":839 * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg */ __pyx_t_5 = PyInt_FromLong(NPY_CFLOAT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 102; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L13; } /* "numpy.pxd":840 * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" */ __pyx_t_3 = PyInt_FromLong(NPY_CDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 100; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L13; } /* "numpy.pxd":841 * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: */ __pyx_t_5 = PyInt_FromLong(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 103; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L13; } /* "numpy.pxd":842 * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ __pyx_t_3 = PyInt_FromLong(NPY_OBJECT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 79; goto __pyx_L13; } /*else*/ { /* "numpy.pxd":844 * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * f += 1 * else: */ __pyx_t_5 = PyNumber_Remainder(((PyObject *)__pyx_kp_u_7), __pyx_v_t); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_5)); __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_t_5)); __Pyx_GIVEREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L13:; /* "numpy.pxd":845 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * f += 1 # <<<<<<<<<<<<<< * else: * # Cython ignores struct boundary information ("T{...}"), */ __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L11; } /*else*/ { /* "numpy.pxd":849 * # Cython ignores struct boundary information ("T{...}"), * # so don't output it * f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<< * return f * */ __pyx_t_12 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_12 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 849; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_f = __pyx_t_12; } __pyx_L11:; } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "numpy.pxd":850 * # so don't output it * f = _util_dtypestring(child, f, end, offset) * return f # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_f; goto __pyx_L0; __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_child); __Pyx_XDECREF(__pyx_v_fields); 
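/* Common exit for _util_dtypestring: release the remaining local object references before returning the advanced format pointer, or NULL when the error path was taken. */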
__Pyx_XDECREF(__pyx_v_childname); __Pyx_XDECREF(__pyx_v_new_offset); __Pyx_XDECREF(__pyx_v_t); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":965 * * * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< * cdef PyObject* baseptr * if base is None: */ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) { PyObject *__pyx_v_baseptr; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("set_array_base", 0); /* "numpy.pxd":967 * cdef inline void set_array_base(ndarray arr, object base): * cdef PyObject* baseptr * if base is None: # <<<<<<<<<<<<<< * baseptr = NULL * else: */ __pyx_t_1 = (__pyx_v_base == Py_None); if (__pyx_t_1) { /* "numpy.pxd":968 * cdef PyObject* baseptr * if base is None: * baseptr = NULL # <<<<<<<<<<<<<< * else: * Py_INCREF(base) # important to do this before decref below! */ __pyx_v_baseptr = NULL; goto __pyx_L3; } /*else*/ { /* "numpy.pxd":970 * baseptr = NULL * else: * Py_INCREF(base) # important to do this before decref below! # <<<<<<<<<<<<<< * baseptr = base * Py_XDECREF(arr.base) */ Py_INCREF(__pyx_v_base); /* "numpy.pxd":971 * else: * Py_INCREF(base) # important to do this before decref below! * baseptr = base # <<<<<<<<<<<<<< * Py_XDECREF(arr.base) * arr.base = baseptr */ __pyx_v_baseptr = ((PyObject *)__pyx_v_base); } __pyx_L3:; /* "numpy.pxd":972 * Py_INCREF(base) # important to do this before decref below! * baseptr = base * Py_XDECREF(arr.base) # <<<<<<<<<<<<<< * arr.base = baseptr * */ Py_XDECREF(__pyx_v_arr->base); /* "numpy.pxd":973 * baseptr = base * Py_XDECREF(arr.base) * arr.base = baseptr # <<<<<<<<<<<<<< * * cdef inline object get_array_base(ndarray arr): */ __pyx_v_arr->base = __pyx_v_baseptr; __Pyx_RefNannyFinishContext(); } /* "numpy.pxd":975 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("get_array_base", 0); /* "numpy.pxd":976 * * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: # <<<<<<<<<<<<<< * return None * else: */ __pyx_t_1 = (__pyx_v_arr->base == NULL); if (__pyx_t_1) { /* "numpy.pxd":977 * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: * return None # <<<<<<<<<<<<<< * else: * return arr.base */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_None); __pyx_r = Py_None; goto __pyx_L0; goto __pyx_L3; } /*else*/ { /* "numpy.pxd":979 * return None * else: * return arr.base # <<<<<<<<<<<<<< */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_arr->base)); __pyx_r = ((PyObject *)__pyx_v_arr->base); goto __pyx_L0; } __pyx_L3:; __pyx_r = Py_None; __Pyx_INCREF(Py_None); __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyMethodDef __pyx_methods[] = { {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 static struct PyModuleDef __pyx_moduledef = { PyModuleDef_HEAD_INIT, __Pyx_NAMESTR("linalg"), __Pyx_DOCSTR(__pyx_k_13), /* m_doc */ -1, /* m_size */ __pyx_methods /* m_methods */, NULL, /* m_reload */ NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_kp_u_1, __pyx_k_1, sizeof(__pyx_k_1), 0, 1, 0, 0}, {&__pyx_kp_u_11, __pyx_k_11, sizeof(__pyx_k_11), 0, 1, 0, 0}, {&__pyx_kp_s_14, __pyx_k_14, sizeof(__pyx_k_14), 0, 0, 1, 0}, 
{&__pyx_kp_s_17, __pyx_k_17, sizeof(__pyx_k_17), 0, 0, 1, 0}, {&__pyx_n_s_18, __pyx_k_18, sizeof(__pyx_k_18), 0, 0, 1, 1}, {&__pyx_kp_u_3, __pyx_k_3, sizeof(__pyx_k_3), 0, 1, 0, 0}, {&__pyx_kp_u_5, __pyx_k_5, sizeof(__pyx_k_5), 0, 1, 0, 0}, {&__pyx_kp_u_7, __pyx_k_7, sizeof(__pyx_k_7), 0, 1, 0, 0}, {&__pyx_kp_u_8, __pyx_k_8, sizeof(__pyx_k_8), 0, 1, 0, 0}, {&__pyx_n_s__A, __pyx_k__A, sizeof(__pyx_k__A), 0, 0, 1, 1}, {&__pyx_n_s__B, __pyx_k__B, sizeof(__pyx_k__B), 0, 0, 1, 1}, {&__pyx_n_s__C, __pyx_k__C, sizeof(__pyx_k__C), 0, 0, 1, 1}, {&__pyx_n_s__D, __pyx_k__D, sizeof(__pyx_k__D), 0, 0, 1, 1}, {&__pyx_n_s__Diag, __pyx_k__Diag, sizeof(__pyx_k__Diag), 0, 0, 1, 1}, {&__pyx_n_s__RuntimeError, __pyx_k__RuntimeError, sizeof(__pyx_k__RuntimeError), 0, 0, 1, 1}, {&__pyx_n_s__Side, __pyx_k__Side, sizeof(__pyx_k__Side), 0, 0, 1, 1}, {&__pyx_n_s__Trans, __pyx_k__Trans, sizeof(__pyx_k__Trans), 0, 0, 1, 1}, {&__pyx_n_s__TransA, __pyx_k__TransA, sizeof(__pyx_k__TransA), 0, 0, 1, 1}, {&__pyx_n_s__TransB, __pyx_k__TransB, sizeof(__pyx_k__TransB), 0, 0, 1, 1}, {&__pyx_n_s__Uplo, __pyx_k__Uplo, sizeof(__pyx_k__Uplo), 0, 0, 1, 1}, {&__pyx_n_s__ValueError, __pyx_k__ValueError, sizeof(__pyx_k__ValueError), 0, 0, 1, 1}, {&__pyx_n_s__X, __pyx_k__X, sizeof(__pyx_k__X), 0, 0, 1, 1}, {&__pyx_n_s__Y, __pyx_k__Y, sizeof(__pyx_k__Y), 0, 0, 1, 1}, {&__pyx_n_s__Z, __pyx_k__Z, sizeof(__pyx_k__Z), 0, 0, 1, 1}, {&__pyx_n_s____main__, __pyx_k____main__, sizeof(__pyx_k____main__), 0, 0, 1, 1}, {&__pyx_n_s____test__, __pyx_k____test__, sizeof(__pyx_k____test__), 0, 0, 1, 1}, {&__pyx_n_s____version__, __pyx_k____version__, sizeof(__pyx_k____version__), 0, 0, 1, 1}, {&__pyx_n_s__a, __pyx_k__a, sizeof(__pyx_k__a), 0, 0, 1, 1}, {&__pyx_n_s__aij, __pyx_k__aij, sizeof(__pyx_k__aij), 0, 0, 1, 1}, {&__pyx_n_s__alpha, __pyx_k__alpha, sizeof(__pyx_k__alpha), 0, 0, 1, 1}, {&__pyx_n_s__b, __pyx_k__b, sizeof(__pyx_k__b), 0, 0, 1, 1}, {&__pyx_n_s__beta, __pyx_k__beta, sizeof(__pyx_k__beta), 0, 0, 1, 1}, {&__pyx_n_s__blas_dasum, __pyx_k__blas_dasum, sizeof(__pyx_k__blas_dasum), 0, 0, 1, 1}, {&__pyx_n_s__blas_daxpy, __pyx_k__blas_daxpy, sizeof(__pyx_k__blas_daxpy), 0, 0, 1, 1}, {&__pyx_n_s__blas_ddot, __pyx_k__blas_ddot, sizeof(__pyx_k__blas_ddot), 0, 0, 1, 1}, {&__pyx_n_s__blas_dgemm, __pyx_k__blas_dgemm, sizeof(__pyx_k__blas_dgemm), 0, 0, 1, 1}, {&__pyx_n_s__blas_dnrm2, __pyx_k__blas_dnrm2, sizeof(__pyx_k__blas_dnrm2), 0, 0, 1, 1}, {&__pyx_n_s__blas_dscal, __pyx_k__blas_dscal, sizeof(__pyx_k__blas_dscal), 0, 0, 1, 1}, {&__pyx_n_s__blas_dsymm, __pyx_k__blas_dsymm, sizeof(__pyx_k__blas_dsymm), 0, 0, 1, 1}, {&__pyx_n_s__blas_dsyr2k, __pyx_k__blas_dsyr2k, sizeof(__pyx_k__blas_dsyr2k), 0, 0, 1, 1}, {&__pyx_n_s__blas_dsyrk, __pyx_k__blas_dsyrk, sizeof(__pyx_k__blas_dsyrk), 0, 0, 1, 1}, {&__pyx_n_s__blas_dtrmm, __pyx_k__blas_dtrmm, sizeof(__pyx_k__blas_dtrmm), 0, 0, 1, 1}, {&__pyx_n_s__blas_dtrsm, __pyx_k__blas_dtrsm, sizeof(__pyx_k__blas_dtrsm), 0, 0, 1, 1}, {&__pyx_n_s__c, __pyx_k__c, sizeof(__pyx_k__c), 0, 0, 1, 1}, {&__pyx_n_s__d, __pyx_k__d, sizeof(__pyx_k__d), 0, 0, 1, 1}, {&__pyx_n_s__fixed, __pyx_k__fixed, sizeof(__pyx_k__fixed), 0, 0, 1, 1}, {&__pyx_n_s__i, __pyx_k__i, sizeof(__pyx_k__i), 0, 0, 1, 1}, {&__pyx_n_s__interp, __pyx_k__interp, sizeof(__pyx_k__interp), 0, 0, 1, 1}, {&__pyx_n_s__j, __pyx_k__j, sizeof(__pyx_k__j), 0, 0, 1, 1}, {&__pyx_n_s__m, __pyx_k__m, sizeof(__pyx_k__m), 0, 0, 1, 1}, {&__pyx_n_s__matrix_add, __pyx_k__matrix_add, sizeof(__pyx_k__matrix_add), 0, 0, 1, 1}, {&__pyx_n_s__matrix_get, __pyx_k__matrix_get, 
sizeof(__pyx_k__matrix_get), 0, 0, 1, 1}, {&__pyx_n_s__matrix_transpose, __pyx_k__matrix_transpose, sizeof(__pyx_k__matrix_transpose), 0, 0, 1, 1}, {&__pyx_n_s__np, __pyx_k__np, sizeof(__pyx_k__np), 0, 0, 1, 1}, {&__pyx_n_s__numpy, __pyx_k__numpy, sizeof(__pyx_k__numpy), 0, 0, 1, 1}, {&__pyx_n_s__q, __pyx_k__q, sizeof(__pyx_k__q), 0, 0, 1, 1}, {&__pyx_n_s__r, __pyx_k__r, sizeof(__pyx_k__r), 0, 0, 1, 1}, {&__pyx_n_s__range, __pyx_k__range, sizeof(__pyx_k__range), 0, 0, 1, 1}, {&__pyx_n_s__s, __pyx_k__s, sizeof(__pyx_k__s), 0, 0, 1, 1}, {&__pyx_n_s__vector_add, __pyx_k__vector_add, sizeof(__pyx_k__vector_add), 0, 0, 1, 1}, {&__pyx_n_s__vector_add_constant, __pyx_k__vector_add_constant, sizeof(__pyx_k__vector_add_constant), 0, 0, 1, 1}, {&__pyx_n_s__vector_div, __pyx_k__vector_div, sizeof(__pyx_k__vector_div), 0, 0, 1, 1}, {&__pyx_n_s__vector_get, __pyx_k__vector_get, sizeof(__pyx_k__vector_get), 0, 0, 1, 1}, {&__pyx_n_s__vector_median, __pyx_k__vector_median, sizeof(__pyx_k__vector_median), 0, 0, 1, 1}, {&__pyx_n_s__vector_mul, __pyx_k__vector_mul, sizeof(__pyx_k__vector_mul), 0, 0, 1, 1}, {&__pyx_n_s__vector_quantile, __pyx_k__vector_quantile, sizeof(__pyx_k__vector_quantile), 0, 0, 1, 1}, {&__pyx_n_s__vector_sad, __pyx_k__vector_sad, sizeof(__pyx_k__vector_sad), 0, 0, 1, 1}, {&__pyx_n_s__vector_scale, __pyx_k__vector_scale, sizeof(__pyx_k__vector_scale), 0, 0, 1, 1}, {&__pyx_n_s__vector_set, __pyx_k__vector_set, sizeof(__pyx_k__vector_set), 0, 0, 1, 1}, {&__pyx_n_s__vector_set_all, __pyx_k__vector_set_all, sizeof(__pyx_k__vector_set_all), 0, 0, 1, 1}, {&__pyx_n_s__vector_ssd, __pyx_k__vector_ssd, sizeof(__pyx_k__vector_ssd), 0, 0, 1, 1}, {&__pyx_n_s__vector_sub, __pyx_k__vector_sub, sizeof(__pyx_k__vector_sub), 0, 0, 1, 1}, {&__pyx_n_s__vector_sum, __pyx_k__vector_sum, sizeof(__pyx_k__vector_sum), 0, 0, 1, 1}, {&__pyx_n_s__x, __pyx_k__x, sizeof(__pyx_k__x), 0, 0, 1, 1}, {&__pyx_n_s__xi, __pyx_k__xi, sizeof(__pyx_k__xi), 0, 0, 1, 1}, {&__pyx_n_s__y, __pyx_k__y, sizeof(__pyx_k__y), 0, 0, 1, 1}, {&__pyx_n_s__z, __pyx_k__z, sizeof(__pyx_k__z), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_ValueError = __Pyx_GetName(__pyx_b, __pyx_n_s__ValueError); if (!__pyx_builtin_ValueError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_range = __Pyx_GetName(__pyx_b, __pyx_n_s__range); if (!__pyx_builtin_range) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 228; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_RuntimeError = __Pyx_GetName(__pyx_b, __pyx_n_s__RuntimeError); if (!__pyx_builtin_RuntimeError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} return 0; __pyx_L1_error:; return -1; } static int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "numpy.pxd":215 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_k_tuple_2 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_2); __Pyx_INCREF(((PyObject *)__pyx_kp_u_1)); PyTuple_SET_ITEM(__pyx_k_tuple_2, 0, ((PyObject *)__pyx_kp_u_1)); __Pyx_GIVEREF(((PyObject 
*)__pyx_kp_u_1)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_2)); /* "numpy.pxd":219 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_k_tuple_4 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_4); __Pyx_INCREF(((PyObject *)__pyx_kp_u_3)); PyTuple_SET_ITEM(__pyx_k_tuple_4, 0, ((PyObject *)__pyx_kp_u_3)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_3)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_4)); /* "numpy.pxd":257 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_k_tuple_6 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_6); __Pyx_INCREF(((PyObject *)__pyx_kp_u_5)); PyTuple_SET_ITEM(__pyx_k_tuple_6, 0, ((PyObject *)__pyx_kp_u_5)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_5)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_6)); /* "numpy.pxd":799 * * if (end - f) - (new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_k_tuple_9 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_9)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_9); __Pyx_INCREF(((PyObject *)__pyx_kp_u_8)); PyTuple_SET_ITEM(__pyx_k_tuple_9, 0, ((PyObject *)__pyx_kp_u_8)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_8)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_9)); /* "numpy.pxd":803 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_k_tuple_10 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_10)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_10); __Pyx_INCREF(((PyObject *)__pyx_kp_u_5)); PyTuple_SET_ITEM(__pyx_k_tuple_10, 0, ((PyObject *)__pyx_kp_u_5)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_5)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_10)); /* "numpy.pxd":823 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_k_tuple_12 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_12)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_12); __Pyx_INCREF(((PyObject *)__pyx_kp_u_11)); PyTuple_SET_ITEM(__pyx_k_tuple_12, 0, ((PyObject *)__pyx_kp_u_11)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_11)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_12)); /* "nipy/labs/bindings/linalg.pyx":91 * * ## fff_vector.h * def vector_get(X, size_t i): # <<<<<<<<<<<<<< * """ * Get i-th element. 
*/ __pyx_k_tuple_15 = PyTuple_New(4); if (unlikely(!__pyx_k_tuple_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_15); __Pyx_INCREF(((PyObject *)__pyx_n_s__X)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 0, ((PyObject *)__pyx_n_s__X)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__X)); __Pyx_INCREF(((PyObject *)__pyx_n_s__i)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 1, ((PyObject *)__pyx_n_s__i)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__i)); __Pyx_INCREF(((PyObject *)__pyx_n_s__x)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 2, ((PyObject *)__pyx_n_s__x)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__x)); __Pyx_INCREF(((PyObject *)__pyx_n_s__xi)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 3, ((PyObject *)__pyx_n_s__xi)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__xi)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_15)); __pyx_k_codeobj_16 = (PyObject*)__Pyx_PyCode_New(2, 0, 4, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_15, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_17, __pyx_n_s__vector_get, 91, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_16)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/bindings/linalg.pyx":103 * return xi * * def vector_set(X, size_t i, double a): # <<<<<<<<<<<<<< * """ * Set i-th element. */ __pyx_k_tuple_19 = PyTuple_New(6); if (unlikely(!__pyx_k_tuple_19)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 103; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_19); __Pyx_INCREF(((PyObject *)__pyx_n_s__X)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 0, ((PyObject *)__pyx_n_s__X)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__X)); __Pyx_INCREF(((PyObject *)__pyx_n_s__i)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 1, ((PyObject *)__pyx_n_s__i)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__i)); __Pyx_INCREF(((PyObject *)__pyx_n_s__a)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 2, ((PyObject *)__pyx_n_s__a)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__a)); __Pyx_INCREF(((PyObject *)__pyx_n_s__x)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 3, ((PyObject *)__pyx_n_s__x)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__x)); __Pyx_INCREF(((PyObject *)__pyx_n_s__y)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 4, ((PyObject *)__pyx_n_s__y)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__y)); __Pyx_INCREF(((PyObject *)__pyx_n_s__Y)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 5, ((PyObject *)__pyx_n_s__Y)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Y)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_19)); __pyx_k_codeobj_20 = (PyObject*)__Pyx_PyCode_New(3, 0, 6, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_19, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_17, __pyx_n_s__vector_set, 103, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_20)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 103; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/bindings/linalg.pyx":117 * return Y * * def vector_set_all(X, double a): # <<<<<<<<<<<<<< * """ * Set to a constant value. 
*/ __pyx_k_tuple_21 = PyTuple_New(5); if (unlikely(!__pyx_k_tuple_21)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 117; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_21); __Pyx_INCREF(((PyObject *)__pyx_n_s__X)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 0, ((PyObject *)__pyx_n_s__X)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__X)); __Pyx_INCREF(((PyObject *)__pyx_n_s__a)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 1, ((PyObject *)__pyx_n_s__a)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__a)); __Pyx_INCREF(((PyObject *)__pyx_n_s__x)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 2, ((PyObject *)__pyx_n_s__x)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__x)); __Pyx_INCREF(((PyObject *)__pyx_n_s__y)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 3, ((PyObject *)__pyx_n_s__y)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__y)); __Pyx_INCREF(((PyObject *)__pyx_n_s__Y)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 4, ((PyObject *)__pyx_n_s__Y)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Y)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_21)); __pyx_k_codeobj_22 = (PyObject*)__Pyx_PyCode_New(2, 0, 5, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_21, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_17, __pyx_n_s__vector_set_all, 117, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_22)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 117; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/bindings/linalg.pyx":131 * return Y * * def vector_scale(X, double a): # <<<<<<<<<<<<<< * """ * Multiply by a constant value. */ __pyx_k_tuple_23 = PyTuple_New(5); if (unlikely(!__pyx_k_tuple_23)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 131; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_23); __Pyx_INCREF(((PyObject *)__pyx_n_s__X)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 0, ((PyObject *)__pyx_n_s__X)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__X)); __Pyx_INCREF(((PyObject *)__pyx_n_s__a)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 1, ((PyObject *)__pyx_n_s__a)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__a)); __Pyx_INCREF(((PyObject *)__pyx_n_s__x)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 2, ((PyObject *)__pyx_n_s__x)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__x)); __Pyx_INCREF(((PyObject *)__pyx_n_s__y)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 3, ((PyObject *)__pyx_n_s__y)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__y)); __Pyx_INCREF(((PyObject *)__pyx_n_s__Y)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 4, ((PyObject *)__pyx_n_s__Y)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Y)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_23)); __pyx_k_codeobj_24 = (PyObject*)__Pyx_PyCode_New(2, 0, 5, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_23, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_17, __pyx_n_s__vector_scale, 131, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_24)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 131; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/bindings/linalg.pyx":145 * return Y * * def vector_add_constant(X, double a): # <<<<<<<<<<<<<< * """ * Add a constant value. 
*/ __pyx_k_tuple_25 = PyTuple_New(5); if (unlikely(!__pyx_k_tuple_25)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 145; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_25); __Pyx_INCREF(((PyObject *)__pyx_n_s__X)); PyTuple_SET_ITEM(__pyx_k_tuple_25, 0, ((PyObject *)__pyx_n_s__X)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__X)); __Pyx_INCREF(((PyObject *)__pyx_n_s__a)); PyTuple_SET_ITEM(__pyx_k_tuple_25, 1, ((PyObject *)__pyx_n_s__a)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__a)); __Pyx_INCREF(((PyObject *)__pyx_n_s__x)); PyTuple_SET_ITEM(__pyx_k_tuple_25, 2, ((PyObject *)__pyx_n_s__x)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__x)); __Pyx_INCREF(((PyObject *)__pyx_n_s__y)); PyTuple_SET_ITEM(__pyx_k_tuple_25, 3, ((PyObject *)__pyx_n_s__y)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__y)); __Pyx_INCREF(((PyObject *)__pyx_n_s__Y)); PyTuple_SET_ITEM(__pyx_k_tuple_25, 4, ((PyObject *)__pyx_n_s__Y)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Y)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_25)); __pyx_k_codeobj_26 = (PyObject*)__Pyx_PyCode_New(2, 0, 5, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_25, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_17, __pyx_n_s__vector_add_constant, 145, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_26)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 145; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/bindings/linalg.pyx":159 * return Y * * def vector_add(X, Y): # <<<<<<<<<<<<<< * """ * Add two vectors. */ __pyx_k_tuple_27 = PyTuple_New(6); if (unlikely(!__pyx_k_tuple_27)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 159; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_27); __Pyx_INCREF(((PyObject *)__pyx_n_s__X)); PyTuple_SET_ITEM(__pyx_k_tuple_27, 0, ((PyObject *)__pyx_n_s__X)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__X)); __Pyx_INCREF(((PyObject *)__pyx_n_s__Y)); PyTuple_SET_ITEM(__pyx_k_tuple_27, 1, ((PyObject *)__pyx_n_s__Y)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Y)); __Pyx_INCREF(((PyObject *)__pyx_n_s__x)); PyTuple_SET_ITEM(__pyx_k_tuple_27, 2, ((PyObject *)__pyx_n_s__x)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__x)); __Pyx_INCREF(((PyObject *)__pyx_n_s__y)); PyTuple_SET_ITEM(__pyx_k_tuple_27, 3, ((PyObject *)__pyx_n_s__y)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__y)); __Pyx_INCREF(((PyObject *)__pyx_n_s__z)); PyTuple_SET_ITEM(__pyx_k_tuple_27, 4, ((PyObject *)__pyx_n_s__z)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__z)); __Pyx_INCREF(((PyObject *)__pyx_n_s__Z)); PyTuple_SET_ITEM(__pyx_k_tuple_27, 5, ((PyObject *)__pyx_n_s__Z)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Z)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_27)); __pyx_k_codeobj_28 = (PyObject*)__Pyx_PyCode_New(2, 0, 6, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_27, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_17, __pyx_n_s__vector_add, 159, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_28)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 159; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/bindings/linalg.pyx":175 * return Z * * def vector_sub(X, Y): # <<<<<<<<<<<<<< * """ * Substract two vectors: x - y */ __pyx_k_tuple_29 = PyTuple_New(6); if (unlikely(!__pyx_k_tuple_29)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 175; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_29); __Pyx_INCREF(((PyObject *)__pyx_n_s__X)); PyTuple_SET_ITEM(__pyx_k_tuple_29, 0, ((PyObject *)__pyx_n_s__X)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__X)); __Pyx_INCREF(((PyObject 
*)__pyx_n_s__Y)); PyTuple_SET_ITEM(__pyx_k_tuple_29, 1, ((PyObject *)__pyx_n_s__Y)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Y)); __Pyx_INCREF(((PyObject *)__pyx_n_s__x)); PyTuple_SET_ITEM(__pyx_k_tuple_29, 2, ((PyObject *)__pyx_n_s__x)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__x)); __Pyx_INCREF(((PyObject *)__pyx_n_s__y)); PyTuple_SET_ITEM(__pyx_k_tuple_29, 3, ((PyObject *)__pyx_n_s__y)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__y)); __Pyx_INCREF(((PyObject *)__pyx_n_s__z)); PyTuple_SET_ITEM(__pyx_k_tuple_29, 4, ((PyObject *)__pyx_n_s__z)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__z)); __Pyx_INCREF(((PyObject *)__pyx_n_s__Z)); PyTuple_SET_ITEM(__pyx_k_tuple_29, 5, ((PyObject *)__pyx_n_s__Z)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Z)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_29)); __pyx_k_codeobj_30 = (PyObject*)__Pyx_PyCode_New(2, 0, 6, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_29, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_17, __pyx_n_s__vector_sub, 175, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_30)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 175; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/bindings/linalg.pyx":191 * return Z * * def vector_mul(X, Y): # <<<<<<<<<<<<<< * """ * Element-wise multiplication. */ __pyx_k_tuple_31 = PyTuple_New(6); if (unlikely(!__pyx_k_tuple_31)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 191; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_31); __Pyx_INCREF(((PyObject *)__pyx_n_s__X)); PyTuple_SET_ITEM(__pyx_k_tuple_31, 0, ((PyObject *)__pyx_n_s__X)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__X)); __Pyx_INCREF(((PyObject *)__pyx_n_s__Y)); PyTuple_SET_ITEM(__pyx_k_tuple_31, 1, ((PyObject *)__pyx_n_s__Y)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Y)); __Pyx_INCREF(((PyObject *)__pyx_n_s__x)); PyTuple_SET_ITEM(__pyx_k_tuple_31, 2, ((PyObject *)__pyx_n_s__x)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__x)); __Pyx_INCREF(((PyObject *)__pyx_n_s__y)); PyTuple_SET_ITEM(__pyx_k_tuple_31, 3, ((PyObject *)__pyx_n_s__y)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__y)); __Pyx_INCREF(((PyObject *)__pyx_n_s__z)); PyTuple_SET_ITEM(__pyx_k_tuple_31, 4, ((PyObject *)__pyx_n_s__z)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__z)); __Pyx_INCREF(((PyObject *)__pyx_n_s__Z)); PyTuple_SET_ITEM(__pyx_k_tuple_31, 5, ((PyObject *)__pyx_n_s__Z)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Z)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_31)); __pyx_k_codeobj_32 = (PyObject*)__Pyx_PyCode_New(2, 0, 6, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_31, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_17, __pyx_n_s__vector_mul, 191, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_32)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 191; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/bindings/linalg.pyx":207 * return Z * * def vector_div(X, Y): # <<<<<<<<<<<<<< * """ * Element-wise division. 
*/ __pyx_k_tuple_33 = PyTuple_New(6); if (unlikely(!__pyx_k_tuple_33)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_33); __Pyx_INCREF(((PyObject *)__pyx_n_s__X)); PyTuple_SET_ITEM(__pyx_k_tuple_33, 0, ((PyObject *)__pyx_n_s__X)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__X)); __Pyx_INCREF(((PyObject *)__pyx_n_s__Y)); PyTuple_SET_ITEM(__pyx_k_tuple_33, 1, ((PyObject *)__pyx_n_s__Y)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Y)); __Pyx_INCREF(((PyObject *)__pyx_n_s__x)); PyTuple_SET_ITEM(__pyx_k_tuple_33, 2, ((PyObject *)__pyx_n_s__x)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__x)); __Pyx_INCREF(((PyObject *)__pyx_n_s__y)); PyTuple_SET_ITEM(__pyx_k_tuple_33, 3, ((PyObject *)__pyx_n_s__y)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__y)); __Pyx_INCREF(((PyObject *)__pyx_n_s__z)); PyTuple_SET_ITEM(__pyx_k_tuple_33, 4, ((PyObject *)__pyx_n_s__z)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__z)); __Pyx_INCREF(((PyObject *)__pyx_n_s__Z)); PyTuple_SET_ITEM(__pyx_k_tuple_33, 5, ((PyObject *)__pyx_n_s__Z)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Z)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_33)); __pyx_k_codeobj_34 = (PyObject*)__Pyx_PyCode_New(2, 0, 6, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_33, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_17, __pyx_n_s__vector_div, 207, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_34)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/bindings/linalg.pyx":224 * * * def vector_sum(X): # <<<<<<<<<<<<<< * """ * Sum up array elements. */ __pyx_k_tuple_35 = PyTuple_New(3); if (unlikely(!__pyx_k_tuple_35)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 224; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_35); __Pyx_INCREF(((PyObject *)__pyx_n_s__X)); PyTuple_SET_ITEM(__pyx_k_tuple_35, 0, ((PyObject *)__pyx_n_s__X)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__X)); __Pyx_INCREF(((PyObject *)__pyx_n_s__x)); PyTuple_SET_ITEM(__pyx_k_tuple_35, 1, ((PyObject *)__pyx_n_s__x)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__x)); __Pyx_INCREF(((PyObject *)__pyx_n_s__s)); PyTuple_SET_ITEM(__pyx_k_tuple_35, 2, ((PyObject *)__pyx_n_s__s)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__s)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_35)); __pyx_k_codeobj_36 = (PyObject*)__Pyx_PyCode_New(1, 0, 3, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_35, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_17, __pyx_n_s__vector_sum, 224, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_36)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 224; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/bindings/linalg.pyx":236 * return s * * def vector_ssd(X, double m=0, int fixed=1): # <<<<<<<<<<<<<< * """ * (Minimal) sum of squared differences. 
*/ __pyx_k_tuple_37 = PyTuple_New(5); if (unlikely(!__pyx_k_tuple_37)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 236; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_37); __Pyx_INCREF(((PyObject *)__pyx_n_s__X)); PyTuple_SET_ITEM(__pyx_k_tuple_37, 0, ((PyObject *)__pyx_n_s__X)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__X)); __Pyx_INCREF(((PyObject *)__pyx_n_s__m)); PyTuple_SET_ITEM(__pyx_k_tuple_37, 1, ((PyObject *)__pyx_n_s__m)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__m)); __Pyx_INCREF(((PyObject *)__pyx_n_s__fixed)); PyTuple_SET_ITEM(__pyx_k_tuple_37, 2, ((PyObject *)__pyx_n_s__fixed)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__fixed)); __Pyx_INCREF(((PyObject *)__pyx_n_s__x)); PyTuple_SET_ITEM(__pyx_k_tuple_37, 3, ((PyObject *)__pyx_n_s__x)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__x)); __Pyx_INCREF(((PyObject *)__pyx_n_s__s)); PyTuple_SET_ITEM(__pyx_k_tuple_37, 4, ((PyObject *)__pyx_n_s__s)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__s)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_37)); __pyx_k_codeobj_38 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_37, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_17, __pyx_n_s__vector_ssd, 236, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_38)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 236; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/bindings/linalg.pyx":248 * return s * * def vector_sad(X, double m=0): # <<<<<<<<<<<<<< * """ * Sum of absolute differences. */ __pyx_k_tuple_39 = PyTuple_New(4); if (unlikely(!__pyx_k_tuple_39)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 248; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_39); __Pyx_INCREF(((PyObject *)__pyx_n_s__X)); PyTuple_SET_ITEM(__pyx_k_tuple_39, 0, ((PyObject *)__pyx_n_s__X)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__X)); __Pyx_INCREF(((PyObject *)__pyx_n_s__m)); PyTuple_SET_ITEM(__pyx_k_tuple_39, 1, ((PyObject *)__pyx_n_s__m)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__m)); __Pyx_INCREF(((PyObject *)__pyx_n_s__x)); PyTuple_SET_ITEM(__pyx_k_tuple_39, 2, ((PyObject *)__pyx_n_s__x)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__x)); __Pyx_INCREF(((PyObject *)__pyx_n_s__s)); PyTuple_SET_ITEM(__pyx_k_tuple_39, 3, ((PyObject *)__pyx_n_s__s)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__s)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_39)); __pyx_k_codeobj_40 = (PyObject*)__Pyx_PyCode_New(2, 0, 4, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_39, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_17, __pyx_n_s__vector_sad, 248, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_40)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 248; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/bindings/linalg.pyx":260 * return s * * def vector_median(X): # <<<<<<<<<<<<<< * """ * Median. 
*/ __pyx_k_tuple_41 = PyTuple_New(3); if (unlikely(!__pyx_k_tuple_41)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 260; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_41); __Pyx_INCREF(((PyObject *)__pyx_n_s__X)); PyTuple_SET_ITEM(__pyx_k_tuple_41, 0, ((PyObject *)__pyx_n_s__X)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__X)); __Pyx_INCREF(((PyObject *)__pyx_n_s__x)); PyTuple_SET_ITEM(__pyx_k_tuple_41, 1, ((PyObject *)__pyx_n_s__x)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__x)); __Pyx_INCREF(((PyObject *)__pyx_n_s__m)); PyTuple_SET_ITEM(__pyx_k_tuple_41, 2, ((PyObject *)__pyx_n_s__m)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__m)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_41)); __pyx_k_codeobj_42 = (PyObject*)__Pyx_PyCode_New(1, 0, 3, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_41, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_17, __pyx_n_s__vector_median, 260, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_42)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 260; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/bindings/linalg.pyx":272 * return m * * def vector_quantile(X, double r, int interp): # <<<<<<<<<<<<<< * """ * Quantile. */ __pyx_k_tuple_43 = PyTuple_New(5); if (unlikely(!__pyx_k_tuple_43)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_43); __Pyx_INCREF(((PyObject *)__pyx_n_s__X)); PyTuple_SET_ITEM(__pyx_k_tuple_43, 0, ((PyObject *)__pyx_n_s__X)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__X)); __Pyx_INCREF(((PyObject *)__pyx_n_s__r)); PyTuple_SET_ITEM(__pyx_k_tuple_43, 1, ((PyObject *)__pyx_n_s__r)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__r)); __Pyx_INCREF(((PyObject *)__pyx_n_s__interp)); PyTuple_SET_ITEM(__pyx_k_tuple_43, 2, ((PyObject *)__pyx_n_s__interp)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__interp)); __Pyx_INCREF(((PyObject *)__pyx_n_s__x)); PyTuple_SET_ITEM(__pyx_k_tuple_43, 3, ((PyObject *)__pyx_n_s__x)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__x)); __Pyx_INCREF(((PyObject *)__pyx_n_s__q)); PyTuple_SET_ITEM(__pyx_k_tuple_43, 4, ((PyObject *)__pyx_n_s__q)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__q)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_43)); __pyx_k_codeobj_44 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_43, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_17, __pyx_n_s__vector_quantile, 272, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_44)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/bindings/linalg.pyx":286 * * ## fff_matrix.h * def matrix_get(A, size_t i, size_t j): # <<<<<<<<<<<<<< * """ * Get (i,j) element. 
*/ __pyx_k_tuple_45 = PyTuple_New(5); if (unlikely(!__pyx_k_tuple_45)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 286; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_45); __Pyx_INCREF(((PyObject *)__pyx_n_s__A)); PyTuple_SET_ITEM(__pyx_k_tuple_45, 0, ((PyObject *)__pyx_n_s__A)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__A)); __Pyx_INCREF(((PyObject *)__pyx_n_s__i)); PyTuple_SET_ITEM(__pyx_k_tuple_45, 1, ((PyObject *)__pyx_n_s__i)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__i)); __Pyx_INCREF(((PyObject *)__pyx_n_s__j)); PyTuple_SET_ITEM(__pyx_k_tuple_45, 2, ((PyObject *)__pyx_n_s__j)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__j)); __Pyx_INCREF(((PyObject *)__pyx_n_s__a)); PyTuple_SET_ITEM(__pyx_k_tuple_45, 3, ((PyObject *)__pyx_n_s__a)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__a)); __Pyx_INCREF(((PyObject *)__pyx_n_s__aij)); PyTuple_SET_ITEM(__pyx_k_tuple_45, 4, ((PyObject *)__pyx_n_s__aij)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__aij)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_45)); __pyx_k_codeobj_46 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_45, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_17, __pyx_n_s__matrix_get, 286, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_46)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 286; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/bindings/linalg.pyx":298 * return aij * * def matrix_transpose(A): # <<<<<<<<<<<<<< * """ * Transpose a matrix. */ __pyx_k_tuple_47 = PyTuple_New(4); if (unlikely(!__pyx_k_tuple_47)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 298; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_47); __Pyx_INCREF(((PyObject *)__pyx_n_s__A)); PyTuple_SET_ITEM(__pyx_k_tuple_47, 0, ((PyObject *)__pyx_n_s__A)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__A)); __Pyx_INCREF(((PyObject *)__pyx_n_s__a)); PyTuple_SET_ITEM(__pyx_k_tuple_47, 1, ((PyObject *)__pyx_n_s__a)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__a)); __Pyx_INCREF(((PyObject *)__pyx_n_s__b)); PyTuple_SET_ITEM(__pyx_k_tuple_47, 2, ((PyObject *)__pyx_n_s__b)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__b)); __Pyx_INCREF(((PyObject *)__pyx_n_s__B)); PyTuple_SET_ITEM(__pyx_k_tuple_47, 3, ((PyObject *)__pyx_n_s__B)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__B)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_47)); __pyx_k_codeobj_48 = (PyObject*)__Pyx_PyCode_New(1, 0, 4, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_47, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_17, __pyx_n_s__matrix_transpose, 298, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_48)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 298; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/bindings/linalg.pyx":311 * return B * * def matrix_add(A, B): # <<<<<<<<<<<<<< * """ * C = matrix_add(A, B) */ __pyx_k_tuple_49 = PyTuple_New(6); if (unlikely(!__pyx_k_tuple_49)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 311; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_49); __Pyx_INCREF(((PyObject *)__pyx_n_s__A)); PyTuple_SET_ITEM(__pyx_k_tuple_49, 0, ((PyObject *)__pyx_n_s__A)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__A)); __Pyx_INCREF(((PyObject *)__pyx_n_s__B)); PyTuple_SET_ITEM(__pyx_k_tuple_49, 1, ((PyObject *)__pyx_n_s__B)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__B)); __Pyx_INCREF(((PyObject *)__pyx_n_s__a)); PyTuple_SET_ITEM(__pyx_k_tuple_49, 2, ((PyObject *)__pyx_n_s__a)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__a)); __Pyx_INCREF(((PyObject 
*)__pyx_n_s__b)); PyTuple_SET_ITEM(__pyx_k_tuple_49, 3, ((PyObject *)__pyx_n_s__b)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__b)); __Pyx_INCREF(((PyObject *)__pyx_n_s__c)); PyTuple_SET_ITEM(__pyx_k_tuple_49, 4, ((PyObject *)__pyx_n_s__c)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__c)); __Pyx_INCREF(((PyObject *)__pyx_n_s__C)); PyTuple_SET_ITEM(__pyx_k_tuple_49, 5, ((PyObject *)__pyx_n_s__C)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__C)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_49)); __pyx_k_codeobj_50 = (PyObject*)__Pyx_PyCode_New(2, 0, 6, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_49, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_17, __pyx_n_s__matrix_add, 311, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_50)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 311; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/bindings/linalg.pyx":360 * * ### BLAS 1 * def blas_dnrm2(X): # <<<<<<<<<<<<<< * cdef fff_vector *x * x = fff_vector_fromPyArray(X) */ __pyx_k_tuple_51 = PyTuple_New(2); if (unlikely(!__pyx_k_tuple_51)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 360; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_51); __Pyx_INCREF(((PyObject *)__pyx_n_s__X)); PyTuple_SET_ITEM(__pyx_k_tuple_51, 0, ((PyObject *)__pyx_n_s__X)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__X)); __Pyx_INCREF(((PyObject *)__pyx_n_s__x)); PyTuple_SET_ITEM(__pyx_k_tuple_51, 1, ((PyObject *)__pyx_n_s__x)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__x)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_51)); __pyx_k_codeobj_52 = (PyObject*)__Pyx_PyCode_New(1, 0, 2, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_51, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_17, __pyx_n_s__blas_dnrm2, 360, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_52)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 360; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/bindings/linalg.pyx":365 * return fff_blas_dnrm2(x) * * def blas_dasum(X): # <<<<<<<<<<<<<< * cdef fff_vector *x * x = fff_vector_fromPyArray(X) */ __pyx_k_tuple_53 = PyTuple_New(2); if (unlikely(!__pyx_k_tuple_53)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 365; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_53); __Pyx_INCREF(((PyObject *)__pyx_n_s__X)); PyTuple_SET_ITEM(__pyx_k_tuple_53, 0, ((PyObject *)__pyx_n_s__X)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__X)); __Pyx_INCREF(((PyObject *)__pyx_n_s__x)); PyTuple_SET_ITEM(__pyx_k_tuple_53, 1, ((PyObject *)__pyx_n_s__x)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__x)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_53)); __pyx_k_codeobj_54 = (PyObject*)__Pyx_PyCode_New(1, 0, 2, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_53, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_17, __pyx_n_s__blas_dasum, 365, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_54)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 365; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/bindings/linalg.pyx":370 * return fff_blas_dasum(x) * * def blas_ddot(X, Y): # <<<<<<<<<<<<<< * cdef fff_vector *x, *y * x = fff_vector_fromPyArray(X) */ __pyx_k_tuple_55 = PyTuple_New(4); if (unlikely(!__pyx_k_tuple_55)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 370; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_55); __Pyx_INCREF(((PyObject *)__pyx_n_s__X)); PyTuple_SET_ITEM(__pyx_k_tuple_55, 0, ((PyObject *)__pyx_n_s__X)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__X)); __Pyx_INCREF(((PyObject 
*)__pyx_n_s__Y)); PyTuple_SET_ITEM(__pyx_k_tuple_55, 1, ((PyObject *)__pyx_n_s__Y)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Y)); __Pyx_INCREF(((PyObject *)__pyx_n_s__x)); PyTuple_SET_ITEM(__pyx_k_tuple_55, 2, ((PyObject *)__pyx_n_s__x)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__x)); __Pyx_INCREF(((PyObject *)__pyx_n_s__y)); PyTuple_SET_ITEM(__pyx_k_tuple_55, 3, ((PyObject *)__pyx_n_s__y)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__y)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_55)); __pyx_k_codeobj_56 = (PyObject*)__Pyx_PyCode_New(2, 0, 4, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_55, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_17, __pyx_n_s__blas_ddot, 370, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_56)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 370; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/bindings/linalg.pyx":376 * return fff_blas_ddot(x, y) * * def blas_daxpy(double alpha, X, Y): # <<<<<<<<<<<<<< * cdef fff_vector *x, *y, *z * x = fff_vector_fromPyArray(X) */ __pyx_k_tuple_57 = PyTuple_New(7); if (unlikely(!__pyx_k_tuple_57)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 376; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_57); __Pyx_INCREF(((PyObject *)__pyx_n_s__alpha)); PyTuple_SET_ITEM(__pyx_k_tuple_57, 0, ((PyObject *)__pyx_n_s__alpha)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__alpha)); __Pyx_INCREF(((PyObject *)__pyx_n_s__X)); PyTuple_SET_ITEM(__pyx_k_tuple_57, 1, ((PyObject *)__pyx_n_s__X)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__X)); __Pyx_INCREF(((PyObject *)__pyx_n_s__Y)); PyTuple_SET_ITEM(__pyx_k_tuple_57, 2, ((PyObject *)__pyx_n_s__Y)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Y)); __Pyx_INCREF(((PyObject *)__pyx_n_s__x)); PyTuple_SET_ITEM(__pyx_k_tuple_57, 3, ((PyObject *)__pyx_n_s__x)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__x)); __Pyx_INCREF(((PyObject *)__pyx_n_s__y)); PyTuple_SET_ITEM(__pyx_k_tuple_57, 4, ((PyObject *)__pyx_n_s__y)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__y)); __Pyx_INCREF(((PyObject *)__pyx_n_s__z)); PyTuple_SET_ITEM(__pyx_k_tuple_57, 5, ((PyObject *)__pyx_n_s__z)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__z)); __Pyx_INCREF(((PyObject *)__pyx_n_s__Z)); PyTuple_SET_ITEM(__pyx_k_tuple_57, 6, ((PyObject *)__pyx_n_s__Z)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Z)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_57)); __pyx_k_codeobj_58 = (PyObject*)__Pyx_PyCode_New(3, 0, 7, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_57, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_17, __pyx_n_s__blas_daxpy, 376, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_58)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 376; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/bindings/linalg.pyx":386 * return Z * * def blas_dscal(double alpha, X): # <<<<<<<<<<<<<< * cdef fff_vector *x, *y * x = fff_vector_fromPyArray(X) */ __pyx_k_tuple_59 = PyTuple_New(5); if (unlikely(!__pyx_k_tuple_59)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 386; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_59); __Pyx_INCREF(((PyObject *)__pyx_n_s__alpha)); PyTuple_SET_ITEM(__pyx_k_tuple_59, 0, ((PyObject *)__pyx_n_s__alpha)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__alpha)); __Pyx_INCREF(((PyObject *)__pyx_n_s__X)); PyTuple_SET_ITEM(__pyx_k_tuple_59, 1, ((PyObject *)__pyx_n_s__X)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__X)); __Pyx_INCREF(((PyObject *)__pyx_n_s__x)); PyTuple_SET_ITEM(__pyx_k_tuple_59, 2, ((PyObject *)__pyx_n_s__x)); __Pyx_GIVEREF(((PyObject 
*)__pyx_n_s__x)); __Pyx_INCREF(((PyObject *)__pyx_n_s__y)); PyTuple_SET_ITEM(__pyx_k_tuple_59, 3, ((PyObject *)__pyx_n_s__y)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__y)); __Pyx_INCREF(((PyObject *)__pyx_n_s__Y)); PyTuple_SET_ITEM(__pyx_k_tuple_59, 4, ((PyObject *)__pyx_n_s__Y)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Y)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_59)); __pyx_k_codeobj_60 = (PyObject*)__Pyx_PyCode_New(2, 0, 5, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_59, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_17, __pyx_n_s__blas_dscal, 386, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_60)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 386; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/bindings/linalg.pyx":398 * * ### BLAS 3 * def blas_dgemm(int TransA, int TransB, double alpha, A, B, double beta, C): # <<<<<<<<<<<<<< * """ * D = blas_dgemm(int TransA, int TransB, double alpha, A, B, double beta, C). */ __pyx_k_tuple_61 = PyTuple_New(12); if (unlikely(!__pyx_k_tuple_61)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 398; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_61); __Pyx_INCREF(((PyObject *)__pyx_n_s__TransA)); PyTuple_SET_ITEM(__pyx_k_tuple_61, 0, ((PyObject *)__pyx_n_s__TransA)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__TransA)); __Pyx_INCREF(((PyObject *)__pyx_n_s__TransB)); PyTuple_SET_ITEM(__pyx_k_tuple_61, 1, ((PyObject *)__pyx_n_s__TransB)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__TransB)); __Pyx_INCREF(((PyObject *)__pyx_n_s__alpha)); PyTuple_SET_ITEM(__pyx_k_tuple_61, 2, ((PyObject *)__pyx_n_s__alpha)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__alpha)); __Pyx_INCREF(((PyObject *)__pyx_n_s__A)); PyTuple_SET_ITEM(__pyx_k_tuple_61, 3, ((PyObject *)__pyx_n_s__A)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__A)); __Pyx_INCREF(((PyObject *)__pyx_n_s__B)); PyTuple_SET_ITEM(__pyx_k_tuple_61, 4, ((PyObject *)__pyx_n_s__B)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__B)); __Pyx_INCREF(((PyObject *)__pyx_n_s__beta)); PyTuple_SET_ITEM(__pyx_k_tuple_61, 5, ((PyObject *)__pyx_n_s__beta)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__beta)); __Pyx_INCREF(((PyObject *)__pyx_n_s__C)); PyTuple_SET_ITEM(__pyx_k_tuple_61, 6, ((PyObject *)__pyx_n_s__C)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__C)); __Pyx_INCREF(((PyObject *)__pyx_n_s__a)); PyTuple_SET_ITEM(__pyx_k_tuple_61, 7, ((PyObject *)__pyx_n_s__a)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__a)); __Pyx_INCREF(((PyObject *)__pyx_n_s__b)); PyTuple_SET_ITEM(__pyx_k_tuple_61, 8, ((PyObject *)__pyx_n_s__b)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__b)); __Pyx_INCREF(((PyObject *)__pyx_n_s__c)); PyTuple_SET_ITEM(__pyx_k_tuple_61, 9, ((PyObject *)__pyx_n_s__c)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__c)); __Pyx_INCREF(((PyObject *)__pyx_n_s__d)); PyTuple_SET_ITEM(__pyx_k_tuple_61, 10, ((PyObject *)__pyx_n_s__d)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__d)); __Pyx_INCREF(((PyObject *)__pyx_n_s__D)); PyTuple_SET_ITEM(__pyx_k_tuple_61, 11, ((PyObject *)__pyx_n_s__D)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__D)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_61)); __pyx_k_codeobj_62 = (PyObject*)__Pyx_PyCode_New(7, 0, 12, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_61, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_17, __pyx_n_s__blas_dgemm, 398, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_62)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 398; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/bindings/linalg.pyx":420 * * * def blas_dsymm(int Side, int Uplo, 
double alpha, A, B, beta, C): # <<<<<<<<<<<<<< * """ * D = blas_dsymm(int Side, int Uplo, double alpha, A, B, beta, C). */ __pyx_k_tuple_63 = PyTuple_New(12); if (unlikely(!__pyx_k_tuple_63)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 420; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_63); __Pyx_INCREF(((PyObject *)__pyx_n_s__Side)); PyTuple_SET_ITEM(__pyx_k_tuple_63, 0, ((PyObject *)__pyx_n_s__Side)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Side)); __Pyx_INCREF(((PyObject *)__pyx_n_s__Uplo)); PyTuple_SET_ITEM(__pyx_k_tuple_63, 1, ((PyObject *)__pyx_n_s__Uplo)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Uplo)); __Pyx_INCREF(((PyObject *)__pyx_n_s__alpha)); PyTuple_SET_ITEM(__pyx_k_tuple_63, 2, ((PyObject *)__pyx_n_s__alpha)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__alpha)); __Pyx_INCREF(((PyObject *)__pyx_n_s__A)); PyTuple_SET_ITEM(__pyx_k_tuple_63, 3, ((PyObject *)__pyx_n_s__A)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__A)); __Pyx_INCREF(((PyObject *)__pyx_n_s__B)); PyTuple_SET_ITEM(__pyx_k_tuple_63, 4, ((PyObject *)__pyx_n_s__B)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__B)); __Pyx_INCREF(((PyObject *)__pyx_n_s__beta)); PyTuple_SET_ITEM(__pyx_k_tuple_63, 5, ((PyObject *)__pyx_n_s__beta)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__beta)); __Pyx_INCREF(((PyObject *)__pyx_n_s__C)); PyTuple_SET_ITEM(__pyx_k_tuple_63, 6, ((PyObject *)__pyx_n_s__C)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__C)); __Pyx_INCREF(((PyObject *)__pyx_n_s__a)); PyTuple_SET_ITEM(__pyx_k_tuple_63, 7, ((PyObject *)__pyx_n_s__a)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__a)); __Pyx_INCREF(((PyObject *)__pyx_n_s__b)); PyTuple_SET_ITEM(__pyx_k_tuple_63, 8, ((PyObject *)__pyx_n_s__b)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__b)); __Pyx_INCREF(((PyObject *)__pyx_n_s__c)); PyTuple_SET_ITEM(__pyx_k_tuple_63, 9, ((PyObject *)__pyx_n_s__c)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__c)); __Pyx_INCREF(((PyObject *)__pyx_n_s__d)); PyTuple_SET_ITEM(__pyx_k_tuple_63, 10, ((PyObject *)__pyx_n_s__d)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__d)); __Pyx_INCREF(((PyObject *)__pyx_n_s__D)); PyTuple_SET_ITEM(__pyx_k_tuple_63, 11, ((PyObject *)__pyx_n_s__D)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__D)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_63)); __pyx_k_codeobj_64 = (PyObject*)__Pyx_PyCode_New(7, 0, 12, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_63, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_17, __pyx_n_s__blas_dsymm, 420, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_64)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 420; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/bindings/linalg.pyx":444 * return D * * def blas_dtrmm(int Side, int Uplo, int TransA, int Diag, double alpha, A, B): # <<<<<<<<<<<<<< * """ * C = blas_dtrmm(int Side, int Uplo, int TransA, int Diag, double alpha, A, B). 
*/ __pyx_k_tuple_65 = PyTuple_New(11); if (unlikely(!__pyx_k_tuple_65)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 444; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_65); __Pyx_INCREF(((PyObject *)__pyx_n_s__Side)); PyTuple_SET_ITEM(__pyx_k_tuple_65, 0, ((PyObject *)__pyx_n_s__Side)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Side)); __Pyx_INCREF(((PyObject *)__pyx_n_s__Uplo)); PyTuple_SET_ITEM(__pyx_k_tuple_65, 1, ((PyObject *)__pyx_n_s__Uplo)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Uplo)); __Pyx_INCREF(((PyObject *)__pyx_n_s__TransA)); PyTuple_SET_ITEM(__pyx_k_tuple_65, 2, ((PyObject *)__pyx_n_s__TransA)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__TransA)); __Pyx_INCREF(((PyObject *)__pyx_n_s__Diag)); PyTuple_SET_ITEM(__pyx_k_tuple_65, 3, ((PyObject *)__pyx_n_s__Diag)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Diag)); __Pyx_INCREF(((PyObject *)__pyx_n_s__alpha)); PyTuple_SET_ITEM(__pyx_k_tuple_65, 4, ((PyObject *)__pyx_n_s__alpha)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__alpha)); __Pyx_INCREF(((PyObject *)__pyx_n_s__A)); PyTuple_SET_ITEM(__pyx_k_tuple_65, 5, ((PyObject *)__pyx_n_s__A)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__A)); __Pyx_INCREF(((PyObject *)__pyx_n_s__B)); PyTuple_SET_ITEM(__pyx_k_tuple_65, 6, ((PyObject *)__pyx_n_s__B)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__B)); __Pyx_INCREF(((PyObject *)__pyx_n_s__a)); PyTuple_SET_ITEM(__pyx_k_tuple_65, 7, ((PyObject *)__pyx_n_s__a)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__a)); __Pyx_INCREF(((PyObject *)__pyx_n_s__b)); PyTuple_SET_ITEM(__pyx_k_tuple_65, 8, ((PyObject *)__pyx_n_s__b)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__b)); __Pyx_INCREF(((PyObject *)__pyx_n_s__c)); PyTuple_SET_ITEM(__pyx_k_tuple_65, 9, ((PyObject *)__pyx_n_s__c)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__c)); __Pyx_INCREF(((PyObject *)__pyx_n_s__C)); PyTuple_SET_ITEM(__pyx_k_tuple_65, 10, ((PyObject *)__pyx_n_s__C)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__C)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_65)); __pyx_k_codeobj_66 = (PyObject*)__Pyx_PyCode_New(7, 0, 11, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_65, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_17, __pyx_n_s__blas_dtrmm, 444, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_66)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 444; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/bindings/linalg.pyx":471 * * * def blas_dtrsm(int Side, int Uplo, int TransA, int Diag, double alpha, A, B): # <<<<<<<<<<<<<< * """ * blas_dtrsm(int Side, int Uplo, int TransA, int Diag, double alpha, A, B). 
*/ __pyx_k_tuple_67 = PyTuple_New(11); if (unlikely(!__pyx_k_tuple_67)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 471; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_67); __Pyx_INCREF(((PyObject *)__pyx_n_s__Side)); PyTuple_SET_ITEM(__pyx_k_tuple_67, 0, ((PyObject *)__pyx_n_s__Side)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Side)); __Pyx_INCREF(((PyObject *)__pyx_n_s__Uplo)); PyTuple_SET_ITEM(__pyx_k_tuple_67, 1, ((PyObject *)__pyx_n_s__Uplo)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Uplo)); __Pyx_INCREF(((PyObject *)__pyx_n_s__TransA)); PyTuple_SET_ITEM(__pyx_k_tuple_67, 2, ((PyObject *)__pyx_n_s__TransA)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__TransA)); __Pyx_INCREF(((PyObject *)__pyx_n_s__Diag)); PyTuple_SET_ITEM(__pyx_k_tuple_67, 3, ((PyObject *)__pyx_n_s__Diag)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Diag)); __Pyx_INCREF(((PyObject *)__pyx_n_s__alpha)); PyTuple_SET_ITEM(__pyx_k_tuple_67, 4, ((PyObject *)__pyx_n_s__alpha)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__alpha)); __Pyx_INCREF(((PyObject *)__pyx_n_s__A)); PyTuple_SET_ITEM(__pyx_k_tuple_67, 5, ((PyObject *)__pyx_n_s__A)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__A)); __Pyx_INCREF(((PyObject *)__pyx_n_s__B)); PyTuple_SET_ITEM(__pyx_k_tuple_67, 6, ((PyObject *)__pyx_n_s__B)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__B)); __Pyx_INCREF(((PyObject *)__pyx_n_s__a)); PyTuple_SET_ITEM(__pyx_k_tuple_67, 7, ((PyObject *)__pyx_n_s__a)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__a)); __Pyx_INCREF(((PyObject *)__pyx_n_s__b)); PyTuple_SET_ITEM(__pyx_k_tuple_67, 8, ((PyObject *)__pyx_n_s__b)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__b)); __Pyx_INCREF(((PyObject *)__pyx_n_s__c)); PyTuple_SET_ITEM(__pyx_k_tuple_67, 9, ((PyObject *)__pyx_n_s__c)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__c)); __Pyx_INCREF(((PyObject *)__pyx_n_s__C)); PyTuple_SET_ITEM(__pyx_k_tuple_67, 10, ((PyObject *)__pyx_n_s__C)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__C)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_67)); __pyx_k_codeobj_68 = (PyObject*)__Pyx_PyCode_New(7, 0, 11, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_67, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_17, __pyx_n_s__blas_dtrsm, 471, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_68)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 471; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/bindings/linalg.pyx":498 * * * def blas_dsyrk(int Uplo, int Trans, double alpha, A, double beta, C): # <<<<<<<<<<<<<< * """ * D = blas_dsyrk(int Uplo, int Trans, double alpha, A, double beta, C). 
*/ __pyx_k_tuple_69 = PyTuple_New(10); if (unlikely(!__pyx_k_tuple_69)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 498; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_69); __Pyx_INCREF(((PyObject *)__pyx_n_s__Uplo)); PyTuple_SET_ITEM(__pyx_k_tuple_69, 0, ((PyObject *)__pyx_n_s__Uplo)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Uplo)); __Pyx_INCREF(((PyObject *)__pyx_n_s__Trans)); PyTuple_SET_ITEM(__pyx_k_tuple_69, 1, ((PyObject *)__pyx_n_s__Trans)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Trans)); __Pyx_INCREF(((PyObject *)__pyx_n_s__alpha)); PyTuple_SET_ITEM(__pyx_k_tuple_69, 2, ((PyObject *)__pyx_n_s__alpha)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__alpha)); __Pyx_INCREF(((PyObject *)__pyx_n_s__A)); PyTuple_SET_ITEM(__pyx_k_tuple_69, 3, ((PyObject *)__pyx_n_s__A)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__A)); __Pyx_INCREF(((PyObject *)__pyx_n_s__beta)); PyTuple_SET_ITEM(__pyx_k_tuple_69, 4, ((PyObject *)__pyx_n_s__beta)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__beta)); __Pyx_INCREF(((PyObject *)__pyx_n_s__C)); PyTuple_SET_ITEM(__pyx_k_tuple_69, 5, ((PyObject *)__pyx_n_s__C)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__C)); __Pyx_INCREF(((PyObject *)__pyx_n_s__a)); PyTuple_SET_ITEM(__pyx_k_tuple_69, 6, ((PyObject *)__pyx_n_s__a)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__a)); __Pyx_INCREF(((PyObject *)__pyx_n_s__c)); PyTuple_SET_ITEM(__pyx_k_tuple_69, 7, ((PyObject *)__pyx_n_s__c)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__c)); __Pyx_INCREF(((PyObject *)__pyx_n_s__d)); PyTuple_SET_ITEM(__pyx_k_tuple_69, 8, ((PyObject *)__pyx_n_s__d)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__d)); __Pyx_INCREF(((PyObject *)__pyx_n_s__D)); PyTuple_SET_ITEM(__pyx_k_tuple_69, 9, ((PyObject *)__pyx_n_s__D)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__D)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_69)); __pyx_k_codeobj_70 = (PyObject*)__Pyx_PyCode_New(6, 0, 10, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_69, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_17, __pyx_n_s__blas_dsyrk, 498, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_70)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 498; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/bindings/linalg.pyx":522 * * * def blas_dsyr2k(int Uplo, int Trans, double alpha, A, B, double beta, C): # <<<<<<<<<<<<<< * """ * Compute a rank-2k update of the symmetric matrix C, C = \alpha A B^T + */ __pyx_k_tuple_71 = PyTuple_New(12); if (unlikely(!__pyx_k_tuple_71)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 522; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_71); __Pyx_INCREF(((PyObject *)__pyx_n_s__Uplo)); PyTuple_SET_ITEM(__pyx_k_tuple_71, 0, ((PyObject *)__pyx_n_s__Uplo)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Uplo)); __Pyx_INCREF(((PyObject *)__pyx_n_s__Trans)); PyTuple_SET_ITEM(__pyx_k_tuple_71, 1, ((PyObject *)__pyx_n_s__Trans)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Trans)); __Pyx_INCREF(((PyObject *)__pyx_n_s__alpha)); PyTuple_SET_ITEM(__pyx_k_tuple_71, 2, ((PyObject *)__pyx_n_s__alpha)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__alpha)); __Pyx_INCREF(((PyObject *)__pyx_n_s__A)); PyTuple_SET_ITEM(__pyx_k_tuple_71, 3, ((PyObject *)__pyx_n_s__A)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__A)); __Pyx_INCREF(((PyObject *)__pyx_n_s__B)); PyTuple_SET_ITEM(__pyx_k_tuple_71, 4, ((PyObject *)__pyx_n_s__B)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__B)); __Pyx_INCREF(((PyObject *)__pyx_n_s__beta)); PyTuple_SET_ITEM(__pyx_k_tuple_71, 5, ((PyObject *)__pyx_n_s__beta)); __Pyx_GIVEREF(((PyObject 
*)__pyx_n_s__beta)); __Pyx_INCREF(((PyObject *)__pyx_n_s__C)); PyTuple_SET_ITEM(__pyx_k_tuple_71, 6, ((PyObject *)__pyx_n_s__C)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__C)); __Pyx_INCREF(((PyObject *)__pyx_n_s__a)); PyTuple_SET_ITEM(__pyx_k_tuple_71, 7, ((PyObject *)__pyx_n_s__a)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__a)); __Pyx_INCREF(((PyObject *)__pyx_n_s__b)); PyTuple_SET_ITEM(__pyx_k_tuple_71, 8, ((PyObject *)__pyx_n_s__b)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__b)); __Pyx_INCREF(((PyObject *)__pyx_n_s__c)); PyTuple_SET_ITEM(__pyx_k_tuple_71, 9, ((PyObject *)__pyx_n_s__c)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__c)); __Pyx_INCREF(((PyObject *)__pyx_n_s__d)); PyTuple_SET_ITEM(__pyx_k_tuple_71, 10, ((PyObject *)__pyx_n_s__d)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__d)); __Pyx_INCREF(((PyObject *)__pyx_n_s__D)); PyTuple_SET_ITEM(__pyx_k_tuple_71, 11, ((PyObject *)__pyx_n_s__D)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__D)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_71)); __pyx_k_codeobj_72 = (PyObject*)__Pyx_PyCode_New(7, 0, 12, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_71, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_17, __pyx_n_s__blas_dsyr2k, 522, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_72)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 522; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_InitGlobals(void) { if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __pyx_int_15 = PyInt_FromLong(15); if (unlikely(!__pyx_int_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; return 0; __pyx_L1_error:; return -1; } #if PY_MAJOR_VERSION < 3 PyMODINIT_FUNC initlinalg(void); /*proto*/ PyMODINIT_FUNC initlinalg(void) #else PyMODINIT_FUNC PyInit_linalg(void); /*proto*/ PyMODINIT_FUNC PyInit_linalg(void) #endif { PyObject *__pyx_t_1 = NULL; __Pyx_RefNannyDeclarations #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit_linalg(void)", 0); if ( __Pyx_check_binary_version() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #ifdef __Pyx_CyFunction_USED if (__Pyx_CyFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS 
#ifdef WITH_THREAD /* Python build with threading support? */ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4(__Pyx_NAMESTR("linalg"), __pyx_methods, __Pyx_DOCSTR(__pyx_k_13), 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (!PyDict_GetItemString(modules, "nipy.labs.bindings.linalg")) { if (unlikely(PyDict_SetItemString(modules, "nipy.labs.bindings.linalg", __pyx_m) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } } #endif __pyx_b = PyImport_AddModule(__Pyx_NAMESTR(__Pyx_BUILTIN_MODULE_NAME)); if (unlikely(!__pyx_b)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #if CYTHON_COMPILING_IN_PYPY Py_INCREF(__pyx_b); #endif if (__Pyx_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; /*--- Initialize various global constants etc. ---*/ if (unlikely(__Pyx_InitGlobals() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (__pyx_module_is_main_nipy__labs__bindings__linalg) { if (__Pyx_SetAttrString(__pyx_m, "__name__", __pyx_n_s____main__) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; } /*--- Builtin init code ---*/ if (unlikely(__Pyx_InitCachedBuiltins() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Constants init code ---*/ if (unlikely(__Pyx_InitCachedConstants() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Global init code ---*/ /*--- Variable export code ---*/ /*--- Function export code ---*/ /*--- Type init code ---*/ /*--- Type import code ---*/ __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "type", #if CYTHON_COMPILING_IN_PYPY sizeof(PyTypeObject), #else sizeof(PyHeapTypeObject), #endif 0); if (unlikely(!__pyx_ptype_7cpython_4type_type)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_5numpy_dtype)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 155; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 169; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 178; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_ufunc = 
__Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 861; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Variable import code ---*/ /*--- Function import code ---*/ /*--- Execution code ---*/ /* "nipy/labs/bindings/linalg.pyx":10 * """ * * __version__ = '0.1' # <<<<<<<<<<<<<< * * # Include fff */ if (PyObject_SetAttr(__pyx_m, __pyx_n_s____version__, ((PyObject *)__pyx_kp_s_14)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 10; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/bindings/linalg.pyx":84 * * # Initialize numpy * fffpy_import_array() # <<<<<<<<<<<<<< * import_array() * import numpy as np */ fffpy_import_array(); /* "nipy/labs/bindings/linalg.pyx":85 * # Initialize numpy * fffpy_import_array() * import_array() # <<<<<<<<<<<<<< * import numpy as np * */ import_array(); /* "nipy/labs/bindings/linalg.pyx":86 * fffpy_import_array() * import_array() * import numpy as np # <<<<<<<<<<<<<< * * # Binded routines */ __pyx_t_1 = __Pyx_Import(((PyObject *)__pyx_n_s__numpy), 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 86; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__np, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 86; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":91 * * ## fff_vector.h * def vector_get(X, size_t i): # <<<<<<<<<<<<<< * """ * Get i-th element. */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_8bindings_6linalg_1vector_get, NULL, __pyx_n_s_18); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__vector_get, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":103 * return xi * * def vector_set(X, size_t i, double a): # <<<<<<<<<<<<<< * """ * Set i-th element. */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_8bindings_6linalg_3vector_set, NULL, __pyx_n_s_18); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 103; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__vector_set, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 103; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":117 * return Y * * def vector_set_all(X, double a): # <<<<<<<<<<<<<< * """ * Set to a constant value. */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_8bindings_6linalg_5vector_set_all, NULL, __pyx_n_s_18); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 117; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__vector_set_all, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 117; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":131 * return Y * * def vector_scale(X, double a): # <<<<<<<<<<<<<< * """ * Multiply by a constant value. 
*/ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_8bindings_6linalg_7vector_scale, NULL, __pyx_n_s_18); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 131; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__vector_scale, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 131; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":145 * return Y * * def vector_add_constant(X, double a): # <<<<<<<<<<<<<< * """ * Add a constant value. */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_8bindings_6linalg_9vector_add_constant, NULL, __pyx_n_s_18); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 145; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__vector_add_constant, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 145; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":159 * return Y * * def vector_add(X, Y): # <<<<<<<<<<<<<< * """ * Add two vectors. */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_8bindings_6linalg_11vector_add, NULL, __pyx_n_s_18); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 159; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__vector_add, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 159; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":175 * return Z * * def vector_sub(X, Y): # <<<<<<<<<<<<<< * """ * Substract two vectors: x - y */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_8bindings_6linalg_13vector_sub, NULL, __pyx_n_s_18); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 175; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__vector_sub, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 175; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":191 * return Z * * def vector_mul(X, Y): # <<<<<<<<<<<<<< * """ * Element-wise multiplication. */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_8bindings_6linalg_15vector_mul, NULL, __pyx_n_s_18); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 191; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__vector_mul, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 191; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":207 * return Z * * def vector_div(X, Y): # <<<<<<<<<<<<<< * """ * Element-wise division. 
*/ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_8bindings_6linalg_17vector_div, NULL, __pyx_n_s_18); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__vector_div, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":224 * * * def vector_sum(X): # <<<<<<<<<<<<<< * """ * Sum up array elements. */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_8bindings_6linalg_19vector_sum, NULL, __pyx_n_s_18); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 224; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__vector_sum, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 224; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":236 * return s * * def vector_ssd(X, double m=0, int fixed=1): # <<<<<<<<<<<<<< * """ * (Minimal) sum of squared differences. */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_8bindings_6linalg_21vector_ssd, NULL, __pyx_n_s_18); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 236; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__vector_ssd, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 236; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":248 * return s * * def vector_sad(X, double m=0): # <<<<<<<<<<<<<< * """ * Sum of absolute differences. */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_8bindings_6linalg_23vector_sad, NULL, __pyx_n_s_18); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 248; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__vector_sad, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 248; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":260 * return s * * def vector_median(X): # <<<<<<<<<<<<<< * """ * Median. */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_8bindings_6linalg_25vector_median, NULL, __pyx_n_s_18); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 260; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__vector_median, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 260; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":272 * return m * * def vector_quantile(X, double r, int interp): # <<<<<<<<<<<<<< * """ * Quantile. 
*/ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_8bindings_6linalg_27vector_quantile, NULL, __pyx_n_s_18); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__vector_quantile, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":286 * * ## fff_matrix.h * def matrix_get(A, size_t i, size_t j): # <<<<<<<<<<<<<< * """ * Get (i,j) element. */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_8bindings_6linalg_29matrix_get, NULL, __pyx_n_s_18); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 286; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__matrix_get, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 286; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":298 * return aij * * def matrix_transpose(A): # <<<<<<<<<<<<<< * """ * Transpose a matrix. */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_8bindings_6linalg_31matrix_transpose, NULL, __pyx_n_s_18); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 298; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__matrix_transpose, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 298; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":311 * return B * * def matrix_add(A, B): # <<<<<<<<<<<<<< * """ * C = matrix_add(A, B) */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_8bindings_6linalg_33matrix_add, NULL, __pyx_n_s_18); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 311; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__matrix_add, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 311; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":360 * * ### BLAS 1 * def blas_dnrm2(X): # <<<<<<<<<<<<<< * cdef fff_vector *x * x = fff_vector_fromPyArray(X) */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_8bindings_6linalg_35blas_dnrm2, NULL, __pyx_n_s_18); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 360; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__blas_dnrm2, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 360; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":365 * return fff_blas_dnrm2(x) * * def blas_dasum(X): # <<<<<<<<<<<<<< * cdef fff_vector *x * x = fff_vector_fromPyArray(X) */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_8bindings_6linalg_37blas_dasum, NULL, __pyx_n_s_18); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 365; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__blas_dasum, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 365; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":370 * 
return fff_blas_dasum(x) * * def blas_ddot(X, Y): # <<<<<<<<<<<<<< * cdef fff_vector *x, *y * x = fff_vector_fromPyArray(X) */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_8bindings_6linalg_39blas_ddot, NULL, __pyx_n_s_18); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 370; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__blas_ddot, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 370; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":376 * return fff_blas_ddot(x, y) * * def blas_daxpy(double alpha, X, Y): # <<<<<<<<<<<<<< * cdef fff_vector *x, *y, *z * x = fff_vector_fromPyArray(X) */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_8bindings_6linalg_41blas_daxpy, NULL, __pyx_n_s_18); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 376; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__blas_daxpy, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 376; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":386 * return Z * * def blas_dscal(double alpha, X): # <<<<<<<<<<<<<< * cdef fff_vector *x, *y * x = fff_vector_fromPyArray(X) */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_8bindings_6linalg_43blas_dscal, NULL, __pyx_n_s_18); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 386; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__blas_dscal, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 386; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":398 * * ### BLAS 3 * def blas_dgemm(int TransA, int TransB, double alpha, A, B, double beta, C): # <<<<<<<<<<<<<< * """ * D = blas_dgemm(int TransA, int TransB, double alpha, A, B, double beta, C). */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_8bindings_6linalg_45blas_dgemm, NULL, __pyx_n_s_18); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 398; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__blas_dgemm, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 398; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":420 * * * def blas_dsymm(int Side, int Uplo, double alpha, A, B, beta, C): # <<<<<<<<<<<<<< * """ * D = blas_dsymm(int Side, int Uplo, double alpha, A, B, beta, C). */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_8bindings_6linalg_47blas_dsymm, NULL, __pyx_n_s_18); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 420; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__blas_dsymm, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 420; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":444 * return D * * def blas_dtrmm(int Side, int Uplo, int TransA, int Diag, double alpha, A, B): # <<<<<<<<<<<<<< * """ * C = blas_dtrmm(int Side, int Uplo, int TransA, int Diag, double alpha, A, B). 
*/ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_8bindings_6linalg_49blas_dtrmm, NULL, __pyx_n_s_18); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 444; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__blas_dtrmm, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 444; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":471 * * * def blas_dtrsm(int Side, int Uplo, int TransA, int Diag, double alpha, A, B): # <<<<<<<<<<<<<< * """ * blas_dtrsm(int Side, int Uplo, int TransA, int Diag, double alpha, A, B). */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_8bindings_6linalg_51blas_dtrsm, NULL, __pyx_n_s_18); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 471; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__blas_dtrsm, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 471; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":498 * * * def blas_dsyrk(int Uplo, int Trans, double alpha, A, double beta, C): # <<<<<<<<<<<<<< * """ * D = blas_dsyrk(int Uplo, int Trans, double alpha, A, double beta, C). */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_8bindings_6linalg_53blas_dsyrk, NULL, __pyx_n_s_18); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 498; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__blas_dsyrk, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 498; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":522 * * * def blas_dsyr2k(int Uplo, int Trans, double alpha, A, B, double beta, C): # <<<<<<<<<<<<<< * """ * Compute a rank-2k update of the symmetric matrix C, C = \alpha A B^T + */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_8bindings_6linalg_55blas_dsyr2k, NULL, __pyx_n_s_18); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 522; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__blas_dsyr2k, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 522; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/linalg.pyx":1 * # -*- Mode: Python -*- Not really, but the syntax is close enough # <<<<<<<<<<<<<< * * """ */ __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_1)); if (PyObject_SetAttr(__pyx_m, __pyx_n_s____test__, ((PyObject *)__pyx_t_1)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; /* "numpy.pxd":975 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); if (__pyx_m) { __Pyx_AddTraceback("init nipy.labs.bindings.linalg", __pyx_clineno, __pyx_lineno, __pyx_filename); Py_DECREF(__pyx_m); __pyx_m = 0; } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init nipy.labs.bindings.linalg"); } __pyx_L0:; 
__Pyx_RefNannyFinishContext(); #if PY_MAJOR_VERSION < 3 return; #else return __pyx_m; #endif } /* Runtime support code */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule((char *)modname); if (!m) goto end; p = PyObject_GetAttrString(m, (char *)"RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* CYTHON_REFNANNY */ static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%s() takes %s %" CYTHON_FORMAT_SSIZE_T "d positional argument%s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? "" : "s", num_found); } static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 
1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%s() got an unexpected keyword argument '%s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { if (unlikely(!type)) { PyErr_Format(PyExc_SystemError, "Missing type object"); return 0; } if (likely(PyObject_TypeCheck(obj, type))) return 1; PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", Py_TYPE(obj)->tp_name, type->tp_name); return 0; } static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name) { PyObject *result; result = PyObject_GetAttr(dict, name); if (!result) { if (dict != __pyx_b) { PyErr_Clear(); result = PyObject_GetAttr(__pyx_b, name); } if (!result) { PyErr_SetObject(PyExc_NameError, name); } } return result; } static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb) { #if CYTHON_COMPILING_IN_CPYTHON PyObject *tmp_type, *tmp_value, *tmp_tb; PyThreadState *tstate = PyThreadState_GET(); tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_Restore(type, value, tb); #endif } static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb) { #if CYTHON_COMPILING_IN_CPYTHON PyThreadState *tstate = PyThreadState_GET(); *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(type, value, tb); #endif } #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } #if PY_VERSION_HEX < 0x02050000 if (PyClass_Check(type)) { #else if (PyType_Check(type)) { #endif #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; #if PY_VERSION_HEX < 0x02050000 if (PyInstance_Check(type)) { type = (PyObject*) ((PyInstanceObject*)type)->in_class; Py_INCREF(type); } else { type = 0; PyErr_SetString(PyExc_TypeError, "raise: exception must be an old-style class or instance"); goto raise_error; } #else type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of 
BaseException"); goto raise_error; } #endif } __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else /* Python 3+ */ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyEval_CallObject(type, args); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } if (cause && cause != Py_None) { PyObject *fixed_cause; if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { PyThreadState *tstate = PyThreadState_GET(); PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } } bad: Py_XDECREF(owned_instance); return; } #endif static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); } static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more than %" CYTHON_FORMAT_SSIZE_T "d value%s to unpack", index, (index == 1) ? 
"" : "s"); } static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); } static CYTHON_INLINE int __Pyx_IterFinish(void) { #if CYTHON_COMPILING_IN_CPYTHON PyThreadState *tstate = PyThreadState_GET(); PyObject* exc_type = tstate->curexc_type; if (unlikely(exc_type)) { if (likely(exc_type == PyExc_StopIteration) || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration)) { PyObject *exc_value, *exc_tb; exc_value = tstate->curexc_value; exc_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; Py_DECREF(exc_type); Py_XDECREF(exc_value); Py_XDECREF(exc_tb); return 0; } else { return -1; } } return 0; #else if (unlikely(PyErr_Occurred())) { if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) { PyErr_Clear(); return 0; } else { return -1; } } return 0; #endif } static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected) { if (unlikely(retval)) { Py_DECREF(retval); __Pyx_RaiseTooManyValuesError(expected); return -1; } else { return __Pyx_IterFinish(); } return 0; } static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level) { PyObject *py_import = 0; PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; py_import = __Pyx_GetAttrString(__pyx_b, "__import__"); if (!py_import) goto bad; if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; #if PY_VERSION_HEX >= 0x02050000 { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if (strchr(__Pyx_MODULE_NAME, '.')) { /* try package relative import first */ PyObject *py_level = PyInt_FromLong(1); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; /* try absolute import on failure */ } #endif if (!module) { PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); } } #else if (level>0) { PyErr_SetString(PyExc_RuntimeError, "Relative import is not supported for Python <=2.4."); goto bad; } module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, NULL); #endif bad: Py_XDECREF(empty_list); Py_XDECREF(py_import); Py_XDECREF(empty_dict); return module; } #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return ::std::complex< float >(x, y); } #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return x + y*(__pyx_t_float_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { __pyx_t_float_complex z; z.real = x; z.imag = y; return z; } #endif #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex a, __pyx_t_float_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real + b.real; z.imag = a.imag + 
b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float denom = b.real * b.real + b.imag * b.imag; z.real = (a.real * b.real + a.imag * b.imag) / denom; z.imag = (a.imag * b.real - a.real * b.imag) / denom; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrtf(z.real*z.real + z.imag*z.imag); #else return hypotf(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { float denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(a, a); case 3: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(z, a); case 4: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } r = a.real; theta = 0; } else { r = __Pyx_c_absf(a); theta = atan2f(a.imag, a.real); } lnr = logf(r); z_r = expf(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cosf(z_theta); z.imag = z_r * sinf(z_theta); return z; } #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return ::std::complex< double >(x, y); } #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return x + y*(__pyx_t_double_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { __pyx_t_double_complex z; z.real = x; z.imag = y; return z; } #endif #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex a, __pyx_t_double_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; 
z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double denom = b.real * b.real + b.imag * b.imag; z.real = (a.real * b.real + a.imag * b.imag) / denom; z.imag = (a.imag * b.real - a.real * b.imag) / denom; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrt(z.real*z.real + z.imag*z.imag); #else return hypot(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { double denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(a, a); case 3: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(z, a); case 4: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } r = a.real; theta = 0; } else { r = __Pyx_c_abs(a); theta = atan2(a.imag, a.real); } lnr = log(r); z_r = exp(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cos(z_theta); z.imag = z_r * sin(z_theta); return z; } #endif #endif static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject* x) { const unsigned char neg_one = (unsigned char)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(unsigned char) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(unsigned char)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to unsigned char" : "value too large to convert to unsigned char"); } return (unsigned char)-1; } return (unsigned char)val; } return (unsigned char)__Pyx_PyInt_AsUnsignedLong(x); } static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject* x) { const unsigned short neg_one = (unsigned short)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(unsigned short) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(unsigned short)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? 
"can't convert negative value to unsigned short" : "value too large to convert to unsigned short"); } return (unsigned short)-1; } return (unsigned short)val; } return (unsigned short)__Pyx_PyInt_AsUnsignedLong(x); } static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject* x) { const unsigned int neg_one = (unsigned int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(unsigned int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(unsigned int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to unsigned int" : "value too large to convert to unsigned int"); } return (unsigned int)-1; } return (unsigned int)val; } return (unsigned int)__Pyx_PyInt_AsUnsignedLong(x); } static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject* x) { const char neg_one = (char)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(char) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(char)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to char" : "value too large to convert to char"); } return (char)-1; } return (char)val; } return (char)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject* x) { const short neg_one = (short)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(short) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(short)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to short" : "value too large to convert to short"); } return (short)-1; } return (short)val; } return (short)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject* x) { const int neg_one = (int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to int" : "value too large to convert to int"); } return (int)-1; } return (int)val; } return (int)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject* x) { const signed char neg_one = (signed char)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(signed char) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(signed char)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to signed char" : "value too large to convert to signed char"); } return (signed char)-1; } return (signed char)val; } return (signed char)__Pyx_PyInt_AsSignedLong(x); } static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject* x) { const signed short neg_one = (signed short)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(signed short) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(signed short)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? 
"can't convert negative value to signed short" : "value too large to convert to signed short"); } return (signed short)-1; } return (signed short)val; } return (signed short)__Pyx_PyInt_AsSignedLong(x); } static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject* x) { const signed int neg_one = (signed int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(signed int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(signed int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to signed int" : "value too large to convert to signed int"); } return (signed int)-1; } return (signed int)val; } return (signed int)__Pyx_PyInt_AsSignedLong(x); } static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject* x) { const int neg_one = (int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to int" : "value too large to convert to int"); } return (int)-1; } return (int)val; } return (int)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject* x) { const unsigned long neg_one = (unsigned long)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned long"); return (unsigned long)-1; } return (unsigned long)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned long"); return (unsigned long)-1; } return (unsigned long)PyLong_AsUnsignedLong(x); } else { return (unsigned long)PyLong_AsLong(x); } } else { unsigned long val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (unsigned long)-1; val = __Pyx_PyInt_AsUnsignedLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject* x) { const unsigned PY_LONG_LONG neg_one = (unsigned PY_LONG_LONG)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned PY_LONG_LONG"); return (unsigned PY_LONG_LONG)-1; } return (unsigned PY_LONG_LONG)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned PY_LONG_LONG"); return (unsigned PY_LONG_LONG)-1; } return (unsigned PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); } else { return (unsigned PY_LONG_LONG)PyLong_AsLongLong(x); } } else { unsigned PY_LONG_LONG val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (unsigned PY_LONG_LONG)-1; val = __Pyx_PyInt_AsUnsignedLongLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject* x) { const long neg_one = (long)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if 
(likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long)-1; } return (long)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long)-1; } return (long)PyLong_AsUnsignedLong(x); } else { return (long)PyLong_AsLong(x); } } else { long val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (long)-1; val = __Pyx_PyInt_AsLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject* x) { const PY_LONG_LONG neg_one = (PY_LONG_LONG)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to PY_LONG_LONG"); return (PY_LONG_LONG)-1; } return (PY_LONG_LONG)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to PY_LONG_LONG"); return (PY_LONG_LONG)-1; } return (PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); } else { return (PY_LONG_LONG)PyLong_AsLongLong(x); } } else { PY_LONG_LONG val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (PY_LONG_LONG)-1; val = __Pyx_PyInt_AsLongLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject* x) { const signed long neg_one = (signed long)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed long"); return (signed long)-1; } return (signed long)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed long"); return (signed long)-1; } return (signed long)PyLong_AsUnsignedLong(x); } else { return (signed long)PyLong_AsLong(x); } } else { signed long val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (signed long)-1; val = __Pyx_PyInt_AsSignedLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject* x) { const signed PY_LONG_LONG neg_one = (signed PY_LONG_LONG)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed PY_LONG_LONG"); return (signed PY_LONG_LONG)-1; } return (signed PY_LONG_LONG)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed PY_LONG_LONG"); return (signed PY_LONG_LONG)-1; } return (signed PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); } else { return (signed PY_LONG_LONG)PyLong_AsLongLong(x); } } else { signed PY_LONG_LONG val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (signed PY_LONG_LONG)-1; val = __Pyx_PyInt_AsSignedLongLong(tmp); Py_DECREF(tmp); return val; } } static int 
__Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); #if PY_VERSION_HEX < 0x02050000 return PyErr_Warn(NULL, message); #else return PyErr_WarnEx(NULL, message, 1); #endif } return 0; } #ifndef __PYX_HAVE_RT_ImportModule #define __PYX_HAVE_RT_ImportModule static PyObject *__Pyx_ImportModule(const char *name) { PyObject *py_name = 0; PyObject *py_module = 0; py_name = __Pyx_PyIdentifier_FromString(name); if (!py_name) goto bad; py_module = PyImport_Import(py_name); Py_DECREF(py_name); return py_module; bad: Py_XDECREF(py_name); return 0; } #endif #ifndef __PYX_HAVE_RT_ImportType #define __PYX_HAVE_RT_ImportType static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict) { PyObject *py_module = 0; PyObject *result = 0; PyObject *py_name = 0; char warning[200]; py_module = __Pyx_ImportModule(module_name); if (!py_module) goto bad; py_name = __Pyx_PyIdentifier_FromString(class_name); if (!py_name) goto bad; result = PyObject_GetAttr(py_module, py_name); Py_DECREF(py_name); py_name = 0; Py_DECREF(py_module); py_module = 0; if (!result) goto bad; if (!PyType_Check(result)) { PyErr_Format(PyExc_TypeError, "%s.%s is not a type object", module_name, class_name); goto bad; } if (!strict && (size_t)((PyTypeObject *)result)->tp_basicsize > size) { PyOS_snprintf(warning, sizeof(warning), "%s.%s size changed, may indicate binary incompatibility", module_name, class_name); #if PY_VERSION_HEX < 0x02050000 if (PyErr_Warn(NULL, warning) < 0) goto bad; #else if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; #endif } else if ((size_t)((PyTypeObject *)result)->tp_basicsize != size) { PyErr_Format(PyExc_ValueError, "%s.%s has the wrong size, try recompiling", module_name, class_name); goto bad; } return (PyTypeObject *)result; bad: Py_XDECREF(py_module); Py_XDECREF(result); return NULL; } #endif static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = (start + end) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); 
if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, new_max*sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, /*int argcount,*/ 0, /*int kwonlyargcount,*/ 0, /*int nlocals,*/ 0, /*int stacksize,*/ 0, /*int flags,*/ __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, /*int firstlineno,*/ __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_globals = 0; PyFrameObject *py_frame = 0; py_code = __pyx_find_code_object(c_line ? c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? 
c_line : py_line, py_code); } py_globals = PyModule_GetDict(__pyx_m); if (!py_globals) goto bad; py_frame = PyFrame_New( PyThreadState_GET(), /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ py_globals, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; py_frame->f_lineno = py_line; PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else /* Python 3+ has unicode identifiers */ if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; ++t; } return 0; } /* Type Conversion Functions */ static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x) { PyNumberMethods *m; const char *name = NULL; PyObject *res = NULL; #if PY_VERSION_HEX < 0x03000000 if (PyInt_Check(x) || PyLong_Check(x)) #else if (PyLong_Check(x)) #endif return Py_INCREF(x), x; m = Py_TYPE(x)->tp_as_number; #if PY_VERSION_HEX < 0x03000000 if (m && m->nb_int) { name = "int"; res = PyNumber_Int(x); } else if (m && m->nb_long) { name = "long"; res = PyNumber_Long(x); } #else if (m && m->nb_int) { name = "int"; res = PyNumber_Long(x); } #endif if (res) { #if PY_VERSION_HEX < 0x03000000 if (!PyInt_Check(res) && !PyLong_Check(res)) { #else if (!PyLong_Check(res)) { #endif PyErr_Format(PyExc_TypeError, "__%s__ returned non-%s (type %.200s)", name, name, Py_TYPE(res)->tp_name); Py_DECREF(res); return NULL; } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject* x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { #if PY_VERSION_HEX < 0x02050000 if (ival <= LONG_MAX) return PyInt_FromLong((long)ival); else { unsigned char *bytes = (unsigned char *) &ival; int one = 1; int little = (int)*(unsigned char*)&one; return _PyLong_FromByteArray(bytes, sizeof(size_t), little, 0); } #else return PyInt_FromSize_t(ival); #endif } static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject* x) { unsigned PY_LONG_LONG val = __Pyx_PyInt_AsUnsignedLongLong(x); if (unlikely(val == (unsigned PY_LONG_LONG)-1 && PyErr_Occurred())) { return (size_t)-1; } else if (unlikely(val != (unsigned PY_LONG_LONG)(size_t)val)) { PyErr_SetString(PyExc_OverflowError, "value too large to convert to size_t"); return (size_t)-1; } return (size_t)val; } #endif /* Py_PYTHON_H */ nipy-0.3.0/nipy/labs/bindings/linalg.pyx000066400000000000000000000373441210344137400202100ustar00rootroot00000000000000# -*- Mode: Python -*- Not really, but the syntax is close enough """ Python access to core fff functions written in C. This module is mainly used for unitary tests. Author: Alexis Roche, 2008. 
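Illustrative usage sketch (the arrays below are arbitrary examples; the import path assumes the extension is built as nipy.labs.bindings.linalg, as declared in this package's setup.py, and results agree with the corresponding numpy expressions up to rounding, as exercised by the unit tests in nipy/labs/bindings/tests):

    import numpy as np
    from nipy.labs.bindings.linalg import vector_add, blas_ddot, blas_dgemm

    x = np.random.rand(10)
    y = np.random.rand(10)
    z = vector_add(x, y)        # same values as x + y
    d = blas_ddot(x, y)         # same value as np.dot(x, y)

    A = np.random.rand(4, 5)
    B = np.random.rand(5, 4)
    C = np.zeros((4, 4))
    D = blas_dgemm(0, 0, 1.0, A, B, 0.0, C)   # 1.0 * np.dot(A, B) + 0.0 * C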
""" __version__ = '0.1' # Include fff from fff cimport * # Exports from fff_blas.h cdef extern from "fff_blas.h": ctypedef enum CBLAS_TRANSPOSE_t: CblasNoTrans=111 CblasTrans=112 CblasConjTrans=113 ctypedef enum CBLAS_UPLO_t: CblasUpper=121 CblasLower=122 ctypedef enum CBLAS_DIAG_t: CblasNonUnit=131 CblasUnit=132 ctypedef enum CBLAS_SIDE_t: CblasLeft=141 CblasRight=142 ## BLAS level 1 double fff_blas_ddot(fff_vector * x, fff_vector * y) double fff_blas_dnrm2(fff_vector * x) double fff_blas_dasum(fff_vector * x) size_t fff_blas_idamax(fff_vector * x) int fff_blas_dswap(fff_vector * x, fff_vector * y) fff_blas_dcopy(fff_vector * x, fff_vector * y) int fff_blas_daxpy(double alpha, fff_vector * x, fff_vector * y) int fff_blas_dscal(double alpha, fff_vector * x) int fff_blas_drot(fff_vector * x, fff_vector * y, double c, double s) ## BLAS level 2 int fff_blas_dgemv(CBLAS_TRANSPOSE_t TransA, double alpha, fff_matrix * A, fff_vector * x, double beta, fff_vector * y) int fff_blas_dtrmv(CBLAS_UPLO_t Uplo, CBLAS_TRANSPOSE_t TransA, CBLAS_DIAG_t Diag, fff_matrix * A, fff_vector * x) int fff_blas_dtrsv(CBLAS_UPLO_t Uplo, CBLAS_TRANSPOSE_t TransA, CBLAS_DIAG_t Diag, fff_matrix * A, fff_vector * x) int fff_blas_dsymv(CBLAS_UPLO_t Uplo, double alpha, fff_matrix * A, fff_vector * x, double beta, fff_vector * y) int fff_blas_dger(double alpha, fff_vector * x, fff_vector * y, fff_matrix * A) int fff_blas_dsyr(CBLAS_UPLO_t Uplo, double alpha, fff_vector * x, fff_matrix * A) int fff_blas_dsyr2(CBLAS_UPLO_t Uplo, double alpha, fff_vector * x, fff_vector * y, fff_matrix * A) ## BLAS level 3 int fff_blas_dgemm(CBLAS_TRANSPOSE_t TransA, CBLAS_TRANSPOSE_t TransB, double alpha, fff_matrix * A, fff_matrix * B, double beta, fff_matrix * C) int fff_blas_dsymm(CBLAS_SIDE_t Side, CBLAS_UPLO_t Uplo, double alpha, fff_matrix * A, fff_matrix * B, double beta, fff_matrix * C) int fff_blas_dtrmm(CBLAS_SIDE_t Side, CBLAS_UPLO_t Uplo, CBLAS_TRANSPOSE_t TransA, CBLAS_DIAG_t Diag, double alpha, fff_matrix * A, fff_matrix * B) int fff_blas_dtrsm(CBLAS_SIDE_t Side, CBLAS_UPLO_t Uplo, CBLAS_TRANSPOSE_t TransA, CBLAS_DIAG_t Diag, double alpha, fff_matrix * A, fff_matrix * B) int fff_blas_dsyrk(CBLAS_UPLO_t Uplo, CBLAS_TRANSPOSE_t Trans, double alpha, fff_matrix * A, double beta, fff_matrix * C) int fff_blas_dsyr2k(CBLAS_UPLO_t Uplo, CBLAS_TRANSPOSE_t Trans, double alpha, fff_matrix * A, fff_matrix * B, double beta, fff_matrix * C) # Initialize numpy fffpy_import_array() import_array() import numpy as np # Binded routines ## fff_vector.h def vector_get(X, size_t i): """ Get i-th element. xi = vector_get(x, i) """ cdef fff_vector* x cdef double xi x = fff_vector_fromPyArray(X) xi = fff_vector_get(x, i) fff_vector_delete(x) return xi def vector_set(X, size_t i, double a): """ Set i-th element. vector_set(x, i, a) """ cdef fff_vector *x, *y x = fff_vector_fromPyArray(X) y = fff_vector_new(x.size) fff_vector_memcpy(y, x) fff_vector_set(y, i, a) fff_vector_delete(x) Y = fff_vector_toPyArray(y) return Y def vector_set_all(X, double a): """ Set to a constant value. vector_set_all(x, a) """ cdef fff_vector *x, *y x = fff_vector_fromPyArray(X) y = fff_vector_new(x.size) fff_vector_memcpy(y, x) fff_vector_set_all(y, a) fff_vector_delete(x) Y = fff_vector_toPyArray(y) return Y def vector_scale(X, double a): """ Multiply by a constant value. 
y = vector_scale(x, a) """ cdef fff_vector *x, *y x = fff_vector_fromPyArray(X) y = fff_vector_new(x.size) fff_vector_memcpy(y, x) fff_vector_scale(y, a) fff_vector_delete(x) Y = fff_vector_toPyArray(y) return Y def vector_add_constant(X, double a): """ Add a constant value. y = vector_add_constant(x, a) """ cdef fff_vector *x, *y x = fff_vector_fromPyArray(X) y = fff_vector_new(x.size) fff_vector_memcpy(y, x) fff_vector_add_constant(y, a) fff_vector_delete(x) Y = fff_vector_toPyArray(y) return Y def vector_add(X, Y): """ Add two vectors. z = vector_add(x, y) """ cdef fff_vector *x, *y, *z x = fff_vector_fromPyArray(X) y = fff_vector_fromPyArray(Y) z = fff_vector_new(x.size) fff_vector_memcpy(z, x) fff_vector_add(z, y) fff_vector_delete(x) fff_vector_delete(y) Z = fff_vector_toPyArray(z) return Z def vector_sub(X, Y): """ Subtract two vectors: x - y z = vector_sub(x, y) """ cdef fff_vector *x, *y, *z x = fff_vector_fromPyArray(X) y = fff_vector_fromPyArray(Y) z = fff_vector_new(x.size) fff_vector_memcpy(z, x) fff_vector_sub(z, y) fff_vector_delete(x) fff_vector_delete(y) Z = fff_vector_toPyArray(z) return Z def vector_mul(X, Y): """ Element-wise multiplication. z = vector_mul(x, y) """ cdef fff_vector *x, *y, *z x = fff_vector_fromPyArray(X) y = fff_vector_fromPyArray(Y) z = fff_vector_new(x.size) fff_vector_memcpy(z, x) fff_vector_mul(z, y) fff_vector_delete(x) fff_vector_delete(y) Z = fff_vector_toPyArray(z) return Z def vector_div(X, Y): """ Element-wise division. z = vector_div(x, y) """ cdef fff_vector *x, *y, *z x = fff_vector_fromPyArray(X) y = fff_vector_fromPyArray(Y) z = fff_vector_new(x.size) fff_vector_memcpy(z, x) fff_vector_div(z, y) fff_vector_delete(x) fff_vector_delete(y) Z = fff_vector_toPyArray(z) return Z def vector_sum(X): """ Sum up array elements. s = vector_sum(x) """ cdef fff_vector* x cdef long double s x = fff_vector_fromPyArray(X) s = fff_vector_sum(x) fff_vector_delete(x) return s def vector_ssd(X, double m=0, int fixed=1): """ (Minimal) sum of squared differences. s = vector_ssd(x, m=0, fixed=1) """ cdef fff_vector* x cdef long double s x = fff_vector_fromPyArray(X) s = fff_vector_ssd(x, &m, fixed) fff_vector_delete(x) return s def vector_sad(X, double m=0): """ Sum of absolute differences. s = vector_sad(x, m=0) """ cdef fff_vector* x cdef long double s x = fff_vector_fromPyArray(X) s = fff_vector_sad(x, m) fff_vector_delete(x) return s def vector_median(X): """ Median. m = vector_median(x) """ cdef fff_vector* x cdef double m x = fff_vector_fromPyArray(X) m = fff_vector_median(x) fff_vector_delete(x) return m def vector_quantile(X, double r, int interp): """ Quantile. q = vector_quantile(x, r=0.5, interp=1) """ cdef fff_vector* x cdef double q x = fff_vector_fromPyArray(X) q = fff_vector_quantile(x, r, interp) fff_vector_delete(x) return q ## fff_matrix.h def matrix_get(A, size_t i, size_t j): """ Get (i,j) element. aij = matrix_get(A, i, j) """ cdef fff_matrix* a cdef double aij a = fff_matrix_fromPyArray(A) aij = fff_matrix_get(a, i, j) fff_matrix_delete(a) return aij def matrix_transpose(A): """ Transpose a matrix.
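A quick illustrative check (A below is an arbitrary two-dimensional float64 array; the call mirrors the usage line that follows):

    import numpy as np
    from nipy.labs.bindings.linalg import matrix_transpose
    A = np.random.rand(4, 3)
    assert np.allclose(matrix_transpose(A), A.T)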
B = matrix_transpose(A) """ cdef fff_matrix *a, *b a = fff_matrix_fromPyArray(A) b = fff_matrix_new(a.size2, a.size1) fff_matrix_transpose(b, a) fff_matrix_delete(a) B = fff_matrix_toPyArray(b) return B def matrix_add(A, B): """ C = matrix_add(A, B) """ cdef fff_matrix *a, *b, *c a = fff_matrix_fromPyArray(A) b = fff_matrix_fromPyArray(B) c = fff_matrix_new(a.size1, a.size2) fff_matrix_memcpy(c, a) fff_matrix_add(c, b) C = fff_matrix_toPyArray(c) return C ## fff_blas.h cdef CBLAS_TRANSPOSE_t flag_transpose( int flag ): cdef CBLAS_TRANSPOSE_t x if flag <= 0: x = CblasNoTrans else: x = CblasTrans return x cdef CBLAS_UPLO_t flag_uplo( int flag ): cdef CBLAS_UPLO_t x if flag <= 0: x = CblasUpper else: x = CblasLower return x cdef CBLAS_DIAG_t flag_diag( int flag ): cdef CBLAS_DIAG_t x if flag <= 0: x = CblasNonUnit else: x = CblasUnit return x cdef CBLAS_SIDE_t flag_side( int flag ): cdef CBLAS_SIDE_t x if flag <= 0: x = CblasLeft else: x = CblasRight return x ### BLAS 1 def blas_dnrm2(X): cdef fff_vector *x x = fff_vector_fromPyArray(X) return fff_blas_dnrm2(x) def blas_dasum(X): cdef fff_vector *x x = fff_vector_fromPyArray(X) return fff_blas_dasum(x) def blas_ddot(X, Y): cdef fff_vector *x, *y x = fff_vector_fromPyArray(X) y = fff_vector_fromPyArray(Y) return fff_blas_ddot(x, y) def blas_daxpy(double alpha, X, Y): cdef fff_vector *x, *y, *z x = fff_vector_fromPyArray(X) y = fff_vector_fromPyArray(Y) z = fff_vector_new(y.size) fff_vector_memcpy(z, y) fff_blas_daxpy(alpha, x, z) Z = fff_vector_toPyArray(z) return Z def blas_dscal(double alpha, X): cdef fff_vector *x, *y x = fff_vector_fromPyArray(X) y = fff_vector_new(x.size) fff_vector_memcpy(y, x) fff_blas_dscal(alpha, y) Y = fff_vector_toPyArray(y) return Y ### BLAS 3 def blas_dgemm(int TransA, int TransB, double alpha, A, B, double beta, C): """ D = blas_dgemm(int TransA, int TransB, double alpha, A, B, double beta, C). Compute the matrix-matrix product and sum D = alpha op(A) op(B) + beta C where op(A) = A, A^T, A^H for TransA = CblasNoTrans, CblasTrans, CblasConjTrans and similarly for the parameter TransB. """ cdef fff_matrix *a, *b, *c, *d a = fff_matrix_fromPyArray(A) b = fff_matrix_fromPyArray(B) c = fff_matrix_fromPyArray(C) d = fff_matrix_new(c.size1, c.size2) fff_matrix_memcpy(d, c) fff_blas_dgemm(flag_transpose(TransA), flag_transpose(TransB), alpha, a, b, beta, d) fff_matrix_delete(a) fff_matrix_delete(b) fff_matrix_delete(c) D = fff_matrix_toPyArray(d) return D def blas_dsymm(int Side, int Uplo, double alpha, A, B, beta, C): """ D = blas_dsymm(int Side, int Uplo, double alpha, A, B, beta, C). Compute the matrix-matrix product and sum C = \alpha A B + \beta C for Side is CblasLeft and C = \alpha B A + \beta C for Side is CblasRight, where the matrix A is symmetric. When Uplo is CblasUpper then the upper triangle and diagonal of A are used, and when Uplo is CblasLower then the lower triangle and diagonal of A are used. """ cdef fff_matrix *a, *b, *c, *d a = fff_matrix_fromPyArray(A) b = fff_matrix_fromPyArray(B) c = fff_matrix_fromPyArray(C) d = fff_matrix_new(c.size1, c.size2) fff_matrix_memcpy(d, c) fff_blas_dsymm(flag_side(Side), flag_uplo(Uplo), alpha, a, b, beta, d) fff_matrix_delete(a) fff_matrix_delete(b) fff_matrix_delete(c) D = fff_matrix_toPyArray(d) return D def blas_dtrmm(int Side, int Uplo, int TransA, int Diag, double alpha, A, B): """ C = blas_dtrmm(int Side, int Uplo, int TransA, int Diag, double alpha, A, B). 
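A concrete illustration of the conventions described below, mirroring tests/test_blas3.py (A and B are arbitrary square float64 arrays; flag value 0 selects CblasLeft, CblasUpper, CblasNoTrans and CblasNonUnit respectively):

    import numpy as np
    from nipy.labs.bindings.linalg import blas_dtrmm
    A = np.random.rand(5, 5)
    B = np.random.rand(5, 5)
    C = blas_dtrmm(0, 0, 0, 0, 1.0, A, B)
    assert np.allclose(C, np.dot(np.triu(A), B))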
Compute the matrix-matrix product B = \alpha op(A) B for Side is CblasLeft and B = \alpha B op(A) for Side is CblasRight. The matrix A is triangular and op(A) = A, A^T, A^H for TransA = CblasNoTrans, CblasTrans, CblasConjTrans. When Uplo is CblasUpper then the upper triangle of A is used, and when Uplo is CblasLower then the lower triangle of A is used. If Diag is CblasNonUnit then the diagonal of A is used, but if Diag is CblasUnit then the diagonal elements of the matrix A are taken as unity and are not referenced. """ cdef fff_matrix *a, *b, *c a = fff_matrix_fromPyArray(A) b = fff_matrix_fromPyArray(B) c = fff_matrix_new(a.size1, a.size2) fff_matrix_memcpy(c, b) fff_blas_dtrmm(flag_side(Side), flag_uplo(Uplo), flag_transpose(TransA), flag_diag(Diag), alpha, a, c) fff_matrix_delete(a) fff_matrix_delete(b) C = fff_matrix_toPyArray(c) return C def blas_dtrsm(int Side, int Uplo, int TransA, int Diag, double alpha, A, B): """ blas_dtrsm(int Side, int Uplo, int TransA, int Diag, double alpha, A, B). Compute the inverse-matrix matrix product B = \alpha op(inv(A))B for Side is CblasLeft and B = \alpha B op(inv(A)) for Side is CblasRight. The matrix A is triangular and op(A) = A, A^T, A^H for TransA = CblasNoTrans, CblasTrans, CblasConjTrans. When Uplo is CblasUpper then the upper triangle of A is used, and when Uplo is CblasLower then the lower triangle of A is used. If Diag is CblasNonUnit then the diagonal of A is used, but if Diag is CblasUnit then the diagonal elements of the matrix A are taken as unity and are not referenced. """ cdef fff_matrix *a, *b, *c a = fff_matrix_fromPyArray(A) b = fff_matrix_fromPyArray(B) c = fff_matrix_new(a.size1, a.size2) fff_matrix_memcpy(c, b) fff_blas_dtrsm(flag_side(Side), flag_uplo(Uplo), flag_transpose(TransA), flag_diag(Diag), alpha, a, c) fff_matrix_delete(a) fff_matrix_delete(b) C = fff_matrix_toPyArray(c) return C def blas_dsyrk(int Uplo, int Trans, double alpha, A, double beta, C): """ D = blas_dsyrk(int Uplo, int Trans, double alpha, A, double beta, C). Compute a rank-k update of the symmetric matrix C, C = \alpha A A^T + \beta C when Trans is CblasNoTrans and C = \alpha A^T A + \beta C when Trans is CblasTrans. Since the matrix C is symmetric only its upper half or lower half need to be stored. When Uplo is CblasUpper then the upper triangle and diagonal of C are used, and when Uplo is CblasLower then the lower triangle and diagonal of C are used. """ cdef fff_matrix *a, *c, *d a = fff_matrix_fromPyArray(A) c = fff_matrix_fromPyArray(C) d = fff_matrix_new(a.size1, a.size2) fff_matrix_memcpy(d, c) fff_blas_dsyrk(flag_uplo(Uplo), flag_transpose(Trans), alpha, a, beta, d) fff_matrix_delete(a) fff_matrix_delete(c) D = fff_matrix_toPyArray(d) return D def blas_dsyr2k(int Uplo, int Trans, double alpha, A, B, double beta, C): """ Compute a rank-2k update of the symmetric matrix C, C = \alpha A B^T + \alpha B A^T + \beta C when Trans is CblasNoTrans and C = \alpha A^T B + \alpha B^T A + \beta C when Trans is CblasTrans. Since the matrix C is symmetric only its upper half or lower half need to be stored. When Uplo is CblasUpper then the upper triangle and diagonal of C are used, and when Uplo is CblasLower then the lower triangle and diagonal of C are used. 
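An illustrative call mirroring tests/test_blas3.py (A, B and C are arbitrary square float64 arrays; as described above, only the triangle selected by Uplo is meaningful in the result):

    import numpy as np
    from nipy.labs.bindings.linalg import blas_dsyr2k
    A = np.random.rand(5, 5)
    B = np.random.rand(5, 5)
    C = np.random.rand(5, 5)
    D = blas_dsyr2k(0, 0, 1.0, A, B, 0.0, C)   # Uplo=CblasUpper, Trans=CblasNoTrans
    expected = np.dot(A, B.T) + np.dot(B, A.T)
    assert np.allclose(np.triu(D), np.triu(expected))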
""" cdef fff_matrix *a, *b, *c, *d a = fff_matrix_fromPyArray(A) b = fff_matrix_fromPyArray(B) c = fff_matrix_fromPyArray(C) d = fff_matrix_new(a.size1, a.size2) fff_matrix_memcpy(d, c) fff_blas_dsyr2k(flag_uplo(Uplo), flag_transpose(Trans), alpha, a, b, beta, d) fff_matrix_delete(a) fff_matrix_delete(b) fff_matrix_delete(c) D = fff_matrix_toPyArray(d) return D nipy-0.3.0/nipy/labs/bindings/setup.py000066400000000000000000000026141210344137400177020ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import os, sys def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('bindings', parent_package, top_path) config.add_subpackage('tests') config.add_subpackage('benchmarks') # We need this because libcstat.a is linked to lapack, which can # be a fortran library, and the linker needs this information. from numpy.distutils.system_info import get_info lapack_info = get_info('lapack_opt',0) if 'libraries' not in lapack_info: # But on OSX that may not give us what we need, so try with 'lapack' # instead. NOTE: scipy.linalg uses lapack_opt, not 'lapack'... lapack_info = get_info('lapack',0) config.add_extension('linalg', sources=['linalg.pyx'], libraries=['cstat'], extra_info=lapack_info) config.add_extension('array', sources=['array.pyx'], libraries=['cstat'], extra_info=lapack_info) config.add_extension('wrapper', sources=['wrapper.pyx'], libraries=['cstat'], extra_info=lapack_info) return config if __name__ == '__main__': print('This is the wrong setup.py file to run') nipy-0.3.0/nipy/labs/bindings/tests/000077500000000000000000000000001210344137400173275ustar00rootroot00000000000000nipy-0.3.0/nipy/labs/bindings/tests/__init__.py000066400000000000000000000000501210344137400214330ustar00rootroot00000000000000# Init to make test directory a package nipy-0.3.0/nipy/labs/bindings/tests/test_array.py000066400000000000000000000042061210344137400220600ustar00rootroot00000000000000#!/usr/bin/env python # # Test fff_array wrapping # from numpy.testing import assert_almost_equal, assert_equal import numpy as np from .. 
import (array_get, array_get_block, array_add, array_sub, array_mul, array_div) MAX_TEST_SIZE = 30 def random_shape(size): """ Output random dimensions in the range (2, MAX_TEST_SIZE) """ aux = np.random.randint(MAX_TEST_SIZE-1, size=size) + 2 if size==1: return aux else: return tuple(aux) def _test_array_get(x): pos = np.asarray(x.shape)/2 a = array_get(x, pos[0], pos[1], pos[2], pos[3]) assert_equal(a, x[pos[0], pos[1], pos[2], pos[3]]) def test_array_get(): d0, d1, d2, d3 = random_shape(4) x = np.random.rand(d0, d1, d2, d3)-.5 _test_array_get(x) def _test_array_get_block(x): b0 = array_get_block(x, 1, 8, 2, 1, 8, 2, 1, 8, 2, 1, 8, 2) b = x[1:8:2, 1:8:2, 1:8:2, 1:8:2] assert_equal(b0, b) def test_array_get_block(): x = np.random.rand(10, 10, 10, 10)-.5 _test_array_get_block(x) def _test_array_add(x, y): z = array_add(x, y) assert_equal(z, x+y) def test_array_add(): d0, d1, d2, d3 = random_shape(4) x = np.random.rand(d0, d1, d2, d3)-.5 y = (100*np.random.rand(d0, d1, d2, d3)).astype('uint8') _test_array_add(x, y) def _test_array_mul(x, y): z = array_mul(x, y) assert_equal(z, x*y) def test_array_mul(): d0, d1, d2, d3 = random_shape(4) x = np.random.rand(d0, d1, d2, d3)-.5 y = (100*np.random.rand(d0, d1, d2, d3)).astype('uint8') _test_array_mul(x, y) def _test_array_sub(x, y): z = array_sub(x, y) assert_equal(z, x-y) def test_array_sub(): d0, d1, d2, d3 = random_shape(4) x = np.random.rand(d0, d1, d2, d3)-.5 y = (100*np.random.rand(d0, d1, d2, d3)).astype('uint8') _test_array_sub(x, y) def _test_array_div(x, y): z = array_div(x, y) assert_almost_equal(z, x/y) def test_array_div(): d0, d1, d2, d3 = random_shape(4) x = np.random.rand(d0, d1, d2, d3)-.5 y = np.random.rand(d0, d1, d2, d3)-.5 _test_array_div(x, y) if __name__ == "__main__": import nose nose.run(argv=['', __file__]) nipy-0.3.0/nipy/labs/bindings/tests/test_blas1.py000066400000000000000000000016161210344137400217460ustar00rootroot00000000000000#!/usr/bin/env python # # Test BLAS 1 # from numpy.testing import assert_almost_equal import numpy as np from .. import (blas_dnrm2, blas_dasum, blas_ddot, blas_daxpy, blas_dscal) n = 15 def test_dnrm2(): x = np.random.rand(n) assert_almost_equal(np.sqrt(np.sum(x**2)), blas_dnrm2(x)) def test_dasum(): x = np.random.rand(n) assert_almost_equal(np.sum(np.abs(x)), blas_dasum(x)) def test_ddot(): x = np.random.rand(n) y = np.random.rand(n) assert_almost_equal(np.dot(x,y), blas_ddot(x, y)) def test_daxpy(): x = np.random.rand(n) y = np.random.rand(n) alpha = np.random.rand() assert_almost_equal(alpha*x+y, blas_daxpy(alpha, x, y)) def test_dscal(): x = np.random.rand(n) alpha = np.random.rand() assert_almost_equal(alpha*x, blas_dscal(alpha, x)) if __name__ == "__main__": import nose nose.run(argv=['', __file__]) nipy-0.3.0/nipy/labs/bindings/tests/test_blas3.py000066400000000000000000000126341210344137400217520ustar00rootroot00000000000000#!/usr/bin/env python # # Test BLAS 3 # from numpy.testing import assert_almost_equal import numpy as np from .. 
import (blas_dgemm, blas_dsymm, blas_dtrmm, blas_dtrsm, blas_dsyrk, blas_dsyr2k) n1 = 10 n2 = 13 def test_dgemm(): A = np.random.rand(n1,n2) B = np.random.rand(n2,n1) C = np.random.rand(n1,n1) C2 = np.random.rand(n2,n2) alpha = np.double(np.random.rand(1)) beta = np.double(np.random.rand(1)) # Test: A*B Dgold = alpha*np.dot(A,B) + beta*C D = blas_dgemm(0, 0, alpha, A, B, beta, C) assert_almost_equal(Dgold, D) # Test: A^t B^t Dgold = alpha*np.dot(A.T,B.T) + beta*C2 D = blas_dgemm(1, 1, alpha, A, B, beta, C2) assert_almost_equal(Dgold, D) def test_dsymm(): A = np.random.rand(n1,n1) A = A + A.T B = np.random.rand(n1,n2) C = np.random.rand(n1,n2) B2 = np.random.rand(n2,n1) C2 = np.random.rand(n2,n1) alpha = np.double(np.random.rand(1)) beta = np.double(np.random.rand(1)) # Test: A*B Dgold = alpha*np.dot(A,B) + beta*C D = blas_dsymm(0, 0, alpha, A, B, beta, C) assert_almost_equal(Dgold, D) D = blas_dsymm(0, 1, alpha, A, B, beta, C) assert_almost_equal(Dgold, D) # Test: B*A Dgold = alpha*np.dot(B2,A) + beta*C2 D = blas_dsymm(1, 0, alpha, A, B2, beta, C2) assert_almost_equal(Dgold, D) D = blas_dsymm(1, 1, alpha, A, B2, beta, C2) assert_almost_equal(Dgold, D) def _test_dtrXm(A, U, L, B, alpha, blasfn): # Test: U*B Dgold = alpha*np.dot(U,B) D = blasfn(0, 0, 0, 0, alpha, A, B) assert_almost_equal(Dgold, D) # Test: B*U Dgold = alpha*np.dot(B,U) D = blasfn(1, 0, 0, 0, alpha, A, B) assert_almost_equal(Dgold, D) # Test: U'*B Dgold = alpha*np.dot(U.T,B) D = blasfn(0, 0, 1, 0, alpha, A, B) assert_almost_equal(Dgold, D) # Test: B*U' Dgold = alpha*np.dot(B,U.T) D = blasfn(1, 0, 1, 0, alpha, A, B) assert_almost_equal(Dgold, D) # Test: L*B Dgold = alpha*np.dot(L,B) D = blasfn(0, 1, 0, 0, alpha, A, B) assert_almost_equal(Dgold, D) # Test: B*L Dgold = alpha*np.dot(B,L) D = blasfn(1, 1, 0, 0, alpha, A, B) assert_almost_equal(Dgold, D) # Test: L'*B Dgold = alpha*np.dot(L.T,B) D = blasfn(0, 1, 1, 0, alpha, A, B) assert_almost_equal(Dgold, D) # Test: B*L' Dgold = alpha*np.dot(B,L.T) D = blasfn(1, 1, 1, 0, alpha, A, B) assert_almost_equal(Dgold, D) # Test: U*B Dgold = alpha*np.dot(U,B) D = blasfn(0, 0, 0, 0, alpha, A, B) assert_almost_equal(Dgold, D) # Test: B*U Dgold = alpha*np.dot(B,U) D = blasfn(1, 0, 0, 0, alpha, A, B) assert_almost_equal(Dgold, D) # Test: U'*B Dgold = alpha*np.dot(U.T,B) D = blasfn(0, 0, 1, 0, alpha, A, B) assert_almost_equal(Dgold, D) # Test: B*U' Dgold = alpha*np.dot(B,U.T) D = blasfn(1, 0, 1, 0, alpha, A, B) assert_almost_equal(Dgold, D) # Test: L*B Dgold = alpha*np.dot(L,B) D = blasfn(0, 1, 0, 0, alpha, A, B) assert_almost_equal(Dgold, D) # Test: B*L Dgold = alpha*np.dot(B,L) D = blasfn(1, 1, 0, 0, alpha, A, B) assert_almost_equal(Dgold, D) # Test: L'*B Dgold = alpha*np.dot(L.T,B) D = blasfn(0, 1, 1, 0, alpha, A, B) assert_almost_equal(Dgold, D) # Test: B*L' Dgold = alpha*np.dot(B,L.T) D = blasfn(1, 1, 1, 0, alpha, A, B) assert_almost_equal(Dgold, D) def test_dtrmm(): A = np.random.rand(n1,n1) U = np.triu(A) L = np.tril(A) B = np.random.rand(n1,n1) alpha = np.double(np.random.rand(1)) _test_dtrXm(A, U, L, B, alpha, blas_dtrmm) def test_dtrsm(): A = np.random.rand(n1,n1) U = np.linalg.inv(np.triu(A)) L = np.linalg.inv(np.tril(A)) B = np.random.rand(n1,n1) alpha = np.double(np.random.rand(1)) _test_dtrXm(A, U, L, B, alpha, blas_dtrsm) def test_dsyrk(): A = np.random.rand(n1,n1) C = np.random.rand(n1,n1) alpha = np.double(np.random.rand(1)) beta = np.double(np.random.rand(1)) # Test A*A' U = np.triu(blas_dsyrk(0, 0, alpha, A, beta, C)) L = np.tril(blas_dsyrk(1, 0, alpha, A, beta, C)) Dgold = 
alpha*np.dot(A, A.T) + beta*C Ugold = np.triu(Dgold) Lgold = np.tril(Dgold) assert_almost_equal(Ugold, U) assert_almost_equal(Lgold, L) # Test A'*A U = np.triu(blas_dsyrk(0, 1, alpha, A, beta, C)) L = np.tril(blas_dsyrk(1, 1, alpha, A, beta, C)) Dgold = alpha*np.dot(A.T, A) + beta*C Ugold = np.triu(Dgold) Lgold = np.tril(Dgold) assert_almost_equal(Ugold, U) assert_almost_equal(Lgold, L) def test_dsyr2k(): A = np.random.rand(n1,n1) B = np.random.rand(n1,n1) C = np.random.rand(n1,n1) alpha = np.double(np.random.rand(1)) beta = np.double(np.random.rand(1)) # Test A*B' + B*A' U = np.triu(blas_dsyr2k(0, 0, alpha, A, B, beta, C)) L = np.tril(blas_dsyr2k(1, 0, alpha, A, B, beta, C)) Dgold = alpha*(np.dot(A,B.T) + np.dot(B,A.T)) + beta*C Ugold = np.triu(Dgold) Lgold = np.tril(Dgold) assert_almost_equal(Ugold, U) assert_almost_equal(Lgold, L) # Test A'*B + B'*A U = np.triu(blas_dsyr2k(0, 1, alpha, A, B, beta, C)) L = np.tril(blas_dsyr2k(1, 1, alpha, A, B, beta, C)) Dgold = alpha*(np.dot(A.T,B) + np.dot(B.T,A)) + beta*C Ugold = np.triu(Dgold) Lgold = np.tril(Dgold) assert_almost_equal(Ugold, U) assert_almost_equal(Lgold, L) if __name__ == "__main__": import nose nose.run(argv=['', __file__]) nipy-0.3.0/nipy/labs/bindings/tests/test_linalg.py000066400000000000000000000015101210344137400222030ustar00rootroot00000000000000#!/usr/bin/env python # # Test fff linear algebra routines # from numpy.testing import assert_equal import numpy as np from .. import vector_get, vector_set n = 15 def test_vector_get(): x = np.random.rand(n) i = np.random.randint(n) xi = vector_get(x, i) assert_equal(xi, x[i]) def test_vector_get_int32(): x = (100*np.random.rand(n)).astype('int32') i = np.random.randint(n) xi = vector_get(x, i) assert_equal(xi, x[i]) def test_vector_set(): x = np.random.rand(n) i = np.random.randint(n) y = vector_set(x, i, 3) assert_equal(3, y[i]) def test_vector_set_int32(): x = (100*np.random.rand(n)).astype('int32') i = np.random.randint(n) y = vector_set(x, i, 3) assert_equal(3, y[i]) if __name__ == "__main__": import nose nose.run(argv=['', __file__]) nipy-0.3.0/nipy/labs/bindings/tests/test_numpy.py000066400000000000000000000144411210344137400221140ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: # Test numpy bindings import numpy as np from .. import (c_types, fff_type, npy_type, copy_vector, pass_matrix, pass_vector, pass_array, pass_vector_via_iterator, sum_via_iterators, copy_via_iterators) from nose.tools import assert_equal from numpy.testing import assert_almost_equal, assert_array_equal MAX_TEST_SIZE = 30 def random_shape(size): """ Output random dimensions in the range (2, MAX_TEST_SIZE) """ aux = np.random.randint(MAX_TEST_SIZE-1, size=size) + 2 if size==1: return aux else: return tuple(aux) # # Test type conversions # def test_type_conversions_to_fff(): # use np.sctypes for testing numpy types, np.typeDict.values # contains a lot of duplicates. There are 140 values in # np.typeDict, but only 21 unique numpy types. But only 11 fff # types in c_types. 
    for type_key in np.sctypes:
        for npy_t in np.sctypes[type_key]:
            t, nbytes = fff_type(np.dtype(npy_t))
            if not t == 'unknown type':
                yield assert_equal, nbytes, np.dtype(npy_t).itemsize

def test_type_conversions_in_C():
    for t in c_types:
        npy_t, nbytes = npy_type(t)
        yield assert_equal, npy_t, t

#
# Test bindings
#

def _test_copy_vector(x):
    # use fff
    y0 = copy_vector(x, 0)
    # use numpy
    y1 = copy_vector(x, 1)
    yield assert_equal, y0, x
    yield assert_equal, y1, x

def test_copy_vector_contiguous():
    x = (1000*np.random.rand(1e6)).astype('int32')
    _test_copy_vector(x)

def test_copy_vector_strided():
    x0 = (1000*np.random.rand(2e6)).astype('int32')
    x = x0[::2]
    _test_copy_vector(x)

"""
def test_copy_vector_int32():
    x = np.random.rand(1e6).astype('int32')
    print('int32 buffer copy')
    _test_copy_vector(x)

def test_copy_vector_uint8():
    x = np.random.rand(1e6).astype('uint8')
    print('uint8 buffer copy')
    _test_copy_vector(x)
"""

def _test_pass_vector(x):
    y = pass_vector(x)
    assert_array_equal(y, x)

def test_pass_vector():
    x = np.random.rand(random_shape(1))-.5
    _test_pass_vector(x)

def test_pass_vector_int32():
    x = (1000*(np.random.rand(random_shape(1))-.5)).astype('int32')
    _test_pass_vector(x)

def test_pass_vector_uint8():
    x = (256*(np.random.rand(random_shape(1)))).astype('uint8')
    _test_pass_vector(x)

def _test_pass_matrix(x):
    y = pass_matrix(x)
    yield assert_equal, y, x
    y = pass_matrix(x.T)
    yield assert_equal, y, x.T

def test_pass_matrix():
    d0, d1 = random_shape(2)
    x = np.random.rand(d0, d1)-.5
    _test_pass_matrix(x)

def test_pass_matrix_int32():
    d0, d1 = random_shape(2)
    x = (1000*(np.random.rand(d0, d1)-.5)).astype('int32')
    _test_pass_matrix(x)

def test_pass_matrix_uint8():
    d0, d1 = random_shape(2)
    x = (256*(np.random.rand(d0, d1))).astype('uint8')
    _test_pass_matrix(x)

def _test_pass_array(x):
    y = pass_array(x)
    yield assert_equal, y, x
    y = pass_array(x.T)
    yield assert_equal, y, x.T

def test_pass_array():
    d0, d1, d2, d3 = random_shape(4)
    x = np.random.rand(d0, d1, d2, d3)-.5
    _test_pass_array(x)

def test_pass_array_int32():
    d0, d1, d2, d3 = random_shape(4)
    x = (1000*(np.random.rand(d0, d1, d2, d3)-.5)).astype('int32')
    _test_pass_array(x)

def test_pass_array_uint8():
    d0, d1, d2, d3 = random_shape(4)
    x = (256*(np.random.rand(d0, d1, d2, d3))).astype('uint8')
    _test_pass_array(x)

#
# Multi-iterator testing
#

def _test_pass_vector_via_iterator(X, pos=0):
    """
    Assume X.ndim == 2
    """
    # axis == 0
    x = pass_vector_via_iterator(X, axis=0, niters=pos)
    yield assert_equal, x, X[:, pos]
    # axis == 1
    x = pass_vector_via_iterator(X, axis=1, niters=pos)
    yield assert_equal, x, X[pos, :]

def test_pass_vector_via_iterator():
    d0, d1 = random_shape(2)
    X = np.random.rand(d0, d1)-.5
    _test_pass_vector_via_iterator(X)

def test_pass_vector_via_iterator_int32():
    d0, d1 = random_shape(2)
    X = (1000*(np.random.rand(d0, d1)-.5)).astype('int32')
    _test_pass_vector_via_iterator(X)

def test_pass_vector_via_iterator_uint8():
    d0, d1 = random_shape(2)
    X = (100*(np.random.rand(d0, d1))).astype('uint8')
    _test_pass_vector_via_iterator(X)

def test_pass_vector_via_iterator_shift():
    d0, d1 = random_shape(2)
    X = np.random.rand(d0, d1)-.5
    _test_pass_vector_via_iterator(X, pos=1)

def test_pass_vector_via_iterator_shift_int32():
    d0, d1 = random_shape(2)
    X = (1000*(np.random.rand(d0, d1)-.5)).astype('int32')
    _test_pass_vector_via_iterator(X, pos=1)

def test_pass_vector_via_iterator_shift_uint8():
    d0, d1 = random_shape(2)
    X = (100*(np.random.rand(d0, d1))).astype('uint8')
    _test_pass_vector_via_iterator(X, pos=1)

def _test_copy_via_iterators(Y):
    for axis in range(4):
        Z = copy_via_iterators(Y, axis)
        yield assert_equal, Z, Y
        ZT = copy_via_iterators(Y.T, axis)
        yield assert_equal, ZT, Y.T

def test_copy_via_iterators():
    d0, d1, d2, d3 = random_shape(4)
    Y = np.random.rand(d0, d1, d2, d3)
    _test_copy_via_iterators(Y)

def test_copy_via_iterators_int32():
    d0, d1, d2, d3 = random_shape(4)
    Y = (1000*(np.random.rand(d0, d1, d2, d3)-.5)).astype('int32')
    _test_copy_via_iterators(Y)

def test_copy_via_iterators_uint8():
    d0, d1, d2, d3 = random_shape(4)
    Y = (256*(np.random.rand(d0, d1, d2, d3))).astype('uint8')
    _test_copy_via_iterators(Y)

def _test_sum_via_iterators(Y):
    for axis in range(4):
        Z = sum_via_iterators(Y, axis)
        yield assert_almost_equal, Z, Y.sum(axis)
        ZT = sum_via_iterators(Y.T, axis)
        yield assert_almost_equal, ZT, Y.T.sum(axis)

def test_sum_via_iterators():
    d0, d1, d2, d3 = random_shape(4)
    Y = np.random.rand(d0, d1, d2, d3)
    _test_sum_via_iterators(Y)

def test_sum_via_iterators_int32():
    d0, d1, d2, d3 = random_shape(4)
    Y = (1000*(np.random.rand(d0, d1, d2, d3)-.5)).astype('int32')
    _test_sum_via_iterators(Y)

def test_sum_via_iterators_uint8():
    d0, d1, d2, d3 = random_shape(4)
    Y = (256*(np.random.rand(d0, d1, d2, d3))).astype('uint8')
    _test_sum_via_iterators(Y)

if __name__ == "__main__":
    import nose
    nose.run(argv=['', __file__])
nipy-0.3.0/nipy/labs/bindings/wrapper.c000066400000000000000000010740401210344137400200170ustar00rootroot00000000000000/* Generated by Cython 0.17.4 on Sat Jan 12 17:27:34 2013 */
#define PY_SSIZE_T_CLEAN
#include "Python.h"
#ifndef Py_PYTHON_H
#error Python headers needed to compile C extensions, please install development version of Python.
#elif PY_VERSION_HEX < 0x02040000
#error Cython requires Python 2.4+.
#else
#include <stddef.h> /* For offsetof */
#ifndef offsetof
#define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
#endif
#if !defined(WIN32) && !defined(MS_WINDOWS)
#ifndef __stdcall
#define __stdcall
#endif
#ifndef __cdecl
#define __cdecl
#endif
#ifndef __fastcall
#define __fastcall
#endif
#endif
#ifndef DL_IMPORT
#define DL_IMPORT(t) t
#endif
#ifndef DL_EXPORT
#define DL_EXPORT(t) t
#endif
#ifndef PY_LONG_LONG
#define PY_LONG_LONG LONG_LONG
#endif
#ifndef Py_HUGE_VAL
#define Py_HUGE_VAL HUGE_VAL
#endif
#ifdef PYPY_VERSION
#define CYTHON_COMPILING_IN_PYPY 1
#define CYTHON_COMPILING_IN_CPYTHON 0
#else
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_CPYTHON 1
#endif
#if PY_VERSION_HEX < 0x02050000
typedef int Py_ssize_t;
#define PY_SSIZE_T_MAX INT_MAX
#define PY_SSIZE_T_MIN INT_MIN
#define PY_FORMAT_SIZE_T ""
#define CYTHON_FORMAT_SSIZE_T ""
#define PyInt_FromSsize_t(z) PyInt_FromLong(z)
#define PyInt_AsSsize_t(o) __Pyx_PyInt_AsInt(o)
#define PyNumber_Index(o) ((PyNumber_Check(o) && !PyFloat_Check(o)) ?
PyNumber_Int(o) : \ (PyErr_Format(PyExc_TypeError, \ "expected index value, got %.200s", Py_TYPE(o)->tp_name), \ (PyObject*)0)) #define __Pyx_PyIndex_Check(o) (PyNumber_Check(o) && !PyFloat_Check(o) && \ !PyComplex_Check(o)) #define PyIndex_Check __Pyx_PyIndex_Check #define PyErr_WarnEx(category, message, stacklevel) PyErr_Warn(category, message) #define __PYX_BUILD_PY_SSIZE_T "i" #else #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #define __Pyx_PyIndex_Check PyIndex_Check #endif #if PY_VERSION_HEX < 0x02060000 #define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt) #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type) #define Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size) #define PyVarObject_HEAD_INIT(type, size) \ PyObject_HEAD_INIT(type) size, #define PyType_Modified(t) typedef struct { void *buf; PyObject *obj; Py_ssize_t len; Py_ssize_t itemsize; int readonly; int ndim; char *format; Py_ssize_t *shape; Py_ssize_t *strides; Py_ssize_t *suboffsets; void *internal; } Py_buffer; #define PyBUF_SIMPLE 0 #define PyBUF_WRITABLE 0x0001 #define PyBUF_FORMAT 0x0004 #define PyBUF_ND 0x0008 #define PyBUF_STRIDES (0x0010 | PyBUF_ND) #define PyBUF_C_CONTIGUOUS (0x0020 | PyBUF_STRIDES) #define PyBUF_F_CONTIGUOUS (0x0040 | PyBUF_STRIDES) #define PyBUF_ANY_CONTIGUOUS (0x0080 | PyBUF_STRIDES) #define PyBUF_INDIRECT (0x0100 | PyBUF_STRIDES) #define PyBUF_RECORDS (PyBUF_STRIDES | PyBUF_FORMAT | PyBUF_WRITABLE) #define PyBUF_FULL (PyBUF_INDIRECT | PyBUF_FORMAT | PyBUF_WRITABLE) typedef int (*getbufferproc)(PyObject *, Py_buffer *, int); typedef void (*releasebufferproc)(PyObject *, Py_buffer *); #endif #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \ PyCode_New(a, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #endif #if PY_MAJOR_VERSION < 3 && PY_MINOR_VERSION < 6 #define PyUnicode_FromString(s) PyUnicode_Decode(s, strlen(s), "UTF-8", "strict") #endif #if PY_MAJOR_VERSION >= 3 #define Py_TPFLAGS_CHECKTYPES 0 #define Py_TPFLAGS_HAVE_INDEX 0 #endif #if (PY_VERSION_HEX < 0x02060000) || (PY_MAJOR_VERSION >= 3) #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ? 
\ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #else #define CYTHON_PEP393_ENABLED 0 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_READ(k, d, i) ((k=k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #endif #if PY_VERSION_HEX < 0x02060000 #define PyBytesObject PyStringObject #define PyBytes_Type PyString_Type #define PyBytes_Check PyString_Check #define PyBytes_CheckExact PyString_CheckExact #define PyBytes_FromString PyString_FromString #define PyBytes_FromStringAndSize PyString_FromStringAndSize #define PyBytes_FromFormat PyString_FromFormat #define PyBytes_DecodeEscape PyString_DecodeEscape #define PyBytes_AsString PyString_AsString #define PyBytes_AsStringAndSize PyString_AsStringAndSize #define PyBytes_Size PyString_Size #define PyBytes_AS_STRING PyString_AS_STRING #define PyBytes_GET_SIZE PyString_GET_SIZE #define PyBytes_Repr PyString_Repr #define PyBytes_Concat PyString_Concat #define PyBytes_ConcatAndDel PyString_ConcatAndDel #endif #if PY_VERSION_HEX < 0x02060000 #define PySet_Check(obj) PyObject_TypeCheck(obj, &PySet_Type) #define PyFrozenSet_Check(obj) PyObject_TypeCheck(obj, &PyFrozenSet_Type) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_VERSION_HEX < 0x03020000 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if (PY_MAJOR_VERSION < 3) || (PY_VERSION_HEX >= 0x03010300) #define __Pyx_PySequence_GetSlice(obj, a, b) PySequence_GetSlice(obj, a, b) #define __Pyx_PySequence_SetSlice(obj, a, b, value) PySequence_SetSlice(obj, a, b, value) #define __Pyx_PySequence_DelSlice(obj, a, b) PySequence_DelSlice(obj, a, b) #else #define __Pyx_PySequence_GetSlice(obj, a, b) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), (PyObject*)0) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_GetSlice(obj, a, b)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object is unsliceable", (obj)->ob_type->tp_name), (PyObject*)0))) #define __Pyx_PySequence_SetSlice(obj, a, b, value) (unlikely(!(obj)) ? 
\ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_SetSlice(obj, a, b, value)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice assignment", (obj)->ob_type->tp_name), -1))) #define __Pyx_PySequence_DelSlice(obj, a, b) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_DelSlice(obj, a, b)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice deletion", (obj)->ob_type->tp_name), -1))) #endif #if PY_MAJOR_VERSION >= 3 #define PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func)) #endif #if PY_VERSION_HEX < 0x02050000 #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),((char *)(n))) #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),((char *)(n)),(a)) #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),((char *)(n))) #else #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),(n)) #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),(n),(a)) #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),(n)) #endif #if PY_VERSION_HEX < 0x02050000 #define __Pyx_NAMESTR(n) ((char *)(n)) #define __Pyx_DOCSTR(n) ((char *)(n)) #else #define __Pyx_NAMESTR(n) (n) #define __Pyx_DOCSTR(n) (n) #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include #define __PYX_HAVE__nipy__labs__bindings__wrapper #define __PYX_HAVE_API__nipy__labs__bindings__wrapper #include "stdio.h" #include "stdlib.h" #include "numpy/arrayobject.h" #include "numpy/ufuncobject.h" #include "fff_base.h" #include "fff_vector.h" #include "fff_matrix.h" #include "fff_array.h" #include "fffpy.h" #ifdef _OPENMP #include #endif /* _OPENMP */ #ifdef PYREX_WITHOUT_ASSERTIONS #define CYTHON_WITHOUT_ASSERTIONS #endif /* inline attribute */ #ifndef CYTHON_INLINE #if defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif /* unused attribute */ #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif typedef struct {PyObject **p; char *s; const long n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; /*proto*/ /* Type Conversion Predeclarations */ #define __Pyx_PyBytes_FromUString(s) PyBytes_FromString((char*)s) #define __Pyx_PyBytes_AsUString(s) ((unsigned char*) PyBytes_AsString(s)) #define __Pyx_Owned_Py_None(b) (Py_INCREF(Py_None), Py_None) #define __Pyx_PyBool_FromLong(b) ((b) ? 
(Py_INCREF(Py_True), Py_True) : (Py_INCREF(Py_False), Py_False)) static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x); static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject*); #if CYTHON_COMPILING_IN_CPYTHON #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #ifdef __GNUC__ /* Test for GCC > 2.95 */ #if __GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* __GNUC__ > 2 ... */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ > 2 ... */ #else /* __GNUC__ */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static PyObject *__pyx_m; static PyObject *__pyx_b; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= __FILE__; static const char *__pyx_filename; #if !defined(CYTHON_CCOMPLEX) #if defined(__cplusplus) #define CYTHON_CCOMPLEX 1 #elif defined(_Complex_I) #define CYTHON_CCOMPLEX 1 #else #define CYTHON_CCOMPLEX 0 #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus #include #else #include #endif #endif #if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__) #undef _Complex_I #define _Complex_I 1.0fj #endif static const char *__pyx_f[] = { "wrapper.pyx", "numpy.pxd", "type.pxd", }; /* "numpy.pxd":723 * # in Cython to enable them only on the right systems. 
* * ctypedef npy_int8 int8_t # <<<<<<<<<<<<<< * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t */ typedef npy_int8 __pyx_t_5numpy_int8_t; /* "numpy.pxd":724 * * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t # <<<<<<<<<<<<<< * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t */ typedef npy_int16 __pyx_t_5numpy_int16_t; /* "numpy.pxd":725 * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t # <<<<<<<<<<<<<< * ctypedef npy_int64 int64_t * #ctypedef npy_int96 int96_t */ typedef npy_int32 __pyx_t_5numpy_int32_t; /* "numpy.pxd":726 * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t # <<<<<<<<<<<<<< * #ctypedef npy_int96 int96_t * #ctypedef npy_int128 int128_t */ typedef npy_int64 __pyx_t_5numpy_int64_t; /* "numpy.pxd":730 * #ctypedef npy_int128 int128_t * * ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<< * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t */ typedef npy_uint8 __pyx_t_5numpy_uint8_t; /* "numpy.pxd":731 * * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<< * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t */ typedef npy_uint16 __pyx_t_5numpy_uint16_t; /* "numpy.pxd":732 * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<< * ctypedef npy_uint64 uint64_t * #ctypedef npy_uint96 uint96_t */ typedef npy_uint32 __pyx_t_5numpy_uint32_t; /* "numpy.pxd":733 * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<< * #ctypedef npy_uint96 uint96_t * #ctypedef npy_uint128 uint128_t */ typedef npy_uint64 __pyx_t_5numpy_uint64_t; /* "numpy.pxd":737 * #ctypedef npy_uint128 uint128_t * * ctypedef npy_float32 float32_t # <<<<<<<<<<<<<< * ctypedef npy_float64 float64_t * #ctypedef npy_float80 float80_t */ typedef npy_float32 __pyx_t_5numpy_float32_t; /* "numpy.pxd":738 * * ctypedef npy_float32 float32_t * ctypedef npy_float64 float64_t # <<<<<<<<<<<<<< * #ctypedef npy_float80 float80_t * #ctypedef npy_float128 float128_t */ typedef npy_float64 __pyx_t_5numpy_float64_t; /* "numpy.pxd":747 * # The int types are mapped a bit surprising -- * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t # <<<<<<<<<<<<<< * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t */ typedef npy_long __pyx_t_5numpy_int_t; /* "numpy.pxd":748 * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t * ctypedef npy_longlong long_t # <<<<<<<<<<<<<< * ctypedef npy_longlong longlong_t * */ typedef npy_longlong __pyx_t_5numpy_long_t; /* "numpy.pxd":749 * ctypedef npy_long int_t * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<< * * ctypedef npy_ulong uint_t */ typedef npy_longlong __pyx_t_5numpy_longlong_t; /* "numpy.pxd":751 * ctypedef npy_longlong longlong_t * * ctypedef npy_ulong uint_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t */ typedef npy_ulong __pyx_t_5numpy_uint_t; /* "numpy.pxd":752 * * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulonglong_t * */ typedef npy_ulonglong __pyx_t_5numpy_ulong_t; /* "numpy.pxd":753 * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<< * * ctypedef npy_intp intp_t */ typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t; /* "numpy.pxd":755 * ctypedef npy_ulonglong ulonglong_t * * ctypedef npy_intp intp_t # 
<<<<<<<<<<<<<< * ctypedef npy_uintp uintp_t * */ typedef npy_intp __pyx_t_5numpy_intp_t; /* "numpy.pxd":756 * * ctypedef npy_intp intp_t * ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<< * * ctypedef npy_double float_t */ typedef npy_uintp __pyx_t_5numpy_uintp_t; /* "numpy.pxd":758 * ctypedef npy_uintp uintp_t * * ctypedef npy_double float_t # <<<<<<<<<<<<<< * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t */ typedef npy_double __pyx_t_5numpy_float_t; /* "numpy.pxd":759 * * ctypedef npy_double float_t * ctypedef npy_double double_t # <<<<<<<<<<<<<< * ctypedef npy_longdouble longdouble_t * */ typedef npy_double __pyx_t_5numpy_double_t; /* "numpy.pxd":760 * ctypedef npy_double float_t * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cfloat cfloat_t */ typedef npy_longdouble __pyx_t_5numpy_longdouble_t; /* "fff.pxd":9 * * # Redefine size_t * ctypedef unsigned long int size_t # <<<<<<<<<<<<<< * * */ typedef unsigned long __pyx_t_3fff_size_t; #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< float > __pyx_t_float_complex; #else typedef float _Complex __pyx_t_float_complex; #endif #else typedef struct { float real, imag; } __pyx_t_float_complex; #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< double > __pyx_t_double_complex; #else typedef double _Complex __pyx_t_double_complex; #endif #else typedef struct { double real, imag; } __pyx_t_double_complex; #endif /*--- Type declarations ---*/ /* "numpy.pxd":762 * ctypedef npy_longdouble longdouble_t * * ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<< * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t */ typedef npy_cfloat __pyx_t_5numpy_cfloat_t; /* "numpy.pxd":763 * * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<< * ctypedef npy_clongdouble clongdouble_t * */ typedef npy_cdouble __pyx_t_5numpy_cdouble_t; /* "numpy.pxd":764 * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cdouble complex_t */ typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t; /* "numpy.pxd":766 * ctypedef npy_clongdouble clongdouble_t * * ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew1(a): */ typedef npy_cdouble __pyx_t_5numpy_complex_t; #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); /*proto*/ #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define __Pyx_RefNannySetupContext(name, acquire_gil) \ if (acquire_gil) { \ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \ PyGILState_Release(__pyx_gilstate_save); \ } else { \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil) \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext() \ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define 
__Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif /* CYTHON_REFNANNY */ #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name); /*proto*/ static int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, const char *name, int exact); /*proto*/ static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /*proto*/ static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /*proto*/ static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[], \ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, \ const char* function_name); /*proto*/ static CYTHON_INLINE int __Pyx_div_int(int, int); /* proto */ #define UNARY_NEG_WOULD_OVERFLOW(x) (((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x))) #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); if (likely(L->allocated > len)) { Py_INCREF(x); PyList_SET_ITEM(list, len, x); Py_SIZE(list) = len+1; return 0; } return PyList_Append(list, x); } #else #define __Pyx_PyList_Append(L,x) PyList_Append(L,x) #endif #define __Pyx_SetItemInt(o, i, v, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? \ __Pyx_SetItemInt_Fast(o, i, v) : \ __Pyx_SetItemInt_Generic(o, to_py_func(i), v)) static CYTHON_INLINE int __Pyx_SetItemInt_Generic(PyObject *o, PyObject *j, PyObject *v) { int r; if (!j) return -1; r = PyObject_SetItem(o, j, v); Py_DECREF(j); return r; } static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObject *v) { #if CYTHON_COMPILING_IN_CPYTHON if (PyList_CheckExact(o)) { Py_ssize_t n = (likely(i >= 0)) ? 
i : i + PyList_GET_SIZE(o); if (likely((n >= 0) & (n < PyList_GET_SIZE(o)))) { PyObject* old = PyList_GET_ITEM(o, n); Py_INCREF(v); PyList_SET_ITEM(o, n, v); Py_DECREF(old); return 1; } } else { /* inlined PySequence_SetItem() */ PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; if (likely(m && m->sq_ass_item)) { if (unlikely(i < 0) && likely(m->sq_length)) { Py_ssize_t l = m->sq_length(o); if (unlikely(l < 0)) return -1; i += l; } return m->sq_ass_item(o, i, v); } } #else #if CYTHON_COMPILING_IN_PYPY if (PySequence_Check(o) && !PyDict_Check(o)) { #else if (PySequence_Check(o)) { #endif return PySequence_SetItem(o, i, v); } #endif return __Pyx_SetItemInt_Generic(o, PyInt_FromSsize_t(i), v); } static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb); /*proto*/ static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb); /*proto*/ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /*proto*/ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); static CYTHON_INLINE int __Pyx_IterFinish(void); /*proto*/ static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected); /*proto*/ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /*proto*/ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level); /*proto*/ static CYTHON_INLINE PyObject *__Pyx_PyInt_to_py_Py_intptr_t(Py_intptr_t); #if CYTHON_CCOMPLEX #ifdef __cplusplus #define __Pyx_CREAL(z) ((z).real()) #define __Pyx_CIMAG(z) ((z).imag()) #else #define __Pyx_CREAL(z) (__real__(z)) #define __Pyx_CIMAG(z) (__imag__(z)) #endif #else #define __Pyx_CREAL(z) ((z).real) #define __Pyx_CIMAG(z) ((z).imag) #endif #if defined(_WIN32) && defined(__cplusplus) && CYTHON_CCOMPLEX #define __Pyx_SET_CREAL(z,x) ((z).real(x)) #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) #else #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) #endif static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float); #if CYTHON_CCOMPLEX #define __Pyx_c_eqf(a, b) ((a)==(b)) #define __Pyx_c_sumf(a, b) ((a)+(b)) #define __Pyx_c_difff(a, b) ((a)-(b)) #define __Pyx_c_prodf(a, b) ((a)*(b)) #define __Pyx_c_quotf(a, b) ((a)/(b)) #define __Pyx_c_negf(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zerof(z) ((z)==(float)0) #define __Pyx_c_conjf(z) (::std::conj(z)) #if 1 #define __Pyx_c_absf(z) (::std::abs(z)) #define __Pyx_c_powf(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zerof(z) ((z)==0) #define __Pyx_c_conjf(z) (conjf(z)) #if 1 #define __Pyx_c_absf(z) (cabsf(z)) #define __Pyx_c_powf(a, b) (cpowf(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex); static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex); static CYTHON_INLINE 
__pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex); #if 1 static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex, __pyx_t_float_complex); #endif #endif static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double); #if CYTHON_CCOMPLEX #define __Pyx_c_eq(a, b) ((a)==(b)) #define __Pyx_c_sum(a, b) ((a)+(b)) #define __Pyx_c_diff(a, b) ((a)-(b)) #define __Pyx_c_prod(a, b) ((a)*(b)) #define __Pyx_c_quot(a, b) ((a)/(b)) #define __Pyx_c_neg(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zero(z) ((z)==(double)0) #define __Pyx_c_conj(z) (::std::conj(z)) #if 1 #define __Pyx_c_abs(z) (::std::abs(z)) #define __Pyx_c_pow(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zero(z) ((z)==0) #define __Pyx_c_conj(z) (conj(z)) #if 1 #define __Pyx_c_abs(z) (cabs(z)) #define __Pyx_c_pow(a, b) (cpow(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex); static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex); #if 1 static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex, __pyx_t_double_complex); #endif #endif static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject *); static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject *); static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject *); static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject *); static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject *); static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject *); static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject *); static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject *); static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject *); static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject *); static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject *); static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject *); static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject *); static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject *); static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject *); static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject *); static int __Pyx_check_binary_version(void); #if !defined(__Pyx_PyIdentifier_FromString) #if PY_MAJOR_VERSION < 3 #define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s) #else #define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s) #endif #endif static PyObject *__Pyx_ImportModule(const char *name); /*proto*/ static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict); /*proto*/ typedef struct { int code_line; PyCodeObject* code_object; } __Pyx_CodeObjectCacheEntry; struct 
__Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); /*proto*/ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/ /* Module declarations from 'cpython.buffer' */ /* Module declarations from 'cpython.ref' */ /* Module declarations from 'libc.stdio' */ /* Module declarations from 'cpython.object' */ /* Module declarations from '__builtin__' */ /* Module declarations from 'cpython.type' */ static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0; /* Module declarations from 'libc.stdlib' */ /* Module declarations from 'numpy' */ /* Module declarations from 'numpy' */ static PyTypeObject *__pyx_ptype_5numpy_dtype = 0; static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0; static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0; static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0; static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0; static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/ /* Module declarations from 'fff' */ /* Module declarations from 'nipy.labs.bindings.wrapper' */ #define __Pyx_MODULE_NAME "nipy.labs.bindings.wrapper" int __pyx_module_is_main_nipy__labs__bindings__wrapper = 0; /* Implementation of 'nipy.labs.bindings.wrapper' */ static PyObject *__pyx_builtin_range; static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_RuntimeError; static PyObject *__pyx_pf_4nipy_4labs_8bindings_7wrapper_fff_type(CYTHON_UNUSED PyObject *__pyx_self, PyArray_Descr *__pyx_v_T); /* proto */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_7wrapper_2npy_type(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_T); /* proto */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_7wrapper_4pass_vector(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_X); /* proto */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_7wrapper_6copy_vector(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_X, int __pyx_v_flag); /* proto */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_7wrapper_8pass_matrix(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_X); /* proto */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_7wrapper_10pass_array(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_X); /* proto */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_7wrapper_12pass_vector_via_iterator(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_X, int __pyx_v_axis, int __pyx_v_niters); /* proto */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_7wrapper_14copy_via_iterators(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_Y, int __pyx_v_axis); /* proto */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_7wrapper_16sum_via_iterators(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_Y, int __pyx_v_axis); /* proto */ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */ static char __pyx_k_1[] = "ndarray is not C contiguous"; static char __pyx_k_3[] = 
"ndarray is not Fortran contiguous"; static char __pyx_k_5[] = "Non-native byte order not supported"; static char __pyx_k_7[] = "unknown dtype code in numpy.pxd (%d)"; static char __pyx_k_8[] = "Format string allocated too short, see comment in numpy.pxd"; static char __pyx_k_11[] = "Format string allocated too short."; static char __pyx_k_13[] = "\nIterators for testing. \nAuthor: Alexis Roche, 2009.\n"; static char __pyx_k_14[] = "0.1"; static char __pyx_k_15[] = "unknown type"; static char __pyx_k_16[] = "unsigned char"; static char __pyx_k_17[] = "signed char"; static char __pyx_k_18[] = "unsigned short"; static char __pyx_k_19[] = "signed short"; static char __pyx_k_20[] = "unsigned int"; static char __pyx_k_21[] = "unsigned long"; static char __pyx_k_24[] = "/Users/mb312/dev_trees/nipy/nipy/labs/bindings/wrapper.pyx"; static char __pyx_k_25[] = "nipy.labs.bindings.wrapper"; static char __pyx_k_38[] = "pass_vector_via_iterator"; static char __pyx_k__B[] = "B"; static char __pyx_k__H[] = "H"; static char __pyx_k__I[] = "I"; static char __pyx_k__L[] = "L"; static char __pyx_k__O[] = "O"; static char __pyx_k__Q[] = "Q"; static char __pyx_k__T[] = "T"; static char __pyx_k__X[] = "X"; static char __pyx_k__Y[] = "Y"; static char __pyx_k__Z[] = "Z"; static char __pyx_k__b[] = "b"; static char __pyx_k__d[] = "d"; static char __pyx_k__f[] = "f"; static char __pyx_k__g[] = "g"; static char __pyx_k__h[] = "h"; static char __pyx_k__i[] = "i"; static char __pyx_k__l[] = "l"; static char __pyx_k__q[] = "q"; static char __pyx_k__x[] = "x"; static char __pyx_k__y[] = "y"; static char __pyx_k__z[] = "z"; static char __pyx_k__Zd[] = "Zd"; static char __pyx_k__Zf[] = "Zf"; static char __pyx_k__Zg[] = "Zg"; static char __pyx_k__np[] = "np"; static char __pyx_k__int[] = "int"; static char __pyx_k__Xdum[] = "Xdum"; static char __pyx_k__axis[] = "axis"; static char __pyx_k__copy[] = "copy"; static char __pyx_k__data[] = "data"; static char __pyx_k__dims[] = "dims"; static char __pyx_k__flag[] = "flag"; static char __pyx_k__long[] = "long"; static char __pyx_k__size[] = "size"; static char __pyx_k__type[] = "type"; static char __pyx_k__dtype[] = "dtype"; static char __pyx_k__fff_t[] = "fff_t"; static char __pyx_k__float[] = "float"; static char __pyx_k__index[] = "index"; static char __pyx_k__multi[] = "multi"; static char __pyx_k__npy_t[] = "npy_t"; static char __pyx_k__numpy[] = "numpy"; static char __pyx_k__range[] = "range"; static char __pyx_k__zeros[] = "zeros"; static char __pyx_k__double[] = "double"; static char __pyx_k__elsize[] = "elsize"; static char __pyx_k__nbytes[] = "nbytes"; static char __pyx_k__niters[] = "niters"; static char __pyx_k__stride[] = "stride"; static char __pyx_k__c_types[] = "c_types"; static char __pyx_k__squeeze[] = "squeeze"; static char __pyx_k____main__[] = "__main__"; static char __pyx_k____test__[] = "__test__"; static char __pyx_k__fff_type[] = "fff_type"; static char __pyx_k__itemsize[] = "itemsize"; static char __pyx_k__npy_type[] = "npy_type"; static char __pyx_k__fff_types[] = "fff_types"; static char __pyx_k__npy_types[] = "npy_types"; static char __pyx_k__relstride[] = "relstride"; static char __pyx_k__ValueError[] = "ValueError"; static char __pyx_k__pass_array[] = "pass_array"; static char __pyx_k__zeros_like[] = "zeros_like"; static char __pyx_k____version__[] = "__version__"; static char __pyx_k__copy_vector[] = "copy_vector"; static char __pyx_k__pass_matrix[] = "pass_matrix"; static char __pyx_k__pass_vector[] = "pass_vector"; static char 
__pyx_k__RuntimeError[] = "RuntimeError"; static char __pyx_k__sum_via_iterators[] = "sum_via_iterators"; static char __pyx_k__copy_via_iterators[] = "copy_via_iterators"; static PyObject *__pyx_kp_u_1; static PyObject *__pyx_kp_u_11; static PyObject *__pyx_kp_s_14; static PyObject *__pyx_kp_s_15; static PyObject *__pyx_kp_s_16; static PyObject *__pyx_kp_s_17; static PyObject *__pyx_kp_s_18; static PyObject *__pyx_kp_s_19; static PyObject *__pyx_kp_s_20; static PyObject *__pyx_kp_s_21; static PyObject *__pyx_kp_s_24; static PyObject *__pyx_n_s_25; static PyObject *__pyx_kp_u_3; static PyObject *__pyx_n_s_38; static PyObject *__pyx_kp_u_5; static PyObject *__pyx_kp_u_7; static PyObject *__pyx_kp_u_8; static PyObject *__pyx_n_s__RuntimeError; static PyObject *__pyx_n_s__T; static PyObject *__pyx_n_s__ValueError; static PyObject *__pyx_n_s__X; static PyObject *__pyx_n_s__Xdum; static PyObject *__pyx_n_s__Y; static PyObject *__pyx_n_s__Z; static PyObject *__pyx_n_s____main__; static PyObject *__pyx_n_s____test__; static PyObject *__pyx_n_s____version__; static PyObject *__pyx_n_s__axis; static PyObject *__pyx_n_s__c_types; static PyObject *__pyx_n_s__copy; static PyObject *__pyx_n_s__copy_vector; static PyObject *__pyx_n_s__copy_via_iterators; static PyObject *__pyx_n_s__data; static PyObject *__pyx_n_s__dims; static PyObject *__pyx_n_s__double; static PyObject *__pyx_n_s__dtype; static PyObject *__pyx_n_s__elsize; static PyObject *__pyx_n_s__fff_t; static PyObject *__pyx_n_s__fff_type; static PyObject *__pyx_n_s__fff_types; static PyObject *__pyx_n_s__flag; static PyObject *__pyx_n_s__float; static PyObject *__pyx_n_s__i; static PyObject *__pyx_n_s__index; static PyObject *__pyx_n_s__int; static PyObject *__pyx_n_s__itemsize; static PyObject *__pyx_n_s__long; static PyObject *__pyx_n_s__multi; static PyObject *__pyx_n_s__nbytes; static PyObject *__pyx_n_s__niters; static PyObject *__pyx_n_s__np; static PyObject *__pyx_n_s__npy_t; static PyObject *__pyx_n_s__npy_type; static PyObject *__pyx_n_s__npy_types; static PyObject *__pyx_n_s__numpy; static PyObject *__pyx_n_s__pass_array; static PyObject *__pyx_n_s__pass_matrix; static PyObject *__pyx_n_s__pass_vector; static PyObject *__pyx_n_s__range; static PyObject *__pyx_n_s__relstride; static PyObject *__pyx_n_s__size; static PyObject *__pyx_n_s__squeeze; static PyObject *__pyx_n_s__stride; static PyObject *__pyx_n_s__sum_via_iterators; static PyObject *__pyx_n_s__type; static PyObject *__pyx_n_s__x; static PyObject *__pyx_n_s__y; static PyObject *__pyx_n_s__z; static PyObject *__pyx_n_s__zeros; static PyObject *__pyx_n_s__zeros_like; static PyObject *__pyx_int_1; static PyObject *__pyx_int_15; static PyObject *__pyx_k_tuple_2; static PyObject *__pyx_k_tuple_4; static PyObject *__pyx_k_tuple_6; static PyObject *__pyx_k_tuple_9; static PyObject *__pyx_k_tuple_10; static PyObject *__pyx_k_tuple_12; static PyObject *__pyx_k_tuple_22; static PyObject *__pyx_k_tuple_26; static PyObject *__pyx_k_tuple_28; static PyObject *__pyx_k_tuple_30; static PyObject *__pyx_k_tuple_32; static PyObject *__pyx_k_tuple_34; static PyObject *__pyx_k_tuple_36; static PyObject *__pyx_k_tuple_39; static PyObject *__pyx_k_tuple_41; static PyObject *__pyx_k_codeobj_23; static PyObject *__pyx_k_codeobj_27; static PyObject *__pyx_k_codeobj_29; static PyObject *__pyx_k_codeobj_31; static PyObject *__pyx_k_codeobj_33; static PyObject *__pyx_k_codeobj_35; static PyObject *__pyx_k_codeobj_37; static PyObject *__pyx_k_codeobj_40; static PyObject *__pyx_k_codeobj_42; /* Python 
wrapper */ static PyObject *__pyx_pw_4nipy_4labs_8bindings_7wrapper_1fff_type(PyObject *__pyx_self, PyObject *__pyx_v_T); /*proto*/ static char __pyx_doc_4nipy_4labs_8bindings_7wrapper_fff_type[] = "\n fff_t, nbytes = fff_type(T)\n\n T is a np.dtype instance. Return a tuple (str, int). \n "; static PyMethodDef __pyx_mdef_4nipy_4labs_8bindings_7wrapper_1fff_type = {__Pyx_NAMESTR("fff_type"), (PyCFunction)__pyx_pw_4nipy_4labs_8bindings_7wrapper_1fff_type, METH_O, __Pyx_DOCSTR(__pyx_doc_4nipy_4labs_8bindings_7wrapper_fff_type)}; static PyObject *__pyx_pw_4nipy_4labs_8bindings_7wrapper_1fff_type(PyObject *__pyx_self, PyObject *__pyx_v_T) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("fff_type (wrapper)", 0); if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_T), __pyx_ptype_5numpy_dtype, 1, "T", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 34; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_4nipy_4labs_8bindings_7wrapper_fff_type(__pyx_self, ((PyArray_Descr *)__pyx_v_T)); goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/labs/bindings/wrapper.pyx":34 * * * def fff_type(dtype T): # <<<<<<<<<<<<<< * """ * fff_t, nbytes = fff_type(T) */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_7wrapper_fff_type(CYTHON_UNUSED PyObject *__pyx_self, PyArray_Descr *__pyx_v_T) { fff_datatype __pyx_v_fff_t; unsigned int __pyx_v_nbytes; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("fff_type", 0); /* "nipy/labs/bindings/wrapper.pyx":42 * cdef fff_datatype fff_t * cdef unsigned int nbytes * fff_t = fff_datatype_fromNumPy(T.type_num) # <<<<<<<<<<<<<< * nbytes = fff_nbytes(fff_t) * return c_types[fff_types.index(fff_t)], nbytes */ __pyx_v_fff_t = fff_datatype_fromNumPy(__pyx_v_T->type_num); /* "nipy/labs/bindings/wrapper.pyx":43 * cdef unsigned int nbytes * fff_t = fff_datatype_fromNumPy(T.type_num) * nbytes = fff_nbytes(fff_t) # <<<<<<<<<<<<<< * return c_types[fff_types.index(fff_t)], nbytes * */ __pyx_v_nbytes = fff_nbytes(__pyx_v_fff_t); /* "nipy/labs/bindings/wrapper.pyx":44 * fff_t = fff_datatype_fromNumPy(T.type_num) * nbytes = fff_nbytes(fff_t) * return c_types[fff_types.index(fff_t)], nbytes # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__c_types); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 44; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__fff_types); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 44; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__index); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 44; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyInt_FromLong(((int)__pyx_v_fff_t)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 44; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 44; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); 
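/* The generated code around this point implements the Cython statement
 * ``return c_types[fff_types.index(fff_t)], nbytes`` quoted in the
 * wrapper.pyx:44 comment above.  A minimal Python-level sketch of the
 * intended call, based on the fff_type docstring; the exact string returned
 * depends on the c_types table defined in wrapper.pyx, so the value shown is
 * only a likely example:
 *
 *     import numpy as np
 *     from nipy.labs.bindings.wrapper import fff_type
 *     fff_t, nbytes = fff_type(np.dtype('float64'))  # likely ('double', 8)
 */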
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 44; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; __pyx_t_4 = PyObject_GetItem(__pyx_t_1, __pyx_t_2); if (!__pyx_t_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 44; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyLong_FromUnsignedLong(__pyx_v_nbytes); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 44; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 44; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_4 = 0; __pyx_t_2 = 0; __pyx_r = ((PyObject *)__pyx_t_1); __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("nipy.labs.bindings.wrapper.fff_type", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_8bindings_7wrapper_3npy_type(PyObject *__pyx_self, PyObject *__pyx_v_T); /*proto*/ static char __pyx_doc_4nipy_4labs_8bindings_7wrapper_2npy_type[] = "\n npy_t, nbytes = npy_type(T)\n\n T is a string. Return a tuple (str, int). 
\n "; static PyMethodDef __pyx_mdef_4nipy_4labs_8bindings_7wrapper_3npy_type = {__Pyx_NAMESTR("npy_type"), (PyCFunction)__pyx_pw_4nipy_4labs_8bindings_7wrapper_3npy_type, METH_O, __Pyx_DOCSTR(__pyx_doc_4nipy_4labs_8bindings_7wrapper_2npy_type)}; static PyObject *__pyx_pw_4nipy_4labs_8bindings_7wrapper_3npy_type(PyObject *__pyx_self, PyObject *__pyx_v_T) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("npy_type (wrapper)", 0); __pyx_r = __pyx_pf_4nipy_4labs_8bindings_7wrapper_2npy_type(__pyx_self, ((PyObject *)__pyx_v_T)); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/labs/bindings/wrapper.pyx":47 * * * def npy_type(T): # <<<<<<<<<<<<<< * """ * npy_t, nbytes = npy_type(T) */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_7wrapper_2npy_type(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_T) { int __pyx_v_npy_t; fff_datatype __pyx_v_fff_t; unsigned int __pyx_v_nbytes; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; fff_datatype __pyx_t_5; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("npy_type", 0); /* "nipy/labs/bindings/wrapper.pyx":56 * cdef fff_datatype fff_t * cdef unsigned int nbytes * fff_t = fff_types[c_types.index(T)] # <<<<<<<<<<<<<< * npy_t = fff_datatype_toNumPy(fff_t) * nbytes = fff_nbytes(fff_t) */ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__fff_types); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__c_types); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__index); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_v_T); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_T); __Pyx_GIVEREF(__pyx_v_T); __pyx_t_4 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; __pyx_t_2 = PyObject_GetItem(__pyx_t_1, __pyx_t_4); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_5 = ((fff_datatype)PyInt_AsLong(__pyx_t_2)); if (unlikely(PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v_fff_t = ((fff_datatype)__pyx_t_5); /* "nipy/labs/bindings/wrapper.pyx":57 * cdef unsigned int nbytes * fff_t = fff_types[c_types.index(T)] * npy_t = fff_datatype_toNumPy(fff_t) # <<<<<<<<<<<<<< * nbytes = fff_nbytes(fff_t) * return c_types[npy_types.index(npy_t)], nbytes */ __pyx_v_npy_t = fff_datatype_toNumPy(__pyx_v_fff_t); /* 
"nipy/labs/bindings/wrapper.pyx":58 * fff_t = fff_types[c_types.index(T)] * npy_t = fff_datatype_toNumPy(fff_t) * nbytes = fff_nbytes(fff_t) # <<<<<<<<<<<<<< * return c_types[npy_types.index(npy_t)], nbytes * */ __pyx_v_nbytes = fff_nbytes(__pyx_v_fff_t); /* "nipy/labs/bindings/wrapper.pyx":59 * npy_t = fff_datatype_toNumPy(fff_t) * nbytes = fff_nbytes(fff_t) * return c_types[npy_types.index(npy_t)], nbytes # <<<<<<<<<<<<<< * * def pass_vector(ndarray X): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__c_types); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 59; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s__npy_types); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 59; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_1 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__index); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 59; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyInt_FromLong(__pyx_v_npy_t); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 59; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 59; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 59; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __pyx_t_3 = PyObject_GetItem(__pyx_t_2, __pyx_t_4); if (!__pyx_t_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 59; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyLong_FromUnsignedLong(__pyx_v_nbytes); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 59; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 59; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_r = ((PyObject *)__pyx_t_2); __pyx_t_2 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("nipy.labs.bindings.wrapper.npy_type", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_8bindings_7wrapper_5pass_vector(PyObject *__pyx_self, PyObject *__pyx_v_X); /*proto*/ static char __pyx_doc_4nipy_4labs_8bindings_7wrapper_4pass_vector[] = "\n Y = pass_vector(X)\n "; static PyMethodDef __pyx_mdef_4nipy_4labs_8bindings_7wrapper_5pass_vector = {__Pyx_NAMESTR("pass_vector"), 
(PyCFunction)__pyx_pw_4nipy_4labs_8bindings_7wrapper_5pass_vector, METH_O, __Pyx_DOCSTR(__pyx_doc_4nipy_4labs_8bindings_7wrapper_4pass_vector)}; static PyObject *__pyx_pw_4nipy_4labs_8bindings_7wrapper_5pass_vector(PyObject *__pyx_self, PyObject *__pyx_v_X) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("pass_vector (wrapper)", 0); if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_X), __pyx_ptype_5numpy_ndarray, 1, "X", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 61; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_4nipy_4labs_8bindings_7wrapper_4pass_vector(__pyx_self, ((PyArrayObject *)__pyx_v_X)); goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/labs/bindings/wrapper.pyx":61 * return c_types[npy_types.index(npy_t)], nbytes * * def pass_vector(ndarray X): # <<<<<<<<<<<<<< * """ * Y = pass_vector(X) */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_7wrapper_4pass_vector(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_X) { fff_vector *__pyx_v_x; fff_vector *__pyx_v_y; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("pass_vector", 0); /* "nipy/labs/bindings/wrapper.pyx":66 * """ * cdef fff_vector *x, *y * x = fff_vector_fromPyArray(X) # <<<<<<<<<<<<<< * y = fff_vector_new(x.size) * fff_vector_memcpy(y, x) */ __pyx_v_x = fff_vector_fromPyArray(__pyx_v_X); /* "nipy/labs/bindings/wrapper.pyx":67 * cdef fff_vector *x, *y * x = fff_vector_fromPyArray(X) * y = fff_vector_new(x.size) # <<<<<<<<<<<<<< * fff_vector_memcpy(y, x) * fff_vector_delete(x) */ __pyx_v_y = fff_vector_new(__pyx_v_x->size); /* "nipy/labs/bindings/wrapper.pyx":68 * x = fff_vector_fromPyArray(X) * y = fff_vector_new(x.size) * fff_vector_memcpy(y, x) # <<<<<<<<<<<<<< * fff_vector_delete(x) * return fff_vector_toPyArray(y) */ fff_vector_memcpy(__pyx_v_y, __pyx_v_x); /* "nipy/labs/bindings/wrapper.pyx":69 * y = fff_vector_new(x.size) * fff_vector_memcpy(y, x) * fff_vector_delete(x) # <<<<<<<<<<<<<< * return fff_vector_toPyArray(y) * */ fff_vector_delete(__pyx_v_x); /* "nipy/labs/bindings/wrapper.pyx":70 * fff_vector_memcpy(y, x) * fff_vector_delete(x) * return fff_vector_toPyArray(y) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = ((PyObject *)fff_vector_toPyArray(__pyx_v_y)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 70; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("nipy.labs.bindings.wrapper.pass_vector", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_8bindings_7wrapper_7copy_vector(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_4labs_8bindings_7wrapper_6copy_vector[] = "\n Y = copy_vector(X, flag)\n\n flag == 0 ==> use fff\n flag == 1 ==> use numpy\n "; static PyMethodDef __pyx_mdef_4nipy_4labs_8bindings_7wrapper_7copy_vector = {__Pyx_NAMESTR("copy_vector"), (PyCFunction)__pyx_pw_4nipy_4labs_8bindings_7wrapper_7copy_vector, METH_VARARGS|METH_KEYWORDS, 
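/* pass_vector (generated above) round-trips a 1-d array through an
 * fff_vector: fff_vector_fromPyArray -> fff_vector_new -> fff_vector_memcpy
 * -> fff_vector_delete -> fff_vector_toPyArray, so the result should compare
 * equal to the input.  A minimal sketch, assuming the compiled module is
 * importable under the name recorded in __Pyx_MODULE_NAME:
 *
 *     import numpy as np
 *     from nipy.labs.bindings.wrapper import pass_vector
 *     x = np.random.rand(10)
 *     assert np.array_equal(pass_vector(x), x)
 */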
__Pyx_DOCSTR(__pyx_doc_4nipy_4labs_8bindings_7wrapper_6copy_vector)}; static PyObject *__pyx_pw_4nipy_4labs_8bindings_7wrapper_7copy_vector(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_X = 0; int __pyx_v_flag; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("copy_vector (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__X,&__pyx_n_s__flag,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__X)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__flag)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("copy_vector", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "copy_vector") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_X = ((PyArrayObject *)values[0]); __pyx_v_flag = __Pyx_PyInt_AsInt(values[1]); if (unlikely((__pyx_v_flag == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("copy_vector", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.labs.bindings.wrapper.copy_vector", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_X), __pyx_ptype_5numpy_ndarray, 1, "X", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_4nipy_4labs_8bindings_7wrapper_6copy_vector(__pyx_self, __pyx_v_X, __pyx_v_flag); goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/labs/bindings/wrapper.pyx":73 * * * def copy_vector(ndarray X, int flag): # <<<<<<<<<<<<<< * """ * Y = copy_vector(X, flag) */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_7wrapper_6copy_vector(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_X, int __pyx_v_flag) { fff_vector *__pyx_v_y; void *__pyx_v_data; int __pyx_v_size; int __pyx_v_stride; int __pyx_v_relstride; int __pyx_v_type; int __pyx_v_itemsize; fff_datatype __pyx_v_fff_type; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("copy_vector", 0); /* "nipy/labs/bindings/wrapper.pyx":85 * cdef fff_datatype fff_type * * data = X.data # <<<<<<<<<<<<<< * size = X.shape[0] * stride = 
X.strides[0] */ __pyx_v_data = ((void *)__pyx_v_X->data); /* "nipy/labs/bindings/wrapper.pyx":86 * * data = X.data * size = X.shape[0] # <<<<<<<<<<<<<< * stride = X.strides[0] * itemsize = X.descr.elsize */ __pyx_v_size = (__pyx_v_X->dimensions[0]); /* "nipy/labs/bindings/wrapper.pyx":87 * data = X.data * size = X.shape[0] * stride = X.strides[0] # <<<<<<<<<<<<<< * itemsize = X.descr.elsize * type = X.descr.type_num */ __pyx_v_stride = (__pyx_v_X->strides[0]); /* "nipy/labs/bindings/wrapper.pyx":88 * size = X.shape[0] * stride = X.strides[0] * itemsize = X.descr.elsize # <<<<<<<<<<<<<< * type = X.descr.type_num * */ __pyx_t_1 = PyObject_GetAttr(((PyObject *)__pyx_v_X->descr), __pyx_n_s__elsize); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 88; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyInt_AsInt(__pyx_t_1); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 88; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_itemsize = __pyx_t_2; /* "nipy/labs/bindings/wrapper.pyx":89 * stride = X.strides[0] * itemsize = X.descr.elsize * type = X.descr.type_num # <<<<<<<<<<<<<< * * relstride = stride/itemsize */ __pyx_t_2 = __pyx_v_X->descr->type_num; __pyx_v_type = __pyx_t_2; /* "nipy/labs/bindings/wrapper.pyx":91 * type = X.descr.type_num * * relstride = stride/itemsize # <<<<<<<<<<<<<< * fff_type = fff_datatype_fromNumPy(type) * */ if (unlikely(__pyx_v_itemsize == 0)) { PyErr_Format(PyExc_ZeroDivisionError, "integer division or modulo by zero"); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } else if (sizeof(int) == sizeof(long) && unlikely(__pyx_v_itemsize == -1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_stride))) { PyErr_Format(PyExc_OverflowError, "value too large to perform division"); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_relstride = __Pyx_div_int(__pyx_v_stride, __pyx_v_itemsize); /* "nipy/labs/bindings/wrapper.pyx":92 * * relstride = stride/itemsize * fff_type = fff_datatype_fromNumPy(type) # <<<<<<<<<<<<<< * * y = fff_vector_new(size) */ __pyx_v_fff_type = fff_datatype_fromNumPy(__pyx_v_type); /* "nipy/labs/bindings/wrapper.pyx":94 * fff_type = fff_datatype_fromNumPy(type) * * y = fff_vector_new(size) # <<<<<<<<<<<<<< * * if flag == 0: */ __pyx_v_y = fff_vector_new(__pyx_v_size); /* "nipy/labs/bindings/wrapper.pyx":96 * y = fff_vector_new(size) * * if flag == 0: # <<<<<<<<<<<<<< * fff_vector_fetch(y, data, fff_type, relstride) * else: */ __pyx_t_3 = (__pyx_v_flag == 0); if (__pyx_t_3) { /* "nipy/labs/bindings/wrapper.pyx":97 * * if flag == 0: * fff_vector_fetch(y, data, fff_type, relstride) # <<<<<<<<<<<<<< * else: * fff_vector_fetch_using_NumPy(y, data, stride, type, itemsize) */ fff_vector_fetch(__pyx_v_y, __pyx_v_data, __pyx_v_fff_type, __pyx_v_relstride); goto __pyx_L3; } /*else*/ { /* "nipy/labs/bindings/wrapper.pyx":99 * fff_vector_fetch(y, data, fff_type, relstride) * else: * fff_vector_fetch_using_NumPy(y, data, stride, type, itemsize) # <<<<<<<<<<<<<< * * return fff_vector_toPyArray(y) */ fff_vector_fetch_using_NumPy(__pyx_v_y, ((char *)__pyx_v_data), __pyx_v_stride, __pyx_v_type, __pyx_v_itemsize); } __pyx_L3:; /* "nipy/labs/bindings/wrapper.pyx":101 * fff_vector_fetch_using_NumPy(y, data, stride, type, itemsize) * * return fff_vector_toPyArray(y) # <<<<<<<<<<<<<< * * */ 
__Pyx_XDECREF(__pyx_r); __pyx_t_1 = ((PyObject *)fff_vector_toPyArray(__pyx_v_y)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("nipy.labs.bindings.wrapper.copy_vector", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_8bindings_7wrapper_9pass_matrix(PyObject *__pyx_self, PyObject *__pyx_v_X); /*proto*/ static char __pyx_doc_4nipy_4labs_8bindings_7wrapper_8pass_matrix[] = "\n Y = pass_matrix(X)\n "; static PyMethodDef __pyx_mdef_4nipy_4labs_8bindings_7wrapper_9pass_matrix = {__Pyx_NAMESTR("pass_matrix"), (PyCFunction)__pyx_pw_4nipy_4labs_8bindings_7wrapper_9pass_matrix, METH_O, __Pyx_DOCSTR(__pyx_doc_4nipy_4labs_8bindings_7wrapper_8pass_matrix)}; static PyObject *__pyx_pw_4nipy_4labs_8bindings_7wrapper_9pass_matrix(PyObject *__pyx_self, PyObject *__pyx_v_X) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("pass_matrix (wrapper)", 0); if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_X), __pyx_ptype_5numpy_ndarray, 1, "X", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 104; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_4nipy_4labs_8bindings_7wrapper_8pass_matrix(__pyx_self, ((PyArrayObject *)__pyx_v_X)); goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/labs/bindings/wrapper.pyx":104 * * * def pass_matrix(ndarray X): # <<<<<<<<<<<<<< * """ * Y = pass_matrix(X) */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_7wrapper_8pass_matrix(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_X) { fff_matrix *__pyx_v_x; fff_matrix *__pyx_v_y; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("pass_matrix", 0); /* "nipy/labs/bindings/wrapper.pyx":109 * """ * cdef fff_matrix *x, *y * x = fff_matrix_fromPyArray(X) # <<<<<<<<<<<<<< * y = fff_matrix_new(x.size1, x.size2) * fff_matrix_memcpy(y, x) */ __pyx_v_x = fff_matrix_fromPyArray(__pyx_v_X); /* "nipy/labs/bindings/wrapper.pyx":110 * cdef fff_matrix *x, *y * x = fff_matrix_fromPyArray(X) * y = fff_matrix_new(x.size1, x.size2) # <<<<<<<<<<<<<< * fff_matrix_memcpy(y, x) * fff_matrix_delete(x) */ __pyx_v_y = fff_matrix_new(__pyx_v_x->size1, __pyx_v_x->size2); /* "nipy/labs/bindings/wrapper.pyx":111 * x = fff_matrix_fromPyArray(X) * y = fff_matrix_new(x.size1, x.size2) * fff_matrix_memcpy(y, x) # <<<<<<<<<<<<<< * fff_matrix_delete(x) * return fff_matrix_toPyArray(y) */ fff_matrix_memcpy(__pyx_v_y, __pyx_v_x); /* "nipy/labs/bindings/wrapper.pyx":112 * y = fff_matrix_new(x.size1, x.size2) * fff_matrix_memcpy(y, x) * fff_matrix_delete(x) # <<<<<<<<<<<<<< * return fff_matrix_toPyArray(y) * */ fff_matrix_delete(__pyx_v_x); /* "nipy/labs/bindings/wrapper.pyx":113 * fff_matrix_memcpy(y, x) * fff_matrix_delete(x) * return fff_matrix_toPyArray(y) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = ((PyObject *)fff_matrix_toPyArray(__pyx_v_y)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 113; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
__Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("nipy.labs.bindings.wrapper.pass_matrix", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_8bindings_7wrapper_11pass_array(PyObject *__pyx_self, PyObject *__pyx_v_X); /*proto*/ static char __pyx_doc_4nipy_4labs_8bindings_7wrapper_10pass_array[] = "\n Y = pass_array(X)\n "; static PyMethodDef __pyx_mdef_4nipy_4labs_8bindings_7wrapper_11pass_array = {__Pyx_NAMESTR("pass_array"), (PyCFunction)__pyx_pw_4nipy_4labs_8bindings_7wrapper_11pass_array, METH_O, __Pyx_DOCSTR(__pyx_doc_4nipy_4labs_8bindings_7wrapper_10pass_array)}; static PyObject *__pyx_pw_4nipy_4labs_8bindings_7wrapper_11pass_array(PyObject *__pyx_self, PyObject *__pyx_v_X) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("pass_array (wrapper)", 0); if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_X), __pyx_ptype_5numpy_ndarray, 1, "X", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_4nipy_4labs_8bindings_7wrapper_10pass_array(__pyx_self, ((PyArrayObject *)__pyx_v_X)); goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/labs/bindings/wrapper.pyx":116 * * * def pass_array(ndarray X): # <<<<<<<<<<<<<< * """ * Y = pass_array(X) */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_7wrapper_10pass_array(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_X) { fff_array *__pyx_v_x; fff_array *__pyx_v_y; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("pass_array", 0); /* "nipy/labs/bindings/wrapper.pyx":121 * """ * cdef fff_array *x, *y * x = fff_array_fromPyArray(X) # <<<<<<<<<<<<<< * y = fff_array_new(x.datatype, x.dimX, x.dimY, x.dimZ, x.dimT) * fff_array_copy(y, x) */ __pyx_v_x = fff_array_fromPyArray(__pyx_v_X); /* "nipy/labs/bindings/wrapper.pyx":122 * cdef fff_array *x, *y * x = fff_array_fromPyArray(X) * y = fff_array_new(x.datatype, x.dimX, x.dimY, x.dimZ, x.dimT) # <<<<<<<<<<<<<< * fff_array_copy(y, x) * fff_array_delete(x) */ __pyx_v_y = fff_array_new(__pyx_v_x->datatype, __pyx_v_x->dimX, __pyx_v_x->dimY, __pyx_v_x->dimZ, __pyx_v_x->dimT); /* "nipy/labs/bindings/wrapper.pyx":123 * x = fff_array_fromPyArray(X) * y = fff_array_new(x.datatype, x.dimX, x.dimY, x.dimZ, x.dimT) * fff_array_copy(y, x) # <<<<<<<<<<<<<< * fff_array_delete(x) * return fff_array_toPyArray(y) */ fff_array_copy(__pyx_v_y, __pyx_v_x); /* "nipy/labs/bindings/wrapper.pyx":124 * y = fff_array_new(x.datatype, x.dimX, x.dimY, x.dimZ, x.dimT) * fff_array_copy(y, x) * fff_array_delete(x) # <<<<<<<<<<<<<< * return fff_array_toPyArray(y) * */ fff_array_delete(__pyx_v_x); /* "nipy/labs/bindings/wrapper.pyx":125 * fff_array_copy(y, x) * fff_array_delete(x) * return fff_array_toPyArray(y) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = ((PyObject *)fff_array_toPyArray(__pyx_v_y)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; 
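/* Illustrative usage sketch for the low-level pass/copy wrappers generated in
 * this region (pass_vector, copy_vector, pass_matrix, pass_array). Hedged
 * assumptions: the compiled extension imports as nipy.labs.bindings.wrapper
 * (the module name used in the tracebacks above) and accepts double-precision
 * NumPy arrays; the example arrays x, m, a are made up for illustration.
 *
 *     import numpy as np
 *     from nipy.labs.bindings import wrapper
 *
 *     x = np.arange(5, dtype=np.double)
 *     y = wrapper.pass_vector(x)        # round-trips x through an fff_vector
 *     z = wrapper.copy_vector(x, 0)     # flag 0: fff fetch; flag 1: numpy fetch
 *     m = wrapper.pass_matrix(np.eye(3))
 *     a = wrapper.pass_array(np.zeros((2, 3, 4)))
 *     assert np.allclose(x, y) and np.allclose(x, z)
 */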
__Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("nipy.labs.bindings.wrapper.pass_array", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_8bindings_7wrapper_13pass_vector_via_iterator(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_4labs_8bindings_7wrapper_12pass_vector_via_iterator[] = "\n Y = pass_vector_via_iterator(X, axis=0, niters=0)\n "; static PyMethodDef __pyx_mdef_4nipy_4labs_8bindings_7wrapper_13pass_vector_via_iterator = {__Pyx_NAMESTR("pass_vector_via_iterator"), (PyCFunction)__pyx_pw_4nipy_4labs_8bindings_7wrapper_13pass_vector_via_iterator, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4nipy_4labs_8bindings_7wrapper_12pass_vector_via_iterator)}; static PyObject *__pyx_pw_4nipy_4labs_8bindings_7wrapper_13pass_vector_via_iterator(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_X = 0; int __pyx_v_axis; int __pyx_v_niters; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("pass_vector_via_iterator (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__X,&__pyx_n_s__axis,&__pyx_n_s__niters,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__X)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__axis); if (value) { values[1] = value; kw_args--; } } case 2: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__niters); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "pass_vector_via_iterator") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 128; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_X = ((PyArrayObject *)values[0]); if (values[1]) { __pyx_v_axis = __Pyx_PyInt_AsInt(values[1]); if (unlikely((__pyx_v_axis == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 128; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else { __pyx_v_axis = ((int)0); } if (values[2]) { __pyx_v_niters = __Pyx_PyInt_AsInt(values[2]); if (unlikely((__pyx_v_niters == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 128; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else { __pyx_v_niters = ((int)0); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("pass_vector_via_iterator", 0, 1, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 128; __pyx_clineno = __LINE__; goto __pyx_L3_error;} 
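/* Illustrative usage sketch for pass_vector_via_iterator, whose generated code
 * surrounds this point (hedged assumption: same module layout as above). Per
 * its docstring the signature is Y = pass_vector_via_iterator(X, axis=0,
 * niters=0); the implementation advances fff's multi-iterator `niters` times
 * and returns a copy of the 1-d vector view along `axis` it then points at.
 *
 *     X = np.arange(12, dtype=np.double).reshape(3, 4)
 *     v = wrapper.pass_vector_via_iterator(X, axis=0, niters=0)
 *     # v should be a length-3 double slice taken along axis 0 of X
 */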
__pyx_L3_error:; __Pyx_AddTraceback("nipy.labs.bindings.wrapper.pass_vector_via_iterator", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_X), __pyx_ptype_5numpy_ndarray, 1, "X", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 128; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_4nipy_4labs_8bindings_7wrapper_12pass_vector_via_iterator(__pyx_self, __pyx_v_X, __pyx_v_axis, __pyx_v_niters); goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/labs/bindings/wrapper.pyx":128 * * * def pass_vector_via_iterator(ndarray X, int axis=0, int niters=0): # <<<<<<<<<<<<<< * """ * Y = pass_vector_via_iterator(X, axis=0, niters=0) */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_7wrapper_12pass_vector_via_iterator(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_X, int __pyx_v_axis, int __pyx_v_niters) { fff_vector *__pyx_v_x; fff_vector *__pyx_v_y; fffpy_multi_iterator *__pyx_v_multi; PyObject *__pyx_v_Xdum = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_t_3; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("pass_vector_via_iterator", 0); /* "nipy/labs/bindings/wrapper.pyx":135 * cdef fffpy_multi_iterator* multi * * Xdum = X.copy() ## at least two arrays needed for multi iterator # <<<<<<<<<<<<<< * multi = fffpy_multi_iterator_new(2, axis, X, Xdum) * x = multi.vector[0] */ __pyx_t_1 = PyObject_GetAttr(((PyObject *)__pyx_v_X), __pyx_n_s__copy); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_Xdum = __pyx_t_2; __pyx_t_2 = 0; /* "nipy/labs/bindings/wrapper.pyx":136 * * Xdum = X.copy() ## at least two arrays needed for multi iterator * multi = fffpy_multi_iterator_new(2, axis, X, Xdum) # <<<<<<<<<<<<<< * x = multi.vector[0] * */ __pyx_v_multi = fffpy_multi_iterator_new(2, __pyx_v_axis, ((void *)__pyx_v_X), ((void *)__pyx_v_Xdum)); /* "nipy/labs/bindings/wrapper.pyx":137 * Xdum = X.copy() ## at least two arrays needed for multi iterator * multi = fffpy_multi_iterator_new(2, axis, X, Xdum) * x = multi.vector[0] # <<<<<<<<<<<<<< * * while(multi.index < niters): */ __pyx_v_x = (__pyx_v_multi->vector[0]); /* "nipy/labs/bindings/wrapper.pyx":139 * x = multi.vector[0] * * while(multi.index < niters): # <<<<<<<<<<<<<< * fffpy_multi_iterator_update(multi) * */ while (1) { __pyx_t_3 = (__pyx_v_multi->index < __pyx_v_niters); if (!__pyx_t_3) break; /* "nipy/labs/bindings/wrapper.pyx":140 * * while(multi.index < niters): * fffpy_multi_iterator_update(multi) # <<<<<<<<<<<<<< * * y = fff_vector_new(x.size) */ fffpy_multi_iterator_update(__pyx_v_multi); } /* "nipy/labs/bindings/wrapper.pyx":142 * fffpy_multi_iterator_update(multi) * * y = fff_vector_new(x.size) # <<<<<<<<<<<<<< * fff_vector_memcpy(y, x) * fffpy_multi_iterator_delete(multi) */ __pyx_v_y = fff_vector_new(__pyx_v_x->size); /* "nipy/labs/bindings/wrapper.pyx":143 * * y = fff_vector_new(x.size) * fff_vector_memcpy(y, x) # 
<<<<<<<<<<<<<< * fffpy_multi_iterator_delete(multi) * return fff_vector_toPyArray(y) */ fff_vector_memcpy(__pyx_v_y, __pyx_v_x); /* "nipy/labs/bindings/wrapper.pyx":144 * y = fff_vector_new(x.size) * fff_vector_memcpy(y, x) * fffpy_multi_iterator_delete(multi) # <<<<<<<<<<<<<< * return fff_vector_toPyArray(y) * */ fffpy_multi_iterator_delete(__pyx_v_multi); /* "nipy/labs/bindings/wrapper.pyx":145 * fff_vector_memcpy(y, x) * fffpy_multi_iterator_delete(multi) * return fff_vector_toPyArray(y) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = ((PyObject *)fff_vector_toPyArray(__pyx_v_y)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 145; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("nipy.labs.bindings.wrapper.pass_vector_via_iterator", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_Xdum); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_8bindings_7wrapper_15copy_via_iterators(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_4labs_8bindings_7wrapper_14copy_via_iterators[] = "\n Z = copy_via_iterators(Y, int axis=0) \n\n Copy array Y into Z via fff's PyArray_MultiIterAllButAxis C function.\n Behavior should be equivalent to Z = Y.copy(). \n "; static PyMethodDef __pyx_mdef_4nipy_4labs_8bindings_7wrapper_15copy_via_iterators = {__Pyx_NAMESTR("copy_via_iterators"), (PyCFunction)__pyx_pw_4nipy_4labs_8bindings_7wrapper_15copy_via_iterators, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4nipy_4labs_8bindings_7wrapper_14copy_via_iterators)}; static PyObject *__pyx_pw_4nipy_4labs_8bindings_7wrapper_15copy_via_iterators(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_Y = 0; int __pyx_v_axis; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("copy_via_iterators (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__Y,&__pyx_n_s__axis,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__Y)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__axis); if (value) { values[1] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "copy_via_iterators") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 148; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_Y = ((PyArrayObject *)values[0]); if (values[1]) { __pyx_v_axis = __Pyx_PyInt_AsInt(values[1]); if (unlikely((__pyx_v_axis == (int)-1) && PyErr_Occurred())) 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 148; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else { __pyx_v_axis = ((int)0); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("copy_via_iterators", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 148; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.labs.bindings.wrapper.copy_via_iterators", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_Y), __pyx_ptype_5numpy_ndarray, 1, "Y", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 148; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_4nipy_4labs_8bindings_7wrapper_14copy_via_iterators(__pyx_self, __pyx_v_Y, __pyx_v_axis); goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/labs/bindings/wrapper.pyx":148 * * * def copy_via_iterators(ndarray Y, int axis=0): # <<<<<<<<<<<<<< * """ * Z = copy_via_iterators(Y, int axis=0) */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_7wrapper_14copy_via_iterators(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_Y, int __pyx_v_axis) { fff_vector *__pyx_v_y; fff_vector *__pyx_v_z; fffpy_multi_iterator *__pyx_v_multi; PyObject *__pyx_v_Z = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("copy_via_iterators", 0); /* "nipy/labs/bindings/wrapper.pyx":159 * * # Allocate output array * Z = np.zeros_like(Y, dtype=np.float) # <<<<<<<<<<<<<< * * # Create a new array iterator */ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 159; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__zeros_like); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 159; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 159; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)__pyx_v_Y)); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_v_Y)); __Pyx_GIVEREF(((PyObject *)__pyx_v_Y)); __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 159; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_3)); __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 159; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__float); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 159; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (PyDict_SetItem(__pyx_t_3, ((PyObject *)__pyx_n_s__dtype), __pyx_t_5) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 159; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); 
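/* Illustrative usage sketch for the iterator-based helpers defined in this
 * region, copy_via_iterators and sum_via_iterators (hedged assumption: same
 * module layout as above). Their docstrings state the expected behaviour:
 * copy_via_iterators(Y, axis) should match Y.copy() and
 * sum_via_iterators(Y, axis) should match Y.sum(axis).
 *
 *     Y = np.random.rand(3, 4)
 *     Z1 = wrapper.copy_via_iterators(Y, 1)   # expected to equal Y.copy()
 *     Z2 = wrapper.sum_via_iterators(Y, 0)    # expected to equal Y.sum(axis=0)
 *     assert np.allclose(Z1, Y) and np.allclose(Z2, Y.sum(axis=0))
 */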
__pyx_t_5 = 0; __pyx_t_5 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_1), ((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 159; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __pyx_v_Z = __pyx_t_5; __pyx_t_5 = 0; /* "nipy/labs/bindings/wrapper.pyx":162 * * # Create a new array iterator * multi = fffpy_multi_iterator_new(2, axis, Y, Z) # <<<<<<<<<<<<<< * * # Create views */ __pyx_v_multi = fffpy_multi_iterator_new(2, __pyx_v_axis, ((void *)__pyx_v_Y), ((void *)__pyx_v_Z)); /* "nipy/labs/bindings/wrapper.pyx":165 * * # Create views * y = multi.vector[0] # <<<<<<<<<<<<<< * z = multi.vector[1] * */ __pyx_v_y = (__pyx_v_multi->vector[0]); /* "nipy/labs/bindings/wrapper.pyx":166 * # Create views * y = multi.vector[0] * z = multi.vector[1] # <<<<<<<<<<<<<< * * # Loop */ __pyx_v_z = (__pyx_v_multi->vector[1]); /* "nipy/labs/bindings/wrapper.pyx":169 * * # Loop * while(multi.index < multi.size): # <<<<<<<<<<<<<< * fff_vector_memcpy(z, y) * fffpy_multi_iterator_update(multi) */ while (1) { __pyx_t_6 = (__pyx_v_multi->index < __pyx_v_multi->size); if (!__pyx_t_6) break; /* "nipy/labs/bindings/wrapper.pyx":170 * # Loop * while(multi.index < multi.size): * fff_vector_memcpy(z, y) # <<<<<<<<<<<<<< * fffpy_multi_iterator_update(multi) * */ fff_vector_memcpy(__pyx_v_z, __pyx_v_y); /* "nipy/labs/bindings/wrapper.pyx":171 * while(multi.index < multi.size): * fff_vector_memcpy(z, y) * fffpy_multi_iterator_update(multi) # <<<<<<<<<<<<<< * * # Free memory */ fffpy_multi_iterator_update(__pyx_v_multi); } /* "nipy/labs/bindings/wrapper.pyx":174 * * # Free memory * fffpy_multi_iterator_delete(multi) # <<<<<<<<<<<<<< * * # Return */ fffpy_multi_iterator_delete(__pyx_v_multi); /* "nipy/labs/bindings/wrapper.pyx":177 * * # Return * return Z # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_Z); __pyx_r = __pyx_v_Z; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("nipy.labs.bindings.wrapper.copy_via_iterators", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_Z); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_8bindings_7wrapper_17sum_via_iterators(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_4labs_8bindings_7wrapper_16sum_via_iterators[] = "\n Z = dummy_iterator(Y, int axis=0) \n\n Return the sum of input elements along the dimension specified by axis.\n Behavior should be equivalent to Z = Y.sum(axis). 
\n "; static PyMethodDef __pyx_mdef_4nipy_4labs_8bindings_7wrapper_17sum_via_iterators = {__Pyx_NAMESTR("sum_via_iterators"), (PyCFunction)__pyx_pw_4nipy_4labs_8bindings_7wrapper_17sum_via_iterators, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4nipy_4labs_8bindings_7wrapper_16sum_via_iterators)}; static PyObject *__pyx_pw_4nipy_4labs_8bindings_7wrapper_17sum_via_iterators(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_Y = 0; int __pyx_v_axis; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("sum_via_iterators (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__Y,&__pyx_n_s__axis,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__Y)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__axis); if (value) { values[1] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "sum_via_iterators") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_Y = ((PyArrayObject *)values[0]); if (values[1]) { __pyx_v_axis = __Pyx_PyInt_AsInt(values[1]); if (unlikely((__pyx_v_axis == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else { __pyx_v_axis = ((int)0); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("sum_via_iterators", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.labs.bindings.wrapper.sum_via_iterators", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_Y), __pyx_ptype_5numpy_ndarray, 1, "Y", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_4nipy_4labs_8bindings_7wrapper_16sum_via_iterators(__pyx_self, __pyx_v_Y, __pyx_v_axis); goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/labs/bindings/wrapper.pyx":180 * * * def sum_via_iterators(ndarray Y, int axis=0): # <<<<<<<<<<<<<< * """ * Z = dummy_iterator(Y, int axis=0) */ static PyObject *__pyx_pf_4nipy_4labs_8bindings_7wrapper_16sum_via_iterators(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_Y, int __pyx_v_axis) { fff_vector *__pyx_v_y; fff_vector *__pyx_v_z; fffpy_multi_iterator *__pyx_v_multi; PyObject *__pyx_v_dims = NULL; PyObject *__pyx_v_Z = NULL; int __pyx_v_i; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = 
NULL; PyObject *__pyx_t_5 = NULL; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("sum_via_iterators", 0); /* "nipy/labs/bindings/wrapper.pyx":191 * * # Allocate output array * dims = [Y.shape[i] for i in range(Y.ndim)] # <<<<<<<<<<<<<< * dims[axis] = 1 * Z = np.zeros(dims) */ __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 191; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __pyx_v_Y->nd; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; __pyx_t_4 = __Pyx_PyInt_to_py_Py_intptr_t((__pyx_v_Y->dimensions[__pyx_v_i])); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 191; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); if (unlikely(__Pyx_PyList_Append(__pyx_t_1, (PyObject*)__pyx_t_4))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 191; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } __pyx_t_4 = ((PyObject *)__pyx_t_1); __Pyx_INCREF(__pyx_t_4); __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __pyx_v_dims = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; /* "nipy/labs/bindings/wrapper.pyx":192 * # Allocate output array * dims = [Y.shape[i] for i in range(Y.ndim)] * dims[axis] = 1 # <<<<<<<<<<<<<< * Z = np.zeros(dims) * */ if (__Pyx_SetItemInt(((PyObject *)__pyx_v_dims), __pyx_v_axis, __pyx_int_1, sizeof(int), PyInt_FromLong) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 192; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/bindings/wrapper.pyx":193 * dims = [Y.shape[i] for i in range(Y.ndim)] * dims[axis] = 1 * Z = np.zeros(dims) # <<<<<<<<<<<<<< * * # Create a new array iterator */ __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 193; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_1 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__zeros); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 193; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 193; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_INCREF(((PyObject *)__pyx_v_dims)); PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_v_dims)); __Pyx_GIVEREF(((PyObject *)__pyx_v_dims)); __pyx_t_5 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 193; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; __pyx_v_Z = __pyx_t_5; __pyx_t_5 = 0; /* "nipy/labs/bindings/wrapper.pyx":196 * * # Create a new array iterator * multi = fffpy_multi_iterator_new(2, axis, Y, Z) # <<<<<<<<<<<<<< * * # Create views */ __pyx_v_multi = fffpy_multi_iterator_new(2, __pyx_v_axis, ((void *)__pyx_v_Y), ((void *)__pyx_v_Z)); /* "nipy/labs/bindings/wrapper.pyx":199 * * # Create views * y = multi.vector[0] # <<<<<<<<<<<<<< * z = multi.vector[1] * */ __pyx_v_y = (__pyx_v_multi->vector[0]); /* "nipy/labs/bindings/wrapper.pyx":200 * # Create views * y = multi.vector[0] * z = multi.vector[1] # <<<<<<<<<<<<<< * * # Loop */ __pyx_v_z = (__pyx_v_multi->vector[1]); /* 
"nipy/labs/bindings/wrapper.pyx":203 * * # Loop * while(multi.index < multi.size): # <<<<<<<<<<<<<< * z.data[0] = fff_vector_sum(y) * fffpy_multi_iterator_update(multi) */ while (1) { __pyx_t_6 = (__pyx_v_multi->index < __pyx_v_multi->size); if (!__pyx_t_6) break; /* "nipy/labs/bindings/wrapper.pyx":204 * # Loop * while(multi.index < multi.size): * z.data[0] = fff_vector_sum(y) # <<<<<<<<<<<<<< * fffpy_multi_iterator_update(multi) * */ (__pyx_v_z->data[0]) = ((double)fff_vector_sum(__pyx_v_y)); /* "nipy/labs/bindings/wrapper.pyx":205 * while(multi.index < multi.size): * z.data[0] = fff_vector_sum(y) * fffpy_multi_iterator_update(multi) # <<<<<<<<<<<<<< * * # Free memory */ fffpy_multi_iterator_update(__pyx_v_multi); } /* "nipy/labs/bindings/wrapper.pyx":208 * * # Free memory * fffpy_multi_iterator_delete(multi) # <<<<<<<<<<<<<< * * # Return */ fffpy_multi_iterator_delete(__pyx_v_multi); /* "nipy/labs/bindings/wrapper.pyx":211 * * # Return * return Z.squeeze() # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_5 = PyObject_GetAttr(__pyx_v_Z, __pyx_n_s__squeeze); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("nipy.labs.bindings.wrapper.sum_via_iterators", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_dims); __Pyx_XDECREF(__pyx_v_Z); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_pf_5numpy_7ndarray___getbuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":194 * # experimental exception made for __getbuffer__ and __releasebuffer__ * # -- the details of this may change. * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< * # This implementation of getbuffer is geared towards Cython * # requirements, and does not yet fullfill the PEP. 
*/ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_v_copy_shape; int __pyx_v_i; int __pyx_v_ndim; int __pyx_v_endian_detector; int __pyx_v_little_endian; int __pyx_v_t; char *__pyx_v_f; PyArray_Descr *__pyx_v_descr = 0; int __pyx_v_offset; int __pyx_v_hasfields; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; char *__pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getbuffer__", 0); if (__pyx_v_info != NULL) { __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); } /* "numpy.pxd":200 * # of flags * * if info == NULL: return # <<<<<<<<<<<<<< * * cdef int copy_shape, i, ndim */ __pyx_t_1 = (__pyx_v_info == NULL); if (__pyx_t_1) { __pyx_r = 0; goto __pyx_L0; goto __pyx_L3; } __pyx_L3:; /* "numpy.pxd":203 * * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((&endian_detector)[0] != 0) * */ __pyx_v_endian_detector = 1; /* "numpy.pxd":204 * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * * ndim = PyArray_NDIM(self) */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "numpy.pxd":206 * cdef bint little_endian = ((&endian_detector)[0] != 0) * * ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<< * * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_v_ndim = PyArray_NDIM(__pyx_v_self); /* "numpy.pxd":208 * ndim = PyArray_NDIM(self) * * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * copy_shape = 1 * else: */ __pyx_t_1 = ((sizeof(npy_intp)) != (sizeof(Py_ssize_t))); if (__pyx_t_1) { /* "numpy.pxd":209 * * if sizeof(npy_intp) != sizeof(Py_ssize_t): * copy_shape = 1 # <<<<<<<<<<<<<< * else: * copy_shape = 0 */ __pyx_v_copy_shape = 1; goto __pyx_L4; } /*else*/ { /* "numpy.pxd":211 * copy_shape = 1 * else: * copy_shape = 0 # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) */ __pyx_v_copy_shape = 0; } __pyx_L4:; /* "numpy.pxd":213 * copy_shape = 0 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS); if (__pyx_t_1) { /* "numpy.pxd":214 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not C contiguous") * */ __pyx_t_2 = (!PyArray_CHKFLAGS(__pyx_v_self, NPY_C_CONTIGUOUS)); __pyx_t_3 = __pyx_t_2; } else { __pyx_t_3 = __pyx_t_1; } if (__pyx_t_3) { /* "numpy.pxd":215 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_2), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; 
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L5; } __pyx_L5:; /* "numpy.pxd":217 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ __pyx_t_3 = ((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS); if (__pyx_t_3) { /* "numpy.pxd":218 * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not Fortran contiguous") * */ __pyx_t_1 = (!PyArray_CHKFLAGS(__pyx_v_self, NPY_F_CONTIGUOUS)); __pyx_t_2 = __pyx_t_1; } else { __pyx_t_2 = __pyx_t_3; } if (__pyx_t_2) { /* "numpy.pxd":219 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_4), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L6; } __pyx_L6:; /* "numpy.pxd":221 * raise ValueError(u"ndarray is not Fortran contiguous") * * info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<< * info.ndim = ndim * if copy_shape: */ __pyx_v_info->buf = PyArray_DATA(__pyx_v_self); /* "numpy.pxd":222 * * info.buf = PyArray_DATA(self) * info.ndim = ndim # <<<<<<<<<<<<<< * if copy_shape: * # Allocate new buffer for strides and shape info. */ __pyx_v_info->ndim = __pyx_v_ndim; /* "numpy.pxd":223 * info.buf = PyArray_DATA(self) * info.ndim = ndim * if copy_shape: # <<<<<<<<<<<<<< * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. */ if (__pyx_v_copy_shape) { /* "numpy.pxd":226 * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. * info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) # <<<<<<<<<<<<<< * info.shape = info.strides + ndim * for i in range(ndim): */ __pyx_v_info->strides = ((Py_ssize_t *)malloc((((sizeof(Py_ssize_t)) * ((size_t)__pyx_v_ndim)) * 2))); /* "numpy.pxd":227 * # This is allocated as one block, strides first. 
* info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) * info.shape = info.strides + ndim # <<<<<<<<<<<<<< * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] */ __pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim); /* "numpy.pxd":228 * info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) * info.shape = info.strides + ndim * for i in range(ndim): # <<<<<<<<<<<<<< * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] */ __pyx_t_5 = __pyx_v_ndim; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "numpy.pxd":229 * info.shape = info.strides + ndim * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<< * info.shape[i] = PyArray_DIMS(self)[i] * else: */ (__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]); /* "numpy.pxd":230 * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<< * else: * info.strides = PyArray_STRIDES(self) */ (__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]); } goto __pyx_L7; } /*else*/ { /* "numpy.pxd":232 * info.shape[i] = PyArray_DIMS(self)[i] * else: * info.strides = PyArray_STRIDES(self) # <<<<<<<<<<<<<< * info.shape = PyArray_DIMS(self) * info.suboffsets = NULL */ __pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self)); /* "numpy.pxd":233 * else: * info.strides = PyArray_STRIDES(self) * info.shape = PyArray_DIMS(self) # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) */ __pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(__pyx_v_self)); } __pyx_L7:; /* "numpy.pxd":234 * info.strides = PyArray_STRIDES(self) * info.shape = PyArray_DIMS(self) * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) */ __pyx_v_info->suboffsets = NULL; /* "numpy.pxd":235 * info.shape = PyArray_DIMS(self) * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<< * info.readonly = not PyArray_ISWRITEABLE(self) * */ __pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self); /* "numpy.pxd":236 * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<< * * cdef int t */ __pyx_v_info->readonly = (!PyArray_ISWRITEABLE(__pyx_v_self)); /* "numpy.pxd":239 * * cdef int t * cdef char* f = NULL # <<<<<<<<<<<<<< * cdef dtype descr = self.descr * cdef list stack */ __pyx_v_f = NULL; /* "numpy.pxd":240 * cdef int t * cdef char* f = NULL * cdef dtype descr = self.descr # <<<<<<<<<<<<<< * cdef list stack * cdef int offset */ __pyx_t_4 = ((PyObject *)__pyx_v_self->descr); __Pyx_INCREF(__pyx_t_4); __pyx_v_descr = ((PyArray_Descr *)__pyx_t_4); __pyx_t_4 = 0; /* "numpy.pxd":244 * cdef int offset * * cdef bint hasfields = PyDataType_HASFIELDS(descr) # <<<<<<<<<<<<<< * * if not hasfields and not copy_shape: */ __pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr); /* "numpy.pxd":246 * cdef bint hasfields = PyDataType_HASFIELDS(descr) * * if not hasfields and not copy_shape: # <<<<<<<<<<<<<< * # do not call releasebuffer * info.obj = None */ __pyx_t_2 = (!__pyx_v_hasfields); if (__pyx_t_2) { __pyx_t_3 = (!__pyx_v_copy_shape); __pyx_t_1 = __pyx_t_3; } else { __pyx_t_1 = __pyx_t_2; } if (__pyx_t_1) { /* "numpy.pxd":248 * if not hasfields and not copy_shape: * # do not call releasebuffer * info.obj = None # <<<<<<<<<<<<<< * else: * # need to call 
releasebuffer */ __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = Py_None; goto __pyx_L10; } /*else*/ { /* "numpy.pxd":251 * else: * # need to call releasebuffer * info.obj = self # <<<<<<<<<<<<<< * * if not hasfields: */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); } __pyx_L10:; /* "numpy.pxd":253 * info.obj = self * * if not hasfields: # <<<<<<<<<<<<<< * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or */ __pyx_t_1 = (!__pyx_v_hasfields); if (__pyx_t_1) { /* "numpy.pxd":254 * * if not hasfields: * t = descr.type_num # <<<<<<<<<<<<<< * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): */ __pyx_t_5 = __pyx_v_descr->type_num; __pyx_v_t = __pyx_t_5; /* "numpy.pxd":255 * if not hasfields: * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_1 = (__pyx_v_descr->byteorder == '>'); if (__pyx_t_1) { __pyx_t_2 = __pyx_v_little_endian; } else { __pyx_t_2 = __pyx_t_1; } if (!__pyx_t_2) { /* "numpy.pxd":256 * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" */ __pyx_t_1 = (__pyx_v_descr->byteorder == '<'); if (__pyx_t_1) { __pyx_t_3 = (!__pyx_v_little_endian); __pyx_t_7 = __pyx_t_3; } else { __pyx_t_7 = __pyx_t_1; } __pyx_t_1 = __pyx_t_7; } else { __pyx_t_1 = __pyx_t_2; } if (__pyx_t_1) { /* "numpy.pxd":257 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_6), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L12; } __pyx_L12:; /* "numpy.pxd":258 * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" */ __pyx_t_1 = (__pyx_v_t == NPY_BYTE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__b; goto __pyx_L13; } /* "numpy.pxd":259 * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" */ __pyx_t_1 = (__pyx_v_t == NPY_UBYTE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__B; goto __pyx_L13; } /* "numpy.pxd":260 * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" */ __pyx_t_1 = (__pyx_v_t == NPY_SHORT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__h; goto __pyx_L13; } /* "numpy.pxd":261 * elif t == NPY_UBYTE: f = "B" * elif t == 
NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" */ __pyx_t_1 = (__pyx_v_t == NPY_USHORT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__H; goto __pyx_L13; } /* "numpy.pxd":262 * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" */ __pyx_t_1 = (__pyx_v_t == NPY_INT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__i; goto __pyx_L13; } /* "numpy.pxd":263 * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" */ __pyx_t_1 = (__pyx_v_t == NPY_UINT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__I; goto __pyx_L13; } /* "numpy.pxd":264 * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" */ __pyx_t_1 = (__pyx_v_t == NPY_LONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__l; goto __pyx_L13; } /* "numpy.pxd":265 * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" */ __pyx_t_1 = (__pyx_v_t == NPY_ULONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__L; goto __pyx_L13; } /* "numpy.pxd":266 * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" */ __pyx_t_1 = (__pyx_v_t == NPY_LONGLONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__q; goto __pyx_L13; } /* "numpy.pxd":267 * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" */ __pyx_t_1 = (__pyx_v_t == NPY_ULONGLONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Q; goto __pyx_L13; } /* "numpy.pxd":268 * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" */ __pyx_t_1 = (__pyx_v_t == NPY_FLOAT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__f; goto __pyx_L13; } /* "numpy.pxd":269 * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" */ __pyx_t_1 = (__pyx_v_t == NPY_DOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__d; goto __pyx_L13; } /* "numpy.pxd":270 * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" */ __pyx_t_1 = (__pyx_v_t == NPY_LONGDOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__g; goto __pyx_L13; } /* "numpy.pxd":271 * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" */ __pyx_t_1 = (__pyx_v_t == NPY_CFLOAT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Zf; goto __pyx_L13; } /* "numpy.pxd":272 * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" */ __pyx_t_1 = (__pyx_v_t == NPY_CDOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Zd; goto __pyx_L13; } /* "numpy.pxd":273 * elif t 
== NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f = "O" * else: */ __pyx_t_1 = (__pyx_v_t == NPY_CLONGDOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Zg; goto __pyx_L13; } /* "numpy.pxd":274 * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ __pyx_t_1 = (__pyx_v_t == NPY_OBJECT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__O; goto __pyx_L13; } /*else*/ { /* "numpy.pxd":276 * elif t == NPY_OBJECT: f = "O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * info.format = f * return */ __pyx_t_4 = PyInt_FromLong(__pyx_v_t); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_8 = PyNumber_Remainder(((PyObject *)__pyx_kp_u_7), __pyx_t_4); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_8)); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_t_8)); __Pyx_GIVEREF(((PyObject *)__pyx_t_8)); __pyx_t_8 = 0; __pyx_t_8 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_8, 0, 0, 0); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L13:; /* "numpy.pxd":277 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f # <<<<<<<<<<<<<< * return * else: */ __pyx_v_info->format = __pyx_v_f; /* "numpy.pxd":278 * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f * return # <<<<<<<<<<<<<< * else: * info.format = stdlib.malloc(_buffer_format_string_len) */ __pyx_r = 0; goto __pyx_L0; goto __pyx_L11; } /*else*/ { /* "numpy.pxd":280 * return * else: * info.format = stdlib.malloc(_buffer_format_string_len) # <<<<<<<<<<<<<< * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 */ __pyx_v_info->format = ((char *)malloc(255)); /* "numpy.pxd":281 * else: * info.format = stdlib.malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment # <<<<<<<<<<<<<< * offset = 0 * f = _util_dtypestring(descr, info.format + 1, */ (__pyx_v_info->format[0]) = '^'; /* "numpy.pxd":282 * info.format = stdlib.malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 # <<<<<<<<<<<<<< * f = _util_dtypestring(descr, info.format + 1, * info.format + _buffer_format_string_len, */ __pyx_v_offset = 0; /* "numpy.pxd":285 * f = _util_dtypestring(descr, info.format + 1, * info.format + _buffer_format_string_len, * &offset) # <<<<<<<<<<<<<< * f[0] = c'\0' # Terminate format string * */ __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 255), (&__pyx_v_offset)); if (unlikely(__pyx_t_9 == NULL)) 
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 283; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_f = __pyx_t_9; /* "numpy.pxd":286 * info.format + _buffer_format_string_len, * &offset) * f[0] = c'\0' # Terminate format string # <<<<<<<<<<<<<< * * def __releasebuffer__(ndarray self, Py_buffer* info): */ (__pyx_v_f[0]) = '\x00'; } __pyx_L11:; __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { __Pyx_GOTREF(Py_None); __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; } __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_descr); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__releasebuffer__ (wrapper)", 0); __pyx_pf_5numpy_7ndarray_2__releasebuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info)); __Pyx_RefNannyFinishContext(); } /* "numpy.pxd":288 * f[0] = c'\0' # Terminate format string * * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< * if PyArray_HASFIELDS(self): * stdlib.free(info.format) */ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__releasebuffer__", 0); /* "numpy.pxd":289 * * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_t_1 = PyArray_HASFIELDS(__pyx_v_self); if (__pyx_t_1) { /* "numpy.pxd":290 * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): * stdlib.free(info.format) # <<<<<<<<<<<<<< * if sizeof(npy_intp) != sizeof(Py_ssize_t): * stdlib.free(info.strides) */ free(__pyx_v_info->format); goto __pyx_L3; } __pyx_L3:; /* "numpy.pxd":291 * if PyArray_HASFIELDS(self): * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * stdlib.free(info.strides) * # info.shape was stored after info.strides in the same block */ __pyx_t_1 = ((sizeof(npy_intp)) != (sizeof(Py_ssize_t))); if (__pyx_t_1) { /* "numpy.pxd":292 * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): * stdlib.free(info.strides) # <<<<<<<<<<<<<< * # info.shape was stored after info.strides in the same block * */ free(__pyx_v_info->strides); goto __pyx_L4; } __pyx_L4:; __Pyx_RefNannyFinishContext(); } /* "numpy.pxd":768 * ctypedef npy_cdouble complex_t * * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(1, a) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0); /* "numpy.pxd":769 * * cdef inline object PyArray_MultiIterNew1(a): * 
return PyArray_MultiIterNew(1, a) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew2(a, b): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 769; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":771 * return PyArray_MultiIterNew(1, a) * * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(2, a, b) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0); /* "numpy.pxd":772 * * cdef inline object PyArray_MultiIterNew2(a, b): * return PyArray_MultiIterNew(2, a, b) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew3(a, b, c): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 772; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":774 * return PyArray_MultiIterNew(2, a, b) * * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(3, a, b, c) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0); /* "numpy.pxd":775 * * cdef inline object PyArray_MultiIterNew3(a, b, c): * return PyArray_MultiIterNew(3, a, b, c) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 775; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":777 * return PyArray_MultiIterNew(3, a, b, c) * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(4, a, b, c, d) * */ static CYTHON_INLINE PyObject 
*__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0); /* "numpy.pxd":778 * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): * return PyArray_MultiIterNew(4, a, b, c, d) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 778; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":780 * return PyArray_MultiIterNew(4, a, b, c, d) * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(5, a, b, c, d, e) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0); /* "numpy.pxd":781 * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): * return PyArray_MultiIterNew(5, a, b, c, d, e) # <<<<<<<<<<<<<< * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 781; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":783 * return PyArray_MultiIterNew(5, a, b, c, d, e) * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< * # Recursive utility function used in __getbuffer__ to get format * # string. The new location in the format string is returned. 
*/ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) { PyArray_Descr *__pyx_v_child = 0; int __pyx_v_endian_detector; int __pyx_v_little_endian; PyObject *__pyx_v_fields = 0; PyObject *__pyx_v_childname = NULL; PyObject *__pyx_v_new_offset = NULL; PyObject *__pyx_v_t = NULL; char *__pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *(*__pyx_t_6)(PyObject *); int __pyx_t_7; int __pyx_t_8; int __pyx_t_9; int __pyx_t_10; long __pyx_t_11; char *__pyx_t_12; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_util_dtypestring", 0); /* "numpy.pxd":790 * cdef int delta_offset * cdef tuple i * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((&endian_detector)[0] != 0) * cdef tuple fields */ __pyx_v_endian_detector = 1; /* "numpy.pxd":791 * cdef tuple i * cdef int endian_detector = 1 * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * cdef tuple fields * */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "numpy.pxd":794 * cdef tuple fields * * for childname in descr.names: # <<<<<<<<<<<<<< * fields = descr.fields[childname] * child, new_offset = fields */ if (unlikely(((PyObject *)__pyx_v_descr->names) == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_1 = ((PyObject *)__pyx_v_descr->names); __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; for (;;) { if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif __Pyx_XDECREF(__pyx_v_childname); __pyx_v_childname = __pyx_t_3; __pyx_t_3 = 0; /* "numpy.pxd":795 * * for childname in descr.names: * fields = descr.fields[childname] # <<<<<<<<<<<<<< * child, new_offset = fields * */ __pyx_t_3 = PyObject_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (!__pyx_t_3) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected tuple, got %.200s", Py_TYPE(__pyx_t_3)->tp_name), 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_XDECREF(((PyObject *)__pyx_v_fields)); __pyx_v_fields = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "numpy.pxd":796 * for childname in descr.names: * fields = descr.fields[childname] * child, new_offset = fields # <<<<<<<<<<<<<< * * if (end - f) - (new_offset - offset[0]) < 15: */ if (likely(PyTuple_CheckExact(((PyObject *)__pyx_v_fields)))) { PyObject* sequence = ((PyObject *)__pyx_v_fields); #if CYTHON_COMPILING_IN_CPYTHON Py_ssize_t size = Py_SIZE(sequence); #else Py_ssize_t size = PySequence_Size(sequence); #endif if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else 
if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); #else __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else if (1) { __Pyx_RaiseNoneNotIterableError(); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } else { Py_ssize_t index = -1; __pyx_t_5 = PyObject_GetIter(((PyObject *)__pyx_v_fields)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = Py_TYPE(__pyx_t_5)->tp_iternext; index = 0; __pyx_t_3 = __pyx_t_6(__pyx_t_5); if (unlikely(!__pyx_t_3)) goto __pyx_L5_unpacking_failed; __Pyx_GOTREF(__pyx_t_3); index = 1; __pyx_t_4 = __pyx_t_6(__pyx_t_5); if (unlikely(!__pyx_t_4)) goto __pyx_L5_unpacking_failed; __Pyx_GOTREF(__pyx_t_4); if (__Pyx_IternextUnpackEndCheck(__pyx_t_6(__pyx_t_5), 2) < 0) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_6 = NULL; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L6_unpacking_done; __pyx_L5_unpacking_failed:; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_6 = NULL; if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_L6_unpacking_done:; } if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_XDECREF(((PyObject *)__pyx_v_child)); __pyx_v_child = ((PyArray_Descr *)__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_v_new_offset); __pyx_v_new_offset = __pyx_t_4; __pyx_t_4 = 0; /* "numpy.pxd":798 * child, new_offset = fields * * if (end - f) - (new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * */ __pyx_t_4 = PyInt_FromLong((__pyx_v_end - __pyx_v_f)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyInt_FromLong((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_3); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyNumber_Subtract(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyObject_RichCompare(__pyx_t_3, __pyx_int_15, Py_LT); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = 
__pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { /* "numpy.pxd":799 * * if (end - f) - (new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_t_5 = PyObject_Call(__pyx_builtin_RuntimeError, ((PyObject *)__pyx_k_tuple_9), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L7; } __pyx_L7:; /* "numpy.pxd":801 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_7 = (__pyx_v_child->byteorder == '>'); if (__pyx_t_7) { __pyx_t_8 = __pyx_v_little_endian; } else { __pyx_t_8 = __pyx_t_7; } if (!__pyx_t_8) { /* "numpy.pxd":802 * * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * # One could encode it in the format string and have Cython */ __pyx_t_7 = (__pyx_v_child->byteorder == '<'); if (__pyx_t_7) { __pyx_t_9 = (!__pyx_v_little_endian); __pyx_t_10 = __pyx_t_9; } else { __pyx_t_10 = __pyx_t_7; } __pyx_t_7 = __pyx_t_10; } else { __pyx_t_7 = __pyx_t_8; } if (__pyx_t_7) { /* "numpy.pxd":803 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_10), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L8; } __pyx_L8:; /* "numpy.pxd":813 * * # Output padding bytes * while offset[0] < new_offset: # <<<<<<<<<<<<<< * f[0] = 120 # "x"; pad byte * f += 1 */ while (1) { __pyx_t_5 = PyInt_FromLong((__pyx_v_offset[0])); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_t_5, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if 
(!__pyx_t_7) break; /* "numpy.pxd":814 * # Output padding bytes * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<< * f += 1 * offset[0] += 1 */ (__pyx_v_f[0]) = 120; /* "numpy.pxd":815 * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte * f += 1 # <<<<<<<<<<<<<< * offset[0] += 1 * */ __pyx_v_f = (__pyx_v_f + 1); /* "numpy.pxd":816 * f[0] = 120 # "x"; pad byte * f += 1 * offset[0] += 1 # <<<<<<<<<<<<<< * * offset[0] += child.itemsize */ __pyx_t_11 = 0; (__pyx_v_offset[__pyx_t_11]) = ((__pyx_v_offset[__pyx_t_11]) + 1); } /* "numpy.pxd":818 * offset[0] += 1 * * offset[0] += child.itemsize # <<<<<<<<<<<<<< * * if not PyDataType_HASFIELDS(child): */ __pyx_t_11 = 0; (__pyx_v_offset[__pyx_t_11]) = ((__pyx_v_offset[__pyx_t_11]) + __pyx_v_child->elsize); /* "numpy.pxd":820 * offset[0] += child.itemsize * * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< * t = child.type_num * if end - f < 5: */ __pyx_t_7 = (!PyDataType_HASFIELDS(__pyx_v_child)); if (__pyx_t_7) { /* "numpy.pxd":821 * * if not PyDataType_HASFIELDS(child): * t = child.type_num # <<<<<<<<<<<<<< * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") */ __pyx_t_3 = PyInt_FromLong(__pyx_v_child->type_num); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 821; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_XDECREF(__pyx_v_t); __pyx_v_t = __pyx_t_3; __pyx_t_3 = 0; /* "numpy.pxd":822 * if not PyDataType_HASFIELDS(child): * t = child.type_num * if end - f < 5: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short.") * */ __pyx_t_7 = ((__pyx_v_end - __pyx_v_f) < 5); if (__pyx_t_7) { /* "numpy.pxd":823 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_t_3 = PyObject_Call(__pyx_builtin_RuntimeError, ((PyObject *)__pyx_k_tuple_12), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L12; } __pyx_L12:; /* "numpy.pxd":826 * * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" */ __pyx_t_3 = PyInt_FromLong(NPY_BYTE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 98; goto __pyx_L13; } /* "numpy.pxd":827 * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" */ __pyx_t_5 = PyInt_FromLong(NPY_UBYTE); 
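/* Explanatory comment (added for readability, not emitted by Cython): in the
 * if/elif chain that starts above, the integer literals written into f[0]
 * are the ASCII codes of the struct-module format characters named in the
 * inline #"..." comments from numpy.pxd, e.g. 98='b', 66='B', 104='h',
 * 72='H', 105='i', 73='I', 108='l', 76='L', 113='q', 81='Q', 102='f',
 * 100='d', 103='g', 79='O', 90='Z' and 120='x' for padding bytes. Integers
 * are used instead of character literals only to silence a Cython warning
 * ("Until ticket #99 is fixed"); the characters themselves are the same ones
 * assigned as one-character string constants in __getbuffer__ above. */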
if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 66; goto __pyx_L13; } /* "numpy.pxd":828 * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" */ __pyx_t_3 = PyInt_FromLong(NPY_SHORT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 104; goto __pyx_L13; } /* "numpy.pxd":829 * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" */ __pyx_t_5 = PyInt_FromLong(NPY_USHORT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 72; goto __pyx_L13; } /* "numpy.pxd":830 * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" */ __pyx_t_3 = PyInt_FromLong(NPY_INT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 105; goto __pyx_L13; } /* "numpy.pxd":831 * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t 
== NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" */ __pyx_t_5 = PyInt_FromLong(NPY_UINT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 73; goto __pyx_L13; } /* "numpy.pxd":832 * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" */ __pyx_t_3 = PyInt_FromLong(NPY_LONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 108; goto __pyx_L13; } /* "numpy.pxd":833 * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" */ __pyx_t_5 = PyInt_FromLong(NPY_ULONG); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 76; goto __pyx_L13; } /* "numpy.pxd":834 * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" */ __pyx_t_3 = PyInt_FromLong(NPY_LONGLONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; 
__pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 113; goto __pyx_L13; } /* "numpy.pxd":835 * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" */ __pyx_t_5 = PyInt_FromLong(NPY_ULONGLONG); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 81; goto __pyx_L13; } /* "numpy.pxd":836 * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" */ __pyx_t_3 = PyInt_FromLong(NPY_FLOAT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 102; goto __pyx_L13; } /* "numpy.pxd":837 * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf */ __pyx_t_5 = PyInt_FromLong(NPY_DOUBLE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 100; goto __pyx_L13; } /* "numpy.pxd":838 * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd */ __pyx_t_3 = PyInt_FromLong(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = 
PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 103; goto __pyx_L13; } /* "numpy.pxd":839 * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg */ __pyx_t_5 = PyInt_FromLong(NPY_CFLOAT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 102; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L13; } /* "numpy.pxd":840 * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" */ __pyx_t_3 = PyInt_FromLong(NPY_CDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 100; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L13; } /* "numpy.pxd":841 * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: */ __pyx_t_5 = PyInt_FromLong(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
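/* Explanatory comment (illustration, not part of the generated source): the
 * complex branches in this part of the chain emit two-character codes --
 * 'Z' followed by the matching float code, giving 'Zf', 'Zd' or 'Zg' -- and
 * advance f by one extra byte because the shared "f += 1" at the end of the
 * chain accounts for only a single character. As a hedged example, a
 * structured dtype such as numpy.dtype([('a', 'i4'), ('b', 'c16')]) would be
 * described by a buffer format along the lines of "^iZd": '^' written by
 * __getbuffer__, 'i' for the int32 field and 'Zd' for the complex128 field,
 * with any gaps between field offsets filled by 'x' pad bytes as in the
 * padding loop above. */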
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 103; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L13; } /* "numpy.pxd":842 * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ __pyx_t_3 = PyInt_FromLong(NPY_OBJECT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 79; goto __pyx_L13; } /*else*/ { /* "numpy.pxd":844 * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * f += 1 * else: */ __pyx_t_5 = PyNumber_Remainder(((PyObject *)__pyx_kp_u_7), __pyx_v_t); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_5)); __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_t_5)); __Pyx_GIVEREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L13:; /* "numpy.pxd":845 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * f += 1 # <<<<<<<<<<<<<< * else: * # Cython ignores struct boundary information ("T{...}"), */ __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L11; } /*else*/ { /* "numpy.pxd":849 * # Cython ignores struct boundary information ("T{...}"), * # so don't output it * f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<< * return f * */ __pyx_t_12 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_12 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 849; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_f = __pyx_t_12; } __pyx_L11:; } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "numpy.pxd":850 * # so don't output it * f = _util_dtypestring(child, f, end, offset) * return f # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_f; goto __pyx_L0; __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_child); __Pyx_XDECREF(__pyx_v_fields); 
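/* Explanatory comment (added for readability, not emitted by Cython):
 * _util_dtypestring walks descr.names field by field; when a child dtype has
 * fields of its own it recurses into itself (the generated comment notes that
 * Cython ignores struct boundary information "T{...}", so no grouping is
 * emitted), otherwise it appends one format code per field after padding with
 * 'x' bytes up to the field offset. The char pointer it returns is the
 * advanced write position, which __getbuffer__ finally terminates with a
 * NUL byte. */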
__Pyx_XDECREF(__pyx_v_childname); __Pyx_XDECREF(__pyx_v_new_offset); __Pyx_XDECREF(__pyx_v_t); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":965 * * * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< * cdef PyObject* baseptr * if base is None: */ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) { PyObject *__pyx_v_baseptr; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("set_array_base", 0); /* "numpy.pxd":967 * cdef inline void set_array_base(ndarray arr, object base): * cdef PyObject* baseptr * if base is None: # <<<<<<<<<<<<<< * baseptr = NULL * else: */ __pyx_t_1 = (__pyx_v_base == Py_None); if (__pyx_t_1) { /* "numpy.pxd":968 * cdef PyObject* baseptr * if base is None: * baseptr = NULL # <<<<<<<<<<<<<< * else: * Py_INCREF(base) # important to do this before decref below! */ __pyx_v_baseptr = NULL; goto __pyx_L3; } /*else*/ { /* "numpy.pxd":970 * baseptr = NULL * else: * Py_INCREF(base) # important to do this before decref below! # <<<<<<<<<<<<<< * baseptr = base * Py_XDECREF(arr.base) */ Py_INCREF(__pyx_v_base); /* "numpy.pxd":971 * else: * Py_INCREF(base) # important to do this before decref below! * baseptr = base # <<<<<<<<<<<<<< * Py_XDECREF(arr.base) * arr.base = baseptr */ __pyx_v_baseptr = ((PyObject *)__pyx_v_base); } __pyx_L3:; /* "numpy.pxd":972 * Py_INCREF(base) # important to do this before decref below! * baseptr = base * Py_XDECREF(arr.base) # <<<<<<<<<<<<<< * arr.base = baseptr * */ Py_XDECREF(__pyx_v_arr->base); /* "numpy.pxd":973 * baseptr = base * Py_XDECREF(arr.base) * arr.base = baseptr # <<<<<<<<<<<<<< * * cdef inline object get_array_base(ndarray arr): */ __pyx_v_arr->base = __pyx_v_baseptr; __Pyx_RefNannyFinishContext(); } /* "numpy.pxd":975 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("get_array_base", 0); /* "numpy.pxd":976 * * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: # <<<<<<<<<<<<<< * return None * else: */ __pyx_t_1 = (__pyx_v_arr->base == NULL); if (__pyx_t_1) { /* "numpy.pxd":977 * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: * return None # <<<<<<<<<<<<<< * else: * return arr.base */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_None); __pyx_r = Py_None; goto __pyx_L0; goto __pyx_L3; } /*else*/ { /* "numpy.pxd":979 * return None * else: * return arr.base # <<<<<<<<<<<<<< */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_arr->base)); __pyx_r = ((PyObject *)__pyx_v_arr->base); goto __pyx_L0; } __pyx_L3:; __pyx_r = Py_None; __Pyx_INCREF(Py_None); __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyMethodDef __pyx_methods[] = { {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 static struct PyModuleDef __pyx_moduledef = { PyModuleDef_HEAD_INIT, __Pyx_NAMESTR("wrapper"), __Pyx_DOCSTR(__pyx_k_13), /* m_doc */ -1, /* m_size */ __pyx_methods /* m_methods */, NULL, /* m_reload */ NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_kp_u_1, __pyx_k_1, sizeof(__pyx_k_1), 0, 1, 0, 0}, {&__pyx_kp_u_11, __pyx_k_11, sizeof(__pyx_k_11), 0, 1, 0, 0}, {&__pyx_kp_s_14, __pyx_k_14, sizeof(__pyx_k_14), 0, 0, 1, 0}, 
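/* Explanatory comment (added for readability, not emitted by Cython): the
 * __pyx_string_tab entries started above and continued below hold the Python
 * string constants this module creates at import time -- builtin names such
 * as ValueError and RuntimeError, numpy-related identifiers, and the
 * function, argument and local-variable names used by the wrappers in
 * nipy/labs/bindings/wrapper.pyx (fff_type, npy_type, pass_vector,
 * copy_vector, copy_via_iterators, sum_via_iterators, and so on). */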
{&__pyx_kp_s_15, __pyx_k_15, sizeof(__pyx_k_15), 0, 0, 1, 0}, {&__pyx_kp_s_16, __pyx_k_16, sizeof(__pyx_k_16), 0, 0, 1, 0}, {&__pyx_kp_s_17, __pyx_k_17, sizeof(__pyx_k_17), 0, 0, 1, 0}, {&__pyx_kp_s_18, __pyx_k_18, sizeof(__pyx_k_18), 0, 0, 1, 0}, {&__pyx_kp_s_19, __pyx_k_19, sizeof(__pyx_k_19), 0, 0, 1, 0}, {&__pyx_kp_s_20, __pyx_k_20, sizeof(__pyx_k_20), 0, 0, 1, 0}, {&__pyx_kp_s_21, __pyx_k_21, sizeof(__pyx_k_21), 0, 0, 1, 0}, {&__pyx_kp_s_24, __pyx_k_24, sizeof(__pyx_k_24), 0, 0, 1, 0}, {&__pyx_n_s_25, __pyx_k_25, sizeof(__pyx_k_25), 0, 0, 1, 1}, {&__pyx_kp_u_3, __pyx_k_3, sizeof(__pyx_k_3), 0, 1, 0, 0}, {&__pyx_n_s_38, __pyx_k_38, sizeof(__pyx_k_38), 0, 0, 1, 1}, {&__pyx_kp_u_5, __pyx_k_5, sizeof(__pyx_k_5), 0, 1, 0, 0}, {&__pyx_kp_u_7, __pyx_k_7, sizeof(__pyx_k_7), 0, 1, 0, 0}, {&__pyx_kp_u_8, __pyx_k_8, sizeof(__pyx_k_8), 0, 1, 0, 0}, {&__pyx_n_s__RuntimeError, __pyx_k__RuntimeError, sizeof(__pyx_k__RuntimeError), 0, 0, 1, 1}, {&__pyx_n_s__T, __pyx_k__T, sizeof(__pyx_k__T), 0, 0, 1, 1}, {&__pyx_n_s__ValueError, __pyx_k__ValueError, sizeof(__pyx_k__ValueError), 0, 0, 1, 1}, {&__pyx_n_s__X, __pyx_k__X, sizeof(__pyx_k__X), 0, 0, 1, 1}, {&__pyx_n_s__Xdum, __pyx_k__Xdum, sizeof(__pyx_k__Xdum), 0, 0, 1, 1}, {&__pyx_n_s__Y, __pyx_k__Y, sizeof(__pyx_k__Y), 0, 0, 1, 1}, {&__pyx_n_s__Z, __pyx_k__Z, sizeof(__pyx_k__Z), 0, 0, 1, 1}, {&__pyx_n_s____main__, __pyx_k____main__, sizeof(__pyx_k____main__), 0, 0, 1, 1}, {&__pyx_n_s____test__, __pyx_k____test__, sizeof(__pyx_k____test__), 0, 0, 1, 1}, {&__pyx_n_s____version__, __pyx_k____version__, sizeof(__pyx_k____version__), 0, 0, 1, 1}, {&__pyx_n_s__axis, __pyx_k__axis, sizeof(__pyx_k__axis), 0, 0, 1, 1}, {&__pyx_n_s__c_types, __pyx_k__c_types, sizeof(__pyx_k__c_types), 0, 0, 1, 1}, {&__pyx_n_s__copy, __pyx_k__copy, sizeof(__pyx_k__copy), 0, 0, 1, 1}, {&__pyx_n_s__copy_vector, __pyx_k__copy_vector, sizeof(__pyx_k__copy_vector), 0, 0, 1, 1}, {&__pyx_n_s__copy_via_iterators, __pyx_k__copy_via_iterators, sizeof(__pyx_k__copy_via_iterators), 0, 0, 1, 1}, {&__pyx_n_s__data, __pyx_k__data, sizeof(__pyx_k__data), 0, 0, 1, 1}, {&__pyx_n_s__dims, __pyx_k__dims, sizeof(__pyx_k__dims), 0, 0, 1, 1}, {&__pyx_n_s__double, __pyx_k__double, sizeof(__pyx_k__double), 0, 0, 1, 1}, {&__pyx_n_s__dtype, __pyx_k__dtype, sizeof(__pyx_k__dtype), 0, 0, 1, 1}, {&__pyx_n_s__elsize, __pyx_k__elsize, sizeof(__pyx_k__elsize), 0, 0, 1, 1}, {&__pyx_n_s__fff_t, __pyx_k__fff_t, sizeof(__pyx_k__fff_t), 0, 0, 1, 1}, {&__pyx_n_s__fff_type, __pyx_k__fff_type, sizeof(__pyx_k__fff_type), 0, 0, 1, 1}, {&__pyx_n_s__fff_types, __pyx_k__fff_types, sizeof(__pyx_k__fff_types), 0, 0, 1, 1}, {&__pyx_n_s__flag, __pyx_k__flag, sizeof(__pyx_k__flag), 0, 0, 1, 1}, {&__pyx_n_s__float, __pyx_k__float, sizeof(__pyx_k__float), 0, 0, 1, 1}, {&__pyx_n_s__i, __pyx_k__i, sizeof(__pyx_k__i), 0, 0, 1, 1}, {&__pyx_n_s__index, __pyx_k__index, sizeof(__pyx_k__index), 0, 0, 1, 1}, {&__pyx_n_s__int, __pyx_k__int, sizeof(__pyx_k__int), 0, 0, 1, 1}, {&__pyx_n_s__itemsize, __pyx_k__itemsize, sizeof(__pyx_k__itemsize), 0, 0, 1, 1}, {&__pyx_n_s__long, __pyx_k__long, sizeof(__pyx_k__long), 0, 0, 1, 1}, {&__pyx_n_s__multi, __pyx_k__multi, sizeof(__pyx_k__multi), 0, 0, 1, 1}, {&__pyx_n_s__nbytes, __pyx_k__nbytes, sizeof(__pyx_k__nbytes), 0, 0, 1, 1}, {&__pyx_n_s__niters, __pyx_k__niters, sizeof(__pyx_k__niters), 0, 0, 1, 1}, {&__pyx_n_s__np, __pyx_k__np, sizeof(__pyx_k__np), 0, 0, 1, 1}, {&__pyx_n_s__npy_t, __pyx_k__npy_t, sizeof(__pyx_k__npy_t), 0, 0, 1, 1}, {&__pyx_n_s__npy_type, __pyx_k__npy_type, 
sizeof(__pyx_k__npy_type), 0, 0, 1, 1}, {&__pyx_n_s__npy_types, __pyx_k__npy_types, sizeof(__pyx_k__npy_types), 0, 0, 1, 1}, {&__pyx_n_s__numpy, __pyx_k__numpy, sizeof(__pyx_k__numpy), 0, 0, 1, 1}, {&__pyx_n_s__pass_array, __pyx_k__pass_array, sizeof(__pyx_k__pass_array), 0, 0, 1, 1}, {&__pyx_n_s__pass_matrix, __pyx_k__pass_matrix, sizeof(__pyx_k__pass_matrix), 0, 0, 1, 1}, {&__pyx_n_s__pass_vector, __pyx_k__pass_vector, sizeof(__pyx_k__pass_vector), 0, 0, 1, 1}, {&__pyx_n_s__range, __pyx_k__range, sizeof(__pyx_k__range), 0, 0, 1, 1}, {&__pyx_n_s__relstride, __pyx_k__relstride, sizeof(__pyx_k__relstride), 0, 0, 1, 1}, {&__pyx_n_s__size, __pyx_k__size, sizeof(__pyx_k__size), 0, 0, 1, 1}, {&__pyx_n_s__squeeze, __pyx_k__squeeze, sizeof(__pyx_k__squeeze), 0, 0, 1, 1}, {&__pyx_n_s__stride, __pyx_k__stride, sizeof(__pyx_k__stride), 0, 0, 1, 1}, {&__pyx_n_s__sum_via_iterators, __pyx_k__sum_via_iterators, sizeof(__pyx_k__sum_via_iterators), 0, 0, 1, 1}, {&__pyx_n_s__type, __pyx_k__type, sizeof(__pyx_k__type), 0, 0, 1, 1}, {&__pyx_n_s__x, __pyx_k__x, sizeof(__pyx_k__x), 0, 0, 1, 1}, {&__pyx_n_s__y, __pyx_k__y, sizeof(__pyx_k__y), 0, 0, 1, 1}, {&__pyx_n_s__z, __pyx_k__z, sizeof(__pyx_k__z), 0, 0, 1, 1}, {&__pyx_n_s__zeros, __pyx_k__zeros, sizeof(__pyx_k__zeros), 0, 0, 1, 1}, {&__pyx_n_s__zeros_like, __pyx_k__zeros_like, sizeof(__pyx_k__zeros_like), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_range = __Pyx_GetName(__pyx_b, __pyx_n_s__range); if (!__pyx_builtin_range) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 191; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_ValueError = __Pyx_GetName(__pyx_b, __pyx_n_s__ValueError); if (!__pyx_builtin_ValueError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_RuntimeError = __Pyx_GetName(__pyx_b, __pyx_n_s__RuntimeError); if (!__pyx_builtin_RuntimeError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} return 0; __pyx_L1_error:; return -1; } static int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "numpy.pxd":215 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_k_tuple_2 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_2); __Pyx_INCREF(((PyObject *)__pyx_kp_u_1)); PyTuple_SET_ITEM(__pyx_k_tuple_2, 0, ((PyObject *)__pyx_kp_u_1)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_1)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_2)); /* "numpy.pxd":219 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_k_tuple_4 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_4); __Pyx_INCREF(((PyObject *)__pyx_kp_u_3)); PyTuple_SET_ITEM(__pyx_k_tuple_4, 0, ((PyObject *)__pyx_kp_u_3)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_3)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_4)); /* "numpy.pxd":257 * 
if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_k_tuple_6 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_6); __Pyx_INCREF(((PyObject *)__pyx_kp_u_5)); PyTuple_SET_ITEM(__pyx_k_tuple_6, 0, ((PyObject *)__pyx_kp_u_5)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_5)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_6)); /* "numpy.pxd":799 * * if (end - f) - (new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_k_tuple_9 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_9)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_9); __Pyx_INCREF(((PyObject *)__pyx_kp_u_8)); PyTuple_SET_ITEM(__pyx_k_tuple_9, 0, ((PyObject *)__pyx_kp_u_8)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_8)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_9)); /* "numpy.pxd":803 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_k_tuple_10 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_10)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_10); __Pyx_INCREF(((PyObject *)__pyx_kp_u_5)); PyTuple_SET_ITEM(__pyx_k_tuple_10, 0, ((PyObject *)__pyx_kp_u_5)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_5)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_10)); /* "numpy.pxd":823 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_k_tuple_12 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_12)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_12); __Pyx_INCREF(((PyObject *)__pyx_kp_u_11)); PyTuple_SET_ITEM(__pyx_k_tuple_12, 0, ((PyObject *)__pyx_kp_u_11)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_11)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_12)); /* "nipy/labs/bindings/wrapper.pyx":34 * * * def fff_type(dtype T): # <<<<<<<<<<<<<< * """ * fff_t, nbytes = fff_type(T) */ __pyx_k_tuple_22 = PyTuple_New(3); if (unlikely(!__pyx_k_tuple_22)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 34; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_22); __Pyx_INCREF(((PyObject *)__pyx_n_s__T)); PyTuple_SET_ITEM(__pyx_k_tuple_22, 0, ((PyObject *)__pyx_n_s__T)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__T)); __Pyx_INCREF(((PyObject *)__pyx_n_s__fff_t)); PyTuple_SET_ITEM(__pyx_k_tuple_22, 1, ((PyObject *)__pyx_n_s__fff_t)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__fff_t)); __Pyx_INCREF(((PyObject *)__pyx_n_s__nbytes)); PyTuple_SET_ITEM(__pyx_k_tuple_22, 2, ((PyObject *)__pyx_n_s__nbytes)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__nbytes)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_22)); __pyx_k_codeobj_23 = (PyObject*)__Pyx_PyCode_New(1, 0, 3, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, 
__pyx_k_tuple_22, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_24, __pyx_n_s__fff_type, 34, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_23)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 34; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/bindings/wrapper.pyx":47 * * * def npy_type(T): # <<<<<<<<<<<<<< * """ * npy_t, nbytes = npy_type(T) */ __pyx_k_tuple_26 = PyTuple_New(4); if (unlikely(!__pyx_k_tuple_26)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 47; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_26); __Pyx_INCREF(((PyObject *)__pyx_n_s__T)); PyTuple_SET_ITEM(__pyx_k_tuple_26, 0, ((PyObject *)__pyx_n_s__T)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__T)); __Pyx_INCREF(((PyObject *)__pyx_n_s__npy_t)); PyTuple_SET_ITEM(__pyx_k_tuple_26, 1, ((PyObject *)__pyx_n_s__npy_t)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__npy_t)); __Pyx_INCREF(((PyObject *)__pyx_n_s__fff_t)); PyTuple_SET_ITEM(__pyx_k_tuple_26, 2, ((PyObject *)__pyx_n_s__fff_t)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__fff_t)); __Pyx_INCREF(((PyObject *)__pyx_n_s__nbytes)); PyTuple_SET_ITEM(__pyx_k_tuple_26, 3, ((PyObject *)__pyx_n_s__nbytes)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__nbytes)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_26)); __pyx_k_codeobj_27 = (PyObject*)__Pyx_PyCode_New(1, 0, 4, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_26, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_24, __pyx_n_s__npy_type, 47, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_27)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 47; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/bindings/wrapper.pyx":61 * return c_types[npy_types.index(npy_t)], nbytes * * def pass_vector(ndarray X): # <<<<<<<<<<<<<< * """ * Y = pass_vector(X) */ __pyx_k_tuple_28 = PyTuple_New(3); if (unlikely(!__pyx_k_tuple_28)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 61; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_28); __Pyx_INCREF(((PyObject *)__pyx_n_s__X)); PyTuple_SET_ITEM(__pyx_k_tuple_28, 0, ((PyObject *)__pyx_n_s__X)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__X)); __Pyx_INCREF(((PyObject *)__pyx_n_s__x)); PyTuple_SET_ITEM(__pyx_k_tuple_28, 1, ((PyObject *)__pyx_n_s__x)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__x)); __Pyx_INCREF(((PyObject *)__pyx_n_s__y)); PyTuple_SET_ITEM(__pyx_k_tuple_28, 2, ((PyObject *)__pyx_n_s__y)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__y)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_28)); __pyx_k_codeobj_29 = (PyObject*)__Pyx_PyCode_New(1, 0, 3, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_28, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_24, __pyx_n_s__pass_vector, 61, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_29)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 61; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/bindings/wrapper.pyx":73 * * * def copy_vector(ndarray X, int flag): # <<<<<<<<<<<<<< * """ * Y = copy_vector(X, flag) */ __pyx_k_tuple_30 = PyTuple_New(10); if (unlikely(!__pyx_k_tuple_30)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_30); __Pyx_INCREF(((PyObject *)__pyx_n_s__X)); PyTuple_SET_ITEM(__pyx_k_tuple_30, 0, ((PyObject *)__pyx_n_s__X)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__X)); __Pyx_INCREF(((PyObject *)__pyx_n_s__flag)); PyTuple_SET_ITEM(__pyx_k_tuple_30, 1, ((PyObject *)__pyx_n_s__flag)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__flag)); __Pyx_INCREF(((PyObject *)__pyx_n_s__y)); 
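/* Each __pyx_k_tuple_NN assembled in __Pyx_InitCachedConstants lists the
   argument and local variable names of one wrapper function; this one,
   __pyx_k_tuple_30, holds the ten names of copy_vector (X, flag, y, data,
   size, stride, relstride, type, itemsize, fff_type).  The tuple is later
   passed as the varnames argument of __Pyx_PyCode_New, together with the
   source file name, function name and first line number, to build a cached
   code object describing the original .pyx definition. */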
PyTuple_SET_ITEM(__pyx_k_tuple_30, 2, ((PyObject *)__pyx_n_s__y)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__y)); __Pyx_INCREF(((PyObject *)__pyx_n_s__data)); PyTuple_SET_ITEM(__pyx_k_tuple_30, 3, ((PyObject *)__pyx_n_s__data)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__data)); __Pyx_INCREF(((PyObject *)__pyx_n_s__size)); PyTuple_SET_ITEM(__pyx_k_tuple_30, 4, ((PyObject *)__pyx_n_s__size)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__size)); __Pyx_INCREF(((PyObject *)__pyx_n_s__stride)); PyTuple_SET_ITEM(__pyx_k_tuple_30, 5, ((PyObject *)__pyx_n_s__stride)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__stride)); __Pyx_INCREF(((PyObject *)__pyx_n_s__relstride)); PyTuple_SET_ITEM(__pyx_k_tuple_30, 6, ((PyObject *)__pyx_n_s__relstride)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__relstride)); __Pyx_INCREF(((PyObject *)__pyx_n_s__type)); PyTuple_SET_ITEM(__pyx_k_tuple_30, 7, ((PyObject *)__pyx_n_s__type)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__type)); __Pyx_INCREF(((PyObject *)__pyx_n_s__itemsize)); PyTuple_SET_ITEM(__pyx_k_tuple_30, 8, ((PyObject *)__pyx_n_s__itemsize)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__itemsize)); __Pyx_INCREF(((PyObject *)__pyx_n_s__fff_type)); PyTuple_SET_ITEM(__pyx_k_tuple_30, 9, ((PyObject *)__pyx_n_s__fff_type)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__fff_type)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_30)); __pyx_k_codeobj_31 = (PyObject*)__Pyx_PyCode_New(2, 0, 10, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_30, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_24, __pyx_n_s__copy_vector, 73, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_31)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/bindings/wrapper.pyx":104 * * * def pass_matrix(ndarray X): # <<<<<<<<<<<<<< * """ * Y = pass_matrix(X) */ __pyx_k_tuple_32 = PyTuple_New(3); if (unlikely(!__pyx_k_tuple_32)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 104; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_32); __Pyx_INCREF(((PyObject *)__pyx_n_s__X)); PyTuple_SET_ITEM(__pyx_k_tuple_32, 0, ((PyObject *)__pyx_n_s__X)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__X)); __Pyx_INCREF(((PyObject *)__pyx_n_s__x)); PyTuple_SET_ITEM(__pyx_k_tuple_32, 1, ((PyObject *)__pyx_n_s__x)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__x)); __Pyx_INCREF(((PyObject *)__pyx_n_s__y)); PyTuple_SET_ITEM(__pyx_k_tuple_32, 2, ((PyObject *)__pyx_n_s__y)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__y)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_32)); __pyx_k_codeobj_33 = (PyObject*)__Pyx_PyCode_New(1, 0, 3, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_32, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_24, __pyx_n_s__pass_matrix, 104, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_33)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 104; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/bindings/wrapper.pyx":116 * * * def pass_array(ndarray X): # <<<<<<<<<<<<<< * """ * Y = pass_array(X) */ __pyx_k_tuple_34 = PyTuple_New(3); if (unlikely(!__pyx_k_tuple_34)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_34); __Pyx_INCREF(((PyObject *)__pyx_n_s__X)); PyTuple_SET_ITEM(__pyx_k_tuple_34, 0, ((PyObject *)__pyx_n_s__X)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__X)); __Pyx_INCREF(((PyObject *)__pyx_n_s__x)); PyTuple_SET_ITEM(__pyx_k_tuple_34, 1, ((PyObject *)__pyx_n_s__x)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__x)); __Pyx_INCREF(((PyObject 
*)__pyx_n_s__y)); PyTuple_SET_ITEM(__pyx_k_tuple_34, 2, ((PyObject *)__pyx_n_s__y)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__y)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_34)); __pyx_k_codeobj_35 = (PyObject*)__Pyx_PyCode_New(1, 0, 3, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_34, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_24, __pyx_n_s__pass_array, 116, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_35)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/bindings/wrapper.pyx":128 * * * def pass_vector_via_iterator(ndarray X, int axis=0, int niters=0): # <<<<<<<<<<<<<< * """ * Y = pass_vector_via_iterator(X, axis=0, niters=0) */ __pyx_k_tuple_36 = PyTuple_New(8); if (unlikely(!__pyx_k_tuple_36)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 128; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_36); __Pyx_INCREF(((PyObject *)__pyx_n_s__X)); PyTuple_SET_ITEM(__pyx_k_tuple_36, 0, ((PyObject *)__pyx_n_s__X)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__X)); __Pyx_INCREF(((PyObject *)__pyx_n_s__axis)); PyTuple_SET_ITEM(__pyx_k_tuple_36, 1, ((PyObject *)__pyx_n_s__axis)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__axis)); __Pyx_INCREF(((PyObject *)__pyx_n_s__niters)); PyTuple_SET_ITEM(__pyx_k_tuple_36, 2, ((PyObject *)__pyx_n_s__niters)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__niters)); __Pyx_INCREF(((PyObject *)__pyx_n_s__x)); PyTuple_SET_ITEM(__pyx_k_tuple_36, 3, ((PyObject *)__pyx_n_s__x)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__x)); __Pyx_INCREF(((PyObject *)__pyx_n_s__y)); PyTuple_SET_ITEM(__pyx_k_tuple_36, 4, ((PyObject *)__pyx_n_s__y)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__y)); __Pyx_INCREF(((PyObject *)__pyx_n_s__z)); PyTuple_SET_ITEM(__pyx_k_tuple_36, 5, ((PyObject *)__pyx_n_s__z)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__z)); __Pyx_INCREF(((PyObject *)__pyx_n_s__multi)); PyTuple_SET_ITEM(__pyx_k_tuple_36, 6, ((PyObject *)__pyx_n_s__multi)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__multi)); __Pyx_INCREF(((PyObject *)__pyx_n_s__Xdum)); PyTuple_SET_ITEM(__pyx_k_tuple_36, 7, ((PyObject *)__pyx_n_s__Xdum)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Xdum)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_36)); __pyx_k_codeobj_37 = (PyObject*)__Pyx_PyCode_New(3, 0, 8, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_36, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_24, __pyx_n_s_38, 128, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_37)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 128; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/bindings/wrapper.pyx":148 * * * def copy_via_iterators(ndarray Y, int axis=0): # <<<<<<<<<<<<<< * """ * Z = copy_via_iterators(Y, int axis=0) */ __pyx_k_tuple_39 = PyTuple_New(6); if (unlikely(!__pyx_k_tuple_39)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 148; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_39); __Pyx_INCREF(((PyObject *)__pyx_n_s__Y)); PyTuple_SET_ITEM(__pyx_k_tuple_39, 0, ((PyObject *)__pyx_n_s__Y)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Y)); __Pyx_INCREF(((PyObject *)__pyx_n_s__axis)); PyTuple_SET_ITEM(__pyx_k_tuple_39, 1, ((PyObject *)__pyx_n_s__axis)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__axis)); __Pyx_INCREF(((PyObject *)__pyx_n_s__y)); PyTuple_SET_ITEM(__pyx_k_tuple_39, 2, ((PyObject *)__pyx_n_s__y)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__y)); __Pyx_INCREF(((PyObject *)__pyx_n_s__z)); PyTuple_SET_ITEM(__pyx_k_tuple_39, 3, ((PyObject *)__pyx_n_s__z)); 
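/* Tuples 36, 39 and 41 play the same role for the iterator-based test
   functions (pass_vector_via_iterator, copy_via_iterators and
   sum_via_iterators); their extra locals include 'multi' (presumably the
   fff multi-iterator used to walk the array), 'Xdum', 'dims' and the loop
   index 'i'. */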
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__z)); __Pyx_INCREF(((PyObject *)__pyx_n_s__multi)); PyTuple_SET_ITEM(__pyx_k_tuple_39, 4, ((PyObject *)__pyx_n_s__multi)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__multi)); __Pyx_INCREF(((PyObject *)__pyx_n_s__Z)); PyTuple_SET_ITEM(__pyx_k_tuple_39, 5, ((PyObject *)__pyx_n_s__Z)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Z)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_39)); __pyx_k_codeobj_40 = (PyObject*)__Pyx_PyCode_New(2, 0, 6, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_39, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_24, __pyx_n_s__copy_via_iterators, 148, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_40)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 148; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/bindings/wrapper.pyx":180 * * * def sum_via_iterators(ndarray Y, int axis=0): # <<<<<<<<<<<<<< * """ * Z = dummy_iterator(Y, int axis=0) */ __pyx_k_tuple_41 = PyTuple_New(8); if (unlikely(!__pyx_k_tuple_41)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_41); __Pyx_INCREF(((PyObject *)__pyx_n_s__Y)); PyTuple_SET_ITEM(__pyx_k_tuple_41, 0, ((PyObject *)__pyx_n_s__Y)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Y)); __Pyx_INCREF(((PyObject *)__pyx_n_s__axis)); PyTuple_SET_ITEM(__pyx_k_tuple_41, 1, ((PyObject *)__pyx_n_s__axis)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__axis)); __Pyx_INCREF(((PyObject *)__pyx_n_s__y)); PyTuple_SET_ITEM(__pyx_k_tuple_41, 2, ((PyObject *)__pyx_n_s__y)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__y)); __Pyx_INCREF(((PyObject *)__pyx_n_s__z)); PyTuple_SET_ITEM(__pyx_k_tuple_41, 3, ((PyObject *)__pyx_n_s__z)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__z)); __Pyx_INCREF(((PyObject *)__pyx_n_s__multi)); PyTuple_SET_ITEM(__pyx_k_tuple_41, 4, ((PyObject *)__pyx_n_s__multi)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__multi)); __Pyx_INCREF(((PyObject *)__pyx_n_s__dims)); PyTuple_SET_ITEM(__pyx_k_tuple_41, 5, ((PyObject *)__pyx_n_s__dims)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__dims)); __Pyx_INCREF(((PyObject *)__pyx_n_s__Z)); PyTuple_SET_ITEM(__pyx_k_tuple_41, 6, ((PyObject *)__pyx_n_s__Z)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Z)); __Pyx_INCREF(((PyObject *)__pyx_n_s__i)); PyTuple_SET_ITEM(__pyx_k_tuple_41, 7, ((PyObject *)__pyx_n_s__i)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__i)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_41)); __pyx_k_codeobj_42 = (PyObject*)__Pyx_PyCode_New(2, 0, 8, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_41, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_24, __pyx_n_s__sum_via_iterators, 180, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_42)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_InitGlobals(void) { if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __pyx_int_15 = PyInt_FromLong(15); if (unlikely(!__pyx_int_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; return 0; __pyx_L1_error:; return -1; } #if PY_MAJOR_VERSION < 3 PyMODINIT_FUNC initwrapper(void); /*proto*/ PyMODINIT_FUNC initwrapper(void) 
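/* Module initialisation entry point.  Python 2 builds define and export
   initwrapper(); Python 3 builds define PyInit_wrapper().  The shared body
   below creates the module object, runs the cached-constant and builtin
   initialisers defined above, imports the NumPy C API via
   fffpy_import_array()/import_array(), builds the c_types/fff_types/
   npy_types lists and attaches each def'd wrapper function to the module
   with PyCFunction_NewEx. */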
#else PyMODINIT_FUNC PyInit_wrapper(void); /*proto*/ PyMODINIT_FUNC PyInit_wrapper(void) #endif { PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; PyObject *__pyx_t_12 = NULL; __Pyx_RefNannyDeclarations #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit_wrapper(void)", 0); if ( __Pyx_check_binary_version() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #ifdef __Pyx_CyFunction_USED if (__Pyx_CyFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? */ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4(__Pyx_NAMESTR("wrapper"), __pyx_methods, __Pyx_DOCSTR(__pyx_k_13), 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (!PyDict_GetItemString(modules, "nipy.labs.bindings.wrapper")) { if (unlikely(PyDict_SetItemString(modules, "nipy.labs.bindings.wrapper", __pyx_m) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } } #endif __pyx_b = PyImport_AddModule(__Pyx_NAMESTR(__Pyx_BUILTIN_MODULE_NAME)); if (unlikely(!__pyx_b)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #if CYTHON_COMPILING_IN_PYPY Py_INCREF(__pyx_b); #endif if (__Pyx_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; /*--- Initialize various global constants etc. 
---*/ if (unlikely(__Pyx_InitGlobals() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (__pyx_module_is_main_nipy__labs__bindings__wrapper) { if (__Pyx_SetAttrString(__pyx_m, "__name__", __pyx_n_s____main__) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; } /*--- Builtin init code ---*/ if (unlikely(__Pyx_InitCachedBuiltins() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Constants init code ---*/ if (unlikely(__Pyx_InitCachedConstants() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Global init code ---*/ /*--- Variable export code ---*/ /*--- Function export code ---*/ /*--- Type init code ---*/ /*--- Type import code ---*/ __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "type", #if CYTHON_COMPILING_IN_PYPY sizeof(PyTypeObject), #else sizeof(PyHeapTypeObject), #endif 0); if (unlikely(!__pyx_ptype_7cpython_4type_type)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_5numpy_dtype)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 155; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 169; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 178; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 861; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Variable import code ---*/ /*--- Function import code ---*/ /*--- Execution code ---*/ /* "nipy/labs/bindings/wrapper.pyx":9 * """ * * __version__ = '0.1' # <<<<<<<<<<<<<< * * */ if (PyObject_SetAttr(__pyx_m, __pyx_n_s____version__, ((PyObject *)__pyx_kp_s_14)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/bindings/wrapper.pyx":18 * * # Initialize numpy * fffpy_import_array() # <<<<<<<<<<<<<< * import_array() * import numpy as np */ fffpy_import_array(); /* "nipy/labs/bindings/wrapper.pyx":19 * # Initialize numpy * fffpy_import_array() * import_array() # <<<<<<<<<<<<<< * import numpy as np * */ import_array(); /* "nipy/labs/bindings/wrapper.pyx":20 * fffpy_import_array() * import_array() * import numpy as np # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_Import(((PyObject *)__pyx_n_s__numpy), 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__np, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; __pyx_clineno = __LINE__; goto 
__pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/wrapper.pyx":23 * * * c_types = ['unknown type', 'unsigned char', 'signed char', 'unsigned short', 'signed short', # <<<<<<<<<<<<<< * 'int', 'unsigned int', 'unsigned long', 'long', 'float', 'double'] * */ __pyx_t_1 = PyList_New(11); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 23; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)__pyx_kp_s_15)); PyList_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_kp_s_15)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_15)); __Pyx_INCREF(((PyObject *)__pyx_kp_s_16)); PyList_SET_ITEM(__pyx_t_1, 1, ((PyObject *)__pyx_kp_s_16)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_16)); __Pyx_INCREF(((PyObject *)__pyx_kp_s_17)); PyList_SET_ITEM(__pyx_t_1, 2, ((PyObject *)__pyx_kp_s_17)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_17)); __Pyx_INCREF(((PyObject *)__pyx_kp_s_18)); PyList_SET_ITEM(__pyx_t_1, 3, ((PyObject *)__pyx_kp_s_18)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_18)); __Pyx_INCREF(((PyObject *)__pyx_kp_s_19)); PyList_SET_ITEM(__pyx_t_1, 4, ((PyObject *)__pyx_kp_s_19)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_19)); __Pyx_INCREF(((PyObject *)__pyx_n_s__int)); PyList_SET_ITEM(__pyx_t_1, 5, ((PyObject *)__pyx_n_s__int)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__int)); __Pyx_INCREF(((PyObject *)__pyx_kp_s_20)); PyList_SET_ITEM(__pyx_t_1, 6, ((PyObject *)__pyx_kp_s_20)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_20)); __Pyx_INCREF(((PyObject *)__pyx_kp_s_21)); PyList_SET_ITEM(__pyx_t_1, 7, ((PyObject *)__pyx_kp_s_21)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_21)); __Pyx_INCREF(((PyObject *)__pyx_n_s__long)); PyList_SET_ITEM(__pyx_t_1, 8, ((PyObject *)__pyx_n_s__long)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__long)); __Pyx_INCREF(((PyObject *)__pyx_n_s__float)); PyList_SET_ITEM(__pyx_t_1, 9, ((PyObject *)__pyx_n_s__float)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__float)); __Pyx_INCREF(((PyObject *)__pyx_n_s__double)); PyList_SET_ITEM(__pyx_t_1, 10, ((PyObject *)__pyx_n_s__double)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__double)); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__c_types, ((PyObject *)__pyx_t_1)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 23; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; /* "nipy/labs/bindings/wrapper.pyx":26 * 'int', 'unsigned int', 'unsigned long', 'long', 'float', 'double'] * * fff_types = [FFF_UNKNOWN_TYPE, FFF_UCHAR, FFF_SCHAR, FFF_USHORT, FFF_SSHORT, # <<<<<<<<<<<<<< * FFF_UINT, FFF_INT, FFF_ULONG, FFF_LONG, FFF_FLOAT, FFF_DOUBLE] * */ __pyx_t_1 = PyInt_FromLong(FFF_UNKNOWN_TYPE); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 26; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyInt_FromLong(FFF_UCHAR); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 26; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyInt_FromLong(FFF_SCHAR); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 26; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyInt_FromLong(FFF_USHORT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 26; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyInt_FromLong(FFF_SSHORT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 26; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
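/* c_types (built just above), fff_types (being built here) and npy_types
   (built next) are parallel lists: index i in each names the same scalar
   type.  That is what lets npy_type() map a NumPy type code back to a C
   type name via c_types[npy_types.index(npy_t)], as shown in the traced
   source just before wrapper.pyx:61.  For instance, on a conventional
   platform npy_type(numpy.dtype('float64')) should come back as
   ('double', 8). */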
__Pyx_GOTREF(__pyx_t_5); /* "nipy/labs/bindings/wrapper.pyx":27 * * fff_types = [FFF_UNKNOWN_TYPE, FFF_UCHAR, FFF_SCHAR, FFF_USHORT, FFF_SSHORT, * FFF_UINT, FFF_INT, FFF_ULONG, FFF_LONG, FFF_FLOAT, FFF_DOUBLE] # <<<<<<<<<<<<<< * * npy_types = [cnp.NPY_NOTYPE, cnp.NPY_UBYTE, cnp.NPY_BYTE, cnp.NPY_USHORT, */ __pyx_t_6 = PyInt_FromLong(FFF_UINT); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 27; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = PyInt_FromLong(FFF_INT); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 27; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_7); __pyx_t_8 = PyInt_FromLong(FFF_ULONG); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 27; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __pyx_t_9 = PyInt_FromLong(FFF_LONG); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 27; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_9); __pyx_t_10 = PyInt_FromLong(FFF_FLOAT); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 27; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_10); __pyx_t_11 = PyInt_FromLong(FFF_DOUBLE); if (unlikely(!__pyx_t_11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 27; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_11); __pyx_t_12 = PyList_New(11); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 26; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_12); PyList_SET_ITEM(__pyx_t_12, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); PyList_SET_ITEM(__pyx_t_12, 1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); PyList_SET_ITEM(__pyx_t_12, 2, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); PyList_SET_ITEM(__pyx_t_12, 3, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); PyList_SET_ITEM(__pyx_t_12, 4, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); PyList_SET_ITEM(__pyx_t_12, 5, __pyx_t_6); __Pyx_GIVEREF(__pyx_t_6); PyList_SET_ITEM(__pyx_t_12, 6, __pyx_t_7); __Pyx_GIVEREF(__pyx_t_7); PyList_SET_ITEM(__pyx_t_12, 7, __pyx_t_8); __Pyx_GIVEREF(__pyx_t_8); PyList_SET_ITEM(__pyx_t_12, 8, __pyx_t_9); __Pyx_GIVEREF(__pyx_t_9); PyList_SET_ITEM(__pyx_t_12, 9, __pyx_t_10); __Pyx_GIVEREF(__pyx_t_10); PyList_SET_ITEM(__pyx_t_12, 10, __pyx_t_11); __Pyx_GIVEREF(__pyx_t_11); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_t_5 = 0; __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; if (PyObject_SetAttr(__pyx_m, __pyx_n_s__fff_types, ((PyObject *)__pyx_t_12)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 26; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(((PyObject *)__pyx_t_12)); __pyx_t_12 = 0; /* "nipy/labs/bindings/wrapper.pyx":29 * FFF_UINT, FFF_INT, FFF_ULONG, FFF_LONG, FFF_FLOAT, FFF_DOUBLE] * * npy_types = [cnp.NPY_NOTYPE, cnp.NPY_UBYTE, cnp.NPY_BYTE, cnp.NPY_USHORT, # <<<<<<<<<<<<<< * cnp.NPY_SHORT, cnp.NPY_UINT, cnp.NPY_INT, cnp.NPY_ULONG, * cnp.NPY_LONG, cnp.NPY_FLOAT, cnp.NPY_DOUBLE] */ __pyx_t_12 = PyInt_FromLong(NPY_NOTYPE); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 29; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_12); __pyx_t_11 = PyInt_FromLong(NPY_UBYTE); if (unlikely(!__pyx_t_11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 29; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_11); __pyx_t_10 = PyInt_FromLong(NPY_BYTE); if (unlikely(!__pyx_t_10)) {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 29; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_10); __pyx_t_9 = PyInt_FromLong(NPY_USHORT); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 29; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_9); /* "nipy/labs/bindings/wrapper.pyx":30 * * npy_types = [cnp.NPY_NOTYPE, cnp.NPY_UBYTE, cnp.NPY_BYTE, cnp.NPY_USHORT, * cnp.NPY_SHORT, cnp.NPY_UINT, cnp.NPY_INT, cnp.NPY_ULONG, # <<<<<<<<<<<<<< * cnp.NPY_LONG, cnp.NPY_FLOAT, cnp.NPY_DOUBLE] * */ __pyx_t_8 = PyInt_FromLong(NPY_SHORT); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __pyx_t_7 = PyInt_FromLong(NPY_UINT); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_7); __pyx_t_6 = PyInt_FromLong(NPY_INT); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __pyx_t_5 = PyInt_FromLong(NPY_ULONG); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); /* "nipy/labs/bindings/wrapper.pyx":31 * npy_types = [cnp.NPY_NOTYPE, cnp.NPY_UBYTE, cnp.NPY_BYTE, cnp.NPY_USHORT, * cnp.NPY_SHORT, cnp.NPY_UINT, cnp.NPY_INT, cnp.NPY_ULONG, * cnp.NPY_LONG, cnp.NPY_FLOAT, cnp.NPY_DOUBLE] # <<<<<<<<<<<<<< * * */ __pyx_t_4 = PyInt_FromLong(NPY_LONG); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 31; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyInt_FromLong(NPY_FLOAT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 31; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyInt_FromLong(NPY_DOUBLE); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 31; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = PyList_New(11); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 29; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); PyList_SET_ITEM(__pyx_t_1, 0, __pyx_t_12); __Pyx_GIVEREF(__pyx_t_12); PyList_SET_ITEM(__pyx_t_1, 1, __pyx_t_11); __Pyx_GIVEREF(__pyx_t_11); PyList_SET_ITEM(__pyx_t_1, 2, __pyx_t_10); __Pyx_GIVEREF(__pyx_t_10); PyList_SET_ITEM(__pyx_t_1, 3, __pyx_t_9); __Pyx_GIVEREF(__pyx_t_9); PyList_SET_ITEM(__pyx_t_1, 4, __pyx_t_8); __Pyx_GIVEREF(__pyx_t_8); PyList_SET_ITEM(__pyx_t_1, 5, __pyx_t_7); __Pyx_GIVEREF(__pyx_t_7); PyList_SET_ITEM(__pyx_t_1, 6, __pyx_t_6); __Pyx_GIVEREF(__pyx_t_6); PyList_SET_ITEM(__pyx_t_1, 7, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); PyList_SET_ITEM(__pyx_t_1, 8, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); PyList_SET_ITEM(__pyx_t_1, 9, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); PyList_SET_ITEM(__pyx_t_1, 10, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_12 = 0; __pyx_t_11 = 0; __pyx_t_10 = 0; __pyx_t_9 = 0; __pyx_t_8 = 0; __pyx_t_7 = 0; __pyx_t_6 = 0; __pyx_t_5 = 0; __pyx_t_4 = 0; __pyx_t_3 = 0; __pyx_t_2 = 0; if (PyObject_SetAttr(__pyx_m, __pyx_n_s__npy_types, ((PyObject *)__pyx_t_1)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 29; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; /* "nipy/labs/bindings/wrapper.pyx":34 * * * def fff_type(dtype T): # <<<<<<<<<<<<<< * """ * fff_t, nbytes = fff_type(T) */ __pyx_t_1 
= PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_8bindings_7wrapper_1fff_type, NULL, __pyx_n_s_25); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 34; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__fff_type, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 34; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/wrapper.pyx":47 * * * def npy_type(T): # <<<<<<<<<<<<<< * """ * npy_t, nbytes = npy_type(T) */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_8bindings_7wrapper_3npy_type, NULL, __pyx_n_s_25); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 47; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__npy_type, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 47; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/wrapper.pyx":61 * return c_types[npy_types.index(npy_t)], nbytes * * def pass_vector(ndarray X): # <<<<<<<<<<<<<< * """ * Y = pass_vector(X) */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_8bindings_7wrapper_5pass_vector, NULL, __pyx_n_s_25); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 61; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__pass_vector, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 61; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/wrapper.pyx":73 * * * def copy_vector(ndarray X, int flag): # <<<<<<<<<<<<<< * """ * Y = copy_vector(X, flag) */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_8bindings_7wrapper_7copy_vector, NULL, __pyx_n_s_25); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__copy_vector, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/wrapper.pyx":104 * * * def pass_matrix(ndarray X): # <<<<<<<<<<<<<< * """ * Y = pass_matrix(X) */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_8bindings_7wrapper_9pass_matrix, NULL, __pyx_n_s_25); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 104; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__pass_matrix, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 104; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/wrapper.pyx":116 * * * def pass_array(ndarray X): # <<<<<<<<<<<<<< * """ * Y = pass_array(X) */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_8bindings_7wrapper_11pass_array, NULL, __pyx_n_s_25); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__pass_array, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/wrapper.pyx":128 * * * def pass_vector_via_iterator(ndarray X, int axis=0, int niters=0): # <<<<<<<<<<<<<< * 
""" * Y = pass_vector_via_iterator(X, axis=0, niters=0) */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_8bindings_7wrapper_13pass_vector_via_iterator, NULL, __pyx_n_s_25); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 128; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s_38, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 128; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/wrapper.pyx":148 * * * def copy_via_iterators(ndarray Y, int axis=0): # <<<<<<<<<<<<<< * """ * Z = copy_via_iterators(Y, int axis=0) */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_8bindings_7wrapper_15copy_via_iterators, NULL, __pyx_n_s_25); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 148; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__copy_via_iterators, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 148; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/wrapper.pyx":180 * * * def sum_via_iterators(ndarray Y, int axis=0): # <<<<<<<<<<<<<< * """ * Z = dummy_iterator(Y, int axis=0) */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_8bindings_7wrapper_17sum_via_iterators, NULL, __pyx_n_s_25); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__sum_via_iterators, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/bindings/wrapper.pyx":1 * # -*- Mode: Python -*- Not really, but the syntax is close enough # <<<<<<<<<<<<<< * * */ __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_1)); if (PyObject_SetAttr(__pyx_m, __pyx_n_s____test__, ((PyObject *)__pyx_t_1)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; /* "numpy.pxd":975 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_XDECREF(__pyx_t_9); __Pyx_XDECREF(__pyx_t_10); __Pyx_XDECREF(__pyx_t_11); __Pyx_XDECREF(__pyx_t_12); if (__pyx_m) { __Pyx_AddTraceback("init nipy.labs.bindings.wrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); Py_DECREF(__pyx_m); __pyx_m = 0; } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init nipy.labs.bindings.wrapper"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if PY_MAJOR_VERSION < 3 return; #else return __pyx_m; #endif } /* Runtime support code */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule((char *)modname); if (!m) goto end; p = PyObject_GetAttrString(m, (char *)"RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); 
Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* CYTHON_REFNANNY */ static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name) { PyObject *result; result = PyObject_GetAttr(dict, name); if (!result) { if (dict != __pyx_b) { PyErr_Clear(); result = PyObject_GetAttr(__pyx_b, name); } if (!result) { PyErr_SetObject(PyExc_NameError, name); } } return result; } static int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, const char *name, int exact) { if (!type) { PyErr_Format(PyExc_SystemError, "Missing type object"); return 0; } if (none_allowed && obj == Py_None) return 1; else if (exact) { if (Py_TYPE(obj) == type) return 1; } else { if (PyObject_TypeCheck(obj, type)) return 1; } PyErr_Format(PyExc_TypeError, "Argument '%s' has incorrect type (expected %s, got %s)", name, type->tp_name, Py_TYPE(obj)->tp_name); return 0; } static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%s() takes %s %" CYTHON_FORMAT_SSIZE_T "d positional argument%s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? "" : "s", num_found); } static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 
1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%s() got an unexpected keyword argument '%s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } static CYTHON_INLINE int __Pyx_div_int(int a, int b) { int q = a / b; int r = a - q*b; q -= ((r != 0) & ((r ^ b) < 0)); return q; } static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb) { #if CYTHON_COMPILING_IN_CPYTHON PyObject *tmp_type, *tmp_value, *tmp_tb; PyThreadState *tstate = PyThreadState_GET(); tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_Restore(type, value, tb); #endif } static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb) { #if CYTHON_COMPILING_IN_CPYTHON PyThreadState *tstate = PyThreadState_GET(); *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(type, value, tb); #endif } #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } #if PY_VERSION_HEX < 0x02050000 if (PyClass_Check(type)) { #else if (PyType_Check(type)) { #endif #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; #if PY_VERSION_HEX < 0x02050000 if (PyInstance_Check(type)) { type = (PyObject*) ((PyInstanceObject*)type)->in_class; Py_INCREF(type); } else { type = 0; PyErr_SetString(PyExc_TypeError, "raise: exception must be an old-style class or instance"); goto raise_error; } #else type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } #endif } __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else /* Python 3+ */ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 
0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyEval_CallObject(type, args); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } if (cause && cause != Py_None) { PyObject *fixed_cause; if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { PyThreadState *tstate = PyThreadState_GET(); PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } } bad: Py_XDECREF(owned_instance); return; } #endif static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); } static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more than %" CYTHON_FORMAT_SSIZE_T "d value%s to unpack", index, (index == 1) ? 
"" : "s"); } static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); } static CYTHON_INLINE int __Pyx_IterFinish(void) { #if CYTHON_COMPILING_IN_CPYTHON PyThreadState *tstate = PyThreadState_GET(); PyObject* exc_type = tstate->curexc_type; if (unlikely(exc_type)) { if (likely(exc_type == PyExc_StopIteration) || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration)) { PyObject *exc_value, *exc_tb; exc_value = tstate->curexc_value; exc_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; Py_DECREF(exc_type); Py_XDECREF(exc_value); Py_XDECREF(exc_tb); return 0; } else { return -1; } } return 0; #else if (unlikely(PyErr_Occurred())) { if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) { PyErr_Clear(); return 0; } else { return -1; } } return 0; #endif } static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected) { if (unlikely(retval)) { Py_DECREF(retval); __Pyx_RaiseTooManyValuesError(expected); return -1; } else { return __Pyx_IterFinish(); } return 0; } static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { if (unlikely(!type)) { PyErr_Format(PyExc_SystemError, "Missing type object"); return 0; } if (likely(PyObject_TypeCheck(obj, type))) return 1; PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", Py_TYPE(obj)->tp_name, type->tp_name); return 0; } static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level) { PyObject *py_import = 0; PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; py_import = __Pyx_GetAttrString(__pyx_b, "__import__"); if (!py_import) goto bad; if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; #if PY_VERSION_HEX >= 0x02050000 { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if (strchr(__Pyx_MODULE_NAME, '.')) { /* try package relative import first */ PyObject *py_level = PyInt_FromLong(1); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; /* try absolute import on failure */ } #endif if (!module) { PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); } } #else if (level>0) { PyErr_SetString(PyExc_RuntimeError, "Relative import is not supported for Python <=2.4."); goto bad; } module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, NULL); #endif bad: Py_XDECREF(empty_list); Py_XDECREF(py_import); Py_XDECREF(empty_dict); return module; } static CYTHON_INLINE PyObject *__Pyx_PyInt_to_py_Py_intptr_t(Py_intptr_t val) { const Py_intptr_t neg_one = (Py_intptr_t)-1, const_zero = (Py_intptr_t)0; const int is_unsigned = const_zero < neg_one; if ((sizeof(Py_intptr_t) == sizeof(char)) || (sizeof(Py_intptr_t) == sizeof(short))) { return PyInt_FromLong((long)val); } else if ((sizeof(Py_intptr_t) == sizeof(int)) || (sizeof(Py_intptr_t) == sizeof(long))) { if (is_unsigned) return PyLong_FromUnsignedLong((unsigned long)val); else return 
PyInt_FromLong((long)val); } else if (sizeof(Py_intptr_t) == sizeof(PY_LONG_LONG)) { if (is_unsigned) return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG)val); else return PyLong_FromLongLong((PY_LONG_LONG)val); } else { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; return _PyLong_FromByteArray(bytes, sizeof(Py_intptr_t), little, !is_unsigned); } } #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return ::std::complex< float >(x, y); } #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return x + y*(__pyx_t_float_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { __pyx_t_float_complex z; z.real = x; z.imag = y; return z; } #endif #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex a, __pyx_t_float_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float denom = b.real * b.real + b.imag * b.imag; z.real = (a.real * b.real + a.imag * b.imag) / denom; z.imag = (a.imag * b.real - a.real * b.imag) / denom; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrtf(z.real*z.real + z.imag*z.imag); #else return hypotf(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { float denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(a, a); case 3: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(z, a); case 4: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } r = a.real; theta = 0; } else { r = __Pyx_c_absf(a); theta = atan2f(a.imag, a.real); } lnr = logf(r); z_r = expf(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cosf(z_theta); z.imag = z_r * 
sinf(z_theta); return z; } #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return ::std::complex< double >(x, y); } #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return x + y*(__pyx_t_double_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { __pyx_t_double_complex z; z.real = x; z.imag = y; return z; } #endif #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex a, __pyx_t_double_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double denom = b.real * b.real + b.imag * b.imag; z.real = (a.real * b.real + a.imag * b.imag) / denom; z.imag = (a.imag * b.real - a.real * b.imag) / denom; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrt(z.real*z.real + z.imag*z.imag); #else return hypot(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { double denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(a, a); case 3: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(z, a); case 4: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } r = a.real; theta = 0; } else { r = __Pyx_c_abs(a); theta = atan2(a.imag, a.real); } lnr = log(r); z_r = exp(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cos(z_theta); z.imag = z_r * sin(z_theta); return z; } #endif #endif static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject* x) { const unsigned char neg_one = (unsigned char)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(unsigned char) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(unsigned char)val)) 
{ if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to unsigned char" : "value too large to convert to unsigned char"); } return (unsigned char)-1; } return (unsigned char)val; } return (unsigned char)__Pyx_PyInt_AsUnsignedLong(x); } static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject* x) { const unsigned short neg_one = (unsigned short)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(unsigned short) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(unsigned short)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to unsigned short" : "value too large to convert to unsigned short"); } return (unsigned short)-1; } return (unsigned short)val; } return (unsigned short)__Pyx_PyInt_AsUnsignedLong(x); } static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject* x) { const unsigned int neg_one = (unsigned int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(unsigned int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(unsigned int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to unsigned int" : "value too large to convert to unsigned int"); } return (unsigned int)-1; } return (unsigned int)val; } return (unsigned int)__Pyx_PyInt_AsUnsignedLong(x); } static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject* x) { const char neg_one = (char)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(char) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(char)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to char" : "value too large to convert to char"); } return (char)-1; } return (char)val; } return (char)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject* x) { const short neg_one = (short)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(short) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(short)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to short" : "value too large to convert to short"); } return (short)-1; } return (short)val; } return (short)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject* x) { const int neg_one = (int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? 
"can't convert negative value to int" : "value too large to convert to int"); } return (int)-1; } return (int)val; } return (int)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject* x) { const signed char neg_one = (signed char)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(signed char) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(signed char)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to signed char" : "value too large to convert to signed char"); } return (signed char)-1; } return (signed char)val; } return (signed char)__Pyx_PyInt_AsSignedLong(x); } static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject* x) { const signed short neg_one = (signed short)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(signed short) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(signed short)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to signed short" : "value too large to convert to signed short"); } return (signed short)-1; } return (signed short)val; } return (signed short)__Pyx_PyInt_AsSignedLong(x); } static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject* x) { const signed int neg_one = (signed int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(signed int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(signed int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to signed int" : "value too large to convert to signed int"); } return (signed int)-1; } return (signed int)val; } return (signed int)__Pyx_PyInt_AsSignedLong(x); } static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject* x) { const int neg_one = (int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? 
"can't convert negative value to int" : "value too large to convert to int"); } return (int)-1; } return (int)val; } return (int)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject* x) { const unsigned long neg_one = (unsigned long)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned long"); return (unsigned long)-1; } return (unsigned long)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned long"); return (unsigned long)-1; } return (unsigned long)PyLong_AsUnsignedLong(x); } else { return (unsigned long)PyLong_AsLong(x); } } else { unsigned long val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (unsigned long)-1; val = __Pyx_PyInt_AsUnsignedLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject* x) { const unsigned PY_LONG_LONG neg_one = (unsigned PY_LONG_LONG)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned PY_LONG_LONG"); return (unsigned PY_LONG_LONG)-1; } return (unsigned PY_LONG_LONG)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned PY_LONG_LONG"); return (unsigned PY_LONG_LONG)-1; } return (unsigned PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); } else { return (unsigned PY_LONG_LONG)PyLong_AsLongLong(x); } } else { unsigned PY_LONG_LONG val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (unsigned PY_LONG_LONG)-1; val = __Pyx_PyInt_AsUnsignedLongLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject* x) { const long neg_one = (long)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long)-1; } return (long)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long)-1; } return (long)PyLong_AsUnsignedLong(x); } else { return (long)PyLong_AsLong(x); } } else { long val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (long)-1; val = __Pyx_PyInt_AsLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject* x) { const PY_LONG_LONG neg_one = (PY_LONG_LONG)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to PY_LONG_LONG"); return (PY_LONG_LONG)-1; } return (PY_LONG_LONG)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { 
PyErr_SetString(PyExc_OverflowError, "can't convert negative value to PY_LONG_LONG"); return (PY_LONG_LONG)-1; } return (PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); } else { return (PY_LONG_LONG)PyLong_AsLongLong(x); } } else { PY_LONG_LONG val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (PY_LONG_LONG)-1; val = __Pyx_PyInt_AsLongLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject* x) { const signed long neg_one = (signed long)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed long"); return (signed long)-1; } return (signed long)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed long"); return (signed long)-1; } return (signed long)PyLong_AsUnsignedLong(x); } else { return (signed long)PyLong_AsLong(x); } } else { signed long val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (signed long)-1; val = __Pyx_PyInt_AsSignedLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject* x) { const signed PY_LONG_LONG neg_one = (signed PY_LONG_LONG)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed PY_LONG_LONG"); return (signed PY_LONG_LONG)-1; } return (signed PY_LONG_LONG)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed PY_LONG_LONG"); return (signed PY_LONG_LONG)-1; } return (signed PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); } else { return (signed PY_LONG_LONG)PyLong_AsLongLong(x); } } else { signed PY_LONG_LONG val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (signed PY_LONG_LONG)-1; val = __Pyx_PyInt_AsSignedLongLong(tmp); Py_DECREF(tmp); return val; } } static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); #if PY_VERSION_HEX < 0x02050000 return PyErr_Warn(NULL, message); #else return PyErr_WarnEx(NULL, message, 1); #endif } return 0; } #ifndef __PYX_HAVE_RT_ImportModule #define __PYX_HAVE_RT_ImportModule static PyObject *__Pyx_ImportModule(const char *name) { PyObject *py_name = 0; PyObject *py_module = 0; py_name = __Pyx_PyIdentifier_FromString(name); if (!py_name) goto bad; py_module = PyImport_Import(py_name); Py_DECREF(py_name); return py_module; bad: Py_XDECREF(py_name); return 0; } #endif #ifndef __PYX_HAVE_RT_ImportType #define __PYX_HAVE_RT_ImportType static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict) { PyObject *py_module = 0; PyObject *result = 0; PyObject *py_name = 0; char warning[200]; 
py_module = __Pyx_ImportModule(module_name); if (!py_module) goto bad; py_name = __Pyx_PyIdentifier_FromString(class_name); if (!py_name) goto bad; result = PyObject_GetAttr(py_module, py_name); Py_DECREF(py_name); py_name = 0; Py_DECREF(py_module); py_module = 0; if (!result) goto bad; if (!PyType_Check(result)) { PyErr_Format(PyExc_TypeError, "%s.%s is not a type object", module_name, class_name); goto bad; } if (!strict && (size_t)((PyTypeObject *)result)->tp_basicsize > size) { PyOS_snprintf(warning, sizeof(warning), "%s.%s size changed, may indicate binary incompatibility", module_name, class_name); #if PY_VERSION_HEX < 0x02050000 if (PyErr_Warn(NULL, warning) < 0) goto bad; #else if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; #endif } else if ((size_t)((PyTypeObject *)result)->tp_basicsize != size) { PyErr_Format(PyExc_ValueError, "%s.%s has the wrong size, try recompiling", module_name, class_name); goto bad; } return (PyTypeObject *)result; bad: Py_XDECREF(py_module); Py_XDECREF(result); return NULL; } #endif static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = (start + end) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, new_max*sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( 
const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, /*int argcount,*/ 0, /*int kwonlyargcount,*/ 0, /*int nlocals,*/ 0, /*int stacksize,*/ 0, /*int flags,*/ __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, /*int firstlineno,*/ __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_globals = 0; PyFrameObject *py_frame = 0; py_code = __pyx_find_code_object(c_line ? c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? c_line : py_line, py_code); } py_globals = PyModule_GetDict(__pyx_m); if (!py_globals) goto bad; py_frame = PyFrame_New( PyThreadState_GET(), /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ py_globals, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; py_frame->f_lineno = py_line; PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else /* Python 3+ has unicode identifiers */ if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; ++t; } return 0; } /* Type Conversion Functions */ static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x) { PyNumberMethods *m; const char *name = NULL; PyObject *res = NULL; #if PY_VERSION_HEX < 0x03000000 if (PyInt_Check(x) || PyLong_Check(x)) #else if (PyLong_Check(x)) #endif return Py_INCREF(x), x; m = Py_TYPE(x)->tp_as_number; #if PY_VERSION_HEX < 0x03000000 if (m && m->nb_int) { name = "int"; res = PyNumber_Int(x); } else if (m && m->nb_long) { name = "long"; res = PyNumber_Long(x); } #else if (m && m->nb_int) { name = 
"int"; res = PyNumber_Long(x); } #endif if (res) { #if PY_VERSION_HEX < 0x03000000 if (!PyInt_Check(res) && !PyLong_Check(res)) { #else if (!PyLong_Check(res)) { #endif PyErr_Format(PyExc_TypeError, "__%s__ returned non-%s (type %.200s)", name, name, Py_TYPE(res)->tp_name); Py_DECREF(res); return NULL; } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject* x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { #if PY_VERSION_HEX < 0x02050000 if (ival <= LONG_MAX) return PyInt_FromLong((long)ival); else { unsigned char *bytes = (unsigned char *) &ival; int one = 1; int little = (int)*(unsigned char*)&one; return _PyLong_FromByteArray(bytes, sizeof(size_t), little, 0); } #else return PyInt_FromSize_t(ival); #endif } static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject* x) { unsigned PY_LONG_LONG val = __Pyx_PyInt_AsUnsignedLongLong(x); if (unlikely(val == (unsigned PY_LONG_LONG)-1 && PyErr_Occurred())) { return (size_t)-1; } else if (unlikely(val != (unsigned PY_LONG_LONG)(size_t)val)) { PyErr_SetString(PyExc_OverflowError, "value too large to convert to size_t"); return (size_t)-1; } return (size_t)val; } #endif /* Py_PYTHON_H */ nipy-0.3.0/nipy/labs/bindings/wrapper.pyx000066400000000000000000000121221210344137400204050ustar00rootroot00000000000000# -*- Mode: Python -*- Not really, but the syntax is close enough """ Iterators for testing. Author: Alexis Roche, 2009. """ __version__ = '0.1' # Includes from fff cimport * from numpy cimport dtype cimport numpy as cnp # Initialize numpy fffpy_import_array() import_array() import numpy as np c_types = ['unknown type', 'unsigned char', 'signed char', 'unsigned short', 'signed short', 'int', 'unsigned int', 'unsigned long', 'long', 'float', 'double'] fff_types = [FFF_UNKNOWN_TYPE, FFF_UCHAR, FFF_SCHAR, FFF_USHORT, FFF_SSHORT, FFF_UINT, FFF_INT, FFF_ULONG, FFF_LONG, FFF_FLOAT, FFF_DOUBLE] npy_types = [cnp.NPY_NOTYPE, cnp.NPY_UBYTE, cnp.NPY_BYTE, cnp.NPY_USHORT, cnp.NPY_SHORT, cnp.NPY_UINT, cnp.NPY_INT, cnp.NPY_ULONG, cnp.NPY_LONG, cnp.NPY_FLOAT, cnp.NPY_DOUBLE] def fff_type(dtype T): """ fff_t, nbytes = fff_type(T) T is a np.dtype instance. Return a tuple (str, int). """ cdef fff_datatype fff_t cdef unsigned int nbytes fff_t = fff_datatype_fromNumPy(T.type_num) nbytes = fff_nbytes(fff_t) return c_types[fff_types.index(fff_t)], nbytes def npy_type(T): """ npy_t, nbytes = npy_type(T) T is a string. Return a tuple (str, int). 
""" cdef int npy_t cdef fff_datatype fff_t cdef unsigned int nbytes fff_t = fff_types[c_types.index(T)] npy_t = fff_datatype_toNumPy(fff_t) nbytes = fff_nbytes(fff_t) return c_types[npy_types.index(npy_t)], nbytes def pass_vector(ndarray X): """ Y = pass_vector(X) """ cdef fff_vector *x, *y x = fff_vector_fromPyArray(X) y = fff_vector_new(x.size) fff_vector_memcpy(y, x) fff_vector_delete(x) return fff_vector_toPyArray(y) def copy_vector(ndarray X, int flag): """ Y = copy_vector(X, flag) flag == 0 ==> use fff flag == 1 ==> use numpy """ cdef fff_vector *y cdef void* data cdef int size, stride, relstride, type, itemsize cdef fff_datatype fff_type data = X.data size = X.shape[0] stride = X.strides[0] itemsize = X.descr.elsize type = X.descr.type_num relstride = stride/itemsize fff_type = fff_datatype_fromNumPy(type) y = fff_vector_new(size) if flag == 0: fff_vector_fetch(y, data, fff_type, relstride) else: fff_vector_fetch_using_NumPy(y, data, stride, type, itemsize) return fff_vector_toPyArray(y) def pass_matrix(ndarray X): """ Y = pass_matrix(X) """ cdef fff_matrix *x, *y x = fff_matrix_fromPyArray(X) y = fff_matrix_new(x.size1, x.size2) fff_matrix_memcpy(y, x) fff_matrix_delete(x) return fff_matrix_toPyArray(y) def pass_array(ndarray X): """ Y = pass_array(X) """ cdef fff_array *x, *y x = fff_array_fromPyArray(X) y = fff_array_new(x.datatype, x.dimX, x.dimY, x.dimZ, x.dimT) fff_array_copy(y, x) fff_array_delete(x) return fff_array_toPyArray(y) def pass_vector_via_iterator(ndarray X, int axis=0, int niters=0): """ Y = pass_vector_via_iterator(X, axis=0, niters=0) """ cdef fff_vector *x, *y, *z cdef fffpy_multi_iterator* multi Xdum = X.copy() ## at least two arrays needed for multi iterator multi = fffpy_multi_iterator_new(2, axis, X, Xdum) x = multi.vector[0] while(multi.index < niters): fffpy_multi_iterator_update(multi) y = fff_vector_new(x.size) fff_vector_memcpy(y, x) fffpy_multi_iterator_delete(multi) return fff_vector_toPyArray(y) def copy_via_iterators(ndarray Y, int axis=0): """ Z = copy_via_iterators(Y, int axis=0) Copy array Y into Z via fff's PyArray_MultiIterAllButAxis C function. Behavior should be equivalent to Z = Y.copy(). """ cdef fff_vector *y, *z cdef fffpy_multi_iterator* multi # Allocate output array Z = np.zeros_like(Y, dtype=np.float) # Create a new array iterator multi = fffpy_multi_iterator_new(2, axis, Y, Z) # Create views y = multi.vector[0] z = multi.vector[1] # Loop while(multi.index < multi.size): fff_vector_memcpy(z, y) fffpy_multi_iterator_update(multi) # Free memory fffpy_multi_iterator_delete(multi) # Return return Z def sum_via_iterators(ndarray Y, int axis=0): """ Z = dummy_iterator(Y, int axis=0) Return the sum of input elements along the dimension specified by axis. Behavior should be equivalent to Z = Y.sum(axis). 
""" cdef fff_vector *y, *z cdef fffpy_multi_iterator* multi # Allocate output array dims = [Y.shape[i] for i in range(Y.ndim)] dims[axis] = 1 Z = np.zeros(dims) # Create a new array iterator multi = fffpy_multi_iterator_new(2, axis, Y, Z) # Create views y = multi.vector[0] z = multi.vector[1] # Loop while(multi.index < multi.size): z.data[0] = fff_vector_sum(y) fffpy_multi_iterator_update(multi) # Free memory fffpy_multi_iterator_delete(multi) # Return return Z.squeeze() nipy-0.3.0/nipy/labs/datasets/000077500000000000000000000000001210344137400162005ustar00rootroot00000000000000nipy-0.3.0/nipy/labs/datasets/__init__.py000066400000000000000000000011541210344137400203120ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Package containing core nipy classes. """ __docformat__ = 'restructuredtext' from .volumes.volume_field import VolumeField from .volumes.volume_img import VolumeImg from .volumes.volume_grid import VolumeGrid from .transforms.transform import Transform, CompositionError from .transforms.affine_transform import AffineTransform from .transforms.affine_utils import apply_affine from .converters import as_volume_img, save from nipy.testing import Tester test = Tester().test bench = Tester().bench nipy-0.3.0/nipy/labs/datasets/converters.py000066400000000000000000000073021210344137400207460ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Conversion mechansims for IO and interaction between volumetric datasets and other type of neuroimaging data. """ import os import numpy as np import nibabel as nib from nibabel.spatialimages import SpatialImage from .volumes.volume_img import VolumeImg def as_volume_img(obj, copy=True, squeeze=True, world_space=None): """ Convert the input to a VolumeImg. Parameters ---------- obj : filename, pynifti or brifti object, or volume dataset. Input object, in any form that can be converted to a VolumeImg. This includes Nifti filenames, pynifti or brifti objects, or other volumetric dataset objects. copy: boolean, optional If copy is True, the data and affine arrays are copied, elsewhere a view is taken. squeeze: boolean, optional If squeeze is True, the data array is squeeze on for dimensions above 3. world_space: string or None, optional An optional specification of the world space, to override that given by the image. Returns ------- volume_img: VolumeImg object A VolumeImg object containing the data. The metadata is kept as much as possible in the metadata attribute. Notes ------ The world space might not be correctly defined by the input object (in particular, when loading data from disk). In this case, you can correct it manually using the world_space keyword argument. For pynifti objects, the data is transposed. 
""" if hasattr(obj, 'as_volume_img'): obj = obj.as_volume_img(copy=copy) if copy: obj = obj.__copy__() return obj elif isinstance(obj, basestring): if not os.path.exists(obj): raise ValueError("The file '%s' cannot be found" % obj) obj = nib.load(obj) copy = False if isinstance(obj, SpatialImage): data = obj.get_data() affine = obj.get_affine() header = dict(obj.get_header()) fname = obj.file_map['image'].filename if fname: header['filename'] = fname elif hasattr(obj, 'data') and hasattr(obj, 'sform') and \ hasattr(obj, 'getVolumeExtent'): # Duck-types to a pynifti object data = obj.data.T affine = obj.sform header = obj.header filename = obj.getFilename() if filename != '': header['filename'] = filename else: raise ValueError('Invalid type (%s) passed in: cannot convert %s to ' 'VolumeImg' % (type(obj), obj)) if world_space is None and header.get('sform_code', 0) == 4: world_space = 'mni152' data = np.asanyarray(data) affine = np.asanyarray(affine) if copy: data = data.copy() affine = affine.copy() if squeeze: # Squeeze the dimensions above 3 shape = [val for index, val in enumerate(data.shape) if val !=1 or index < 3] data = np.reshape(data, shape) return VolumeImg(data, affine, world_space, metadata=header) def save(filename, obj): """ Save an nipy image object to a file. """ obj = as_volume_img(obj, copy=False) hdr = nib.Nifti1Header() for key, value in obj.metadata.iteritems(): if key in hdr: hdr[key] = value img = nib.Nifti1Image(obj.get_data(), obj.affine, header=hdr) nib.save(img, filename) nipy-0.3.0/nipy/labs/datasets/setup.py000066400000000000000000000010331210344137400177070ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('datasets', parent_package, top_path) config.add_subpackage('volumes') config.add_subpackage('transforms') config.add_subpackage('tests') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) nipy-0.3.0/nipy/labs/datasets/tests/000077500000000000000000000000001210344137400173425ustar00rootroot00000000000000nipy-0.3.0/nipy/labs/datasets/tests/__init__.py000066400000000000000000000000001210344137400214410ustar00rootroot00000000000000nipy-0.3.0/nipy/labs/datasets/tests/test_converters.py000066400000000000000000000022411210344137400231440ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Test for the converters. """ import os import tempfile import nose from .. 
import as_volume_img, save import nibabel as nib data_file = os.path.join(nib.__path__[0], 'tests', 'data', 'example4d.nii.gz') def test_conversion(): brifti_obj = nib.load(data_file) vol_img = as_volume_img(data_file) yield nose.tools.assert_equals, as_volume_img(vol_img), \ vol_img yield nose.tools.assert_equals, as_volume_img(brifti_obj), \ vol_img def test_basics(): yield nose.tools.assert_raises, ValueError, as_volume_img, 'foobar' def test_save(): filename = tempfile.mktemp() try: img = as_volume_img(data_file) save(filename, img) finally: if os.path.exists(filename): os.remove(filename) try: import nifti def test_from_nifti(): nim = nifti.NiftiImage(data_file) yield nose.tools.assert_equals, as_volume_img(data_file), \ as_volume_img(nim) except ImportError: pass nipy-0.3.0/nipy/labs/datasets/transforms/000077500000000000000000000000001210344137400203765ustar00rootroot00000000000000nipy-0.3.0/nipy/labs/datasets/transforms/__init__.py000066400000000000000000000000001210344137400224750ustar00rootroot00000000000000nipy-0.3.0/nipy/labs/datasets/transforms/affine_transform.py000066400000000000000000000113351210344137400242760ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ The AffineTransform class """ import numpy as np from .transform import Transform from .affine_utils import apply_affine ################################################################################ # Class `AffineTransform` ################################################################################ class AffineTransform(Transform): """ A transformation from an input 3D space to an output 3D space defined by an affine matrix. It is defined by the affine matrix , and the name of the input and output spaces. """ # The coordinate mapping from input space to output space affine = None def __init__(self, input_space, output_space, affine): """ Create a new affine transform object. Parameters ---------- input_space: string Name of the input space output_space: string Name of the output space affine: 4x4 ndarray Affine matrix giving the coordinate mapping between the input and output space. """ assert hasattr(affine, '__array__'), \ 'affine argument should be an array-like' self.affine = affine self.input_space = input_space self.output_space = output_space #------------------------------------------------------------------------- # Transform Interface #------------------------------------------------------------------------- def composed_with(self, transform): """ Returns a new transform obtained by composing this transform with the one provided. Parameters ----------- transform: nipy.core.transforms.transform object The transform to compose with. """ if not isinstance(transform, AffineTransform): return super(AffineTransform, self).composed_with(transform) self._check_composition(transform) new_affine = np.dot(transform.affine, self.affine) return AffineTransform(self.input_space, transform.output_space, new_affine, ) def get_inverse(self): """ Return the inverse transform. """ return AffineTransform(self.output_space, self.input_space, np.linalg.inv(self.affine), ) def inverse_mapping(self, x, y, z): """ Transform the given coordinate from output space to input space. 
Parameters ---------- x: number or ndarray The x coordinates y: number or ndarray The y coordinates z: number or ndarray The z coordinates """ return apply_affine(x, y, z, np.linalg.inv(self.affine)) def mapping(self, x, y, z): """ Transform the given coordinate from input space to output space. Parameters ---------- x: number or ndarray The x coordinates y: number or ndarray The y coordinates z: number or ndarray The z coordinates """ return apply_affine(x, y, z, self.affine) #--------------------------------------------------------------------------- # Private methods #--------------------------------------------------------------------------- def __repr__(self): representation = \ '%s(\n affine=%s,\n input_space=%s,\n output_space=%s)' % ( self.__class__.__name__, '\n '.join(repr(self.affine).split('\n')), self.input_space, self.output_space, ) return representation def __copy__(self): """ Copy the transform """ return self.__class__(affine=self.affine, input_space=self.input_space, output_space=self.output_space) def __deepcopy__(self, option): """ Copy the transform and the arrays it contains. """ return self.__class__(affine=self.affine.copy(), input_space=self.input_space, output_space=self.output_space) def __eq__(self, other): return (isinstance(other, self.__class__) and self.input_space == other.input_space and self.output_space == other.output_space and np.allclose(self.affine, other.affine)) nipy-0.3.0/nipy/labs/datasets/transforms/affine_utils.py000066400000000000000000000067261210344137400234310ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Functions working with affine transformation matrices. """ import numpy as np def apply_affine(x, y, z, affine): """ Apply the affine matrix to the given coordinate. Parameters ---------- x: number or ndarray The x coordinates y: number or ndarray The y coordinates z: number or ndarray The z coordinates affine: 4x4 ndarray The affine matrix of the transformation """ x = np.atleast_1d(x) y = np.atleast_1d(y) z = np.atleast_1d(z) shape = x.shape assert y.shape == shape, 'Coordinate shapes are not equal' assert z.shape == shape, 'Coordinate shapes are not equal' # Ravel, but avoiding a copy if possible x = np.reshape(x, (-1,)) y = np.reshape(y, (-1,)) z = np.reshape(z, (-1,)) in_coords = np.c_[x, y, z, np.ones(x.shape)].T x, y, z, _ = np.dot(affine, in_coords) x = np.reshape(x, shape) y = np.reshape(y, shape) z = np.reshape(z, shape) return x, y, z def to_matrix_vector(transform): """Split a transform into its matrix and vector components. The transformation must be represented in homogeneous coordinates and is split into its rotation matrix and translation vector components. Parameters ---------- transform : ndarray Transform matrix in homogeneous coordinates. For example, a 4x4 transform representing rotations and translations in 3 dimensions. Returns ------- matrix, vector : ndarray The matrix and vector components of the transform matrix. For an NxN transform, matrix will be N-1xN-1 and vector will be 1xN-1. See Also -------- from_matrix_vector """ ndimin = transform.shape[0] - 1 ndimout = transform.shape[1] - 1 matrix = transform[0:ndimin, 0:ndimout] vector = transform[0:ndimin, ndimout] return matrix, vector def from_matrix_vector(matrix, vector): """Combine a matrix and vector into a homogeneous transform. Combine a rotation matrix and translation vector into a transform in homogeneous coordinates.
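For instance, combining a 3x3 identity matrix with a translation vector (illustrative values only) gives::

    mat = np.eye(3)
    vec = np.array([10., 11., 12.])
    xform = from_matrix_vector(mat, vec)
    # xform is now the 4x4 array
    # [[ 1.,  0.,  0., 10.],
    #  [ 0.,  1.,  0., 11.],
    #  [ 0.,  0.,  1., 12.],
    #  [ 0.,  0.,  0.,  1.]]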
Parameters ---------- matrix : ndarray An NxN array representing the rotation matrix. vector : ndarray A 1xN array representing the translation. Returns ------- xform : ndarray An N+1xN+1 transform matrix. See Also -------- to_matrix_vector """ nin, nout = matrix.shape t = np.zeros((nin+1,nout+1), matrix.dtype) t[0:nin, 0:nout] = matrix t[nin, nout] = 1. t[0:nin, nout] = vector return t def get_bounds(shape, affine): """ Return the world-space bounds occupied by an array given an affine. """ adim, bdim, cdim = shape adim -= 1 bdim -= 1 cdim -= 1 # form a collection of vectors for each 8 corners of the box box = np.array([ [0., 0, 0, 1], [adim, 0, 0, 1], [0, bdim, 0, 1], [0, 0, cdim, 1], [adim, bdim, 0, 1], [adim, 0, cdim, 1], [0, bdim, cdim, 1], [adim, bdim, cdim, 1] ]).T box = np.dot(affine, box)[:3] return zip(box.min(axis=-1), box.max(axis=-1)) nipy-0.3.0/nipy/labs/datasets/transforms/setup.py000066400000000000000000000007201210344137400221070ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('transforms', parent_package, top_path) config.add_subpackage('tests') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) nipy-0.3.0/nipy/labs/datasets/transforms/tests/000077500000000000000000000000001210344137400215405ustar00rootroot00000000000000nipy-0.3.0/nipy/labs/datasets/transforms/tests/__init__.py000066400000000000000000000000001210344137400236370ustar00rootroot00000000000000nipy-0.3.0/nipy/labs/datasets/transforms/tests/test_affine_transform.py000066400000000000000000000032001210344137400264670ustar00rootroot00000000000000 """ This test can only be run from the directory above, as it uses relative imports. """ import numpy as np import copy # Don't import from nipy.testing not to have a hard dependence on nipy, # use np.testing or nose from nose.tools import assert_equal, assert_true from ..affine_transform import AffineTransform def test_compose_with_inverse(): """ Check that an affine transform composed with its inverse returns the identity transform, and the taking the inverse twice gives the same transform. """ for _ in range(10): affine = np.eye(4) affine[:3, :3] = np.random.random((3, 3)) transform = AffineTransform('in', 'out', affine) identity = transform.composed_with( transform.get_inverse()) yield np.testing.assert_almost_equal, identity.affine, \ np.eye(4) yield assert_equal, transform, \ transform.get_inverse().get_inverse() x, y, z = np.random.random((3, 10)) x_, y_, z_ = transform.mapping(*transform.inverse_mapping(x, y, z)) yield np.testing.assert_almost_equal, x, x_ yield np.testing.assert_almost_equal, y, y_ yield np.testing.assert_almost_equal, z, z_ def test_misc(): """ Test misc private methods for AffineTransform. 
""" transform = AffineTransform('in', 'out', np.random.random((3, 3))) # Check that the repr does not raise an error: yield assert_true, isinstance(repr(transform), str) # Check that copy and eq work yield assert_equal, transform, copy.copy(transform) yield assert_equal, transform, copy.deepcopy(transform) nipy-0.3.0/nipy/labs/datasets/transforms/tests/test_affine_utils.py000066400000000000000000000016071210344137400256250ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This test can only be run from the directory above, as it uses relative imports. """ from nipy.testing import assert_equal import numpy as np from ..affine_utils import to_matrix_vector, from_matrix_vector def build_xform(): mat = np.arange(9).reshape((3, 3)) vec = np.arange(3) + 10 xform = np.empty((4, 4), dtype=mat.dtype) xform[:3, :3] = mat[:] xform[3, :] = [0, 0, 0, 1] xform[:3, 3] = vec[:] return mat, vec, xform def test_to_matrix_vector(): mat, vec, xform = build_xform() newmat, newvec = to_matrix_vector(xform) yield assert_equal, newmat, mat yield assert_equal, newvec, vec def test_from_matrix_vector(): mat, vec, xform = build_xform() newxform = from_matrix_vector(mat, vec) assert_equal, newxform, xform nipy-0.3.0/nipy/labs/datasets/transforms/tests/test_transform.py000066400000000000000000000044771210344137400252000ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This test can only be run from the directory above, as it uses relative imports. """ import numpy as np import copy # Don't import from nipy.testing not to have a hard dependence on nipy, # use np.testing or nose from nose.tools import assert_equal, assert_raises, assert_true from ..transform import Transform, CompositionError ################################################################################ # Mappings def id(x, y, z): return x, y, z def mapping(x, y, z): return 2*x, y, 0.5*z ################################################################################ # Tests def test_composition(): t1 = Transform('in', 'mid', mapping=id) t2 = Transform('mid', 'out', mapping=mapping) yield assert_raises, CompositionError, t1.composed_with, t1 # Check forward composition (transforms have forward mappings) t12 = t1.composed_with(t2) x, y, z = np.random.random((3, 10)) yield np.testing.assert_equal, mapping(x, y, z), \ t12.mapping(x, y, z) # Check backward composition (transforms have reverse mappings) t21 = t2.get_inverse().composed_with(t1.get_inverse()) x, y, z = np.random.random((3, 10)) yield np.testing.assert_equal, mapping(x, y, z), \ t21.inverse_mapping(x, y, z) # Check that you cannot compose transforms that do not have chainable # mappings yield assert_raises, CompositionError, t1.composed_with, \ t1.get_inverse() def test_misc(): """ Test misc private methods for Transform. 
""" # Check that passing neither a mapping, nor an inverse_mapping raises # a ValueError yield assert_raises, ValueError, Transform, 'world1', 'world2' transform = Transform('in', 'out', mapping=mapping) # Check that the repr does not raise an error: yield assert_true, isinstance(repr(transform), str) # Check that copy and eq work yield assert_equal, transform, copy.copy(transform) def test_inverse(): t1 = Transform('in', 'mid', mapping=id, inverse_mapping=id) t2 = Transform('mid', 'out', mapping=mapping) t3 = Transform('mid', 'out', inverse_mapping=mapping) for t in (t1, t2, t3): yield assert_equal, t.get_inverse().get_inverse(), t nipy-0.3.0/nipy/labs/datasets/transforms/transform.py000066400000000000000000000164151210344137400227720ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ The base Transform class. This class defines the Transform interface and can be subclassed to define more clever composition logic. """ ################################################################################ class CompositionError(Exception): """ The Exception raised when composing transforms with non matching respective input and output word spaces. """ pass ################################################################################ # Class `Transform` ################################################################################ class Transform(object): """ A transform is a representation of a transformation from one 3D space to another. It is composed of a coordinate mapping, or its inverse, as well as the name of the input and output spaces. The Transform class is the base class for transformations and defines the transform object API. """ # The name of the input space input_space = '' # The name of the output space output_space = '' # The coordinate mapping from input space to output space mapping = None # The inverse coordinate mapping from output space to input space inverse_mapping = None def __init__(self, input_space, output_space, mapping=None, inverse_mapping=None): """ Create a new transform object. Parameters ---------- mapping: callable f(x, y, z) Callable mapping coordinates from the input space to the output space. It should take 3 numbers or arrays, and return 3 numbers or arrays of the same shape. inverse_mapping: callable f(x, y, z) Callable mapping coordinates from the output space to the input space. It should take 3 numbers or arrays, and return 3 numbers or arrays of the same shape. input_space: string Name of the input space output_space: string Name of the output space Notes ------ You need to supply either the mapping or the inverse mapping. """ if inverse_mapping is None and mapping is None: raise ValueError( 'You need to supply either the coordinate mapping or ' 'the inverse coordinate mapping' ) if mapping is not None: assert callable(mapping), \ 'The mapping argument of a Transform must be callable' if inverse_mapping is not None: assert callable(inverse_mapping), \ 'The inverse_mapping argument of a Transform must be callable' self.mapping = mapping self.inverse_mapping = inverse_mapping self.input_space = input_space self.output_space = output_space #------------------------------------------------------------------------- # Transform Interface #------------------------------------------------------------------------- def composed_with(self, transform): """ Returns a new transform obtained by composing this transform with the one provided. 
Parameters ----------- transform: nipy.core.transforms.transform object The transform to compose with. """ self._check_composition(transform) # We don't want to keep references on the transforms, in the # closure of the new mapping so we extract their mapping # outside of the definition of the new mapping first_mapping = self.mapping second_mapping = transform.mapping if first_mapping is not None and second_mapping is not None: def new_mapping(x, y, z): """ Coordinate mapping from %s to %s. """ % (self.input_space, transform.output_space) return second_mapping(*first_mapping(x, y, z)) else: new_mapping = None first_inverse_mapping = self.inverse_mapping second_inverse_mapping = transform.inverse_mapping if ( first_inverse_mapping is not None and second_inverse_mapping is not None): def new_inverse_mapping(x, y, z): """ Coordinate mapping from %s to %s. """ % (transform.output_space, self.input_space) return first_inverse_mapping(*second_inverse_mapping(x, y, z)) else: new_inverse_mapping = None if new_mapping is None and new_inverse_mapping is None: raise CompositionError( """Composing two transforms with no chainable mapping: %s and %s""" % (self, transform) ) return Transform(self.input_space, transform.output_space, mapping=new_mapping, inverse_mapping=new_inverse_mapping, ) def get_inverse(self): """ Return the inverse transform. """ return self.__class__( input_space = self.output_space, output_space = self.input_space, mapping = self.inverse_mapping, inverse_mapping = self.mapping, ) #------------------------------------------------------------------------- # Private methods #------------------------------------------------------------------------- def _check_composition(self, transform): """ Check that the given transform can be composed with this one. """ if not transform.input_space == self.output_space: raise CompositionError("The input space of the " "second transform (%s) does not match the input space " "of first transform (%s)" % (transform.input_space, self.output_space) ) def __repr__(self): representation = \ '%s(\n input_space=%s,\n output_space=%s,\n mapping=%s,\n inverse_mapping=%s)' % ( self.__class__.__name__, self.input_space, self.output_space, '\n '.join(repr(self.mapping).split('\n')), '\n '.join(repr(self.inverse_mapping).split('\n')), ) return representation def __copy__(self): """ Copy the transform """ return self.__class__(input_space=self.input_space, output_space=self.output_space, mapping=self.mapping, inverse_mapping=self.inverse_mapping) def __eq__(self, other): return (isinstance(other, self.__class__) and self.input_space == other.input_space and self.output_space == other.output_space and self.mapping == other.mapping and self.inverse_mapping == other.inverse_mapping ) nipy-0.3.0/nipy/labs/datasets/volumes/000077500000000000000000000000001210344137400176725ustar00rootroot00000000000000nipy-0.3.0/nipy/labs/datasets/volumes/__init__.py000066400000000000000000000006061210344137400220050ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ The Image class provides the interface which should be used by users at the application level. The image provides a coordinate map, and the data itself. 
""" __docformat__ = 'restructuredtext' from nipy.testing import Tester test = Tester().test bench = Tester().bench nipy-0.3.0/nipy/labs/datasets/volumes/setup.py000066400000000000000000000007151210344137400214070ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('volumes', parent_package, top_path) config.add_subpackage('tests') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) nipy-0.3.0/nipy/labs/datasets/volumes/tests/000077500000000000000000000000001210344137400210345ustar00rootroot00000000000000nipy-0.3.0/nipy/labs/datasets/volumes/tests/__init__.py000066400000000000000000000000001210344137400231330ustar00rootroot00000000000000nipy-0.3.0/nipy/labs/datasets/volumes/tests/test_volume_data.py000066400000000000000000000014701210344137400247470ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Testing volume data interface. """ import nose import copy # Local imports from ..volume_data import VolumeData ################################################################################ # Tests def test_volume_data(): """ Sanity testing of the VolumeData class. """ vol = VolumeData() # Test that the repr doesn't raise an error yield repr, vol # Check the non-implemented interface yield nose.tools.assert_raises, NotImplementedError, \ vol.values_in_world, 0, 0, 0 yield nose.tools.assert_raises, NotImplementedError, \ vol.as_volume_img yield nose.tools.assert_raises, NotImplementedError, copy.copy, vol nipy-0.3.0/nipy/labs/datasets/volumes/tests/test_volume_field.py000066400000000000000000000022461210344137400251230ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Testing data image interface. """ import numpy as np # Local imports from ..volume_field import VolumeField from ...transforms.transform import Transform, CompositionError ################################################################################ # Tests def test_interface(): img = VolumeField() img.world_space = 'world' for method in ('get_transform', 'as_volume_img'): method = getattr(img, method) yield np.testing.assert_raises, NotImplementedError, method yield np.testing.assert_raises, CompositionError, \ img.composed_with_transform, \ Transform('world2', 'world', mapping=map) yield np.testing.assert_raises, NotImplementedError, \ img.composed_with_transform, \ Transform('world', 'world2', mapping=map) yield np.testing.assert_raises, NotImplementedError, \ img.resampled_to_img, None yield np.testing.assert_raises, NotImplementedError, \ img.values_in_world, None, None, None nipy-0.3.0/nipy/labs/datasets/volumes/tests/test_volume_grid.py000066400000000000000000000104671210344137400247710ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Testing VolumeGrid interface. 
""" import nose import copy import numpy as np # Local imports from ..volume_grid import VolumeGrid from ..volume_img import VolumeImg from ...transforms.transform import Transform, CompositionError def mapping(x, y, z): return 2*x, y, 0.5*z def inverse_mapping(x, y, z): return 0.5*x, y, 2*z def id(x, y, z): return x, y, z ################################################################################ # Tests def test_constructor(): yield np.testing.assert_raises, ValueError, VolumeGrid, None, \ None, {}, 'e' def test_volume_grid(): """ Sanity testing of the VolumeGrid class. """ transform = Transform('voxels', 'world', mapping) img = VolumeGrid(data=np.random.random((10, 10, 10)), transform=transform, ) # Test that the repr doesn't raise an error yield repr, img # We cannot calculate the values in the world, because the transform # is not invertible. yield np.testing.assert_raises, ValueError, \ img.values_in_world, 0, 0, 0 yield np.testing.assert_raises, ValueError, \ img.as_volume_img yield nose.tools.assert_equal, img, copy.copy(img) def test_trivial_grid(): """ Test resampling for an grid embedded in world space with an identity transform. """ N = 10 identity = Transform('voxels', 'world', id, id) data = np.random.random((N, N, N)) img = VolumeGrid(data=data, transform=identity, ) x, y, z = np.random.random_integers(N, size=(3, 10)) - 1 data_ = img.values_in_world(x, y, z) # Check that passing in arrays with different shapes raises an error yield np.testing.assert_raises, ValueError, \ img.values_in_world, x, y, z[:-1] # Check that passing in wrong interpolation keyword raises an error yield np.testing.assert_raises, ValueError, \ img.values_in_world, 0, 0, 0, 'e' yield np.testing.assert_almost_equal, data[x, y, z], data_ def test_transformation(): """ Test transforming images. 
""" N = 10 v2w_mapping = Transform('voxels', 'world1', mapping, inverse_mapping) identity = Transform('world1', 'world2', id, id) data = np.random.random((N, N, N)) img1 = VolumeGrid(data=data, transform=v2w_mapping, ) img2 = img1.composed_with_transform(identity) yield nose.tools.assert_equal, img2.world_space, 'world2' x, y, z = N*np.random.random(size=(3, 10)) yield np.testing.assert_almost_equal, img1.values_in_world(x, y, z), \ img2.values_in_world(x, y, z) yield nose.tools.assert_raises, CompositionError, \ img1.composed_with_transform, identity.get_inverse() yield nose.tools.assert_raises, CompositionError, img1.resampled_to_img, \ img2 # Resample an image on itself: it shouldn't change much: img = img1.resampled_to_img(img1) yield np.testing.assert_almost_equal, data, img.get_data() # Check that if I 'resampled_to_img' on an VolumeImg, I get an # VolumeImg, and vice versa volume_image = VolumeImg(data, np.eye(4), 'world') identity = Transform('voxels', 'world', id, id) image = VolumeGrid(data, identity) image2 = image.resampled_to_img(volume_image) yield nose.tools.assert_true, isinstance(image2, VolumeImg) volume_image2 = volume_image.resampled_to_img(image) yield nose.tools.assert_true, isinstance(image2, VolumeGrid) # Check that the data are all the same: we have been playing only # with identity mappings yield np.testing.assert_array_equal, volume_image2.get_data(), \ image2.get_data() def test_as_volume_image(): """ Test casting VolumeGrid to VolumeImg """ N = 10 v2w_mapping = Transform('voxels', 'world2', id, id) data = np.random.random((N, N, N)) img1 = VolumeGrid(data=data, transform=v2w_mapping, ) img2 = img1.as_volume_img() # Check that passing in the wrong shape raises an error yield nose.tools.assert_raises, ValueError, img1.as_volume_img, None, \ (10, 10) nipy-0.3.0/nipy/labs/datasets/volumes/tests/test_volume_img.py000066400000000000000000000223421210344137400246130ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Test the VolumeImg object. """ import copy import nose import numpy as np from ...transforms.affine_utils import from_matrix_vector from ...transforms.affine_transform import AffineTransform from ...transforms.transform import Transform from ..volume_img import VolumeImg, CompositionError from nose.tools import assert_true ################################################################################ # Helper function def rotation(theta, phi): """ Returns a rotation 3x3 matrix. """ cos = np.cos sin = np.sin a1 = np.array([[cos(theta), -sin(theta), 0], [sin(theta), cos(theta), 0], [ 0, 0, 1]]) a2 = np.array([[ 1, 0, 0], [ 0, cos(phi), -sin(phi)], [ 0, sin(phi), cos(phi)]]) return np.dot(a1, a2) def id(x, y, z): return x, y, z ################################################################################ # Tests def test_constructor(): yield np.testing.assert_raises, AttributeError, VolumeImg, None, \ None, 'foo' yield np.testing.assert_raises, ValueError, VolumeImg, None, \ np.eye(4), 'foo', {}, 'e' def test_identity_resample(): """ Test resampling of the VolumeImg with an identity affine. """ shape = (3., 2., 5., 2.) 
data = np.random.randint(0, 10, shape) affine = np.eye(4) affine[:3, -1] = 0.5*np.array(shape[:3]) ref_im = VolumeImg(data, affine, 'mine') rot_im = ref_im.as_volume_img(affine, interpolation='nearest') yield np.testing.assert_almost_equal, data, rot_im.get_data() # Now test when specifying only a 3x3 affine #rot_im = ref_im.as_volume_img(affine[:3, :3], interpolation='nearest') yield np.testing.assert_almost_equal, data, rot_im.get_data() reordered_im = rot_im.xyz_ordered() yield np.testing.assert_almost_equal, data, reordered_im.get_data() def test_downsample(): """ Test resampling of the VolumeImg with a 1/2 down-sampling affine. """ shape = (6., 3., 6, 2.) data = np.random.random(shape) affine = np.eye(4) ref_im = VolumeImg(data, affine, 'mine') rot_im = ref_im.as_volume_img(2*affine, interpolation='nearest') downsampled = data[::2, ::2, ::2, ...] x, y, z = downsampled.shape[:3] np.testing.assert_almost_equal(downsampled, rot_im.get_data()[:x, :y, :z, ...]) def test_resampling_with_affine(): """ Test resampling with a given rotation part of the affine. """ prng = np.random.RandomState(10) data = prng.randint(4, size=(1, 4, 4)) img = VolumeImg(data, np.eye(4), 'mine', interpolation='nearest') for angle in (0, np.pi, np.pi/2, np.pi/4, np.pi/3): rot = rotation(0, angle) rot_im = img.as_volume_img(affine=rot) yield np.testing.assert_almost_equal, np.max(data), np.max(rot_im.get_data()) def test_reordering(): """ Test the xyz_ordered method of the VolumeImg. """ # We need to test on a square array, as rotation does not change # shape, whereas reordering does. shape = (5., 5., 5., 2., 2.) data = np.random.random(shape) affine = np.eye(4) affine[:3, -1] = 0.5*np.array(shape[:3]) ref_im = VolumeImg(data, affine, 'mine') # Test with purely positive matrices and compare to a rotation for theta, phi in np.random.randint(4, size=(5, 2)): rot = rotation(theta*np.pi/2, phi*np.pi/2) rot[np.abs(rot)<0.001] = 0 rot[rot>0.9] = 1 rot[rot<-0.9] = 1 b = 0.5*np.array(shape[:3]) new_affine = from_matrix_vector(rot, b) rot_im = ref_im.as_volume_img(affine=new_affine) yield np.testing.assert_array_equal, rot_im.affine, \ new_affine yield np.testing.assert_array_equal, rot_im.get_data().shape, \ shape reordered_im = rot_im.xyz_ordered() yield np.testing.assert_array_equal, reordered_im.affine[:3, :3], \ np.eye(3) yield np.testing.assert_almost_equal, reordered_im.get_data(), \ data # Check that we cannot swap axes for non spatial axis: yield nose.tools.assert_raises, ValueError, ref_im._swapaxes, 4, 5 # Create a non-diagonal affine, and check that we raise a sensible # exception affine[1, 0] = 0.1 ref_im = VolumeImg(data, affine, 'mine') yield nose.tools.assert_raises, CompositionError, ref_im.xyz_ordered # Test flipping an axis data = np.random.random(shape) for i in (0, 1, 2): # Make a diagonal affine with a negative axis, and check that # can be reordered, also vary the shape shape = (i+1, i+2, 3-i) affine = np.eye(4) affine[i, i] *= -1 img = VolumeImg(data, affine, 'mine') orig_img = copy.copy(img) x, y, z = img.get_world_coords() sample = img.values_in_world(x, y, z) img2 = img.xyz_ordered() # Check that img has not been changed yield nose.tools.assert_true, img == orig_img x_, y_, z_ = img.get_world_coords() yield np.testing.assert_array_equal, np.unique(x), np.unique(x_) yield np.testing.assert_array_equal, np.unique(y), np.unique(y_) yield np.testing.assert_array_equal, np.unique(z), np.unique(z_) sample2 = img.values_in_world(x, y, z) yield np.testing.assert_array_equal, sample, sample2 def test_eq(): 
""" Test copy and equality for VolumeImgs. """ import copy shape = (4., 3., 5., 2.) data = np.random.random(shape) affine = np.random.random((4, 4)) ref_im = VolumeImg(data, affine, 'mine') yield nose.tools.assert_equal, ref_im, ref_im yield nose.tools.assert_equal, ref_im, copy.copy(ref_im) yield nose.tools.assert_equal, ref_im, copy.deepcopy(ref_im) # Check that as_volume_img with no arguments returns the same image yield nose.tools.assert_equal, ref_im, ref_im.as_volume_img() copy_im = copy.copy(ref_im) copy_im.get_data()[0, 0, 0] *= -1 yield nose.tools.assert_not_equal, ref_im, copy_im copy_im = copy.copy(ref_im) copy_im.affine[0, 0] *= -1 yield nose.tools.assert_not_equal, ref_im, copy_im copy_im = copy.copy(ref_im) copy_im.world_space = 'other' yield nose.tools.assert_not_equal, ref_im, copy_im # Test repr yield assert_true, isinstance(repr(ref_im), str) # Test init: should raise exception is not passing in right affine yield nose.tools.assert_raises, Exception, VolumeImg, data, \ np.eye(3, 3), 'mine' def test_values_in_world(): """ Test the evaluation of the data in world coordinate. """ shape = (3., 5., 4., 2.) data = np.random.random(shape) affine = np.eye(4) ref_im = VolumeImg(data, affine, 'mine') x, y, z = np.indices(ref_im.get_data().shape[:3]) values = ref_im.values_in_world(x, y, z) np.testing.assert_almost_equal(values, data) def test_resampled_to_img(): """ Trivial test of resampled_to_img. """ shape = (5., 4., 3., 2.) data = np.random.random(shape) affine = np.random.random((4, 4)) ref_im = VolumeImg(data, affine, 'mine') yield np.testing.assert_almost_equal, data, \ ref_im.as_volume_img(affine=ref_im.affine).get_data() yield np.testing.assert_almost_equal, data, \ ref_im.resampled_to_img(ref_im).get_data() # Check that we cannot resample to another image in a different # world. other_im = VolumeImg(data, affine, 'other') yield nose.tools.assert_raises, CompositionError, \ other_im.resampled_to_img, ref_im # Also check that trying to resample on a non 3D grid will raise an # error yield nose.tools.assert_raises, ValueError, \ ref_im.as_volume_img, None, (2, 2) def test_transformation(): """ Test transforming images. """ N = 10 identity1 = Transform('world1', 'world2', id, id) identity2 = AffineTransform('world1', 'world2', np.eye(4)) for identity in (identity1, identity2): data = np.random.random((N, N, N)) img1 = VolumeImg(data=data, affine=np.eye(4), world_space='world1', ) img2 = img1.composed_with_transform(identity) yield nose.tools.assert_equal, img2.world_space, 'world2' x, y, z = N*np.random.random(size=(3, 10)) yield np.testing.assert_almost_equal, img1.values_in_world(x, y, z), \ img2.values_in_world(x, y, z) yield nose.tools.assert_raises, CompositionError, \ img1.composed_with_transform, identity.get_inverse() yield nose.tools.assert_raises, CompositionError, \ img1.resampled_to_img, img2 # Resample an image on itself: it shouldn't change much: img = img1.resampled_to_img(img1) yield np.testing.assert_almost_equal, data, img.get_data() def test_get_affine(): shape = (1., 2., 3., 4.) 
data = np.random.randint(0, 10, shape) affine = np.eye(4) ref_im = VolumeImg(data, affine, 'mine') np.testing.assert_equal(ref_im.affine, ref_im.get_affine()) nipy-0.3.0/nipy/labs/datasets/volumes/volume_data.py000066400000000000000000000153431210344137400225520ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ The volume data class This class represents indexable data embedded in a 3D space """ import copy as copy import numpy as np # Local imports from .volume_field import VolumeField from ..transforms.transform import CompositionError ################################################################################ # class `VolumeData` ################################################################################ class VolumeData(VolumeField): """ A class representing data embedded in a 3D space This object has data stored in an array like, that knows how it is mapped to a 3D "real-world space", and how it can change real-world coordinate system. Attributes ----------- world_space: string World space the data is embedded in. For instance `mni152`. metadata: dictionnary Optional, user-defined, dictionnary used to carry around extra information about the data as it goes through transformations. The class consistency of this information is not maintained as the data is modified. _data: Private pointer to the data. Notes ------ The data is stored in an undefined way: prescalings might need to be applied to it before using it, or the data might be loaded on demand. The best practice to access the data is not to access the _data attribute, but to use the `get_data` method. """ #--------------------------------------------------------------------------- # Public attributes -- VolumeData interface #--------------------------------------------------------------------------- # The interpolation logic used interpolation = 'continuous' #--------------------------------------------------------------------------- # Private attributes -- VolumeData interface #--------------------------------------------------------------------------- # The data (ndarray-like) _data = None #--------------------------------------------------------------------------- # Public methods -- VolumeData interface #--------------------------------------------------------------------------- def get_data(self): """ Return data as a numpy array. """ return np.asanyarray(self._data) def like_from_data(self, data): """ Returns an volumetric data structure with the same relationship between data and world space, and same metadata, but different data. Parameters ----------- data: ndarray """ raise NotImplementedError def resampled_to_img(self, target_image, interpolation=None): """ Resample the data to be on the same voxel grid than the target volume structure. Parameters ---------- target_image : nipy image Nipy image onto the voxel grid of which the data will be resampled. This can be any kind of img understood by Nipy (datasets, pynifti objects, nibabel object) or a string giving the path to a nifti of analyse image. interpolation : None, 'continuous' or 'nearest', optional Interpolation type used when calculating values in different word spaces. If None, the image's interpolation logic is used. Returns ------- resampled_image : nipy_image New nipy image with the data resampled. Notes ----- Both the target image and the original image should be embedded in the same world space. 
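        Examples
        --------
        A minimal sketch (added here for illustration, not part of the
        original docstring), using the ``VolumeImg`` subclass, which shares
        this method's contract; both images are embedded in the same world
        space::

            import numpy as np
            from nipy.labs.datasets.volumes.volume_img import VolumeImg

            source = VolumeImg(np.random.random((4, 4, 4)), np.eye(4), 'mine')
            target_affine = np.eye(4)
            target_affine[:3, :3] *= 0.5      # finer voxel grid, same space
            target = VolumeImg(np.zeros((8, 8, 8)), target_affine, 'mine')
            resampled = source.resampled_to_img(target,
                                                interpolation='nearest')
            assert resampled.get_data().shape[:3] == (8, 8, 8)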
""" if not hasattr(target_image, 'world_space'): from ..converters import as_volume_img target_image = as_volume_img(target_image) if not target_image.world_space == self.world_space: raise CompositionError( "The two images are not embedded in the same world space") x, y, z = target_image.get_world_coords() new_data = self.values_in_world(x, y, z, interpolation=interpolation) new_img = target_image.like_from_data(new_data) new_img.metadata = copy.copy(self.metadata) return new_img #--------------------------------------------------------------------------- # Private methods #--------------------------------------------------------------------------- def _apply_transform(self, w2w_transform): """ Method applying the transform: inner part of transformed_with, used in subclassing. """ new_v2w_transform = \ self.get_transform().composed_with(w2w_transform) new_img = copy.copy(self) new_img._transform = new_v2w_transform return new_img def _get_interpolation_order(self, interpolation): """ Inner method used to get the interpolation type for the image. """ if interpolation is None: interpolation = self.interpolation if interpolation == 'continuous': interpolation_order = 3 elif interpolation == 'nearest': interpolation_order = 0 else: raise ValueError("interpolation must be either 'continuous' " "or 'nearest'") return interpolation_order # TODO: We need to implement (or check if implemented) hashing, # weakref, pickling? def __repr__(self): options = np.get_printoptions() np.set_printoptions(precision=5, threshold=64, edgeitems=2) representation = \ '%s(\n data=%s,\n world_space=%s,\n interpolation=%s)' % ( self.__class__.__name__, '\n '.join(repr(self._data).split('\n')), repr(self.world_space), repr(self.interpolation), ) np.set_printoptions(**options) return representation def __copy__(self): return self.like_from_data(self.get_data().copy()) def __deepcopy__(self, option): """ Copy the Volume and the arrays and metadata it contains. """ out = self.__copy__() out.metadata = copy.deepcopy(self.metadata) return out def __eq__(self, other): return ( self.world_space == other.world_space and self.get_transform() == other.get_transform() and np.all(self.get_data() == other.get_data()) and self.interpolation == other.interpolation ) nipy-0.3.0/nipy/labs/datasets/volumes/volume_field.py000066400000000000000000000170271210344137400227250ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ The base volumetric field interface This defines the nipy volumetric structure interface. """ from ..transforms.transform import CompositionError ################################################################################ # class `VolumeField` ################################################################################ class VolumeField(object): """ The base volumetric structure. This object represents numerical values embedded in a 3-dimensional world space (called a field in physics and engineering) This is an abstract base class: it defines the interface, but not the logics. Attributes ---------- world_space: string World space the data is embedded in. For instance `mni152`. metadata: dictionnary Optional, user-defined, dictionnary used to carry around extra information about the data as it goes through transformations. The consistency of this information is not maintained as the data is modified. 
""" #--------------------------------------------------------------------------- # Public attributes -- VolumeField interface #--------------------------------------------------------------------------- # The name of the reference coordinate system world_space = '' # User defined meta data metadata = dict() #--------------------------------------------------------------------------- # Public methods -- VolumeField interface #--------------------------------------------------------------------------- def get_transform(self): """ Returns the transform object associated with the volumetric structure which is a general description of the mapping from the values to the world space. Returns ------- transform : nipy.datasets.Transform object """ raise NotImplementedError def resampled_to_img(self, target_image, interpolation=None): """ Resample the volume to be sampled similarly than the target volumetric structure. Parameters ---------- target_image : nipy volume Nipy volume structure onto the grid of which the data will be resampled. interpolation : None, 'continuous' or 'nearest', optional Interpolation type used when calculating values in different word spaces. If None, the volume's interpolation logic is used. Returns ------- resampled_image : nipy_image New nipy image with the data resampled. Notes ----- Both the target image and the original image should be embedded in the same world space. """ # IMPORTANT: Polymorphism can be implemented by walking the # MRO and finding a method that does not raise # NotImplementedError. raise NotImplementedError def as_volume_img(self, affine=None, shape=None, interpolation=None, copy=True): """ Resample the image to be an image with the data points lying on a regular grid with an affine mapping to the word space (a nipy VolumeImg). Parameters ---------- affine: 4x4 or 3x3 ndarray, optional Affine of the new voxel grid or transform object pointing to the new voxel coordinate grid. If a 3x3 ndarray is given, it is considered to be the rotation part of the affine, and the best possible bounding box is calculated, in this case, the shape argument is not used. If None is given, a default affine is provided by the image. shape: (n_x, n_y, n_z), tuple of integers, optional The shape of the grid used for sampling, if None is given, a default affine is provided by the image. interpolation : None, 'continuous' or 'nearest', optional Interpolation type used when calculating values in different word spaces. If None, the image's interpolation logic is used. Returns ------- resampled_image : nipy VolumeImg New nipy VolumeImg with the data sampled on the grid defined by the affine and shape. Notes ----- The coordinate system of the image is not changed: the returned image points to the same world space. """ raise NotImplementedError def values_in_world(self, x, y, z, interpolation=None): """ Return the values of the data at the world-space positions given by x, y, z Parameters ---------- x : number or ndarray x positions in world space, in other words milimeters y : number or ndarray y positions in world space, in other words milimeters. The shape of y should match the shape of x z : number or ndarray z positions in world space, in other words milimeters. The shape of z should match the shape of x interpolation : None, 'continuous' or 'nearest', optional Interpolation type used when calculating values in different word spaces. If None, the image's interpolation logic is used. 
Returns ------- values : number or ndarray Data values interpolated at the given world position. This is a number or an ndarray, depending on the shape of the input coordinate. """ raise NotImplementedError def composed_with_transform(self, w2w_transform): """ Return a new image embedding the same data in a different word space using the given world to world transform. Parameters ---------- w2w_transform : transform object The transform object giving the mapping between the current world space of the image, and the new word space. Returns -------- remapped_image : nipy image An image containing the same data, expressed in the new world space. """ if not w2w_transform.input_space == self.world_space: raise CompositionError( "The transform given does not apply to " "the image's world space:\n%s\n\n%s" % (w2w_transform, self) ) new_img = self._apply_transform(w2w_transform) new_img.world_space = w2w_transform.output_space return new_img #--------------------------------------------------------------------------- # Private methods #--------------------------------------------------------------------------- # The subclasses should implement __repr__, __copy__, __deepcopy__, # __eq__ # TODO: We need to implement (or check if implemented) hashing, # weakref, pickling? def _apply_transform(self, w2w_transform): """ Implement this method to put in the logic of applying a transformation on the image class. """ raise NotImplementedError nipy-0.3.0/nipy/labs/datasets/volumes/volume_grid.py000066400000000000000000000235621210344137400225700ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ The volume grid class. This class represents data lying on a (non rigid, non regular) grid embedded in a 3D world represented as a 3+D array. """ import copy as copy import numpy as np from scipy import ndimage # Local imports from .volume_data import VolumeData from ..transforms.affine_utils import apply_affine, from_matrix_vector ################################################################################ # class `VolumeGrid` ################################################################################ class VolumeGrid(VolumeData): """ A class representing data stored in a 3+D array embedded in a 3D world. This object has data stored in an array-like multidimensional indexable objects, with the 3 first dimensions corresponding to spatial axis and defining a 3D grid that may be non-regular or non-rigid. The object knows how the data is mapped to a 3D "real-world space", and how it can change real-world coordinate system. The transform mapping it to world is arbitrary, and thus the grid can be warped: in the world space, the grid may not be regular or orthogonal. Attributes ----------- world_space: string World space the data is embedded in. For instance `mni152`. metadata: dictionnary Optional, user-defined, dictionnary used to carry around extra information about the data as it goes through transformations. The consistency of this information is not maintained as the data is modified. _data: Private pointer to the data. Notes ------ The data is stored in an undefined way: prescalings might need to be applied to it before using it, or the data might be loaded on demand. The best practice to access the data is not to access the _data attribute, but to use the `get_data` method. If the transform associated with the image has no inverse mapping, data corresponding to a given world space position cannot be calulated. 
If it has no forward mapping, it is impossible to resample another dataset on the same support. """ #--------------------------------------------------------------------------- # Public methods -- VolumeGrid interface #--------------------------------------------------------------------------- def __init__(self, data, transform, metadata=None, interpolation='continuous'): """ The base image containing data. Parameters ---------- data: ndarray n dimensional array giving the embedded data, with the 3 first dimensions being spatial. transform: nipy transform object The transformation from voxel to world. metadata : dictionnary, optional Dictionnary of user-specified information to store with the image. interpolation : 'continuous' or 'nearest', optional Interpolation type used when calculating values in different word spaces. """ if not interpolation in ('continuous', 'nearest'): raise ValueError('interpolation must be either continuous ' 'or nearest') self._data = data self._transform = transform self.world_space = transform.output_space if metadata is None: metadata = dict() self.metadata = metadata self.interpolation = interpolation def as_volume_img(self, affine=None, shape=None, interpolation=None, copy=True): if affine is None: affine = np.eye(3) if affine.shape[0] == 3 or shape is None: affine3d = affine[:3, :3] affine4d = np.eye(4) affine4d[:3, :3] = affine3d x, y, z = self.get_world_coords() x, y, z = apply_affine(x, y, z, np.linalg.inv(affine4d)) xmin = x.min() ymin = y.min() zmin = z.min() if affine.shape[0] == 3: offset = np.array((xmin, ymin, zmin)) offset = np.dot(affine3d, offset) affine = from_matrix_vector(affine3d, offset[:3]) if shape is None: xmax = x.max() ymax = y.max() zmax = z.max() shape = (np.ceil(xmax - xmin)+1, np.ceil(ymax - ymin)+1, np.ceil(zmax - zmin)+1, ) shape = list(shape) if not len(shape) == 3: raise ValueError('The shape specified should be the shape ' 'the 3D grid, and thus of length 3. %s was specified' % shape ) x, y, z = np.indices(shape) x, y, z = apply_affine(x, y, z, affine) values = self.values_in_world(x, y, z) # We import late to avoid circular import from .volume_img import VolumeImg return VolumeImg(values, affine, self.world_space, metadata=self.metadata, interpolation=self.interpolation) # Inherit docstring as_volume_img.__doc__ = VolumeData.as_volume_img.__doc__ def get_world_coords(self): """ Return the data points coordinates in the world space. Returns -------- x: ndarray x coordinates of the data points in world space y: ndarray y coordinates of the data points in world space z: ndarray z coordinates of the data points in world space """ x, y, z = np.indices(self._data.shape[:3]) return self.get_transform().mapping(x, y, z) # XXX: The docstring should be inherited def like_from_data(self, data): return self.__class__(data = data, transform = copy.copy(self._transform), metadata = copy.copy(self.metadata), interpolation = self.interpolation, ) # Inherit docstring like_from_data.__doc__ = VolumeData.like_from_data.__doc__ def get_transform(self): """ Returns the transform object associated with the image which is a general description of the mapping from the voxel space to the world space. 
Returns ------- transform : nipy.core.Transform object """ return self._transform # Inherit docstring get_transform.__doc__ = VolumeData.get_transform.__doc__ def values_in_world(self, x, y, z, interpolation=None): """ Return the values of the data at the world-space positions given by x, y, z Parameters ---------- x : number or ndarray x positions in world space, in other words milimeters y : number or ndarray y positions in world space, in other words milimeters. The shape of y should match the shape of x z : number or ndarray z positions in world space, in other words milimeters. The shape of z should match the shape of x interpolation : None, 'continuous' or 'nearest', optional Interpolation type used when calculating values in different word spaces. If None, the object's interpolation logic is used. Returns ------- values : number or ndarray Data values interpolated at the given world position. This is a number or an ndarray, depending on the shape of the input coordinate. """ interpolation_order = self._get_interpolation_order(interpolation) transform = self.get_transform() if transform.inverse_mapping is None: raise ValueError( "Cannot calculate the world values for volume data: mapping to " "word is not invertible." ) x = np.atleast_1d(x) y = np.atleast_1d(y) z = np.atleast_1d(z) shape = list(x.shape) if not ((x.shape == y.shape) and (x.shape == z.shape)): raise ValueError('x, y and z shapes should be equal') x = x.ravel() y = y.ravel() z = z.ravel() i, j, k = transform.inverse_mapping(x, y, z) coords = np.c_[i, j, k].T # work round an ndimage deficiency in scipy <= 0.9.0. # See: https://github.com/scipy/scipy/pull/64 if coords.dtype == np.dtype(np.intp): coords = coords.astype(np.dtype(coords.dtype.str)) data = self.get_data() data_shape = list(data.shape) n_dims = len(data_shape) if n_dims > 3: # Iter in a set of 3D volumes, as the interpolation problem is # separable in the extra dimensions. This reduces the # computational cost data = np.reshape(data, data_shape[:3] + [-1]) data = np.rollaxis(data, 3) values = [ ndimage.map_coordinates(slice, coords, order=interpolation_order) for slice in data] values = np.array(values) values = np.swapaxes(values, 0, -1) values = np.reshape(values, shape + data_shape[3:]) else: values = ndimage.map_coordinates(data, coords, order=interpolation_order) values = np.reshape(values, shape) return values # Inherit docstring values_in_world.__doc__ = VolumeData.values_in_world.__doc__ nipy-0.3.0/nipy/labs/datasets/volumes/volume_img.py000066400000000000000000000357741210344137400224270ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ An image that stores the data as an (x, y, z, ...) array, with an affine mapping to the world space """ import copy import numpy as np from scipy import ndimage # Local imports from ..transforms.affine_utils import to_matrix_vector, \ from_matrix_vector, get_bounds from ..transforms.affine_transform import AffineTransform from ..transforms.transform import CompositionError from .volume_grid import VolumeGrid ################################################################################ # class `VolumeImg` ################################################################################ class VolumeImg(VolumeGrid): """ A regularly-spaced image for embedding data in an x, y, z 3D world, for neuroimaging. 
This object is an ndarray representing a volume, with the first 3 dimensions being spatial, and mapped to a named world space using an affine (4x4 matrix). Attributes ---------- affine : 4x4 ndarray Affine mapping from indices to world coordinates. world_space : string Name of the world space the data is embedded in. For instance `mni152`. metadata : dictionnary Optional, user-defined, dictionnary used to carry around extra information about the data as it goes through transformations. The consistency of this information may not be maintained as the data is modified. interpolation : 'continuous' or 'nearest' String giving the interpolation logic used when calculating values in different world spaces _data : Private pointer to the data. Notes ------ The data is stored in an undefined way: prescalings might need to be applied to it before using it, or the data might be loaded on demand. The best practice to access the data is not to access the _data attribute, but to use the `get_data` method. """ # most attributes are given by the VolumeField interface #--------------------------------------------------------------------------- # Attributes, VolumeImg interface #--------------------------------------------------------------------------- # The affine (4x4 ndarray) affine = np.eye(4) #--------------------------------------------------------------------------- # VolumeField interface #--------------------------------------------------------------------------- def __init__(self, data, affine, world_space, metadata=None, interpolation='continuous'): """ Creates a new neuroimaging image with an affine mapping. Parameters ---------- data : ndarray ndarray representing the data. affine : 4x4 ndarray affine transformation to the reference world space world_space : string name of the reference world space. metadata : dictionnary dictionnary of user-specified information to store with the image. """ if not interpolation in ('continuous', 'nearest'): raise ValueError('interpolation must be either continuous ' 'or nearest') self._data = data if not affine.shape == (4, 4): raise ValueError('The affine should be a 4x4 array') self.affine = affine self.world_space = world_space if metadata is None: metadata = dict() self.metadata = metadata self.interpolation = interpolation def like_from_data(self, data): # Use self.__class__ for subclassing. assert len(data.shape) >= 3, \ 'The data passed must be an array of at least 3 dimensions' return self.__class__(data=data, affine=copy.copy(self.affine), world_space=self.world_space, metadata=copy.copy(self.metadata), interpolation=self.interpolation, ) # Inherit docstring like_from_data.__doc__ = VolumeGrid.like_from_data.__doc__ def get_transform(self): return AffineTransform('voxel_space', self.world_space, self.affine) # Inherit docstring get_transform.__doc__ = VolumeGrid.get_transform.__doc__ def get_affine(self): return self.affine def resampled_to_img(self, target_image, interpolation=None): if not hasattr(target_image, 'world_space'): from ..converters import as_volume_img target_image = as_volume_img(target_image) if not target_image.world_space == self.world_space: raise CompositionError( 'The two images are not embedded in the same world space') if isinstance(target_image, VolumeImg): return self.as_volume_img(affine=target_image.affine, shape=target_image.get_data().shape[:3], interpolation=interpolation) else: # IMPORTANT: Polymorphism can be implemented by walking the # MRO and finding a method that does not raise # NotImplementedError. 
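            # Added explanatory comment: when the target is not a VolumeImg
            # (for instance a warped VolumeGrid), the affine-specific branch
            # above does not apply, so we fall back to the generic
            # implementation inherited from VolumeData, which samples
            # self.values_in_world() at the world coordinates of the target's
            # grid points.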
return super(VolumeImg, self).resampled_to_img(target_image, interpolation=interpolation) # Inherit docstring resampled_to_img.__doc__ = VolumeGrid.resampled_to_img.__doc__ def as_volume_img(self, affine=None, shape=None, interpolation=None, copy=True): if affine is None and shape is None: if copy: import copy return copy.copy(self) else: return self if affine is None: affine = self.affine data = self.get_data() if shape is None: shape = data.shape[:3] shape = list(shape) if affine.shape[0] == 3: # We have a 3D affine, we need to find out the offset and # shape to keep the same bounding box in the new space affine4d = np.eye(4) affine4d[:3, :3] = affine transform_affine = np.dot(np.linalg.inv(affine4d), self.affine, ) # The bounding box in the new world, if no offset is given (xmin, xmax), (ymin, ymax), (zmin, zmax) = get_bounds( data.shape[:3], transform_affine, ) offset = np.array((xmin, ymin, zmin)) offset = np.dot(affine, offset) affine = from_matrix_vector(affine, offset[:3]) shape = (np.ceil(xmax - xmin)+1, np.ceil(ymax - ymin)+1, np.ceil(zmax - zmin)+1, ) if not len(shape) == 3: raise ValueError('The shape specified should be the shape ' 'the 3D grid, and thus of length 3. %s was specified' % shape ) interpolation_order = self._get_interpolation_order(interpolation) if np.all(affine == self.affine): # Small trick to be more numericaly stable transform_affine = np.eye(4) else: transform_affine = np.dot(np.linalg.inv(self.affine), affine) A, b = to_matrix_vector(transform_affine) A_inv = np.linalg.inv(A) # If A is diagonal, ndimage.affine_transform is clever-enough # to use a better algorithm if np.all(np.diag(np.diag(A)) == A): A = np.diag(A) else: b = np.dot(A, b) # For images with dimensions larger than 3D: data_shape = list(data.shape) if len(data_shape) > 3: # Iter in a set of 3D volumes, as the interpolation problem is # separable in the extra dimensions. This reduces the # computational cost data = np.reshape(data, data_shape[:3] + [-1]) data = np.rollaxis(data, 3) resampled_data = [ ndimage.affine_transform(slice, A, offset=np.dot(A_inv, b), output_shape=shape, order=interpolation_order) for slice in data] resampled_data = np.concatenate([d[..., np.newaxis] for d in resampled_data], axis=3) resampled_data = np.reshape(resampled_data, list(shape) + list(data_shape[3:])) else: resampled_data = ndimage.affine_transform(data, A, offset=np.dot(A_inv, b), output_shape=shape, order=interpolation_order) return self.__class__(resampled_data, affine, self.world_space, metadata=self.metadata, interpolation=self.interpolation) # Inherit docstring as_volume_img.__doc__ = VolumeGrid.as_volume_img.__doc__ #--------------------------------------------------------------------------- # VolumeImg interface #--------------------------------------------------------------------------- def xyz_ordered(self, resample=False, copy=True): """ Returns an image with the affine diagonal and positive in the world space it is embedded in. Parameters ----------- resample: boolean, optional If resample is False, no resampling is performed, the axis are only permuted. If it is impossible to get xyz ordering by permuting the axis, a 'CompositionError' is raised. copy: boolean, optional If copy is True, a deep copy of the image (including the data) is made. 
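        Examples
        --------
        A minimal sketch (added for illustration, not part of the original
        docstring), assuming an image stored with a flipped x axis::

            import numpy as np

            flipped_affine = np.eye(4)
            flipped_affine[0, 0] = -1
            img = VolumeImg(np.random.random((4, 5, 6)), flipped_affine,
                            'mine')
            reordered = img.xyz_ordered()
            assert np.all(np.diag(reordered.affine)[:3] > 0)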
""" A, b = to_matrix_vector(self.affine.copy()) if not np.all((np.abs(A) > 0.001).sum(axis=0) == 1): if not resample: raise CompositionError( 'Cannot reorder the axis: the image affine contains rotations' ) else: # Identify the voxel size using a QR decomposition of the # affine R, Q = np.linalg.qr(self.affine[:3, :3]) target_affine = np.diag(np.abs(np.diag(Q))[ np.abs(R).argmax(axis=1)]) return self.as_volume_img(affine=target_affine) # Copy the image, we don't want to modify in place. if copy: img = self.__copy__() else: img = self axis_numbers = np.argmax(np.abs(A), axis=0) while not np.all(np.sort(axis_numbers) == axis_numbers): first_inversion = np.argmax(np.diff(axis_numbers)<0) img = img._swapaxes(first_inversion+1, first_inversion) A, b = to_matrix_vector(img.affine) axis_numbers = np.argmax(np.abs(A), axis=0) # Now make sure the affine is positive pixdim = np.diag(A) data = img.get_data() if pixdim[0] < 0: b[0] = b[0] + pixdim[0]*(data.shape[0] - 1) pixdim[0] = -pixdim[0] slice1 = slice(None, None, -1) else: slice1 = slice(None, None, None) if pixdim[1] < 0: b[1] = b[1] + 1 + pixdim[1]*(data.shape[1] - 1) pixdim[1] = -pixdim[1] slice2 = slice(None, None, -1) else: slice2 = slice(None, None, None) if pixdim[2] < 0: b[2] = b[2] + 1 + pixdim[2]*(data.shape[2] - 1) pixdim[2] = -pixdim[2] slice3 = slice(None, None, -1) else: slice3 = slice(None, None, None) data = data[slice1, slice2, slice3] img._data = data img.affine = from_matrix_vector(np.diag(pixdim), b) return img def _swapaxes(self, axis1, axis2): """ Swap the axis axis1 and axis2 of the data array and reorder the affine matrix to stay consistent with the data See also -------- self.xyz_ordered """ if (axis1 > 2) or (axis2 > 2): raise ValueError('Can swap axis only on spatial axis. ' 'Use np.swapaxes of the data array.') reordered_data = np.swapaxes(self.get_data(), axis1, axis2) new_affine = self.affine order = np.array((0, 1, 2, 3)) order[axis1] = axis2 order[axis2] = axis1 new_affine = new_affine.T[order].T return VolumeImg(reordered_data, new_affine, self.world_space, metadata=self.metadata) #--------------------------------------------------------------------------- # Private methods #--------------------------------------------------------------------------- def _apply_transform(self, w2w_transform): """ Used for subclassing only. 
Do not call """ new_v2w_transform = \ self.get_transform().composed_with(w2w_transform) if hasattr(new_v2w_transform, 'affine'): new_img = self.__class__(self.get_data(), new_v2w_transform.affine, new_v2w_transform.output_space, metadata=self.metadata, interpolation=self.interpolation) else: new_img = VolumeGrid(self.get_data(), transform=new_v2w_transform, metadata=self.metadata, interpolation=self.interpolation) return new_img def __repr__(self): options = np.get_printoptions() np.set_printoptions(precision=5, threshold=64, edgeitems=2) representation = \ '%s(\n data=%s,\n affine=%s,\n world_space=%s,\n interpolation=%s)' % ( self.__class__.__name__, '\n '.join(repr(self._data).split('\n')), '\n '.join(repr(self.affine).split('\n')), repr(self.world_space), repr(self.interpolation)) np.set_printoptions(**options) return representation def __eq__(self, other): return ( isinstance(other, self.__class__) and np.all(self.get_data() == other.get_data()) and np.all(self.affine == other.affine) and (self.world_space == other.world_space) and (self.interpolation == other.interpolation) ) nipy-0.3.0/nipy/labs/glm/000077500000000000000000000000001210344137400151475ustar00rootroot00000000000000nipy-0.3.0/nipy/labs/glm/__init__.py000066400000000000000000000003541210344137400172620ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from .glm import models, contrast, ols, load from nipy.testing import Tester test = Tester().test bench = Tester().bench nipy-0.3.0/nipy/labs/glm/benchmarks/000077500000000000000000000000001210344137400172645ustar00rootroot00000000000000nipy-0.3.0/nipy/labs/glm/benchmarks/__init__.py000066400000000000000000000000501210344137400213700ustar00rootroot00000000000000# Init to make test directory a package nipy-0.3.0/nipy/labs/glm/benchmarks/bench_glm.py000066400000000000000000000023541210344137400215600ustar00rootroot00000000000000 import numpy as np from ..glm import glm def make_data(): dimt = 100 dimx = 10 dimy = 11 dimz = 12 y = np.random.randn(dimt, dimx, dimy, dimz) X = np.array([np.ones(dimt), range(dimt)]) X = X.transpose() ## the design matrix X must have dimt lines return y, X def ols(axis, y, X): y = np.rollaxis(y, 0, axis+1) ## time index is axis X = X m = glm(y, X, axis=axis) m1 = glm(y, X, axis=axis, method='kalman') b = m.beta b1 = m1.beta v = m.s2 v1 = m1.s2 print "Comparing standard OLS with Kalman OLS..." re = ( np.abs(b-b1) / (np.abs(b)+1e-20) ).mean() print " Relative difference in Effect estimate: %s" % re re = ( np.abs(v-v1) / (np.abs(v)+1e-20) ).mean() print " Relative difference in Variance: %s" % re tcon = m.contrast([1,0]) tcon1 = m1.contrast([1,0]) z = tcon.zscore() z1 = tcon1.zscore() re = ( abs(z-z1) / (abs(z)+1e-20) ).mean() print " Relative difference in z score: %s" % re def bench_ols_axis0(): x, Y = make_data() ols(0, x, Y) def bench_ols_axis1(): x, Y = make_data() ols(1, x, Y) def bench_ols_axis2(): x, Y = make_data() ols(2, x, Y) def bench_ols_axis3(): x, Y = make_data() ols(3, x, Y) nipy-0.3.0/nipy/labs/glm/glm.py000066400000000000000000000220071210344137400163010ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import numpy as np import scipy.stats as sps from . 
import kalman from ..utils import mahalanobis from nipy.algorithms.statistics.utils import z_score as zscore DEF_TINY = 1e-50 DEF_DOFMAX = 1e10 models = {'spherical': ['ols', 'kalman'], 'ar1': ['kalman']} class glm(object): def __init__(self, Y=None, X=None, formula=None, axis=0, model='spherical', method=None, niter=2): # Check dimensions if Y == None: return else: self.fit(Y, X, formula, axis, model, method, niter) def fit(self, Y, X, formula=None, axis=0, model='spherical', method=None, niter=2): if Y.shape[axis] != X.shape[0]: raise ValueError('Response and predictors are inconsistent') # Find model type self._axis = axis if isinstance(formula, str): model = 'mfx' if model in models: self.model = model if method == None: self.method = models[model][0] elif models[model].count(method): self.method = method else: raise ValueError('Unknown method') else: raise ValueError('Unknown model') # Initialize fields constants = [] a = 0 # Switch on models / methods if self.model == 'spherical': constants = ['nvbeta', 'a'] if self.method == 'ols': out = ols(Y, X, axis=axis) elif self.method == 'kalman': out = kalman.ols(Y, X, axis=axis) elif self.model == 'ar1': constants = ['a'] out = kalman.ar1(Y, X, axis=axis, niter=niter) a = out[4] out = out[0: 4] # Finalize self.beta, self.nvbeta, self.s2, self.dof = out self.s2 = self.s2.squeeze() self.a = a self._constants = constants def save(self, file): """ Save fit into a .npz file """ np.savez(file, beta=self.beta, nvbeta=self.nvbeta, s2=self.s2, dof=self.dof, a=self.a, model=self.model, method=self.method, axis=self._axis, constants=self._constants) def contrast(self, c, type='t', tiny=DEF_TINY, dofmax=DEF_DOFMAX): """ Specify and estimate a constrast c must be a numpy.ndarray (or anything that numpy.asarray can cast to a ndarray). For a F contrast, c must be q x p where q is the number of contrast vectors and p is the total number of regressors. """ c = np.asarray(c) #dim = len(c.shape) if c.ndim == 1: dim = 1 else: dim = c.shape[0] axis = self._axis ndims = len(self.beta.shape) # Compute the contrast estimate: c*B B = np.rollaxis(self.beta, axis, ndims) con = np.inner(c, B) # shape = q, X # Compute the variance of the contrast estimate: s2 * (c' * nvbeta * c) # Two cases are considered: either the input effect variance # is position-dependent (output by RKF_fit), or it is a global # one (output by KF_fit) s2 = self.s2.squeeze() nvbeta = self.nvbeta if not 'nvbeta' in self._constants: nvbeta = np.rollaxis(nvbeta, axis, ndims + 1) nvbeta = np.rollaxis(nvbeta, axis, ndims + 1) # shape = X, p, p if dim == 1: vcon = np.inner(c, np.inner(c, nvbeta)) vcon = vcon.squeeze() * s2 else: vcon = np.dot(c, np.inner(nvbeta, c)) # q, X, q or q, q if not 'nvbeta' in self._constants: vcon = np.rollaxis(vcon, ndims, 1) * s2 # q, q, X else: aux = vcon.shape # q, q vcon = np.resize(vcon, s2.shape + aux) # X, q, q vcon = vcon.T.reshape(aux + (s2.size,)) * \ s2.reshape((s2.size,)) # q, q, Xflat vcon = vcon.reshape(aux + s2.shape) # q, q, X # Create contrast instance c = contrast(dim, type, tiny, dofmax) c.effect = con c.variance = vcon c.dof = self.dof return c class contrast(object): def __init__(self, dim, type='t', tiny=DEF_TINY, dofmax=DEF_DOFMAX): """tiny is a numerical constant for computations. 
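        Illustrative usage sketch (added note; ``Y`` and ``X`` below are
        placeholder arrays standing for the data and the design matrix).
        Contrast objects are normally obtained from a fitted ``glm`` rather
        than built directly::

            mod = glm(Y, X, axis=0)        # fit the model (placeholders Y, X)
            con = mod.contrast([1, -1])    # t contrast between two regressors
            z = con.zscore()               # z-score of H0: 'contrast == 0'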
""" self.dim = dim self.effect = None self.variance = None self.dof = None if dim > 1: if type is 't': type = 'F' self.type = type self._stat = None self._pvalue = None self._baseline = 0 self._tiny = tiny self._dofmax = dofmax def summary(self): """ Return a dictionary containing the estimated contrast effect, the associated ReML-based estimation variance, and the estimated degrees of freedom (variance of the variance). """ return {'effect': self.effect, 'variance': self.variance, 'dof': self.dof} def stat(self, baseline=0.0): """ Return the decision statistic associated with the test of the null hypothesis: (H0) 'contrast equals baseline' """ self._baseline = baseline # Case: one-dimensional contrast ==> t or t**2 if self.dim == 1: # avoids division by zero t = (self.effect - baseline) / np.sqrt( np.maximum(self.variance, self._tiny)) if self.type == 'F': t = t ** 2 # Case: F contrast elif self.type == 'F': # F = |t|^2/q , |t|^2 = e^t v-1 e t = mahalanobis(self.effect - baseline, np.maximum( self.variance, self._tiny)) / self.dim # Case: tmin (conjunctions) elif self.type == 'tmin': vdiag = self.variance.reshape([self.dim ** 2] + list( self.variance.shape[2:]))[:: self.dim + 1] t = (self.effect - baseline) / np.sqrt( np.maximum(vdiag, self._tiny)) t = t.min(0) # Unknwon stat else: raise ValueError('Unknown statistic type') self._stat = t return t def pvalue(self, baseline=0.0): """ Return a parametric approximation of the p-value associated with the null hypothesis: (H0) 'contrast equals baseline' """ if self._stat == None or not self._baseline == baseline: self._stat = self.stat(baseline) # Valid conjunction as in Nichols et al, Neuroimage 25, 2005. if self.type in ['t', 'tmin']: p = sps.t.sf(self._stat, np.minimum(self.dof, self._dofmax)) elif self.type == 'F': p = sps.f.sf(self._stat, self.dim, np.minimum( self.dof, self._dofmax)) else: raise ValueError('Unknown statistic type') self._pvalue = p return p def zscore(self, baseline=0.0): """ Return a parametric approximation of the z-score associated with the null hypothesis: (H0) 'contrast equals baseline' """ if self._pvalue == None or not self._baseline == baseline: self._pvalue = self.pvalue(baseline) # Avoid inf values kindly supplied by scipy. 
z = zscore(self._pvalue) return z def __add__(self, other): if self.dim != other.dim: return None con = contrast(self.dim) con.type = self.type con.effect = self.effect + other.effect con.variance = self.variance + other.variance con.dof = self.dof + other.dof return con def __rmul__(self, other): k = float(other) con = contrast(self.dim) con.type = self.type con.effect = k * self.effect con.variance = k ** 2 * self.variance con.dof = self.dof return con __mul__ = __rmul__ def __div__(self, other): return self.__rmul__(1 / float(other)) def ols(Y, X, axis=0): """Essentially, compute pinv(X)*Y """ ndims = len(Y.shape) pX = np.linalg.pinv(X) beta = np.rollaxis(np.inner(pX, np.rollaxis(Y, axis, ndims)), 0, axis + 1) nvbeta = np.inner(pX, pX) res = Y - np.rollaxis( np.inner(X, np.rollaxis(beta, axis, ndims)), 0, axis + 1) n = res.shape[axis] s2 = (res ** 2).sum(axis) / float(n - X.shape[1]) dof = float(X.shape[0] - X.shape[1]) return beta, nvbeta, s2, dof def load(file): """Load a fitted glm """ from os.path import splitext if splitext(file)[1] == '': file = file + '.npz' fmod = np.load(file) mod = glm() mod.beta = fmod['beta'] mod.nvbeta = fmod['nvbeta'] mod.s2 = fmod['s2'] mod.dof = fmod['dof'] mod.a = fmod['a'] mod.model = str(fmod['model']) mod.method = str(fmod['method']) mod._axis = int(fmod['axis']) mod._constants = list(fmod['constants']) return mod nipy-0.3.0/nipy/labs/glm/kalman.c000066400000000000000000007506741210344137400166010ustar00rootroot00000000000000/* Generated by Cython 0.17.4 on Sat Jan 12 17:27:35 2013 */ #define PY_SSIZE_T_CLEAN #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02040000 #error Cython requires Python 2.4+. #else #include /* For offsetof */ #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #endif #if PY_VERSION_HEX < 0x02050000 typedef int Py_ssize_t; #define PY_SSIZE_T_MAX INT_MAX #define PY_SSIZE_T_MIN INT_MIN #define PY_FORMAT_SIZE_T "" #define CYTHON_FORMAT_SSIZE_T "" #define PyInt_FromSsize_t(z) PyInt_FromLong(z) #define PyInt_AsSsize_t(o) __Pyx_PyInt_AsInt(o) #define PyNumber_Index(o) ((PyNumber_Check(o) && !PyFloat_Check(o)) ? 
PyNumber_Int(o) : \ (PyErr_Format(PyExc_TypeError, \ "expected index value, got %.200s", Py_TYPE(o)->tp_name), \ (PyObject*)0)) #define __Pyx_PyIndex_Check(o) (PyNumber_Check(o) && !PyFloat_Check(o) && \ !PyComplex_Check(o)) #define PyIndex_Check __Pyx_PyIndex_Check #define PyErr_WarnEx(category, message, stacklevel) PyErr_Warn(category, message) #define __PYX_BUILD_PY_SSIZE_T "i" #else #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #define __Pyx_PyIndex_Check PyIndex_Check #endif #if PY_VERSION_HEX < 0x02060000 #define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt) #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type) #define Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size) #define PyVarObject_HEAD_INIT(type, size) \ PyObject_HEAD_INIT(type) size, #define PyType_Modified(t) typedef struct { void *buf; PyObject *obj; Py_ssize_t len; Py_ssize_t itemsize; int readonly; int ndim; char *format; Py_ssize_t *shape; Py_ssize_t *strides; Py_ssize_t *suboffsets; void *internal; } Py_buffer; #define PyBUF_SIMPLE 0 #define PyBUF_WRITABLE 0x0001 #define PyBUF_FORMAT 0x0004 #define PyBUF_ND 0x0008 #define PyBUF_STRIDES (0x0010 | PyBUF_ND) #define PyBUF_C_CONTIGUOUS (0x0020 | PyBUF_STRIDES) #define PyBUF_F_CONTIGUOUS (0x0040 | PyBUF_STRIDES) #define PyBUF_ANY_CONTIGUOUS (0x0080 | PyBUF_STRIDES) #define PyBUF_INDIRECT (0x0100 | PyBUF_STRIDES) #define PyBUF_RECORDS (PyBUF_STRIDES | PyBUF_FORMAT | PyBUF_WRITABLE) #define PyBUF_FULL (PyBUF_INDIRECT | PyBUF_FORMAT | PyBUF_WRITABLE) typedef int (*getbufferproc)(PyObject *, Py_buffer *, int); typedef void (*releasebufferproc)(PyObject *, Py_buffer *); #endif #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \ PyCode_New(a, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #endif #if PY_MAJOR_VERSION < 3 && PY_MINOR_VERSION < 6 #define PyUnicode_FromString(s) PyUnicode_Decode(s, strlen(s), "UTF-8", "strict") #endif #if PY_MAJOR_VERSION >= 3 #define Py_TPFLAGS_CHECKTYPES 0 #define Py_TPFLAGS_HAVE_INDEX 0 #endif #if (PY_VERSION_HEX < 0x02060000) || (PY_MAJOR_VERSION >= 3) #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ? 
\ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #else #define CYTHON_PEP393_ENABLED 0 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_READ(k, d, i) ((k=k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #endif #if PY_VERSION_HEX < 0x02060000 #define PyBytesObject PyStringObject #define PyBytes_Type PyString_Type #define PyBytes_Check PyString_Check #define PyBytes_CheckExact PyString_CheckExact #define PyBytes_FromString PyString_FromString #define PyBytes_FromStringAndSize PyString_FromStringAndSize #define PyBytes_FromFormat PyString_FromFormat #define PyBytes_DecodeEscape PyString_DecodeEscape #define PyBytes_AsString PyString_AsString #define PyBytes_AsStringAndSize PyString_AsStringAndSize #define PyBytes_Size PyString_Size #define PyBytes_AS_STRING PyString_AS_STRING #define PyBytes_GET_SIZE PyString_GET_SIZE #define PyBytes_Repr PyString_Repr #define PyBytes_Concat PyString_Concat #define PyBytes_ConcatAndDel PyString_ConcatAndDel #endif #if PY_VERSION_HEX < 0x02060000 #define PySet_Check(obj) PyObject_TypeCheck(obj, &PySet_Type) #define PyFrozenSet_Check(obj) PyObject_TypeCheck(obj, &PyFrozenSet_Type) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_VERSION_HEX < 0x03020000 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if (PY_MAJOR_VERSION < 3) || (PY_VERSION_HEX >= 0x03010300) #define __Pyx_PySequence_GetSlice(obj, a, b) PySequence_GetSlice(obj, a, b) #define __Pyx_PySequence_SetSlice(obj, a, b, value) PySequence_SetSlice(obj, a, b, value) #define __Pyx_PySequence_DelSlice(obj, a, b) PySequence_DelSlice(obj, a, b) #else #define __Pyx_PySequence_GetSlice(obj, a, b) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), (PyObject*)0) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_GetSlice(obj, a, b)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object is unsliceable", (obj)->ob_type->tp_name), (PyObject*)0))) #define __Pyx_PySequence_SetSlice(obj, a, b, value) (unlikely(!(obj)) ? 
\ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_SetSlice(obj, a, b, value)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice assignment", (obj)->ob_type->tp_name), -1))) #define __Pyx_PySequence_DelSlice(obj, a, b) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_DelSlice(obj, a, b)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice deletion", (obj)->ob_type->tp_name), -1))) #endif #if PY_MAJOR_VERSION >= 3 #define PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func)) #endif #if PY_VERSION_HEX < 0x02050000 #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),((char *)(n))) #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),((char *)(n)),(a)) #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),((char *)(n))) #else #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),(n)) #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),(n),(a)) #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),(n)) #endif #if PY_VERSION_HEX < 0x02050000 #define __Pyx_NAMESTR(n) ((char *)(n)) #define __Pyx_DOCSTR(n) ((char *)(n)) #else #define __Pyx_NAMESTR(n) (n) #define __Pyx_DOCSTR(n) (n) #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include #define __PYX_HAVE__nipy__labs__glm__kalman #define __PYX_HAVE_API__nipy__labs__glm__kalman #include "stdio.h" #include "stdlib.h" #include "numpy/arrayobject.h" #include "numpy/ufuncobject.h" #include "fff_base.h" #include "fff_vector.h" #include "fff_matrix.h" #include "fff_array.h" #include "fffpy.h" #include "fff_glm_kalman.h" #ifdef _OPENMP #include #endif /* _OPENMP */ #ifdef PYREX_WITHOUT_ASSERTIONS #define CYTHON_WITHOUT_ASSERTIONS #endif /* inline attribute */ #ifndef CYTHON_INLINE #if defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif /* unused attribute */ #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif typedef struct {PyObject **p; char *s; const long n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; /*proto*/ /* Type Conversion Predeclarations */ #define __Pyx_PyBytes_FromUString(s) PyBytes_FromString((char*)s) #define __Pyx_PyBytes_AsUString(s) ((unsigned char*) PyBytes_AsString(s)) #define __Pyx_Owned_Py_None(b) (Py_INCREF(Py_None), Py_None) #define __Pyx_PyBool_FromLong(b) ((b) ? 
(Py_INCREF(Py_True), Py_True) : (Py_INCREF(Py_False), Py_False)) static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x); static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject*); #if CYTHON_COMPILING_IN_CPYTHON #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #ifdef __GNUC__ /* Test for GCC > 2.95 */ #if __GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* __GNUC__ > 2 ... */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ > 2 ... */ #else /* __GNUC__ */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static PyObject *__pyx_m; static PyObject *__pyx_b; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= __FILE__; static const char *__pyx_filename; #if !defined(CYTHON_CCOMPLEX) #if defined(__cplusplus) #define CYTHON_CCOMPLEX 1 #elif defined(_Complex_I) #define CYTHON_CCOMPLEX 1 #else #define CYTHON_CCOMPLEX 0 #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus #include <complex> #else #include <complex.h> #endif #endif #if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__) #undef _Complex_I #define _Complex_I 1.0fj #endif static const char *__pyx_f[] = { "kalman.pyx", "numpy.pxd", "type.pxd", }; /* "numpy.pxd":723 * # in Cython to enable them only on the right systems.
* * ctypedef npy_int8 int8_t # <<<<<<<<<<<<<< * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t */ typedef npy_int8 __pyx_t_5numpy_int8_t; /* "numpy.pxd":724 * * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t # <<<<<<<<<<<<<< * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t */ typedef npy_int16 __pyx_t_5numpy_int16_t; /* "numpy.pxd":725 * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t # <<<<<<<<<<<<<< * ctypedef npy_int64 int64_t * #ctypedef npy_int96 int96_t */ typedef npy_int32 __pyx_t_5numpy_int32_t; /* "numpy.pxd":726 * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t # <<<<<<<<<<<<<< * #ctypedef npy_int96 int96_t * #ctypedef npy_int128 int128_t */ typedef npy_int64 __pyx_t_5numpy_int64_t; /* "numpy.pxd":730 * #ctypedef npy_int128 int128_t * * ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<< * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t */ typedef npy_uint8 __pyx_t_5numpy_uint8_t; /* "numpy.pxd":731 * * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<< * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t */ typedef npy_uint16 __pyx_t_5numpy_uint16_t; /* "numpy.pxd":732 * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<< * ctypedef npy_uint64 uint64_t * #ctypedef npy_uint96 uint96_t */ typedef npy_uint32 __pyx_t_5numpy_uint32_t; /* "numpy.pxd":733 * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<< * #ctypedef npy_uint96 uint96_t * #ctypedef npy_uint128 uint128_t */ typedef npy_uint64 __pyx_t_5numpy_uint64_t; /* "numpy.pxd":737 * #ctypedef npy_uint128 uint128_t * * ctypedef npy_float32 float32_t # <<<<<<<<<<<<<< * ctypedef npy_float64 float64_t * #ctypedef npy_float80 float80_t */ typedef npy_float32 __pyx_t_5numpy_float32_t; /* "numpy.pxd":738 * * ctypedef npy_float32 float32_t * ctypedef npy_float64 float64_t # <<<<<<<<<<<<<< * #ctypedef npy_float80 float80_t * #ctypedef npy_float128 float128_t */ typedef npy_float64 __pyx_t_5numpy_float64_t; /* "numpy.pxd":747 * # The int types are mapped a bit surprising -- * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t # <<<<<<<<<<<<<< * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t */ typedef npy_long __pyx_t_5numpy_int_t; /* "numpy.pxd":748 * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t * ctypedef npy_longlong long_t # <<<<<<<<<<<<<< * ctypedef npy_longlong longlong_t * */ typedef npy_longlong __pyx_t_5numpy_long_t; /* "numpy.pxd":749 * ctypedef npy_long int_t * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<< * * ctypedef npy_ulong uint_t */ typedef npy_longlong __pyx_t_5numpy_longlong_t; /* "numpy.pxd":751 * ctypedef npy_longlong longlong_t * * ctypedef npy_ulong uint_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t */ typedef npy_ulong __pyx_t_5numpy_uint_t; /* "numpy.pxd":752 * * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulonglong_t * */ typedef npy_ulonglong __pyx_t_5numpy_ulong_t; /* "numpy.pxd":753 * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<< * * ctypedef npy_intp intp_t */ typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t; /* "numpy.pxd":755 * ctypedef npy_ulonglong ulonglong_t * * ctypedef npy_intp intp_t # 
<<<<<<<<<<<<<< * ctypedef npy_uintp uintp_t * */ typedef npy_intp __pyx_t_5numpy_intp_t; /* "numpy.pxd":756 * * ctypedef npy_intp intp_t * ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<< * * ctypedef npy_double float_t */ typedef npy_uintp __pyx_t_5numpy_uintp_t; /* "numpy.pxd":758 * ctypedef npy_uintp uintp_t * * ctypedef npy_double float_t # <<<<<<<<<<<<<< * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t */ typedef npy_double __pyx_t_5numpy_float_t; /* "numpy.pxd":759 * * ctypedef npy_double float_t * ctypedef npy_double double_t # <<<<<<<<<<<<<< * ctypedef npy_longdouble longdouble_t * */ typedef npy_double __pyx_t_5numpy_double_t; /* "numpy.pxd":760 * ctypedef npy_double float_t * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cfloat cfloat_t */ typedef npy_longdouble __pyx_t_5numpy_longdouble_t; /* "fff.pxd":9 * * # Redefine size_t * ctypedef unsigned long int size_t # <<<<<<<<<<<<<< * * */ typedef unsigned long __pyx_t_3fff_size_t; #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< float > __pyx_t_float_complex; #else typedef float _Complex __pyx_t_float_complex; #endif #else typedef struct { float real, imag; } __pyx_t_float_complex; #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< double > __pyx_t_double_complex; #else typedef double _Complex __pyx_t_double_complex; #endif #else typedef struct { double real, imag; } __pyx_t_double_complex; #endif /*--- Type declarations ---*/ /* "numpy.pxd":762 * ctypedef npy_longdouble longdouble_t * * ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<< * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t */ typedef npy_cfloat __pyx_t_5numpy_cfloat_t; /* "numpy.pxd":763 * * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<< * ctypedef npy_clongdouble clongdouble_t * */ typedef npy_cdouble __pyx_t_5numpy_cdouble_t; /* "numpy.pxd":764 * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cdouble complex_t */ typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t; /* "numpy.pxd":766 * ctypedef npy_clongdouble clongdouble_t * * ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew1(a): */ typedef npy_cdouble __pyx_t_5numpy_complex_t; #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); /*proto*/ #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define __Pyx_RefNannySetupContext(name, acquire_gil) \ if (acquire_gil) { \ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \ PyGILState_Release(__pyx_gilstate_save); \ } else { \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil) \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext() \ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define 
__Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif /* CYTHON_REFNANNY */ #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name); /*proto*/ static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /*proto*/ static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /*proto*/ static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[], \ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, \ const char* function_name); /*proto*/ static int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, const char *name, int exact); /*proto*/ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); if (likely(L->allocated > len)) { Py_INCREF(x); PyList_SET_ITEM(list, len, x); Py_SIZE(list) = len+1; return 0; } return PyList_Append(list, x); } #else #define __Pyx_PyList_Append(L,x) PyList_Append(L,x) #endif #define __Pyx_SetItemInt(o, i, v, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? \ __Pyx_SetItemInt_Fast(o, i, v) : \ __Pyx_SetItemInt_Generic(o, to_py_func(i), v)) static CYTHON_INLINE int __Pyx_SetItemInt_Generic(PyObject *o, PyObject *j, PyObject *v) { int r; if (!j) return -1; r = PyObject_SetItem(o, j, v); Py_DECREF(j); return r; } static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObject *v) { #if CYTHON_COMPILING_IN_CPYTHON if (PyList_CheckExact(o)) { Py_ssize_t n = (likely(i >= 0)) ? 
i : i + PyList_GET_SIZE(o); if (likely((n >= 0) & (n < PyList_GET_SIZE(o)))) { PyObject* old = PyList_GET_ITEM(o, n); Py_INCREF(v); PyList_SET_ITEM(o, n, v); Py_DECREF(old); return 1; } } else { /* inlined PySequence_SetItem() */ PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; if (likely(m && m->sq_ass_item)) { if (unlikely(i < 0) && likely(m->sq_length)) { Py_ssize_t l = m->sq_length(o); if (unlikely(l < 0)) return -1; i += l; } return m->sq_ass_item(o, i, v); } } #else #if CYTHON_COMPILING_IN_PYPY if (PySequence_Check(o) && !PyDict_Check(o)) { #else if (PySequence_Check(o)) { #endif return PySequence_SetItem(o, i, v); } #endif return __Pyx_SetItemInt_Generic(o, PyInt_FromSsize_t(i), v); } static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb); /*proto*/ static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb); /*proto*/ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /*proto*/ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); static CYTHON_INLINE int __Pyx_IterFinish(void); /*proto*/ static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected); /*proto*/ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /*proto*/ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level); /*proto*/ static CYTHON_INLINE PyObject *__Pyx_PyInt_to_py_Py_intptr_t(Py_intptr_t); #if CYTHON_CCOMPLEX #ifdef __cplusplus #define __Pyx_CREAL(z) ((z).real()) #define __Pyx_CIMAG(z) ((z).imag()) #else #define __Pyx_CREAL(z) (__real__(z)) #define __Pyx_CIMAG(z) (__imag__(z)) #endif #else #define __Pyx_CREAL(z) ((z).real) #define __Pyx_CIMAG(z) ((z).imag) #endif #if defined(_WIN32) && defined(__cplusplus) && CYTHON_CCOMPLEX #define __Pyx_SET_CREAL(z,x) ((z).real(x)) #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) #else #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) #endif static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float); #if CYTHON_CCOMPLEX #define __Pyx_c_eqf(a, b) ((a)==(b)) #define __Pyx_c_sumf(a, b) ((a)+(b)) #define __Pyx_c_difff(a, b) ((a)-(b)) #define __Pyx_c_prodf(a, b) ((a)*(b)) #define __Pyx_c_quotf(a, b) ((a)/(b)) #define __Pyx_c_negf(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zerof(z) ((z)==(float)0) #define __Pyx_c_conjf(z) (::std::conj(z)) #if 1 #define __Pyx_c_absf(z) (::std::abs(z)) #define __Pyx_c_powf(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zerof(z) ((z)==0) #define __Pyx_c_conjf(z) (conjf(z)) #if 1 #define __Pyx_c_absf(z) (cabsf(z)) #define __Pyx_c_powf(a, b) (cpowf(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex); static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex); static CYTHON_INLINE 
__pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex); #if 1 static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex, __pyx_t_float_complex); #endif #endif static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double); #if CYTHON_CCOMPLEX #define __Pyx_c_eq(a, b) ((a)==(b)) #define __Pyx_c_sum(a, b) ((a)+(b)) #define __Pyx_c_diff(a, b) ((a)-(b)) #define __Pyx_c_prod(a, b) ((a)*(b)) #define __Pyx_c_quot(a, b) ((a)/(b)) #define __Pyx_c_neg(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zero(z) ((z)==(double)0) #define __Pyx_c_conj(z) (::std::conj(z)) #if 1 #define __Pyx_c_abs(z) (::std::abs(z)) #define __Pyx_c_pow(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zero(z) ((z)==0) #define __Pyx_c_conj(z) (conj(z)) #if 1 #define __Pyx_c_abs(z) (cabs(z)) #define __Pyx_c_pow(a, b) (cpow(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex); static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex); #if 1 static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex, __pyx_t_double_complex); #endif #endif static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject *); static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject *); static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject *); static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject *); static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject *); static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject *); static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject *); static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject *); static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject *); static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject *); static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject *); static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject *); static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject *); static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject *); static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject *); static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject *); static int __Pyx_check_binary_version(void); #if !defined(__Pyx_PyIdentifier_FromString) #if PY_MAJOR_VERSION < 3 #define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s) #else #define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s) #endif #endif static PyObject *__Pyx_ImportModule(const char *name); /*proto*/ static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict); /*proto*/ typedef struct { int code_line; PyCodeObject* code_object; } __Pyx_CodeObjectCacheEntry; struct 
__Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); /*proto*/ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/ /* Module declarations from 'cpython.buffer' */ /* Module declarations from 'cpython.ref' */ /* Module declarations from 'libc.stdio' */ /* Module declarations from 'cpython.object' */ /* Module declarations from '__builtin__' */ /* Module declarations from 'cpython.type' */ static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0; /* Module declarations from 'libc.stdlib' */ /* Module declarations from 'numpy' */ /* Module declarations from 'numpy' */ static PyTypeObject *__pyx_ptype_5numpy_dtype = 0; static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0; static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0; static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0; static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0; static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/ /* Module declarations from 'fff' */ /* Module declarations from 'nipy.labs.glm.kalman' */ #define __Pyx_MODULE_NAME "nipy.labs.glm.kalman" int __pyx_module_is_main_nipy__labs__glm__kalman = 0; /* Implementation of 'nipy.labs.glm.kalman' */ static PyObject *__pyx_builtin_range; static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_RuntimeError; static PyObject *__pyx_pf_4nipy_4labs_3glm_6kalman_ols(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_Y, PyArrayObject *__pyx_v_X, int __pyx_v_axis); /* proto */ static PyObject *__pyx_pf_4nipy_4labs_3glm_6kalman_2ar1(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_Y, PyArrayObject *__pyx_v_X, int __pyx_v_niter, int __pyx_v_axis); /* proto */ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */ static char __pyx_k_1[] = "ndarray is not C contiguous"; static char __pyx_k_3[] = "ndarray is not Fortran contiguous"; static char __pyx_k_5[] = "Non-native byte order not supported"; static char __pyx_k_7[] = "unknown dtype code in numpy.pxd (%d)"; static char __pyx_k_8[] = "Format string allocated too short, see comment in numpy.pxd"; static char __pyx_k_11[] = "Format string allocated too short."; static char __pyx_k_13[] = "\nIncremental (Kalman-like) filters for linear regression. 
\n\nAuthor: Alexis Roche, 2008.\n"; static char __pyx_k_14[] = "0.1"; static char __pyx_k_17[] = "/Users/mb312/dev_trees/nipy/nipy/labs/glm/kalman.pyx"; static char __pyx_k_18[] = "nipy.labs.glm.kalman"; static char __pyx_k__A[] = "A"; static char __pyx_k__B[] = "B"; static char __pyx_k__H[] = "H"; static char __pyx_k__I[] = "I"; static char __pyx_k__L[] = "L"; static char __pyx_k__O[] = "O"; static char __pyx_k__Q[] = "Q"; static char __pyx_k__X[] = "X"; static char __pyx_k__Y[] = "Y"; static char __pyx_k__a[] = "a"; static char __pyx_k__b[] = "b"; static char __pyx_k__d[] = "d"; static char __pyx_k__f[] = "f"; static char __pyx_k__g[] = "g"; static char __pyx_k__h[] = "h"; static char __pyx_k__i[] = "i"; static char __pyx_k__l[] = "l"; static char __pyx_k__p[] = "p"; static char __pyx_k__q[] = "q"; static char __pyx_k__x[] = "x"; static char __pyx_k__y[] = "y"; static char __pyx_k__S2[] = "S2"; static char __pyx_k__VB[] = "VB"; static char __pyx_k__Zd[] = "Zd"; static char __pyx_k__Zf[] = "Zf"; static char __pyx_k__Zg[] = "Zg"; static char __pyx_k__np[] = "np"; static char __pyx_k__p2[] = "p2"; static char __pyx_k__s2[] = "s2"; static char __pyx_k__vb[] = "vb"; static char __pyx_k__ar1[] = "ar1"; static char __pyx_k__dof[] = "dof"; static char __pyx_k__ols[] = "ols"; static char __pyx_k__axis[] = "axis"; static char __pyx_k__dims[] = "dims"; static char __pyx_k__dtype[] = "dtype"; static char __pyx_k__kfilt[] = "kfilt"; static char __pyx_k__multi[] = "multi"; static char __pyx_k__niter[] = "niter"; static char __pyx_k__numpy[] = "numpy"; static char __pyx_k__range[] = "range"; static char __pyx_k__zeros[] = "zeros"; static char __pyx_k__double[] = "double"; static char __pyx_k__rkfilt[] = "rkfilt"; static char __pyx_k__Vb_flat[] = "Vb_flat"; static char __pyx_k__reshape[] = "reshape"; static char __pyx_k____main__[] = "__main__"; static char __pyx_k____test__[] = "__test__"; static char __pyx_k__ValueError[] = "ValueError"; static char __pyx_k____version__[] = "__version__"; static char __pyx_k__RuntimeError[] = "RuntimeError"; static PyObject *__pyx_kp_u_1; static PyObject *__pyx_kp_u_11; static PyObject *__pyx_kp_s_14; static PyObject *__pyx_kp_s_17; static PyObject *__pyx_n_s_18; static PyObject *__pyx_kp_u_3; static PyObject *__pyx_kp_u_5; static PyObject *__pyx_kp_u_7; static PyObject *__pyx_kp_u_8; static PyObject *__pyx_n_s__A; static PyObject *__pyx_n_s__B; static PyObject *__pyx_n_s__RuntimeError; static PyObject *__pyx_n_s__S2; static PyObject *__pyx_n_s__VB; static PyObject *__pyx_n_s__ValueError; static PyObject *__pyx_n_s__Vb_flat; static PyObject *__pyx_n_s__X; static PyObject *__pyx_n_s__Y; static PyObject *__pyx_n_s____main__; static PyObject *__pyx_n_s____test__; static PyObject *__pyx_n_s____version__; static PyObject *__pyx_n_s__a; static PyObject *__pyx_n_s__ar1; static PyObject *__pyx_n_s__axis; static PyObject *__pyx_n_s__b; static PyObject *__pyx_n_s__dims; static PyObject *__pyx_n_s__dof; static PyObject *__pyx_n_s__double; static PyObject *__pyx_n_s__dtype; static PyObject *__pyx_n_s__i; static PyObject *__pyx_n_s__kfilt; static PyObject *__pyx_n_s__multi; static PyObject *__pyx_n_s__niter; static PyObject *__pyx_n_s__np; static PyObject *__pyx_n_s__numpy; static PyObject *__pyx_n_s__ols; static PyObject *__pyx_n_s__p; static PyObject *__pyx_n_s__p2; static PyObject *__pyx_n_s__range; static PyObject *__pyx_n_s__reshape; static PyObject *__pyx_n_s__rkfilt; static PyObject *__pyx_n_s__s2; static PyObject *__pyx_n_s__vb; static PyObject *__pyx_n_s__x; static 
PyObject *__pyx_n_s__y; static PyObject *__pyx_n_s__zeros; static PyObject *__pyx_int_1; static PyObject *__pyx_int_15; static PyObject *__pyx_k_tuple_2; static PyObject *__pyx_k_tuple_4; static PyObject *__pyx_k_tuple_6; static PyObject *__pyx_k_tuple_9; static PyObject *__pyx_k_tuple_10; static PyObject *__pyx_k_tuple_12; static PyObject *__pyx_k_tuple_15; static PyObject *__pyx_k_tuple_19; static PyObject *__pyx_k_codeobj_16; static PyObject *__pyx_k_codeobj_20; /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_3glm_6kalman_1ols(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_4labs_3glm_6kalman_ols[] = "\n (beta, norm_var_beta, s2, dof) = ols(Y, X, axis=0).\n\n Ordinary least-square multiple regression using the Kalman filter.\n Fit the N-dimensional array Y along the given axis in terms of the\n regressors in matrix X. The regressors must be stored columnwise.\n\n OUTPUT: a four-element tuple\n beta -- array of parameter estimates\n norm_var_beta -- normalized variance matrix of the parameter\n estimates (data independent)\n s2 -- array of squared scale\n parameters to multiply norm_var_beta for the variance matrix of\n beta.\n dof -- scalar degrees of freedom.\n\n REFERENCE: Roche et al, ISBI 2004.\n "; static PyMethodDef __pyx_mdef_4nipy_4labs_3glm_6kalman_1ols = {__Pyx_NAMESTR("ols"), (PyCFunction)__pyx_pw_4nipy_4labs_3glm_6kalman_1ols, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4nipy_4labs_3glm_6kalman_ols)}; static PyObject *__pyx_pw_4nipy_4labs_3glm_6kalman_1ols(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_Y = 0; PyArrayObject *__pyx_v_X = 0; int __pyx_v_axis; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("ols (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__Y,&__pyx_n_s__X,&__pyx_n_s__axis,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__Y)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__X)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("ols", 0, 2, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 68; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__axis); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "ols") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 68; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_Y = ((PyArrayObject *)values[0]); __pyx_v_X = ((PyArrayObject *)values[1]); if (values[2]) { __pyx_v_axis = __Pyx_PyInt_AsInt(values[2]); if (unlikely((__pyx_v_axis == (int)-1) && PyErr_Occurred())) {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 68; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else { __pyx_v_axis = ((int)0); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("ols", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 68; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.labs.glm.kalman.ols", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_Y), __pyx_ptype_5numpy_ndarray, 1, "Y", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 68; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_X), __pyx_ptype_5numpy_ndarray, 1, "X", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 68; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_4nipy_4labs_3glm_6kalman_ols(__pyx_self, __pyx_v_Y, __pyx_v_X, __pyx_v_axis); goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/labs/glm/kalman.pyx":68 * # Standard Kalman filter * * def ols(ndarray Y, ndarray X, int axis=0): # <<<<<<<<<<<<<< * """ * (beta, norm_var_beta, s2, dof) = ols(Y, X, axis=0). */ static PyObject *__pyx_pf_4nipy_4labs_3glm_6kalman_ols(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_Y, PyArrayObject *__pyx_v_X, int __pyx_v_axis) { fff_vector *__pyx_v_y; fff_vector *__pyx_v_b; fff_vector *__pyx_v_s2; fff_matrix *__pyx_v_x; fff_glm_KF *__pyx_v_kfilt; size_t __pyx_v_p; fffpy_multi_iterator *__pyx_v_multi; double __pyx_v_dof; PyObject *__pyx_v_dims = NULL; PyObject *__pyx_v_B = NULL; PyObject *__pyx_v_S2 = NULL; PyArrayObject *__pyx_v_VB = NULL; int __pyx_v_i; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations size_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_t_3; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; int __pyx_t_9; double __pyx_t_10; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("ols", 0); /* "nipy/labs/glm/kalman.pyx":95 * * # View on design matrix * x = fff_matrix_fromPyArray(X) # <<<<<<<<<<<<<< * * # Number of regressors */ __pyx_v_x = fff_matrix_fromPyArray(__pyx_v_X); /* "nipy/labs/glm/kalman.pyx":98 * * # Number of regressors * p = x.size2 # <<<<<<<<<<<<<< * * # Allocate output arrays B and S2 */ __pyx_t_1 = __pyx_v_x->size2; __pyx_v_p = __pyx_t_1; /* "nipy/labs/glm/kalman.pyx":105 * # type; see: * # http://codespeak.net/pipermail/cython-dev/2009-April/005229.html * dims = [Y.shape[i] for i in range(Y.ndim)] # <<<<<<<<<<<<<< * dims[axis] = p * B = np.zeros(dims, dtype=np.double) */ __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 105; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __pyx_v_Y->nd; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; __pyx_t_5 = __Pyx_PyInt_to_py_Py_intptr_t((__pyx_v_Y->dimensions[__pyx_v_i])); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 105; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); if (unlikely(__Pyx_PyList_Append(__pyx_t_2, (PyObject*)__pyx_t_5))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 105; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } __pyx_t_5 = 
((PyObject *)__pyx_t_2); __Pyx_INCREF(__pyx_t_5); __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; __pyx_v_dims = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; /* "nipy/labs/glm/kalman.pyx":106 * # http://codespeak.net/pipermail/cython-dev/2009-April/005229.html * dims = [Y.shape[i] for i in range(Y.ndim)] * dims[axis] = p # <<<<<<<<<<<<<< * B = np.zeros(dims, dtype=np.double) * dims[axis] = 1 */ __pyx_t_5 = __Pyx_PyInt_FromSize_t(__pyx_v_p); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 106; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); if (__Pyx_SetItemInt(((PyObject *)__pyx_v_dims), __pyx_v_axis, __pyx_t_5, sizeof(int), PyInt_FromLong) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 106; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; /* "nipy/labs/glm/kalman.pyx":107 * dims = [Y.shape[i] for i in range(Y.ndim)] * dims[axis] = p * B = np.zeros(dims, dtype=np.double) # <<<<<<<<<<<<<< * dims[axis] = 1 * S2 = np.zeros(dims, dtype=np.double) */ __pyx_t_5 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 107; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_2 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__zeros); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 107; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 107; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(((PyObject *)__pyx_v_dims)); PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_v_dims)); __Pyx_GIVEREF(((PyObject *)__pyx_v_dims)); __pyx_t_6 = PyDict_New(); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 107; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_6)); __pyx_t_7 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 107; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_7); __pyx_t_8 = PyObject_GetAttr(__pyx_t_7, __pyx_n_s__double); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 107; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; if (PyDict_SetItem(__pyx_t_6, ((PyObject *)__pyx_n_s__dtype), __pyx_t_8) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 107; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_8 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_5), ((PyObject *)__pyx_t_6)); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 107; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_6)); __pyx_t_6 = 0; __pyx_v_B = __pyx_t_8; __pyx_t_8 = 0; /* "nipy/labs/glm/kalman.pyx":108 * dims[axis] = p * B = np.zeros(dims, dtype=np.double) * dims[axis] = 1 # <<<<<<<<<<<<<< * S2 = np.zeros(dims, dtype=np.double) * */ if (__Pyx_SetItemInt(((PyObject *)__pyx_v_dims), __pyx_v_axis, __pyx_int_1, sizeof(int), PyInt_FromLong) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 108; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/glm/kalman.pyx":109 * B = np.zeros(dims, dtype=np.double) * 
dims[axis] = 1 * S2 = np.zeros(dims, dtype=np.double) # <<<<<<<<<<<<<< * * # Allocate local structure */ __pyx_t_8 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __pyx_t_6 = PyObject_GetAttr(__pyx_t_8, __pyx_n_s__zeros); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_8 = PyTuple_New(1); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __Pyx_INCREF(((PyObject *)__pyx_v_dims)); PyTuple_SET_ITEM(__pyx_t_8, 0, ((PyObject *)__pyx_v_dims)); __Pyx_GIVEREF(((PyObject *)__pyx_v_dims)); __pyx_t_5 = PyDict_New(); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_5)); __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_7 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__double); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (PyDict_SetItem(__pyx_t_5, ((PyObject *)__pyx_n_s__dtype), __pyx_t_7) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = PyObject_Call(__pyx_t_6, ((PyObject *)__pyx_t_8), ((PyObject *)__pyx_t_5)); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_8)); __pyx_t_8 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; __pyx_v_S2 = __pyx_t_7; __pyx_t_7 = 0; /* "nipy/labs/glm/kalman.pyx":112 * * # Allocate local structure * kfilt = fff_glm_KF_new(p) # <<<<<<<<<<<<<< * * # Create a new array iterator */ __pyx_v_kfilt = fff_glm_KF_new(__pyx_v_p); /* "nipy/labs/glm/kalman.pyx":115 * * # Create a new array iterator * multi = fffpy_multi_iterator_new(3, axis, Y, B, S2) # <<<<<<<<<<<<<< * * # Create views */ __pyx_v_multi = fffpy_multi_iterator_new(3, __pyx_v_axis, ((void *)__pyx_v_Y), ((void *)__pyx_v_B), ((void *)__pyx_v_S2)); /* "nipy/labs/glm/kalman.pyx":118 * * # Create views * y = multi.vector[0] # <<<<<<<<<<<<<< * b = multi.vector[1] * s2 = multi.vector[2] */ __pyx_v_y = (__pyx_v_multi->vector[0]); /* "nipy/labs/glm/kalman.pyx":119 * # Create views * y = multi.vector[0] * b = multi.vector[1] # <<<<<<<<<<<<<< * s2 = multi.vector[2] * */ __pyx_v_b = (__pyx_v_multi->vector[1]); /* "nipy/labs/glm/kalman.pyx":120 * y = multi.vector[0] * b = multi.vector[1] * s2 = multi.vector[2] # <<<<<<<<<<<<<< * * # Loop */ __pyx_v_s2 = (__pyx_v_multi->vector[2]); /* "nipy/labs/glm/kalman.pyx":123 * * # Loop * while(multi.index < multi.size): # <<<<<<<<<<<<<< * fff_glm_KF_fit(kfilt, y, x) * fff_vector_memcpy(b, kfilt.b) */ while (1) { __pyx_t_9 = (__pyx_v_multi->index < __pyx_v_multi->size); if (!__pyx_t_9) break; /* "nipy/labs/glm/kalman.pyx":124 * # Loop * while(multi.index < multi.size): * fff_glm_KF_fit(kfilt, y, x) # <<<<<<<<<<<<<< * 
fff_vector_memcpy(b, kfilt.b) * s2.data[0] = kfilt.s2 */ fff_glm_KF_fit(__pyx_v_kfilt, __pyx_v_y, __pyx_v_x); /* "nipy/labs/glm/kalman.pyx":125 * while(multi.index < multi.size): * fff_glm_KF_fit(kfilt, y, x) * fff_vector_memcpy(b, kfilt.b) # <<<<<<<<<<<<<< * s2.data[0] = kfilt.s2 * fffpy_multi_iterator_update(multi) */ fff_vector_memcpy(__pyx_v_b, __pyx_v_kfilt->b); /* "nipy/labs/glm/kalman.pyx":126 * fff_glm_KF_fit(kfilt, y, x) * fff_vector_memcpy(b, kfilt.b) * s2.data[0] = kfilt.s2 # <<<<<<<<<<<<<< * fffpy_multi_iterator_update(multi) * */ __pyx_t_10 = __pyx_v_kfilt->s2; (__pyx_v_s2->data[0]) = __pyx_t_10; /* "nipy/labs/glm/kalman.pyx":127 * fff_vector_memcpy(b, kfilt.b) * s2.data[0] = kfilt.s2 * fffpy_multi_iterator_update(multi) # <<<<<<<<<<<<<< * * # Normalized variance (computed from the last item) */ fffpy_multi_iterator_update(__pyx_v_multi); } /* "nipy/labs/glm/kalman.pyx":130 * * # Normalized variance (computed from the last item) * VB = fff_matrix_const_toPyArray(kfilt.Vb); # <<<<<<<<<<<<<< * dof = kfilt.dof * */ __pyx_t_7 = ((PyObject *)fff_matrix_const_toPyArray(__pyx_v_kfilt->Vb)); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 130; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_7); __pyx_v_VB = ((PyArrayObject *)__pyx_t_7); __pyx_t_7 = 0; /* "nipy/labs/glm/kalman.pyx":131 * # Normalized variance (computed from the last item) * VB = fff_matrix_const_toPyArray(kfilt.Vb); * dof = kfilt.dof # <<<<<<<<<<<<<< * * # Free memory */ __pyx_t_10 = __pyx_v_kfilt->dof; __pyx_v_dof = __pyx_t_10; /* "nipy/labs/glm/kalman.pyx":134 * * # Free memory * fff_matrix_delete(x) # <<<<<<<<<<<<<< * fff_glm_KF_delete(kfilt) * fffpy_multi_iterator_delete(multi) */ fff_matrix_delete(__pyx_v_x); /* "nipy/labs/glm/kalman.pyx":135 * # Free memory * fff_matrix_delete(x) * fff_glm_KF_delete(kfilt) # <<<<<<<<<<<<<< * fffpy_multi_iterator_delete(multi) * */ fff_glm_KF_delete(__pyx_v_kfilt); /* "nipy/labs/glm/kalman.pyx":136 * fff_matrix_delete(x) * fff_glm_KF_delete(kfilt) * fffpy_multi_iterator_delete(multi) # <<<<<<<<<<<<<< * * # Return */ fffpy_multi_iterator_delete(__pyx_v_multi); /* "nipy/labs/glm/kalman.pyx":139 * * # Return * return B, VB, S2, dof # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_7 = PyFloat_FromDouble(__pyx_v_dof); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 139; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_7); __pyx_t_5 = PyTuple_New(4); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 139; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(__pyx_v_B); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_B); __Pyx_GIVEREF(__pyx_v_B); __Pyx_INCREF(((PyObject *)__pyx_v_VB)); PyTuple_SET_ITEM(__pyx_t_5, 1, ((PyObject *)__pyx_v_VB)); __Pyx_GIVEREF(((PyObject *)__pyx_v_VB)); __Pyx_INCREF(__pyx_v_S2); PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_S2); __Pyx_GIVEREF(__pyx_v_S2); PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_7); __Pyx_GIVEREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_r = ((PyObject *)__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("nipy.labs.glm.kalman.ols", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_dims); __Pyx_XDECREF(__pyx_v_B); __Pyx_XDECREF(__pyx_v_S2); __Pyx_XDECREF((PyObject 
*)__pyx_v_VB); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_3glm_6kalman_3ar1(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_4labs_3glm_6kalman_2ar1[] = "\n (beta, norm_var_beta, s2, dof, a) = ar1(Y, X, niter=2, axis=0)\n\n Refined Kalman filter -- enhanced Kalman filter to account for\n noise autocorrelation using an AR(1) model. Pseudo-likelihood\n multiple regression using the refined Kalman filter, a Kalman\n variant based on a AR(1) error model. Fit the N-dimensional array\n Y along the given axis in terms of the regressors in matrix X. The\n regressors must be stored columnwise.\n\n OUTPUT: a five-element tuple\n beta -- array of parameter estimates\n norm_var_beta -- array of normalized variance matrices (which are data dependent\n unlike in standard OLS regression)\n s2 -- array of squared scale parameters to multiply norm_var_beta for the variance matrix of beta.\n dof -- scalar degrees of freedom\n a -- array of error autocorrelation estimates\n\n REFERENCE:\n Roche et al, MICCAI 2004.\n "; static PyMethodDef __pyx_mdef_4nipy_4labs_3glm_6kalman_3ar1 = {__Pyx_NAMESTR("ar1"), (PyCFunction)__pyx_pw_4nipy_4labs_3glm_6kalman_3ar1, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4nipy_4labs_3glm_6kalman_2ar1)}; static PyObject *__pyx_pw_4nipy_4labs_3glm_6kalman_3ar1(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_Y = 0; PyArrayObject *__pyx_v_X = 0; int __pyx_v_niter; int __pyx_v_axis; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("ar1 (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__Y,&__pyx_n_s__X,&__pyx_n_s__niter,&__pyx_n_s__axis,0}; PyObject* values[4] = {0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__Y)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__X)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("ar1", 0, 2, 4, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__niter); if (value) { values[2] = value; kw_args--; } } case 3: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__axis); if (value) { values[3] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "ar1") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_Y = ((PyArrayObject *)values[0]); __pyx_v_X = ((PyArrayObject 
*)values[1]); if (values[2]) { __pyx_v_niter = __Pyx_PyInt_AsInt(values[2]); if (unlikely((__pyx_v_niter == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else { __pyx_v_niter = ((int)2); } if (values[3]) { __pyx_v_axis = __Pyx_PyInt_AsInt(values[3]); if (unlikely((__pyx_v_axis == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else { __pyx_v_axis = ((int)0); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("ar1", 0, 2, 4, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.labs.glm.kalman.ar1", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_Y), __pyx_ptype_5numpy_ndarray, 1, "Y", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_X), __pyx_ptype_5numpy_ndarray, 1, "X", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_4nipy_4labs_3glm_6kalman_2ar1(__pyx_self, __pyx_v_Y, __pyx_v_X, __pyx_v_niter, __pyx_v_axis); goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/labs/glm/kalman.pyx":142 * * * def ar1(ndarray Y, ndarray X, int niter=2, int axis=0): # <<<<<<<<<<<<<< * """ * (beta, norm_var_beta, s2, dof, a) = ar1(Y, X, niter=2, axis=0) */ static PyObject *__pyx_pf_4nipy_4labs_3glm_6kalman_2ar1(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_Y, PyArrayObject *__pyx_v_X, int __pyx_v_niter, int __pyx_v_axis) { fff_vector *__pyx_v_y; fff_vector *__pyx_v_b; fff_vector *__pyx_v_vb; fff_vector *__pyx_v_s2; fff_vector *__pyx_v_a; fff_vector __pyx_v_Vb_flat; fff_matrix *__pyx_v_x; fff_glm_RKF *__pyx_v_rkfilt; size_t __pyx_v_p; size_t __pyx_v_p2; fffpy_multi_iterator *__pyx_v_multi; double __pyx_v_dof; PyObject *__pyx_v_dims = NULL; PyObject *__pyx_v_B = NULL; PyObject *__pyx_v_VB = NULL; PyObject *__pyx_v_S2 = NULL; PyObject *__pyx_v_A = NULL; int __pyx_v_i; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations size_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_t_3; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; int __pyx_t_9; double __pyx_t_10; int __pyx_t_11; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("ar1", 0); /* "nipy/labs/glm/kalman.pyx":173 * * # View on design matrix * x = fff_matrix_fromPyArray(X) # <<<<<<<<<<<<<< * * # Number of regressors */ __pyx_v_x = fff_matrix_fromPyArray(__pyx_v_X); /* "nipy/labs/glm/kalman.pyx":176 * * # Number of regressors * p = x.size2 # <<<<<<<<<<<<<< * p2 = p*p * */ __pyx_t_1 = __pyx_v_x->size2; __pyx_v_p = __pyx_t_1; /* "nipy/labs/glm/kalman.pyx":177 * # Number of regressors * p = x.size2 * p2 = p*p # <<<<<<<<<<<<<< * * # Allocate output arrays B and S2. 
*/ __pyx_v_p2 = (__pyx_v_p * __pyx_v_p); /* "nipy/labs/glm/kalman.pyx":184 * # type; see: * # http://codespeak.net/pipermail/cython-dev/2009-April/005229.html * dims = [Y.shape[i] for i in range(Y.ndim)] # <<<<<<<<<<<<<< * dims[axis] = p * B = np.zeros(dims, dtype=np.double) */ __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 184; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __pyx_v_Y->nd; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; __pyx_t_5 = __Pyx_PyInt_to_py_Py_intptr_t((__pyx_v_Y->dimensions[__pyx_v_i])); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 184; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); if (unlikely(__Pyx_PyList_Append(__pyx_t_2, (PyObject*)__pyx_t_5))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 184; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } __pyx_t_5 = ((PyObject *)__pyx_t_2); __Pyx_INCREF(__pyx_t_5); __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; __pyx_v_dims = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; /* "nipy/labs/glm/kalman.pyx":185 * # http://codespeak.net/pipermail/cython-dev/2009-April/005229.html * dims = [Y.shape[i] for i in range(Y.ndim)] * dims[axis] = p # <<<<<<<<<<<<<< * B = np.zeros(dims, dtype=np.double) * dims[axis] = p2 */ __pyx_t_5 = __Pyx_PyInt_FromSize_t(__pyx_v_p); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); if (__Pyx_SetItemInt(((PyObject *)__pyx_v_dims), __pyx_v_axis, __pyx_t_5, sizeof(int), PyInt_FromLong) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; /* "nipy/labs/glm/kalman.pyx":186 * dims = [Y.shape[i] for i in range(Y.ndim)] * dims[axis] = p * B = np.zeros(dims, dtype=np.double) # <<<<<<<<<<<<<< * dims[axis] = p2 * VB = np.zeros(dims, dtype=np.double) */ __pyx_t_5 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 186; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_2 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__zeros); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 186; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 186; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(((PyObject *)__pyx_v_dims)); PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_v_dims)); __Pyx_GIVEREF(((PyObject *)__pyx_v_dims)); __pyx_t_6 = PyDict_New(); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 186; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_6)); __pyx_t_7 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 186; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_7); __pyx_t_8 = PyObject_GetAttr(__pyx_t_7, __pyx_n_s__double); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 186; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; if (PyDict_SetItem(__pyx_t_6, ((PyObject *)__pyx_n_s__dtype), __pyx_t_8) < 0) 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 186; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_8 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_5), ((PyObject *)__pyx_t_6)); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 186; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_6)); __pyx_t_6 = 0; __pyx_v_B = __pyx_t_8; __pyx_t_8 = 0; /* "nipy/labs/glm/kalman.pyx":187 * dims[axis] = p * B = np.zeros(dims, dtype=np.double) * dims[axis] = p2 # <<<<<<<<<<<<<< * VB = np.zeros(dims, dtype=np.double) * dims[axis] = 1 */ __pyx_t_8 = __Pyx_PyInt_FromSize_t(__pyx_v_p2); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 187; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); if (__Pyx_SetItemInt(((PyObject *)__pyx_v_dims), __pyx_v_axis, __pyx_t_8, sizeof(int), PyInt_FromLong) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 187; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; /* "nipy/labs/glm/kalman.pyx":188 * B = np.zeros(dims, dtype=np.double) * dims[axis] = p2 * VB = np.zeros(dims, dtype=np.double) # <<<<<<<<<<<<<< * dims[axis] = 1 * S2 = np.zeros(dims, dtype=np.double) */ __pyx_t_8 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 188; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __pyx_t_6 = PyObject_GetAttr(__pyx_t_8, __pyx_n_s__zeros); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 188; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_8 = PyTuple_New(1); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 188; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __Pyx_INCREF(((PyObject *)__pyx_v_dims)); PyTuple_SET_ITEM(__pyx_t_8, 0, ((PyObject *)__pyx_v_dims)); __Pyx_GIVEREF(((PyObject *)__pyx_v_dims)); __pyx_t_5 = PyDict_New(); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 188; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_5)); __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 188; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_7 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__double); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 188; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (PyDict_SetItem(__pyx_t_5, ((PyObject *)__pyx_n_s__dtype), __pyx_t_7) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 188; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = PyObject_Call(__pyx_t_6, ((PyObject *)__pyx_t_8), ((PyObject *)__pyx_t_5)); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 188; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_8)); __pyx_t_8 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; __pyx_v_VB = __pyx_t_7; __pyx_t_7 = 0; /* "nipy/labs/glm/kalman.pyx":189 * dims[axis] = p2 * VB = np.zeros(dims, dtype=np.double) * dims[axis] = 1 # 
<<<<<<<<<<<<<< * S2 = np.zeros(dims, dtype=np.double) * A = np.zeros(dims, dtype=np.double) */ if (__Pyx_SetItemInt(((PyObject *)__pyx_v_dims), __pyx_v_axis, __pyx_int_1, sizeof(int), PyInt_FromLong) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 189; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/glm/kalman.pyx":190 * VB = np.zeros(dims, dtype=np.double) * dims[axis] = 1 * S2 = np.zeros(dims, dtype=np.double) # <<<<<<<<<<<<<< * A = np.zeros(dims, dtype=np.double) * */ __pyx_t_7 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_7); __pyx_t_5 = PyObject_GetAttr(__pyx_t_7, __pyx_n_s__zeros); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = PyTuple_New(1); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_7); __Pyx_INCREF(((PyObject *)__pyx_v_dims)); PyTuple_SET_ITEM(__pyx_t_7, 0, ((PyObject *)__pyx_v_dims)); __Pyx_GIVEREF(((PyObject *)__pyx_v_dims)); __pyx_t_8 = PyDict_New(); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_8)); __pyx_t_6 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __pyx_t_2 = PyObject_GetAttr(__pyx_t_6, __pyx_n_s__double); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (PyDict_SetItem(__pyx_t_8, ((PyObject *)__pyx_n_s__dtype), __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_7), ((PyObject *)__pyx_t_8)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_7)); __pyx_t_7 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_8)); __pyx_t_8 = 0; __pyx_v_S2 = __pyx_t_2; __pyx_t_2 = 0; /* "nipy/labs/glm/kalman.pyx":191 * dims[axis] = 1 * S2 = np.zeros(dims, dtype=np.double) * A = np.zeros(dims, dtype=np.double) # <<<<<<<<<<<<<< * * # Allocate local structure */ __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 191; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_8 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__zeros); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 191; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 191; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(((PyObject *)__pyx_v_dims)); PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_v_dims)); __Pyx_GIVEREF(((PyObject *)__pyx_v_dims)); __pyx_t_7 = PyDict_New(); if (unlikely(!__pyx_t_7)) 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 191; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_7)); __pyx_t_5 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 191; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__double); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 191; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (PyDict_SetItem(__pyx_t_7, ((PyObject *)__pyx_n_s__dtype), __pyx_t_6) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 191; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyObject_Call(__pyx_t_8, ((PyObject *)__pyx_t_2), ((PyObject *)__pyx_t_7)); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 191; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_7)); __pyx_t_7 = 0; __pyx_v_A = __pyx_t_6; __pyx_t_6 = 0; /* "nipy/labs/glm/kalman.pyx":194 * * # Allocate local structure * rkfilt = fff_glm_RKF_new(p) # <<<<<<<<<<<<<< * * # Create a new array iterator */ __pyx_v_rkfilt = fff_glm_RKF_new(__pyx_v_p); /* "nipy/labs/glm/kalman.pyx":197 * * # Create a new array iterator * multi = fffpy_multi_iterator_new(5, axis, Y, B, VB, S2, A) # <<<<<<<<<<<<<< * * # Create views */ __pyx_v_multi = fffpy_multi_iterator_new(5, __pyx_v_axis, ((void *)__pyx_v_Y), ((void *)__pyx_v_B), ((void *)__pyx_v_VB), ((void *)__pyx_v_S2), ((void *)__pyx_v_A)); /* "nipy/labs/glm/kalman.pyx":200 * * # Create views * y = multi.vector[0] # <<<<<<<<<<<<<< * b = multi.vector[1] * vb = multi.vector[2] */ __pyx_v_y = (__pyx_v_multi->vector[0]); /* "nipy/labs/glm/kalman.pyx":201 * # Create views * y = multi.vector[0] * b = multi.vector[1] # <<<<<<<<<<<<<< * vb = multi.vector[2] * s2 = multi.vector[3] */ __pyx_v_b = (__pyx_v_multi->vector[1]); /* "nipy/labs/glm/kalman.pyx":202 * y = multi.vector[0] * b = multi.vector[1] * vb = multi.vector[2] # <<<<<<<<<<<<<< * s2 = multi.vector[3] * a = multi.vector[4] */ __pyx_v_vb = (__pyx_v_multi->vector[2]); /* "nipy/labs/glm/kalman.pyx":203 * b = multi.vector[1] * vb = multi.vector[2] * s2 = multi.vector[3] # <<<<<<<<<<<<<< * a = multi.vector[4] * */ __pyx_v_s2 = (__pyx_v_multi->vector[3]); /* "nipy/labs/glm/kalman.pyx":204 * vb = multi.vector[2] * s2 = multi.vector[3] * a = multi.vector[4] # <<<<<<<<<<<<<< * * # Loop */ __pyx_v_a = (__pyx_v_multi->vector[4]); /* "nipy/labs/glm/kalman.pyx":207 * * # Loop * while(multi.index < multi.size): # <<<<<<<<<<<<<< * fff_glm_RKF_fit(rkfilt, niter, y, x) * fff_vector_memcpy(b, rkfilt.b) */ while (1) { __pyx_t_9 = (__pyx_v_multi->index < __pyx_v_multi->size); if (!__pyx_t_9) break; /* "nipy/labs/glm/kalman.pyx":208 * # Loop * while(multi.index < multi.size): * fff_glm_RKF_fit(rkfilt, niter, y, x) # <<<<<<<<<<<<<< * fff_vector_memcpy(b, rkfilt.b) * Vb_flat = fff_vector_view(rkfilt.Vb.data, p2, 1) # rkfilt.Vb contiguous by construction */ fff_glm_RKF_fit(__pyx_v_rkfilt, __pyx_v_niter, __pyx_v_y, __pyx_v_x); /* "nipy/labs/glm/kalman.pyx":209 * while(multi.index < multi.size): * fff_glm_RKF_fit(rkfilt, niter, y, x) * fff_vector_memcpy(b, rkfilt.b) # <<<<<<<<<<<<<< * Vb_flat = fff_vector_view(rkfilt.Vb.data, p2, 1) # rkfilt.Vb contiguous by construction * 
fff_vector_memcpy(vb, &Vb_flat) */ fff_vector_memcpy(__pyx_v_b, __pyx_v_rkfilt->b); /* "nipy/labs/glm/kalman.pyx":210 * fff_glm_RKF_fit(rkfilt, niter, y, x) * fff_vector_memcpy(b, rkfilt.b) * Vb_flat = fff_vector_view(rkfilt.Vb.data, p2, 1) # rkfilt.Vb contiguous by construction # <<<<<<<<<<<<<< * fff_vector_memcpy(vb, &Vb_flat) * s2.data[0] = rkfilt.s2 */ __pyx_v_Vb_flat = fff_vector_view(__pyx_v_rkfilt->Vb->data, __pyx_v_p2, 1); /* "nipy/labs/glm/kalman.pyx":211 * fff_vector_memcpy(b, rkfilt.b) * Vb_flat = fff_vector_view(rkfilt.Vb.data, p2, 1) # rkfilt.Vb contiguous by construction * fff_vector_memcpy(vb, &Vb_flat) # <<<<<<<<<<<<<< * s2.data[0] = rkfilt.s2 * a.data[0] = rkfilt.a */ fff_vector_memcpy(__pyx_v_vb, (&__pyx_v_Vb_flat)); /* "nipy/labs/glm/kalman.pyx":212 * Vb_flat = fff_vector_view(rkfilt.Vb.data, p2, 1) # rkfilt.Vb contiguous by construction * fff_vector_memcpy(vb, &Vb_flat) * s2.data[0] = rkfilt.s2 # <<<<<<<<<<<<<< * a.data[0] = rkfilt.a * fffpy_multi_iterator_update(multi) */ __pyx_t_10 = __pyx_v_rkfilt->s2; (__pyx_v_s2->data[0]) = __pyx_t_10; /* "nipy/labs/glm/kalman.pyx":213 * fff_vector_memcpy(vb, &Vb_flat) * s2.data[0] = rkfilt.s2 * a.data[0] = rkfilt.a # <<<<<<<<<<<<<< * fffpy_multi_iterator_update(multi) * */ __pyx_t_10 = __pyx_v_rkfilt->a; (__pyx_v_a->data[0]) = __pyx_t_10; /* "nipy/labs/glm/kalman.pyx":214 * s2.data[0] = rkfilt.s2 * a.data[0] = rkfilt.a * fffpy_multi_iterator_update(multi) # <<<<<<<<<<<<<< * * # Dof */ fffpy_multi_iterator_update(__pyx_v_multi); } /* "nipy/labs/glm/kalman.pyx":217 * * # Dof * dof = rkfilt.dof # <<<<<<<<<<<<<< * * # Free memory */ __pyx_t_10 = __pyx_v_rkfilt->dof; __pyx_v_dof = __pyx_t_10; /* "nipy/labs/glm/kalman.pyx":220 * * # Free memory * fff_matrix_delete(x) # <<<<<<<<<<<<<< * fff_glm_RKF_delete(rkfilt) * fffpy_multi_iterator_delete(multi) */ fff_matrix_delete(__pyx_v_x); /* "nipy/labs/glm/kalman.pyx":221 * # Free memory * fff_matrix_delete(x) * fff_glm_RKF_delete(rkfilt) # <<<<<<<<<<<<<< * fffpy_multi_iterator_delete(multi) * */ fff_glm_RKF_delete(__pyx_v_rkfilt); /* "nipy/labs/glm/kalman.pyx":222 * fff_matrix_delete(x) * fff_glm_RKF_delete(rkfilt) * fffpy_multi_iterator_delete(multi) # <<<<<<<<<<<<<< * * # Reshape variance array */ fffpy_multi_iterator_delete(__pyx_v_multi); /* "nipy/labs/glm/kalman.pyx":225 * * # Reshape variance array * dims[axis] = p # <<<<<<<<<<<<<< * dims.insert(axis+1, p) * VB = VB.reshape(dims) */ __pyx_t_6 = __Pyx_PyInt_FromSize_t(__pyx_v_p); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 225; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); if (__Pyx_SetItemInt(((PyObject *)__pyx_v_dims), __pyx_v_axis, __pyx_t_6, sizeof(int), PyInt_FromLong) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 225; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /* "nipy/labs/glm/kalman.pyx":226 * # Reshape variance array * dims[axis] = p * dims.insert(axis+1, p) # <<<<<<<<<<<<<< * VB = VB.reshape(dims) * */ __pyx_t_6 = __Pyx_PyInt_FromSize_t(__pyx_v_p); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 226; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __pyx_t_11 = PyList_Insert(__pyx_v_dims, (__pyx_v_axis + 1), __pyx_t_6); if (unlikely(__pyx_t_11 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 226; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /* "nipy/labs/glm/kalman.pyx":227 * dims[axis] = p * dims.insert(axis+1, p) * VB = 
VB.reshape(dims) # <<<<<<<<<<<<<< * * # Return */ __pyx_t_6 = PyObject_GetAttr(__pyx_v_VB, __pyx_n_s__reshape); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 227; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = PyTuple_New(1); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 227; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_7); __Pyx_INCREF(((PyObject *)__pyx_v_dims)); PyTuple_SET_ITEM(__pyx_t_7, 0, ((PyObject *)__pyx_v_dims)); __Pyx_GIVEREF(((PyObject *)__pyx_v_dims)); __pyx_t_2 = PyObject_Call(__pyx_t_6, ((PyObject *)__pyx_t_7), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 227; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_7)); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_v_VB); __pyx_v_VB = __pyx_t_2; __pyx_t_2 = 0; /* "nipy/labs/glm/kalman.pyx":230 * * # Return * return B, VB, S2, dof, A # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = PyFloat_FromDouble(__pyx_v_dof); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 230; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_7 = PyTuple_New(5); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 230; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_7); __Pyx_INCREF(__pyx_v_B); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_v_B); __Pyx_GIVEREF(__pyx_v_B); __Pyx_INCREF(__pyx_v_VB); PyTuple_SET_ITEM(__pyx_t_7, 1, __pyx_v_VB); __Pyx_GIVEREF(__pyx_v_VB); __Pyx_INCREF(__pyx_v_S2); PyTuple_SET_ITEM(__pyx_t_7, 2, __pyx_v_S2); __Pyx_GIVEREF(__pyx_v_S2); PyTuple_SET_ITEM(__pyx_t_7, 3, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __Pyx_INCREF(__pyx_v_A); PyTuple_SET_ITEM(__pyx_t_7, 4, __pyx_v_A); __Pyx_GIVEREF(__pyx_v_A); __pyx_t_2 = 0; __pyx_r = ((PyObject *)__pyx_t_7); __pyx_t_7 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("nipy.labs.glm.kalman.ar1", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_dims); __Pyx_XDECREF(__pyx_v_B); __Pyx_XDECREF(__pyx_v_VB); __Pyx_XDECREF(__pyx_v_S2); __Pyx_XDECREF(__pyx_v_A); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_pf_5numpy_7ndarray___getbuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":194 * # experimental exception made for __getbuffer__ and __releasebuffer__ * # -- the details of this may change. * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< * # This implementation of getbuffer is geared towards Cython * # requirements, and does not yet fullfill the PEP. 
*/ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_v_copy_shape; int __pyx_v_i; int __pyx_v_ndim; int __pyx_v_endian_detector; int __pyx_v_little_endian; int __pyx_v_t; char *__pyx_v_f; PyArray_Descr *__pyx_v_descr = 0; int __pyx_v_offset; int __pyx_v_hasfields; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; char *__pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getbuffer__", 0); if (__pyx_v_info != NULL) { __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); } /* "numpy.pxd":200 * # of flags * * if info == NULL: return # <<<<<<<<<<<<<< * * cdef int copy_shape, i, ndim */ __pyx_t_1 = (__pyx_v_info == NULL); if (__pyx_t_1) { __pyx_r = 0; goto __pyx_L0; goto __pyx_L3; } __pyx_L3:; /* "numpy.pxd":203 * * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((&endian_detector)[0] != 0) * */ __pyx_v_endian_detector = 1; /* "numpy.pxd":204 * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * * ndim = PyArray_NDIM(self) */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "numpy.pxd":206 * cdef bint little_endian = ((&endian_detector)[0] != 0) * * ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<< * * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_v_ndim = PyArray_NDIM(__pyx_v_self); /* "numpy.pxd":208 * ndim = PyArray_NDIM(self) * * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * copy_shape = 1 * else: */ __pyx_t_1 = ((sizeof(npy_intp)) != (sizeof(Py_ssize_t))); if (__pyx_t_1) { /* "numpy.pxd":209 * * if sizeof(npy_intp) != sizeof(Py_ssize_t): * copy_shape = 1 # <<<<<<<<<<<<<< * else: * copy_shape = 0 */ __pyx_v_copy_shape = 1; goto __pyx_L4; } /*else*/ { /* "numpy.pxd":211 * copy_shape = 1 * else: * copy_shape = 0 # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) */ __pyx_v_copy_shape = 0; } __pyx_L4:; /* "numpy.pxd":213 * copy_shape = 0 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS); if (__pyx_t_1) { /* "numpy.pxd":214 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not C contiguous") * */ __pyx_t_2 = (!PyArray_CHKFLAGS(__pyx_v_self, NPY_C_CONTIGUOUS)); __pyx_t_3 = __pyx_t_2; } else { __pyx_t_3 = __pyx_t_1; } if (__pyx_t_3) { /* "numpy.pxd":215 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_2), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; 
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L5; } __pyx_L5:; /* "numpy.pxd":217 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ __pyx_t_3 = ((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS); if (__pyx_t_3) { /* "numpy.pxd":218 * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not Fortran contiguous") * */ __pyx_t_1 = (!PyArray_CHKFLAGS(__pyx_v_self, NPY_F_CONTIGUOUS)); __pyx_t_2 = __pyx_t_1; } else { __pyx_t_2 = __pyx_t_3; } if (__pyx_t_2) { /* "numpy.pxd":219 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_4), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L6; } __pyx_L6:; /* "numpy.pxd":221 * raise ValueError(u"ndarray is not Fortran contiguous") * * info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<< * info.ndim = ndim * if copy_shape: */ __pyx_v_info->buf = PyArray_DATA(__pyx_v_self); /* "numpy.pxd":222 * * info.buf = PyArray_DATA(self) * info.ndim = ndim # <<<<<<<<<<<<<< * if copy_shape: * # Allocate new buffer for strides and shape info. */ __pyx_v_info->ndim = __pyx_v_ndim; /* "numpy.pxd":223 * info.buf = PyArray_DATA(self) * info.ndim = ndim * if copy_shape: # <<<<<<<<<<<<<< * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. */ if (__pyx_v_copy_shape) { /* "numpy.pxd":226 * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. * info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) # <<<<<<<<<<<<<< * info.shape = info.strides + ndim * for i in range(ndim): */ __pyx_v_info->strides = ((Py_ssize_t *)malloc((((sizeof(Py_ssize_t)) * ((size_t)__pyx_v_ndim)) * 2))); /* "numpy.pxd":227 * # This is allocated as one block, strides first. 
* info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) * info.shape = info.strides + ndim # <<<<<<<<<<<<<< * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] */ __pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim); /* "numpy.pxd":228 * info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) * info.shape = info.strides + ndim * for i in range(ndim): # <<<<<<<<<<<<<< * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] */ __pyx_t_5 = __pyx_v_ndim; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "numpy.pxd":229 * info.shape = info.strides + ndim * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<< * info.shape[i] = PyArray_DIMS(self)[i] * else: */ (__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]); /* "numpy.pxd":230 * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<< * else: * info.strides = PyArray_STRIDES(self) */ (__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]); } goto __pyx_L7; } /*else*/ { /* "numpy.pxd":232 * info.shape[i] = PyArray_DIMS(self)[i] * else: * info.strides = PyArray_STRIDES(self) # <<<<<<<<<<<<<< * info.shape = PyArray_DIMS(self) * info.suboffsets = NULL */ __pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self)); /* "numpy.pxd":233 * else: * info.strides = PyArray_STRIDES(self) * info.shape = PyArray_DIMS(self) # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) */ __pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(__pyx_v_self)); } __pyx_L7:; /* "numpy.pxd":234 * info.strides = PyArray_STRIDES(self) * info.shape = PyArray_DIMS(self) * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) */ __pyx_v_info->suboffsets = NULL; /* "numpy.pxd":235 * info.shape = PyArray_DIMS(self) * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<< * info.readonly = not PyArray_ISWRITEABLE(self) * */ __pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self); /* "numpy.pxd":236 * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<< * * cdef int t */ __pyx_v_info->readonly = (!PyArray_ISWRITEABLE(__pyx_v_self)); /* "numpy.pxd":239 * * cdef int t * cdef char* f = NULL # <<<<<<<<<<<<<< * cdef dtype descr = self.descr * cdef list stack */ __pyx_v_f = NULL; /* "numpy.pxd":240 * cdef int t * cdef char* f = NULL * cdef dtype descr = self.descr # <<<<<<<<<<<<<< * cdef list stack * cdef int offset */ __pyx_t_4 = ((PyObject *)__pyx_v_self->descr); __Pyx_INCREF(__pyx_t_4); __pyx_v_descr = ((PyArray_Descr *)__pyx_t_4); __pyx_t_4 = 0; /* "numpy.pxd":244 * cdef int offset * * cdef bint hasfields = PyDataType_HASFIELDS(descr) # <<<<<<<<<<<<<< * * if not hasfields and not copy_shape: */ __pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr); /* "numpy.pxd":246 * cdef bint hasfields = PyDataType_HASFIELDS(descr) * * if not hasfields and not copy_shape: # <<<<<<<<<<<<<< * # do not call releasebuffer * info.obj = None */ __pyx_t_2 = (!__pyx_v_hasfields); if (__pyx_t_2) { __pyx_t_3 = (!__pyx_v_copy_shape); __pyx_t_1 = __pyx_t_3; } else { __pyx_t_1 = __pyx_t_2; } if (__pyx_t_1) { /* "numpy.pxd":248 * if not hasfields and not copy_shape: * # do not call releasebuffer * info.obj = None # <<<<<<<<<<<<<< * else: * # need to call 
releasebuffer */ __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = Py_None; goto __pyx_L10; } /*else*/ { /* "numpy.pxd":251 * else: * # need to call releasebuffer * info.obj = self # <<<<<<<<<<<<<< * * if not hasfields: */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); } __pyx_L10:; /* "numpy.pxd":253 * info.obj = self * * if not hasfields: # <<<<<<<<<<<<<< * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or */ __pyx_t_1 = (!__pyx_v_hasfields); if (__pyx_t_1) { /* "numpy.pxd":254 * * if not hasfields: * t = descr.type_num # <<<<<<<<<<<<<< * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): */ __pyx_t_5 = __pyx_v_descr->type_num; __pyx_v_t = __pyx_t_5; /* "numpy.pxd":255 * if not hasfields: * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_1 = (__pyx_v_descr->byteorder == '>'); if (__pyx_t_1) { __pyx_t_2 = __pyx_v_little_endian; } else { __pyx_t_2 = __pyx_t_1; } if (!__pyx_t_2) { /* "numpy.pxd":256 * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" */ __pyx_t_1 = (__pyx_v_descr->byteorder == '<'); if (__pyx_t_1) { __pyx_t_3 = (!__pyx_v_little_endian); __pyx_t_7 = __pyx_t_3; } else { __pyx_t_7 = __pyx_t_1; } __pyx_t_1 = __pyx_t_7; } else { __pyx_t_1 = __pyx_t_2; } if (__pyx_t_1) { /* "numpy.pxd":257 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_6), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L12; } __pyx_L12:; /* "numpy.pxd":258 * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" */ __pyx_t_1 = (__pyx_v_t == NPY_BYTE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__b; goto __pyx_L13; } /* "numpy.pxd":259 * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" */ __pyx_t_1 = (__pyx_v_t == NPY_UBYTE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__B; goto __pyx_L13; } /* "numpy.pxd":260 * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" */ __pyx_t_1 = (__pyx_v_t == NPY_SHORT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__h; goto __pyx_L13; } /* "numpy.pxd":261 * elif t == NPY_UBYTE: f = "B" * elif t == 
NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" */ __pyx_t_1 = (__pyx_v_t == NPY_USHORT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__H; goto __pyx_L13; } /* "numpy.pxd":262 * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" */ __pyx_t_1 = (__pyx_v_t == NPY_INT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__i; goto __pyx_L13; } /* "numpy.pxd":263 * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" */ __pyx_t_1 = (__pyx_v_t == NPY_UINT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__I; goto __pyx_L13; } /* "numpy.pxd":264 * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" */ __pyx_t_1 = (__pyx_v_t == NPY_LONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__l; goto __pyx_L13; } /* "numpy.pxd":265 * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" */ __pyx_t_1 = (__pyx_v_t == NPY_ULONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__L; goto __pyx_L13; } /* "numpy.pxd":266 * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" */ __pyx_t_1 = (__pyx_v_t == NPY_LONGLONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__q; goto __pyx_L13; } /* "numpy.pxd":267 * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" */ __pyx_t_1 = (__pyx_v_t == NPY_ULONGLONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Q; goto __pyx_L13; } /* "numpy.pxd":268 * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" */ __pyx_t_1 = (__pyx_v_t == NPY_FLOAT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__f; goto __pyx_L13; } /* "numpy.pxd":269 * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" */ __pyx_t_1 = (__pyx_v_t == NPY_DOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__d; goto __pyx_L13; } /* "numpy.pxd":270 * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" */ __pyx_t_1 = (__pyx_v_t == NPY_LONGDOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__g; goto __pyx_L13; } /* "numpy.pxd":271 * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" */ __pyx_t_1 = (__pyx_v_t == NPY_CFLOAT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Zf; goto __pyx_L13; } /* "numpy.pxd":272 * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" */ __pyx_t_1 = (__pyx_v_t == NPY_CDOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Zd; goto __pyx_L13; } /* "numpy.pxd":273 * elif t 
== NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f = "O" * else: */ __pyx_t_1 = (__pyx_v_t == NPY_CLONGDOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Zg; goto __pyx_L13; } /* "numpy.pxd":274 * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ __pyx_t_1 = (__pyx_v_t == NPY_OBJECT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__O; goto __pyx_L13; } /*else*/ { /* "numpy.pxd":276 * elif t == NPY_OBJECT: f = "O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * info.format = f * return */ __pyx_t_4 = PyInt_FromLong(__pyx_v_t); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_8 = PyNumber_Remainder(((PyObject *)__pyx_kp_u_7), __pyx_t_4); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_8)); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_t_8)); __Pyx_GIVEREF(((PyObject *)__pyx_t_8)); __pyx_t_8 = 0; __pyx_t_8 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_8, 0, 0, 0); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L13:; /* "numpy.pxd":277 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f # <<<<<<<<<<<<<< * return * else: */ __pyx_v_info->format = __pyx_v_f; /* "numpy.pxd":278 * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f * return # <<<<<<<<<<<<<< * else: * info.format = stdlib.malloc(_buffer_format_string_len) */ __pyx_r = 0; goto __pyx_L0; goto __pyx_L11; } /*else*/ { /* "numpy.pxd":280 * return * else: * info.format = stdlib.malloc(_buffer_format_string_len) # <<<<<<<<<<<<<< * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 */ __pyx_v_info->format = ((char *)malloc(255)); /* "numpy.pxd":281 * else: * info.format = stdlib.malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment # <<<<<<<<<<<<<< * offset = 0 * f = _util_dtypestring(descr, info.format + 1, */ (__pyx_v_info->format[0]) = '^'; /* "numpy.pxd":282 * info.format = stdlib.malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 # <<<<<<<<<<<<<< * f = _util_dtypestring(descr, info.format + 1, * info.format + _buffer_format_string_len, */ __pyx_v_offset = 0; /* "numpy.pxd":285 * f = _util_dtypestring(descr, info.format + 1, * info.format + _buffer_format_string_len, * &offset) # <<<<<<<<<<<<<< * f[0] = c'\0' # Terminate format string * */ __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 255), (&__pyx_v_offset)); if (unlikely(__pyx_t_9 == NULL)) 
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 283; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_f = __pyx_t_9; /* "numpy.pxd":286 * info.format + _buffer_format_string_len, * &offset) * f[0] = c'\0' # Terminate format string # <<<<<<<<<<<<<< * * def __releasebuffer__(ndarray self, Py_buffer* info): */ (__pyx_v_f[0]) = '\x00'; } __pyx_L11:; __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { __Pyx_GOTREF(Py_None); __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; } __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_descr); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__releasebuffer__ (wrapper)", 0); __pyx_pf_5numpy_7ndarray_2__releasebuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info)); __Pyx_RefNannyFinishContext(); } /* "numpy.pxd":288 * f[0] = c'\0' # Terminate format string * * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< * if PyArray_HASFIELDS(self): * stdlib.free(info.format) */ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__releasebuffer__", 0); /* "numpy.pxd":289 * * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_t_1 = PyArray_HASFIELDS(__pyx_v_self); if (__pyx_t_1) { /* "numpy.pxd":290 * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): * stdlib.free(info.format) # <<<<<<<<<<<<<< * if sizeof(npy_intp) != sizeof(Py_ssize_t): * stdlib.free(info.strides) */ free(__pyx_v_info->format); goto __pyx_L3; } __pyx_L3:; /* "numpy.pxd":291 * if PyArray_HASFIELDS(self): * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * stdlib.free(info.strides) * # info.shape was stored after info.strides in the same block */ __pyx_t_1 = ((sizeof(npy_intp)) != (sizeof(Py_ssize_t))); if (__pyx_t_1) { /* "numpy.pxd":292 * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): * stdlib.free(info.strides) # <<<<<<<<<<<<<< * # info.shape was stored after info.strides in the same block * */ free(__pyx_v_info->strides); goto __pyx_L4; } __pyx_L4:; __Pyx_RefNannyFinishContext(); } /* "numpy.pxd":768 * ctypedef npy_cdouble complex_t * * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(1, a) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0); /* "numpy.pxd":769 * * cdef inline object PyArray_MultiIterNew1(a): * 
return PyArray_MultiIterNew(1, a) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew2(a, b): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 769; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":771 * return PyArray_MultiIterNew(1, a) * * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(2, a, b) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0); /* "numpy.pxd":772 * * cdef inline object PyArray_MultiIterNew2(a, b): * return PyArray_MultiIterNew(2, a, b) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew3(a, b, c): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 772; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":774 * return PyArray_MultiIterNew(2, a, b) * * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(3, a, b, c) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0); /* "numpy.pxd":775 * * cdef inline object PyArray_MultiIterNew3(a, b, c): * return PyArray_MultiIterNew(3, a, b, c) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 775; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":777 * return PyArray_MultiIterNew(3, a, b, c) * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(4, a, b, c, d) * */ static CYTHON_INLINE PyObject 
*__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0); /* "numpy.pxd":778 * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): * return PyArray_MultiIterNew(4, a, b, c, d) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 778; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":780 * return PyArray_MultiIterNew(4, a, b, c, d) * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(5, a, b, c, d, e) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0); /* "numpy.pxd":781 * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): * return PyArray_MultiIterNew(5, a, b, c, d, e) # <<<<<<<<<<<<<< * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 781; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":783 * return PyArray_MultiIterNew(5, a, b, c, d, e) * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< * # Recursive utility function used in __getbuffer__ to get format * # string. The new location in the format string is returned. 
*/ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) { PyArray_Descr *__pyx_v_child = 0; int __pyx_v_endian_detector; int __pyx_v_little_endian; PyObject *__pyx_v_fields = 0; PyObject *__pyx_v_childname = NULL; PyObject *__pyx_v_new_offset = NULL; PyObject *__pyx_v_t = NULL; char *__pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *(*__pyx_t_6)(PyObject *); int __pyx_t_7; int __pyx_t_8; int __pyx_t_9; int __pyx_t_10; long __pyx_t_11; char *__pyx_t_12; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_util_dtypestring", 0); /* "numpy.pxd":790 * cdef int delta_offset * cdef tuple i * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((&endian_detector)[0] != 0) * cdef tuple fields */ __pyx_v_endian_detector = 1; /* "numpy.pxd":791 * cdef tuple i * cdef int endian_detector = 1 * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * cdef tuple fields * */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "numpy.pxd":794 * cdef tuple fields * * for childname in descr.names: # <<<<<<<<<<<<<< * fields = descr.fields[childname] * child, new_offset = fields */ if (unlikely(((PyObject *)__pyx_v_descr->names) == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_1 = ((PyObject *)__pyx_v_descr->names); __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; for (;;) { if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif __Pyx_XDECREF(__pyx_v_childname); __pyx_v_childname = __pyx_t_3; __pyx_t_3 = 0; /* "numpy.pxd":795 * * for childname in descr.names: * fields = descr.fields[childname] # <<<<<<<<<<<<<< * child, new_offset = fields * */ __pyx_t_3 = PyObject_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (!__pyx_t_3) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected tuple, got %.200s", Py_TYPE(__pyx_t_3)->tp_name), 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_XDECREF(((PyObject *)__pyx_v_fields)); __pyx_v_fields = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "numpy.pxd":796 * for childname in descr.names: * fields = descr.fields[childname] * child, new_offset = fields # <<<<<<<<<<<<<< * * if (end - f) - (new_offset - offset[0]) < 15: */ if (likely(PyTuple_CheckExact(((PyObject *)__pyx_v_fields)))) { PyObject* sequence = ((PyObject *)__pyx_v_fields); #if CYTHON_COMPILING_IN_CPYTHON Py_ssize_t size = Py_SIZE(sequence); #else Py_ssize_t size = PySequence_Size(sequence); #endif if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else 
if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); #else __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else if (1) { __Pyx_RaiseNoneNotIterableError(); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } else { Py_ssize_t index = -1; __pyx_t_5 = PyObject_GetIter(((PyObject *)__pyx_v_fields)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = Py_TYPE(__pyx_t_5)->tp_iternext; index = 0; __pyx_t_3 = __pyx_t_6(__pyx_t_5); if (unlikely(!__pyx_t_3)) goto __pyx_L5_unpacking_failed; __Pyx_GOTREF(__pyx_t_3); index = 1; __pyx_t_4 = __pyx_t_6(__pyx_t_5); if (unlikely(!__pyx_t_4)) goto __pyx_L5_unpacking_failed; __Pyx_GOTREF(__pyx_t_4); if (__Pyx_IternextUnpackEndCheck(__pyx_t_6(__pyx_t_5), 2) < 0) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_6 = NULL; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L6_unpacking_done; __pyx_L5_unpacking_failed:; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_6 = NULL; if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_L6_unpacking_done:; } if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_XDECREF(((PyObject *)__pyx_v_child)); __pyx_v_child = ((PyArray_Descr *)__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_v_new_offset); __pyx_v_new_offset = __pyx_t_4; __pyx_t_4 = 0; /* "numpy.pxd":798 * child, new_offset = fields * * if (end - f) - (new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * */ __pyx_t_4 = PyInt_FromLong((__pyx_v_end - __pyx_v_f)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyInt_FromLong((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_3); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyNumber_Subtract(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyObject_RichCompare(__pyx_t_3, __pyx_int_15, Py_LT); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = 
__pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { /* "numpy.pxd":799 * * if (end - f) - (new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_t_5 = PyObject_Call(__pyx_builtin_RuntimeError, ((PyObject *)__pyx_k_tuple_9), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L7; } __pyx_L7:; /* "numpy.pxd":801 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_7 = (__pyx_v_child->byteorder == '>'); if (__pyx_t_7) { __pyx_t_8 = __pyx_v_little_endian; } else { __pyx_t_8 = __pyx_t_7; } if (!__pyx_t_8) { /* "numpy.pxd":802 * * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * # One could encode it in the format string and have Cython */ __pyx_t_7 = (__pyx_v_child->byteorder == '<'); if (__pyx_t_7) { __pyx_t_9 = (!__pyx_v_little_endian); __pyx_t_10 = __pyx_t_9; } else { __pyx_t_10 = __pyx_t_7; } __pyx_t_7 = __pyx_t_10; } else { __pyx_t_7 = __pyx_t_8; } if (__pyx_t_7) { /* "numpy.pxd":803 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_10), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L8; } __pyx_L8:; /* "numpy.pxd":813 * * # Output padding bytes * while offset[0] < new_offset: # <<<<<<<<<<<<<< * f[0] = 120 # "x"; pad byte * f += 1 */ while (1) { __pyx_t_5 = PyInt_FromLong((__pyx_v_offset[0])); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_t_5, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if 
(!__pyx_t_7) break; /* "numpy.pxd":814 * # Output padding bytes * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<< * f += 1 * offset[0] += 1 */ (__pyx_v_f[0]) = 120; /* "numpy.pxd":815 * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte * f += 1 # <<<<<<<<<<<<<< * offset[0] += 1 * */ __pyx_v_f = (__pyx_v_f + 1); /* "numpy.pxd":816 * f[0] = 120 # "x"; pad byte * f += 1 * offset[0] += 1 # <<<<<<<<<<<<<< * * offset[0] += child.itemsize */ __pyx_t_11 = 0; (__pyx_v_offset[__pyx_t_11]) = ((__pyx_v_offset[__pyx_t_11]) + 1); } /* "numpy.pxd":818 * offset[0] += 1 * * offset[0] += child.itemsize # <<<<<<<<<<<<<< * * if not PyDataType_HASFIELDS(child): */ __pyx_t_11 = 0; (__pyx_v_offset[__pyx_t_11]) = ((__pyx_v_offset[__pyx_t_11]) + __pyx_v_child->elsize); /* "numpy.pxd":820 * offset[0] += child.itemsize * * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< * t = child.type_num * if end - f < 5: */ __pyx_t_7 = (!PyDataType_HASFIELDS(__pyx_v_child)); if (__pyx_t_7) { /* "numpy.pxd":821 * * if not PyDataType_HASFIELDS(child): * t = child.type_num # <<<<<<<<<<<<<< * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") */ __pyx_t_3 = PyInt_FromLong(__pyx_v_child->type_num); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 821; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_XDECREF(__pyx_v_t); __pyx_v_t = __pyx_t_3; __pyx_t_3 = 0; /* "numpy.pxd":822 * if not PyDataType_HASFIELDS(child): * t = child.type_num * if end - f < 5: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short.") * */ __pyx_t_7 = ((__pyx_v_end - __pyx_v_f) < 5); if (__pyx_t_7) { /* "numpy.pxd":823 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_t_3 = PyObject_Call(__pyx_builtin_RuntimeError, ((PyObject *)__pyx_k_tuple_12), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L12; } __pyx_L12:; /* "numpy.pxd":826 * * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" */ __pyx_t_3 = PyInt_FromLong(NPY_BYTE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 98; goto __pyx_L13; } /* "numpy.pxd":827 * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" */ __pyx_t_5 = PyInt_FromLong(NPY_UBYTE); 
if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 66; goto __pyx_L13; } /* "numpy.pxd":828 * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" */ __pyx_t_3 = PyInt_FromLong(NPY_SHORT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 104; goto __pyx_L13; } /* "numpy.pxd":829 * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" */ __pyx_t_5 = PyInt_FromLong(NPY_USHORT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 72; goto __pyx_L13; } /* "numpy.pxd":830 * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" */ __pyx_t_3 = PyInt_FromLong(NPY_INT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 105; goto __pyx_L13; } /* "numpy.pxd":831 * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t 
== NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" */ __pyx_t_5 = PyInt_FromLong(NPY_UINT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 73; goto __pyx_L13; } /* "numpy.pxd":832 * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" */ __pyx_t_3 = PyInt_FromLong(NPY_LONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 108; goto __pyx_L13; } /* "numpy.pxd":833 * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" */ __pyx_t_5 = PyInt_FromLong(NPY_ULONG); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 76; goto __pyx_L13; } /* "numpy.pxd":834 * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" */ __pyx_t_3 = PyInt_FromLong(NPY_LONGLONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; 
__pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 113; goto __pyx_L13; } /* "numpy.pxd":835 * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" */ __pyx_t_5 = PyInt_FromLong(NPY_ULONGLONG); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 81; goto __pyx_L13; } /* "numpy.pxd":836 * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" */ __pyx_t_3 = PyInt_FromLong(NPY_FLOAT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 102; goto __pyx_L13; } /* "numpy.pxd":837 * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf */ __pyx_t_5 = PyInt_FromLong(NPY_DOUBLE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 100; goto __pyx_L13; } /* "numpy.pxd":838 * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd */ __pyx_t_3 = PyInt_FromLong(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = 
PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 103; goto __pyx_L13; } /* "numpy.pxd":839 * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg */ __pyx_t_5 = PyInt_FromLong(NPY_CFLOAT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 102; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L13; } /* "numpy.pxd":840 * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" */ __pyx_t_3 = PyInt_FromLong(NPY_CDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 100; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L13; } /* "numpy.pxd":841 * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: */ __pyx_t_5 = PyInt_FromLong(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 103; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L13; } /* "numpy.pxd":842 * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ __pyx_t_3 = PyInt_FromLong(NPY_OBJECT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 79; goto __pyx_L13; } /*else*/ { /* "numpy.pxd":844 * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * f += 1 * else: */ __pyx_t_5 = PyNumber_Remainder(((PyObject *)__pyx_kp_u_7), __pyx_v_t); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_5)); __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_t_5)); __Pyx_GIVEREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L13:; /* "numpy.pxd":845 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * f += 1 # <<<<<<<<<<<<<< * else: * # Cython ignores struct boundary information ("T{...}"), */ __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L11; } /*else*/ { /* "numpy.pxd":849 * # Cython ignores struct boundary information ("T{...}"), * # so don't output it * f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<< * return f * */ __pyx_t_12 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_12 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 849; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_f = __pyx_t_12; } __pyx_L11:; } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "numpy.pxd":850 * # so don't output it * f = _util_dtypestring(child, f, end, offset) * return f # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_f; goto __pyx_L0; __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_child); __Pyx_XDECREF(__pyx_v_fields); 
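/* Exit path of the generated _util_dtypestring helper: on success it has
 * written one format code per leaf field, recursed via
 * f = _util_dtypestring(child, f, end, offset) for nested record dtypes,
 * and returned the advanced format pointer f; on failure it lands at
 * __pyx_L1_error, records a traceback for "numpy._util_dtypestring" and
 * returns NULL after the reference cleanup around this point. */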
__Pyx_XDECREF(__pyx_v_childname); __Pyx_XDECREF(__pyx_v_new_offset); __Pyx_XDECREF(__pyx_v_t); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":965 * * * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< * cdef PyObject* baseptr * if base is None: */ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) { PyObject *__pyx_v_baseptr; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("set_array_base", 0); /* "numpy.pxd":967 * cdef inline void set_array_base(ndarray arr, object base): * cdef PyObject* baseptr * if base is None: # <<<<<<<<<<<<<< * baseptr = NULL * else: */ __pyx_t_1 = (__pyx_v_base == Py_None); if (__pyx_t_1) { /* "numpy.pxd":968 * cdef PyObject* baseptr * if base is None: * baseptr = NULL # <<<<<<<<<<<<<< * else: * Py_INCREF(base) # important to do this before decref below! */ __pyx_v_baseptr = NULL; goto __pyx_L3; } /*else*/ { /* "numpy.pxd":970 * baseptr = NULL * else: * Py_INCREF(base) # important to do this before decref below! # <<<<<<<<<<<<<< * baseptr = base * Py_XDECREF(arr.base) */ Py_INCREF(__pyx_v_base); /* "numpy.pxd":971 * else: * Py_INCREF(base) # important to do this before decref below! * baseptr = base # <<<<<<<<<<<<<< * Py_XDECREF(arr.base) * arr.base = baseptr */ __pyx_v_baseptr = ((PyObject *)__pyx_v_base); } __pyx_L3:; /* "numpy.pxd":972 * Py_INCREF(base) # important to do this before decref below! * baseptr = base * Py_XDECREF(arr.base) # <<<<<<<<<<<<<< * arr.base = baseptr * */ Py_XDECREF(__pyx_v_arr->base); /* "numpy.pxd":973 * baseptr = base * Py_XDECREF(arr.base) * arr.base = baseptr # <<<<<<<<<<<<<< * * cdef inline object get_array_base(ndarray arr): */ __pyx_v_arr->base = __pyx_v_baseptr; __Pyx_RefNannyFinishContext(); } /* "numpy.pxd":975 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("get_array_base", 0); /* "numpy.pxd":976 * * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: # <<<<<<<<<<<<<< * return None * else: */ __pyx_t_1 = (__pyx_v_arr->base == NULL); if (__pyx_t_1) { /* "numpy.pxd":977 * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: * return None # <<<<<<<<<<<<<< * else: * return arr.base */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_None); __pyx_r = Py_None; goto __pyx_L0; goto __pyx_L3; } /*else*/ { /* "numpy.pxd":979 * return None * else: * return arr.base # <<<<<<<<<<<<<< */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_arr->base)); __pyx_r = ((PyObject *)__pyx_v_arr->base); goto __pyx_L0; } __pyx_L3:; __pyx_r = Py_None; __Pyx_INCREF(Py_None); __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyMethodDef __pyx_methods[] = { {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 static struct PyModuleDef __pyx_moduledef = { PyModuleDef_HEAD_INIT, __Pyx_NAMESTR("kalman"), __Pyx_DOCSTR(__pyx_k_13), /* m_doc */ -1, /* m_size */ __pyx_methods /* m_methods */, NULL, /* m_reload */ NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_kp_u_1, __pyx_k_1, sizeof(__pyx_k_1), 0, 1, 0, 0}, {&__pyx_kp_u_11, __pyx_k_11, sizeof(__pyx_k_11), 0, 1, 0, 0}, {&__pyx_kp_s_14, __pyx_k_14, sizeof(__pyx_k_14), 0, 0, 1, 0}, 
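/* The __pyx_string_tab initializer continuing here interns, when
 * __Pyx_InitStrings is called at module start-up, the Python strings this
 * module needs: the function names 'ols' and 'ar1', their argument and
 * local names (Y, X, axis, niter, dof, ...), the 'numpy'/'np' import names,
 * and the error-message literals pulled in from numpy.pxd. */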
{&__pyx_kp_s_17, __pyx_k_17, sizeof(__pyx_k_17), 0, 0, 1, 0}, {&__pyx_n_s_18, __pyx_k_18, sizeof(__pyx_k_18), 0, 0, 1, 1}, {&__pyx_kp_u_3, __pyx_k_3, sizeof(__pyx_k_3), 0, 1, 0, 0}, {&__pyx_kp_u_5, __pyx_k_5, sizeof(__pyx_k_5), 0, 1, 0, 0}, {&__pyx_kp_u_7, __pyx_k_7, sizeof(__pyx_k_7), 0, 1, 0, 0}, {&__pyx_kp_u_8, __pyx_k_8, sizeof(__pyx_k_8), 0, 1, 0, 0}, {&__pyx_n_s__A, __pyx_k__A, sizeof(__pyx_k__A), 0, 0, 1, 1}, {&__pyx_n_s__B, __pyx_k__B, sizeof(__pyx_k__B), 0, 0, 1, 1}, {&__pyx_n_s__RuntimeError, __pyx_k__RuntimeError, sizeof(__pyx_k__RuntimeError), 0, 0, 1, 1}, {&__pyx_n_s__S2, __pyx_k__S2, sizeof(__pyx_k__S2), 0, 0, 1, 1}, {&__pyx_n_s__VB, __pyx_k__VB, sizeof(__pyx_k__VB), 0, 0, 1, 1}, {&__pyx_n_s__ValueError, __pyx_k__ValueError, sizeof(__pyx_k__ValueError), 0, 0, 1, 1}, {&__pyx_n_s__Vb_flat, __pyx_k__Vb_flat, sizeof(__pyx_k__Vb_flat), 0, 0, 1, 1}, {&__pyx_n_s__X, __pyx_k__X, sizeof(__pyx_k__X), 0, 0, 1, 1}, {&__pyx_n_s__Y, __pyx_k__Y, sizeof(__pyx_k__Y), 0, 0, 1, 1}, {&__pyx_n_s____main__, __pyx_k____main__, sizeof(__pyx_k____main__), 0, 0, 1, 1}, {&__pyx_n_s____test__, __pyx_k____test__, sizeof(__pyx_k____test__), 0, 0, 1, 1}, {&__pyx_n_s____version__, __pyx_k____version__, sizeof(__pyx_k____version__), 0, 0, 1, 1}, {&__pyx_n_s__a, __pyx_k__a, sizeof(__pyx_k__a), 0, 0, 1, 1}, {&__pyx_n_s__ar1, __pyx_k__ar1, sizeof(__pyx_k__ar1), 0, 0, 1, 1}, {&__pyx_n_s__axis, __pyx_k__axis, sizeof(__pyx_k__axis), 0, 0, 1, 1}, {&__pyx_n_s__b, __pyx_k__b, sizeof(__pyx_k__b), 0, 0, 1, 1}, {&__pyx_n_s__dims, __pyx_k__dims, sizeof(__pyx_k__dims), 0, 0, 1, 1}, {&__pyx_n_s__dof, __pyx_k__dof, sizeof(__pyx_k__dof), 0, 0, 1, 1}, {&__pyx_n_s__double, __pyx_k__double, sizeof(__pyx_k__double), 0, 0, 1, 1}, {&__pyx_n_s__dtype, __pyx_k__dtype, sizeof(__pyx_k__dtype), 0, 0, 1, 1}, {&__pyx_n_s__i, __pyx_k__i, sizeof(__pyx_k__i), 0, 0, 1, 1}, {&__pyx_n_s__kfilt, __pyx_k__kfilt, sizeof(__pyx_k__kfilt), 0, 0, 1, 1}, {&__pyx_n_s__multi, __pyx_k__multi, sizeof(__pyx_k__multi), 0, 0, 1, 1}, {&__pyx_n_s__niter, __pyx_k__niter, sizeof(__pyx_k__niter), 0, 0, 1, 1}, {&__pyx_n_s__np, __pyx_k__np, sizeof(__pyx_k__np), 0, 0, 1, 1}, {&__pyx_n_s__numpy, __pyx_k__numpy, sizeof(__pyx_k__numpy), 0, 0, 1, 1}, {&__pyx_n_s__ols, __pyx_k__ols, sizeof(__pyx_k__ols), 0, 0, 1, 1}, {&__pyx_n_s__p, __pyx_k__p, sizeof(__pyx_k__p), 0, 0, 1, 1}, {&__pyx_n_s__p2, __pyx_k__p2, sizeof(__pyx_k__p2), 0, 0, 1, 1}, {&__pyx_n_s__range, __pyx_k__range, sizeof(__pyx_k__range), 0, 0, 1, 1}, {&__pyx_n_s__reshape, __pyx_k__reshape, sizeof(__pyx_k__reshape), 0, 0, 1, 1}, {&__pyx_n_s__rkfilt, __pyx_k__rkfilt, sizeof(__pyx_k__rkfilt), 0, 0, 1, 1}, {&__pyx_n_s__s2, __pyx_k__s2, sizeof(__pyx_k__s2), 0, 0, 1, 1}, {&__pyx_n_s__vb, __pyx_k__vb, sizeof(__pyx_k__vb), 0, 0, 1, 1}, {&__pyx_n_s__x, __pyx_k__x, sizeof(__pyx_k__x), 0, 0, 1, 1}, {&__pyx_n_s__y, __pyx_k__y, sizeof(__pyx_k__y), 0, 0, 1, 1}, {&__pyx_n_s__zeros, __pyx_k__zeros, sizeof(__pyx_k__zeros), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_range = __Pyx_GetName(__pyx_b, __pyx_n_s__range); if (!__pyx_builtin_range) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 105; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_ValueError = __Pyx_GetName(__pyx_b, __pyx_n_s__ValueError); if (!__pyx_builtin_ValueError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_RuntimeError = __Pyx_GetName(__pyx_b, __pyx_n_s__RuntimeError); if (!__pyx_builtin_RuntimeError) {__pyx_filename = __pyx_f[1]; 
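/* __Pyx_InitCachedBuiltins (this function) caches the range, ValueError and
 * RuntimeError builtins once at import time; __Pyx_InitCachedConstants,
 * defined just below, pre-builds the single-element argument tuples for the
 * numpy.pxd error messages and the variable-name tuples and code objects
 * for the two functions this module exports. */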
__pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} return 0; __pyx_L1_error:; return -1; } static int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "numpy.pxd":215 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_k_tuple_2 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_2); __Pyx_INCREF(((PyObject *)__pyx_kp_u_1)); PyTuple_SET_ITEM(__pyx_k_tuple_2, 0, ((PyObject *)__pyx_kp_u_1)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_1)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_2)); /* "numpy.pxd":219 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_k_tuple_4 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_4); __Pyx_INCREF(((PyObject *)__pyx_kp_u_3)); PyTuple_SET_ITEM(__pyx_k_tuple_4, 0, ((PyObject *)__pyx_kp_u_3)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_3)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_4)); /* "numpy.pxd":257 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_k_tuple_6 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_6); __Pyx_INCREF(((PyObject *)__pyx_kp_u_5)); PyTuple_SET_ITEM(__pyx_k_tuple_6, 0, ((PyObject *)__pyx_kp_u_5)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_5)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_6)); /* "numpy.pxd":799 * * if (end - f) - (new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_k_tuple_9 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_9)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_9); __Pyx_INCREF(((PyObject *)__pyx_kp_u_8)); PyTuple_SET_ITEM(__pyx_k_tuple_9, 0, ((PyObject *)__pyx_kp_u_8)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_8)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_9)); /* "numpy.pxd":803 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_k_tuple_10 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_10)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_10); __Pyx_INCREF(((PyObject *)__pyx_kp_u_5)); PyTuple_SET_ITEM(__pyx_k_tuple_10, 0, ((PyObject *)__pyx_kp_u_5)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_5)); __Pyx_GIVEREF(((PyObject 
*)__pyx_k_tuple_10)); /* "numpy.pxd":823 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_k_tuple_12 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_12)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_12); __Pyx_INCREF(((PyObject *)__pyx_kp_u_11)); PyTuple_SET_ITEM(__pyx_k_tuple_12, 0, ((PyObject *)__pyx_kp_u_11)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_11)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_12)); /* "nipy/labs/glm/kalman.pyx":68 * # Standard Kalman filter * * def ols(ndarray Y, ndarray X, int axis=0): # <<<<<<<<<<<<<< * """ * (beta, norm_var_beta, s2, dof) = ols(Y, X, axis=0). */ __pyx_k_tuple_15 = PyTuple_New(16); if (unlikely(!__pyx_k_tuple_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 68; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_15); __Pyx_INCREF(((PyObject *)__pyx_n_s__Y)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 0, ((PyObject *)__pyx_n_s__Y)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Y)); __Pyx_INCREF(((PyObject *)__pyx_n_s__X)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 1, ((PyObject *)__pyx_n_s__X)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__X)); __Pyx_INCREF(((PyObject *)__pyx_n_s__axis)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 2, ((PyObject *)__pyx_n_s__axis)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__axis)); __Pyx_INCREF(((PyObject *)__pyx_n_s__y)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 3, ((PyObject *)__pyx_n_s__y)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__y)); __Pyx_INCREF(((PyObject *)__pyx_n_s__b)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 4, ((PyObject *)__pyx_n_s__b)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__b)); __Pyx_INCREF(((PyObject *)__pyx_n_s__s2)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 5, ((PyObject *)__pyx_n_s__s2)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__s2)); __Pyx_INCREF(((PyObject *)__pyx_n_s__x)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 6, ((PyObject *)__pyx_n_s__x)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__x)); __Pyx_INCREF(((PyObject *)__pyx_n_s__kfilt)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 7, ((PyObject *)__pyx_n_s__kfilt)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__kfilt)); __Pyx_INCREF(((PyObject *)__pyx_n_s__p)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 8, ((PyObject *)__pyx_n_s__p)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__p)); __Pyx_INCREF(((PyObject *)__pyx_n_s__multi)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 9, ((PyObject *)__pyx_n_s__multi)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__multi)); __Pyx_INCREF(((PyObject *)__pyx_n_s__dof)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 10, ((PyObject *)__pyx_n_s__dof)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__dof)); __Pyx_INCREF(((PyObject *)__pyx_n_s__dims)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 11, ((PyObject *)__pyx_n_s__dims)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__dims)); __Pyx_INCREF(((PyObject *)__pyx_n_s__B)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 12, ((PyObject *)__pyx_n_s__B)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__B)); __Pyx_INCREF(((PyObject *)__pyx_n_s__S2)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 13, ((PyObject *)__pyx_n_s__S2)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__S2)); __Pyx_INCREF(((PyObject *)__pyx_n_s__VB)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 14, ((PyObject *)__pyx_n_s__VB)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__VB)); __Pyx_INCREF(((PyObject *)__pyx_n_s__i)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 15, ((PyObject *)__pyx_n_s__i)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__i)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_15)); 
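/* __pyx_k_tuple_15 above holds the 16 argument and local variable names of
 * the function defined at kalman.pyx line 68, and __pyx_k_codeobj_16 below
 * is its matching code object.  From the embedded docstring fragment, the
 * Python-level call is (illustrative usage only):
 *
 *     from nipy.labs.glm.kalman import ols
 *     beta, norm_var_beta, s2, dof = ols(Y, X, axis=0)
 */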
__pyx_k_codeobj_16 = (PyObject*)__Pyx_PyCode_New(3, 0, 16, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_15, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_17, __pyx_n_s__ols, 68, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_16)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 68; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/glm/kalman.pyx":142 * * * def ar1(ndarray Y, ndarray X, int niter=2, int axis=0): # <<<<<<<<<<<<<< * """ * (beta, norm_var_beta, s2, dof, a) = ar1(Y, X, niter=2, axis=0) */ __pyx_k_tuple_19 = PyTuple_New(22); if (unlikely(!__pyx_k_tuple_19)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_19); __Pyx_INCREF(((PyObject *)__pyx_n_s__Y)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 0, ((PyObject *)__pyx_n_s__Y)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Y)); __Pyx_INCREF(((PyObject *)__pyx_n_s__X)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 1, ((PyObject *)__pyx_n_s__X)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__X)); __Pyx_INCREF(((PyObject *)__pyx_n_s__niter)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 2, ((PyObject *)__pyx_n_s__niter)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__niter)); __Pyx_INCREF(((PyObject *)__pyx_n_s__axis)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 3, ((PyObject *)__pyx_n_s__axis)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__axis)); __Pyx_INCREF(((PyObject *)__pyx_n_s__y)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 4, ((PyObject *)__pyx_n_s__y)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__y)); __Pyx_INCREF(((PyObject *)__pyx_n_s__b)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 5, ((PyObject *)__pyx_n_s__b)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__b)); __Pyx_INCREF(((PyObject *)__pyx_n_s__vb)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 6, ((PyObject *)__pyx_n_s__vb)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__vb)); __Pyx_INCREF(((PyObject *)__pyx_n_s__s2)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 7, ((PyObject *)__pyx_n_s__s2)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__s2)); __Pyx_INCREF(((PyObject *)__pyx_n_s__a)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 8, ((PyObject *)__pyx_n_s__a)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__a)); __Pyx_INCREF(((PyObject *)__pyx_n_s__Vb_flat)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 9, ((PyObject *)__pyx_n_s__Vb_flat)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Vb_flat)); __Pyx_INCREF(((PyObject *)__pyx_n_s__x)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 10, ((PyObject *)__pyx_n_s__x)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__x)); __Pyx_INCREF(((PyObject *)__pyx_n_s__rkfilt)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 11, ((PyObject *)__pyx_n_s__rkfilt)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__rkfilt)); __Pyx_INCREF(((PyObject *)__pyx_n_s__p)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 12, ((PyObject *)__pyx_n_s__p)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__p)); __Pyx_INCREF(((PyObject *)__pyx_n_s__p2)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 13, ((PyObject *)__pyx_n_s__p2)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__p2)); __Pyx_INCREF(((PyObject *)__pyx_n_s__multi)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 14, ((PyObject *)__pyx_n_s__multi)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__multi)); __Pyx_INCREF(((PyObject *)__pyx_n_s__dof)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 15, ((PyObject *)__pyx_n_s__dof)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__dof)); __Pyx_INCREF(((PyObject *)__pyx_n_s__dims)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 16, ((PyObject *)__pyx_n_s__dims)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__dims)); __Pyx_INCREF(((PyObject *)__pyx_n_s__B)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 17, ((PyObject *)__pyx_n_s__B)); 
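/* __pyx_k_tuple_19, being filled in here, plays the same role for the
 * second wrapped function, defined at kalman.pyx line 142.  Its embedded
 * docstring gives the Python-level call as (illustrative usage only):
 *
 *     from nipy.labs.glm.kalman import ar1
 *     beta, norm_var_beta, s2, dof, a = ar1(Y, X, niter=2, axis=0)
 */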
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__B)); __Pyx_INCREF(((PyObject *)__pyx_n_s__VB)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 18, ((PyObject *)__pyx_n_s__VB)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__VB)); __Pyx_INCREF(((PyObject *)__pyx_n_s__S2)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 19, ((PyObject *)__pyx_n_s__S2)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__S2)); __Pyx_INCREF(((PyObject *)__pyx_n_s__A)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 20, ((PyObject *)__pyx_n_s__A)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__A)); __Pyx_INCREF(((PyObject *)__pyx_n_s__i)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 21, ((PyObject *)__pyx_n_s__i)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__i)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_19)); __pyx_k_codeobj_20 = (PyObject*)__Pyx_PyCode_New(4, 0, 22, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_19, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_17, __pyx_n_s__ar1, 142, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_20)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_InitGlobals(void) { if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __pyx_int_15 = PyInt_FromLong(15); if (unlikely(!__pyx_int_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; return 0; __pyx_L1_error:; return -1; } #if PY_MAJOR_VERSION < 3 PyMODINIT_FUNC initkalman(void); /*proto*/ PyMODINIT_FUNC initkalman(void) #else PyMODINIT_FUNC PyInit_kalman(void); /*proto*/ PyMODINIT_FUNC PyInit_kalman(void) #endif { PyObject *__pyx_t_1 = NULL; __Pyx_RefNannyDeclarations #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit_kalman(void)", 0); if ( __Pyx_check_binary_version() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #ifdef __Pyx_CyFunction_USED if (__Pyx_CyFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? 
*/ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4(__Pyx_NAMESTR("kalman"), __pyx_methods, __Pyx_DOCSTR(__pyx_k_13), 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (!PyDict_GetItemString(modules, "nipy.labs.glm.kalman")) { if (unlikely(PyDict_SetItemString(modules, "nipy.labs.glm.kalman", __pyx_m) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } } #endif __pyx_b = PyImport_AddModule(__Pyx_NAMESTR(__Pyx_BUILTIN_MODULE_NAME)); if (unlikely(!__pyx_b)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #if CYTHON_COMPILING_IN_PYPY Py_INCREF(__pyx_b); #endif if (__Pyx_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; /*--- Initialize various global constants etc. ---*/ if (unlikely(__Pyx_InitGlobals() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (__pyx_module_is_main_nipy__labs__glm__kalman) { if (__Pyx_SetAttrString(__pyx_m, "__name__", __pyx_n_s____main__) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; } /*--- Builtin init code ---*/ if (unlikely(__Pyx_InitCachedBuiltins() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Constants init code ---*/ if (unlikely(__Pyx_InitCachedConstants() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Global init code ---*/ /*--- Variable export code ---*/ /*--- Function export code ---*/ /*--- Type init code ---*/ /*--- Type import code ---*/ __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "type", #if CYTHON_COMPILING_IN_PYPY sizeof(PyTypeObject), #else sizeof(PyHeapTypeObject), #endif 0); if (unlikely(!__pyx_ptype_7cpython_4type_type)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_5numpy_dtype)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 155; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 169; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 178; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if 
(unlikely(!__pyx_ptype_5numpy_ufunc)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 861; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Variable import code ---*/ /*--- Function import code ---*/ /*--- Execution code ---*/ /* "nipy/labs/glm/kalman.pyx":10 * """ * * __version__ = '0.1' # <<<<<<<<<<<<<< * * # Includes */ if (PyObject_SetAttr(__pyx_m, __pyx_n_s____version__, ((PyObject *)__pyx_kp_s_14)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 10; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/glm/kalman.pyx":62 * * # Initialize numpy * fffpy_import_array() # <<<<<<<<<<<<<< * import_array() * import numpy as np */ fffpy_import_array(); /* "nipy/labs/glm/kalman.pyx":63 * # Initialize numpy * fffpy_import_array() * import_array() # <<<<<<<<<<<<<< * import numpy as np * */ import_array(); /* "nipy/labs/glm/kalman.pyx":64 * fffpy_import_array() * import_array() * import numpy as np # <<<<<<<<<<<<<< * * # Standard Kalman filter */ __pyx_t_1 = __Pyx_Import(((PyObject *)__pyx_n_s__numpy), 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 64; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__np, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 64; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/glm/kalman.pyx":68 * # Standard Kalman filter * * def ols(ndarray Y, ndarray X, int axis=0): # <<<<<<<<<<<<<< * """ * (beta, norm_var_beta, s2, dof) = ols(Y, X, axis=0). */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_3glm_6kalman_1ols, NULL, __pyx_n_s_18); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 68; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__ols, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 68; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/glm/kalman.pyx":142 * * * def ar1(ndarray Y, ndarray X, int niter=2, int axis=0): # <<<<<<<<<<<<<< * """ * (beta, norm_var_beta, s2, dof, a) = ar1(Y, X, niter=2, axis=0) */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_3glm_6kalman_3ar1, NULL, __pyx_n_s_18); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__ar1, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/glm/kalman.pyx":1 * # -*- Mode: Python -*- Not really, but the syntax is close enough # <<<<<<<<<<<<<< * * */ __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_1)); if (PyObject_SetAttr(__pyx_m, __pyx_n_s____test__, ((PyObject *)__pyx_t_1)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; /* "numpy.pxd":975 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); if (__pyx_m) { __Pyx_AddTraceback("init nipy.labs.glm.kalman", __pyx_clineno, __pyx_lineno, __pyx_filename); Py_DECREF(__pyx_m); __pyx_m = 0; } else if 
(!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init nipy.labs.glm.kalman"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if PY_MAJOR_VERSION < 3 return; #else return __pyx_m; #endif } /* Runtime support code */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule((char *)modname); if (!m) goto end; p = PyObject_GetAttrString(m, (char *)"RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* CYTHON_REFNANNY */ static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name) { PyObject *result; result = PyObject_GetAttr(dict, name); if (!result) { if (dict != __pyx_b) { PyErr_Clear(); result = PyObject_GetAttr(__pyx_b, name); } if (!result) { PyErr_SetObject(PyExc_NameError, name); } } return result; } static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%s() takes %s %" CYTHON_FORMAT_SSIZE_T "d positional argument%s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? "" : "s", num_found); } static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 
1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%s() got an unexpected keyword argument '%s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } static int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, const char *name, int exact) { if (!type) { PyErr_Format(PyExc_SystemError, "Missing type object"); return 0; } if (none_allowed && obj == Py_None) return 1; else if (exact) { if (Py_TYPE(obj) == type) return 1; } else { if (PyObject_TypeCheck(obj, type)) return 1; } PyErr_Format(PyExc_TypeError, "Argument '%s' has incorrect type (expected %s, got %s)", name, type->tp_name, Py_TYPE(obj)->tp_name); return 0; } static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb) { #if CYTHON_COMPILING_IN_CPYTHON PyObject *tmp_type, *tmp_value, *tmp_tb; PyThreadState *tstate = PyThreadState_GET(); tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_Restore(type, value, tb); #endif } static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb) { #if CYTHON_COMPILING_IN_CPYTHON PyThreadState *tstate = PyThreadState_GET(); *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(type, value, tb); #endif } #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } #if PY_VERSION_HEX < 0x02050000 if (PyClass_Check(type)) { #else if (PyType_Check(type)) { #endif #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; #if PY_VERSION_HEX < 0x02050000 if (PyInstance_Check(type)) { type = (PyObject*) ((PyInstanceObject*)type)->in_class; Py_INCREF(type); } else { type = 0; PyErr_SetString(PyExc_TypeError, "raise: exception must be an old-style class or instance"); goto raise_error; } #else type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } #endif } __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); 
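/* The code above is the Python 2 variant of __Pyx_Raise (its `cause`
 * argument is marked CYTHON_UNUSED there, and old-style classes are handled
 * under the PY_VERSION_HEX checks); the "#else / Python 3+" branch that
 * follows instantiates exception classes as needed and implements
 * `raise ... from cause` through PyException_SetCause. */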
Py_XDECREF(tb); return; } #else /* Python 3+ */ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyEval_CallObject(type, args); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } if (cause && cause != Py_None) { PyObject *fixed_cause; if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { PyThreadState *tstate = PyThreadState_GET(); PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } } bad: Py_XDECREF(owned_instance); return; } #endif static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); } static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more than %" CYTHON_FORMAT_SSIZE_T "d value%s to unpack", index, (index == 1) ? 
"" : "s"); } static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); } static CYTHON_INLINE int __Pyx_IterFinish(void) { #if CYTHON_COMPILING_IN_CPYTHON PyThreadState *tstate = PyThreadState_GET(); PyObject* exc_type = tstate->curexc_type; if (unlikely(exc_type)) { if (likely(exc_type == PyExc_StopIteration) || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration)) { PyObject *exc_value, *exc_tb; exc_value = tstate->curexc_value; exc_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; Py_DECREF(exc_type); Py_XDECREF(exc_value); Py_XDECREF(exc_tb); return 0; } else { return -1; } } return 0; #else if (unlikely(PyErr_Occurred())) { if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) { PyErr_Clear(); return 0; } else { return -1; } } return 0; #endif } static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected) { if (unlikely(retval)) { Py_DECREF(retval); __Pyx_RaiseTooManyValuesError(expected); return -1; } else { return __Pyx_IterFinish(); } return 0; } static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { if (unlikely(!type)) { PyErr_Format(PyExc_SystemError, "Missing type object"); return 0; } if (likely(PyObject_TypeCheck(obj, type))) return 1; PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", Py_TYPE(obj)->tp_name, type->tp_name); return 0; } static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level) { PyObject *py_import = 0; PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; py_import = __Pyx_GetAttrString(__pyx_b, "__import__"); if (!py_import) goto bad; if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; #if PY_VERSION_HEX >= 0x02050000 { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if (strchr(__Pyx_MODULE_NAME, '.')) { /* try package relative import first */ PyObject *py_level = PyInt_FromLong(1); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; /* try absolute import on failure */ } #endif if (!module) { PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); } } #else if (level>0) { PyErr_SetString(PyExc_RuntimeError, "Relative import is not supported for Python <=2.4."); goto bad; } module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, NULL); #endif bad: Py_XDECREF(empty_list); Py_XDECREF(py_import); Py_XDECREF(empty_dict); return module; } static CYTHON_INLINE PyObject *__Pyx_PyInt_to_py_Py_intptr_t(Py_intptr_t val) { const Py_intptr_t neg_one = (Py_intptr_t)-1, const_zero = (Py_intptr_t)0; const int is_unsigned = const_zero < neg_one; if ((sizeof(Py_intptr_t) == sizeof(char)) || (sizeof(Py_intptr_t) == sizeof(short))) { return PyInt_FromLong((long)val); } else if ((sizeof(Py_intptr_t) == sizeof(int)) || (sizeof(Py_intptr_t) == sizeof(long))) { if (is_unsigned) return PyLong_FromUnsignedLong((unsigned long)val); else return 
PyInt_FromLong((long)val); } else if (sizeof(Py_intptr_t) == sizeof(PY_LONG_LONG)) { if (is_unsigned) return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG)val); else return PyLong_FromLongLong((PY_LONG_LONG)val); } else { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; return _PyLong_FromByteArray(bytes, sizeof(Py_intptr_t), little, !is_unsigned); } } #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return ::std::complex< float >(x, y); } #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return x + y*(__pyx_t_float_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { __pyx_t_float_complex z; z.real = x; z.imag = y; return z; } #endif #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex a, __pyx_t_float_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float denom = b.real * b.real + b.imag * b.imag; z.real = (a.real * b.real + a.imag * b.imag) / denom; z.imag = (a.imag * b.real - a.real * b.imag) / denom; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrtf(z.real*z.real + z.imag*z.imag); #else return hypotf(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { float denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(a, a); case 3: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(z, a); case 4: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } r = a.real; theta = 0; } else { r = __Pyx_c_absf(a); theta = atan2f(a.imag, a.real); } lnr = logf(r); z_r = expf(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cosf(z_theta); z.imag = z_r * 
sinf(z_theta); return z; } #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return ::std::complex< double >(x, y); } #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return x + y*(__pyx_t_double_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { __pyx_t_double_complex z; z.real = x; z.imag = y; return z; } #endif #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex a, __pyx_t_double_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double denom = b.real * b.real + b.imag * b.imag; z.real = (a.real * b.real + a.imag * b.imag) / denom; z.imag = (a.imag * b.real - a.real * b.imag) / denom; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrt(z.real*z.real + z.imag*z.imag); #else return hypot(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { double denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(a, a); case 3: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(z, a); case 4: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } r = a.real; theta = 0; } else { r = __Pyx_c_abs(a); theta = atan2(a.imag, a.real); } lnr = log(r); z_r = exp(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cos(z_theta); z.imag = z_r * sin(z_theta); return z; } #endif #endif static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject* x) { const unsigned char neg_one = (unsigned char)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(unsigned char) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(unsigned char)val)) 
{ if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to unsigned char" : "value too large to convert to unsigned char"); } return (unsigned char)-1; } return (unsigned char)val; } return (unsigned char)__Pyx_PyInt_AsUnsignedLong(x); } static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject* x) { const unsigned short neg_one = (unsigned short)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(unsigned short) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(unsigned short)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to unsigned short" : "value too large to convert to unsigned short"); } return (unsigned short)-1; } return (unsigned short)val; } return (unsigned short)__Pyx_PyInt_AsUnsignedLong(x); } static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject* x) { const unsigned int neg_one = (unsigned int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(unsigned int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(unsigned int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to unsigned int" : "value too large to convert to unsigned int"); } return (unsigned int)-1; } return (unsigned int)val; } return (unsigned int)__Pyx_PyInt_AsUnsignedLong(x); } static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject* x) { const char neg_one = (char)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(char) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(char)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to char" : "value too large to convert to char"); } return (char)-1; } return (char)val; } return (char)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject* x) { const short neg_one = (short)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(short) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(short)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to short" : "value too large to convert to short"); } return (short)-1; } return (short)val; } return (short)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject* x) { const int neg_one = (int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? 
"can't convert negative value to int" : "value too large to convert to int"); } return (int)-1; } return (int)val; } return (int)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject* x) { const signed char neg_one = (signed char)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(signed char) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(signed char)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to signed char" : "value too large to convert to signed char"); } return (signed char)-1; } return (signed char)val; } return (signed char)__Pyx_PyInt_AsSignedLong(x); } static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject* x) { const signed short neg_one = (signed short)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(signed short) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(signed short)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to signed short" : "value too large to convert to signed short"); } return (signed short)-1; } return (signed short)val; } return (signed short)__Pyx_PyInt_AsSignedLong(x); } static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject* x) { const signed int neg_one = (signed int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(signed int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(signed int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to signed int" : "value too large to convert to signed int"); } return (signed int)-1; } return (signed int)val; } return (signed int)__Pyx_PyInt_AsSignedLong(x); } static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject* x) { const int neg_one = (int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? 
"can't convert negative value to int" : "value too large to convert to int"); } return (int)-1; } return (int)val; } return (int)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject* x) { const unsigned long neg_one = (unsigned long)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned long"); return (unsigned long)-1; } return (unsigned long)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned long"); return (unsigned long)-1; } return (unsigned long)PyLong_AsUnsignedLong(x); } else { return (unsigned long)PyLong_AsLong(x); } } else { unsigned long val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (unsigned long)-1; val = __Pyx_PyInt_AsUnsignedLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject* x) { const unsigned PY_LONG_LONG neg_one = (unsigned PY_LONG_LONG)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned PY_LONG_LONG"); return (unsigned PY_LONG_LONG)-1; } return (unsigned PY_LONG_LONG)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned PY_LONG_LONG"); return (unsigned PY_LONG_LONG)-1; } return (unsigned PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); } else { return (unsigned PY_LONG_LONG)PyLong_AsLongLong(x); } } else { unsigned PY_LONG_LONG val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (unsigned PY_LONG_LONG)-1; val = __Pyx_PyInt_AsUnsignedLongLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject* x) { const long neg_one = (long)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long)-1; } return (long)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long)-1; } return (long)PyLong_AsUnsignedLong(x); } else { return (long)PyLong_AsLong(x); } } else { long val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (long)-1; val = __Pyx_PyInt_AsLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject* x) { const PY_LONG_LONG neg_one = (PY_LONG_LONG)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to PY_LONG_LONG"); return (PY_LONG_LONG)-1; } return (PY_LONG_LONG)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { 
PyErr_SetString(PyExc_OverflowError, "can't convert negative value to PY_LONG_LONG"); return (PY_LONG_LONG)-1; } return (PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); } else { return (PY_LONG_LONG)PyLong_AsLongLong(x); } } else { PY_LONG_LONG val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (PY_LONG_LONG)-1; val = __Pyx_PyInt_AsLongLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject* x) { const signed long neg_one = (signed long)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed long"); return (signed long)-1; } return (signed long)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed long"); return (signed long)-1; } return (signed long)PyLong_AsUnsignedLong(x); } else { return (signed long)PyLong_AsLong(x); } } else { signed long val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (signed long)-1; val = __Pyx_PyInt_AsSignedLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject* x) { const signed PY_LONG_LONG neg_one = (signed PY_LONG_LONG)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed PY_LONG_LONG"); return (signed PY_LONG_LONG)-1; } return (signed PY_LONG_LONG)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed PY_LONG_LONG"); return (signed PY_LONG_LONG)-1; } return (signed PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); } else { return (signed PY_LONG_LONG)PyLong_AsLongLong(x); } } else { signed PY_LONG_LONG val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (signed PY_LONG_LONG)-1; val = __Pyx_PyInt_AsSignedLongLong(tmp); Py_DECREF(tmp); return val; } } static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); #if PY_VERSION_HEX < 0x02050000 return PyErr_Warn(NULL, message); #else return PyErr_WarnEx(NULL, message, 1); #endif } return 0; } #ifndef __PYX_HAVE_RT_ImportModule #define __PYX_HAVE_RT_ImportModule static PyObject *__Pyx_ImportModule(const char *name) { PyObject *py_name = 0; PyObject *py_module = 0; py_name = __Pyx_PyIdentifier_FromString(name); if (!py_name) goto bad; py_module = PyImport_Import(py_name); Py_DECREF(py_name); return py_module; bad: Py_XDECREF(py_name); return 0; } #endif #ifndef __PYX_HAVE_RT_ImportType #define __PYX_HAVE_RT_ImportType static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict) { PyObject *py_module = 0; PyObject *result = 0; PyObject *py_name = 0; char warning[200]; 
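    /* Descriptive note: the code below imports `module_name`, looks up
       `class_name` on it, and compares the runtime type's tp_basicsize with
       the size this module was compiled against.  In non-strict mode a larger
       runtime struct only emits a warning ("size changed, may indicate binary
       incompatibility"); any other size mismatch raises ValueError ("has the
       wrong size, try recompiling"). */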
py_module = __Pyx_ImportModule(module_name); if (!py_module) goto bad; py_name = __Pyx_PyIdentifier_FromString(class_name); if (!py_name) goto bad; result = PyObject_GetAttr(py_module, py_name); Py_DECREF(py_name); py_name = 0; Py_DECREF(py_module); py_module = 0; if (!result) goto bad; if (!PyType_Check(result)) { PyErr_Format(PyExc_TypeError, "%s.%s is not a type object", module_name, class_name); goto bad; } if (!strict && (size_t)((PyTypeObject *)result)->tp_basicsize > size) { PyOS_snprintf(warning, sizeof(warning), "%s.%s size changed, may indicate binary incompatibility", module_name, class_name); #if PY_VERSION_HEX < 0x02050000 if (PyErr_Warn(NULL, warning) < 0) goto bad; #else if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; #endif } else if ((size_t)((PyTypeObject *)result)->tp_basicsize != size) { PyErr_Format(PyExc_ValueError, "%s.%s has the wrong size, try recompiling", module_name, class_name); goto bad; } return (PyTypeObject *)result; bad: Py_XDECREF(py_module); Py_XDECREF(result); return NULL; } #endif static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = (start + end) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, new_max*sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( 
const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, /*int argcount,*/ 0, /*int kwonlyargcount,*/ 0, /*int nlocals,*/ 0, /*int stacksize,*/ 0, /*int flags,*/ __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, /*int firstlineno,*/ __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_globals = 0; PyFrameObject *py_frame = 0; py_code = __pyx_find_code_object(c_line ? c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? c_line : py_line, py_code); } py_globals = PyModule_GetDict(__pyx_m); if (!py_globals) goto bad; py_frame = PyFrame_New( PyThreadState_GET(), /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ py_globals, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; py_frame->f_lineno = py_line; PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else /* Python 3+ has unicode identifiers */ if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; ++t; } return 0; } /* Type Conversion Functions */ static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x) { PyNumberMethods *m; const char *name = NULL; PyObject *res = NULL; #if PY_VERSION_HEX < 0x03000000 if (PyInt_Check(x) || PyLong_Check(x)) #else if (PyLong_Check(x)) #endif return Py_INCREF(x), x; m = Py_TYPE(x)->tp_as_number; #if PY_VERSION_HEX < 0x03000000 if (m && m->nb_int) { name = "int"; res = PyNumber_Int(x); } else if (m && m->nb_long) { name = "long"; res = PyNumber_Long(x); } #else if (m && m->nb_int) { name = 
"int"; res = PyNumber_Long(x); } #endif if (res) { #if PY_VERSION_HEX < 0x03000000 if (!PyInt_Check(res) && !PyLong_Check(res)) { #else if (!PyLong_Check(res)) { #endif PyErr_Format(PyExc_TypeError, "__%s__ returned non-%s (type %.200s)", name, name, Py_TYPE(res)->tp_name); Py_DECREF(res); return NULL; } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject* x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { #if PY_VERSION_HEX < 0x02050000 if (ival <= LONG_MAX) return PyInt_FromLong((long)ival); else { unsigned char *bytes = (unsigned char *) &ival; int one = 1; int little = (int)*(unsigned char*)&one; return _PyLong_FromByteArray(bytes, sizeof(size_t), little, 0); } #else return PyInt_FromSize_t(ival); #endif } static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject* x) { unsigned PY_LONG_LONG val = __Pyx_PyInt_AsUnsignedLongLong(x); if (unlikely(val == (unsigned PY_LONG_LONG)-1 && PyErr_Occurred())) { return (size_t)-1; } else if (unlikely(val != (unsigned PY_LONG_LONG)(size_t)val)) { PyErr_SetString(PyExc_OverflowError, "value too large to convert to size_t"); return (size_t)-1; } return (size_t)val; } #endif /* Py_PYTHON_H */ nipy-0.3.0/nipy/labs/glm/kalman.pyx000066400000000000000000000146461210344137400171670ustar00rootroot00000000000000# -*- Mode: Python -*- Not really, but the syntax is close enough """ Incremental (Kalman-like) filters for linear regression. Author: Alexis Roche, 2008. """ __version__ = '0.1' # Includes from fff cimport * # Exports from fff_glm_kalman.h cdef extern from "fff_glm_kalman.h": ctypedef struct fff_glm_KF: size_t t size_t dim fff_vector* b fff_matrix* Vb double ssd double s2 double dof double s2_corr ctypedef struct fff_glm_RKF: size_t t size_t dim fff_glm_KF* Kfilt fff_vector* db fff_matrix* Hssd double spp fff_vector* Gspp fff_matrix* Hspp fff_vector* b fff_matrix* Vb double s2 double a double dof double s2_cor fff_vector* vaux fff_matrix* Maux fff_glm_KF* fff_glm_KF_new(size_t dim) void fff_glm_KF_delete(fff_glm_KF* thisone) void fff_glm_KF_reset(fff_glm_KF* thisone) void fff_glm_KF_iterate(fff_glm_KF* thisone, double y, fff_vector* x) fff_glm_RKF* fff_glm_RKF_new(size_t dim) void fff_glm_RKF_delete(fff_glm_RKF* thisone) void fff_glm_RKF_reset(fff_glm_RKF* thisone) void fff_glm_RKF_iterate(fff_glm_RKF* thisone, unsigned int nloop, double y, fff_vector* x, double yy, fff_vector* xx) void fff_glm_KF_fit(fff_glm_KF* thisone, fff_vector* y, fff_matrix* X) void fff_glm_RKF_fit(fff_glm_RKF* thisone, unsigned int nloop, fff_vector* y, fff_matrix* X) # Initialize numpy fffpy_import_array() import_array() import numpy as np # Standard Kalman filter def ols(ndarray Y, ndarray X, int axis=0): """ (beta, norm_var_beta, s2, dof) = ols(Y, X, axis=0). Ordinary least-square multiple regression using the Kalman filter. Fit the N-dimensional array Y along the given axis in terms of the regressors in matrix X. The regressors must be stored columnwise. OUTPUT: a four-element tuple beta -- array of parameter estimates norm_var_beta -- normalized variance matrix of the parameter estimates (data independent) s2 -- array of squared scale parameters to multiply norm_var_beta for the variance matrix of beta. dof -- scalar degrees of freedom. REFERENCE: Roche et al, ISBI 2004. 
""" cdef fff_vector *y, *b, *s2 cdef fff_matrix *x cdef fff_glm_KF *kfilt cdef size_t p cdef fffpy_multi_iterator* multi cdef double dof # View on design matrix x = fff_matrix_fromPyArray(X) # Number of regressors p = x.size2 # Allocate output arrays B and S2 # # Using Cython cimport of numpy, Y.shape is a C array of npy_intp # type; see: # http://codespeak.net/pipermail/cython-dev/2009-April/005229.html dims = [Y.shape[i] for i in range(Y.ndim)] dims[axis] = p B = np.zeros(dims, dtype=np.double) dims[axis] = 1 S2 = np.zeros(dims, dtype=np.double) # Allocate local structure kfilt = fff_glm_KF_new(p) # Create a new array iterator multi = fffpy_multi_iterator_new(3, axis, Y, B, S2) # Create views y = multi.vector[0] b = multi.vector[1] s2 = multi.vector[2] # Loop while(multi.index < multi.size): fff_glm_KF_fit(kfilt, y, x) fff_vector_memcpy(b, kfilt.b) s2.data[0] = kfilt.s2 fffpy_multi_iterator_update(multi) # Normalized variance (computed from the last item) VB = fff_matrix_const_toPyArray(kfilt.Vb); dof = kfilt.dof # Free memory fff_matrix_delete(x) fff_glm_KF_delete(kfilt) fffpy_multi_iterator_delete(multi) # Return return B, VB, S2, dof def ar1(ndarray Y, ndarray X, int niter=2, int axis=0): """ (beta, norm_var_beta, s2, dof, a) = ar1(Y, X, niter=2, axis=0) Refined Kalman filter -- enhanced Kalman filter to account for noise autocorrelation using an AR(1) model. Pseudo-likelihood multiple regression using the refined Kalman filter, a Kalman variant based on a AR(1) error model. Fit the N-dimensional array Y along the given axis in terms of the regressors in matrix X. The regressors must be stored columnwise. OUTPUT: a five-element tuple beta -- array of parameter estimates norm_var_beta -- array of normalized variance matrices (which are data dependent unlike in standard OLS regression) s2 -- array of squared scale parameters to multiply norm_var_beta for the variance matrix of beta. dof -- scalar degrees of freedom a -- array of error autocorrelation estimates REFERENCE: Roche et al, MICCAI 2004. """ cdef fff_vector *y, *b, *vb, *s2, *a cdef fff_vector Vb_flat cdef fff_matrix *x cdef fff_glm_RKF *rkfilt cdef size_t p, p2 cdef fffpy_multi_iterator* multi cdef double dof # View on design matrix x = fff_matrix_fromPyArray(X) # Number of regressors p = x.size2 p2 = p*p # Allocate output arrays B and S2. 
# # Using Cython cimport of numpy, Y.shape is a C array of npy_intp # type; see: # http://codespeak.net/pipermail/cython-dev/2009-April/005229.html dims = [Y.shape[i] for i in range(Y.ndim)] dims[axis] = p B = np.zeros(dims, dtype=np.double) dims[axis] = p2 VB = np.zeros(dims, dtype=np.double) dims[axis] = 1 S2 = np.zeros(dims, dtype=np.double) A = np.zeros(dims, dtype=np.double) # Allocate local structure rkfilt = fff_glm_RKF_new(p) # Create a new array iterator multi = fffpy_multi_iterator_new(5, axis, Y, B, VB, S2, A) # Create views y = multi.vector[0] b = multi.vector[1] vb = multi.vector[2] s2 = multi.vector[3] a = multi.vector[4] # Loop while(multi.index < multi.size): fff_glm_RKF_fit(rkfilt, niter, y, x) fff_vector_memcpy(b, rkfilt.b) Vb_flat = fff_vector_view(rkfilt.Vb.data, p2, 1) # rkfilt.Vb contiguous by construction fff_vector_memcpy(vb, &Vb_flat) s2.data[0] = rkfilt.s2 a.data[0] = rkfilt.a fffpy_multi_iterator_update(multi) # Dof dof = rkfilt.dof # Free memory fff_matrix_delete(x) fff_glm_RKF_delete(rkfilt) fffpy_multi_iterator_delete(multi) # Reshape variance array dims[axis] = p dims.insert(axis+1, p) VB = VB.reshape(dims) # Return return B, VB, S2, dof, A nipy-0.3.0/nipy/labs/glm/setup.py000066400000000000000000000017731210344137400166710ustar00rootroot00000000000000#!/usr/bin/env python def configuration(parent_package='', top_path=None): from numpy.distutils.misc_util import Configuration # We need this because libcstat.a is linked to lapack, which can # be a fortran library, and the linker needs this information. from numpy.distutils.system_info import get_info lapack_info = get_info('lapack_opt', 0) if 'libraries' not in lapack_info: # But on OSX that may not give us what we need, so try with 'lapack' # instead. NOTE: scipy.linalg uses lapack_opt, not 'lapack'... 
lapack_info = get_info('lapack', 0) config = Configuration('glm', parent_package, top_path) config.add_subpackage('tests') config.add_subpackage('benchmarks') config.add_extension( 'kalman', sources=['kalman.pyx'], libraries=['cstat'], extra_info=lapack_info, ) return config if __name__ == '__main__': print('This is the wrong setup.py file to run') nipy-0.3.0/nipy/labs/glm/tests/000077500000000000000000000000001210344137400163115ustar00rootroot00000000000000nipy-0.3.0/nipy/labs/glm/tests/__init__.py000066400000000000000000000000501210344137400204150ustar00rootroot00000000000000# Init to make test directory a package nipy-0.3.0/nipy/labs/glm/tests/test_glm.py000066400000000000000000000024731210344137400205070ustar00rootroot00000000000000#!/usr/bin/env python from numpy.testing import assert_almost_equal, TestCase import numpy as np from ..glm import glm class TestFitting(TestCase): def make_data(self): dimt = 100 dimx = 10 dimy = 11 dimz = 12 self.y = np.random.randn(dimt, dimx, dimy, dimz) X = np.array([np.ones(dimt), range(dimt)]) self.X = X.transpose() ## the design matrix X must have dimt lines def ols(self, axis): y = np.rollaxis(self.y, 0, axis+1) ## time index is axis X = self.X m = glm(y, X, axis=axis) m1 = glm(y, X, axis=axis, method='kalman') b = m.beta b1 = m1.beta tcon = m.contrast([1,0]) tcon1 = m1.contrast([1,0]) z = tcon.zscore() z1 = tcon1.zscore() assert_almost_equal(b, b1) ##assert_almost_equal(v, v1, decimal=2) ##assert_almost_equal(z, z1, decimal=3) def test_ols_axis0(self): self.make_data() self.ols(0) def test_ols_axis1(self): self.make_data() self.ols(1) def test_ols_axis2(self): self.make_data() self.ols(2) def test_ols_axis3(self): self.make_data() self.ols(3) if __name__ == "__main__": import nose nose.run(argv=['', __file__]) nipy-0.3.0/nipy/labs/group/000077500000000000000000000000001210344137400155245ustar00rootroot00000000000000nipy-0.3.0/nipy/labs/group/__init__.py000066400000000000000000000005241210344137400176360ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from . import onesample from . import twosample from . import glm_twolevel from . import permutation_test from . import spatial_relaxation_onesample from nipy.testing import Tester test = Tester().test bench = Tester().bench nipy-0.3.0/nipy/labs/group/displacement_field.py000066400000000000000000000267611210344137400217250ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import numpy as np from scipy.ndimage import correlate1d, _ni_support, gaussian_filter, \ binary_erosion from scipy import math def square_gaussian_filter1d(input, sigma, axis = -1, output = None, mode = "reflect", cval = 0.0): """One-dimensional Squared Gaussian filter. The standard-deviation of the Gaussian filter is given by sigma. """ sd = float(sigma) # make the length of the filter equal to 4 times the standard # deviations: lw = int(4.0 * sd + 0.5) weights = [0.0] * (2 * lw + 1) weights[lw] = 1.0 sum = 1.0 sd = sd * sd # calculate the kernel: for ii in range(1, lw + 1): tmp = math.exp(- 0.5 * float(ii * ii) / sd) weights[lw + ii] = tmp weights[lw - ii] = tmp sum += 2.0 * tmp for ii in range(2 * lw + 1): weights[ii] /= sum weights[ii] = weights[ii]**2 return correlate1d(input, weights, axis, output, mode, cval, 0) def square_gaussian_filter(input, sigma, output = None, mode = "reflect", cval = 0.0): """Multi-dimensional Squared Gaussian filter. 
The standard-deviations of the Gaussian filter are given for each axis as a sequence, or as a single number, in which case it is equal for all axes. Note: The multi-dimensional filter is implemented as a sequence of one-dimensional convolution filters. The intermediate arrays are stored in the same data type as the output. Therefore, for output types with a limited precision, the results may be imprecise because intermediate results may be stored with insufficient precision. """ input = np.asarray(input) output, return_value =_ni_support._get_output(output, input) sigmas =_ni_support._normalize_sequence(sigma, input.ndim) axes = range(input.ndim) axes = [(axes[ii], sigmas[ii]) for ii in range(len(axes)) if sigmas[ii] > 1e-15] if len(axes) > 0: for axis, sigma in axes: square_gaussian_filter1d(input, sigma, axis, output, mode, cval) input = output else: output[...] = input[...] return return_value class displacement_field(object): """ Sampling of multiple vector-valued displacement fields on a 3D-lattice. Displacement fields are generated as linear combinations of fixed displacements. The coefficients are random Gaussian variables. """ def __init__(self, XYZ, sigma, n=1, mask=None, step=None): """ Input : XYZ (3,p) array of voxel coordinates sigma standard deviate of Gaussian filter kernel Each displacement block has length 4*sigma n number of generated displacement fields. mask (q,) displacement blocks are limited to mask The constructor creates the following fields : self.block List of N block masks (voxel index vectors) self.weights List of N block weights (same shape as the masks) self.U (3,n,N) Displacement coefficients self.V (3,n,p) Displacements self.W (3,n,p) Discretize displacements self.I (n,p) Displaced voxels index (voxel k in the mask is displaced by field i to voxel self.I[i,k]) """ self.XYZ = XYZ self.sigma = sigma if np.isscalar(sigma): self.sigma = sigma * np.ones(3) self.n = n self.XYZ_min = self.XYZ.min(axis=1).reshape(3, 1) - 1 self.XYZ_max = self.XYZ.max(axis=1).reshape(3, 1) + 1 p = XYZ.shape[1] if mask == None: self.mask = np.arange(p) else: self.mask = mask if step == None: self.step = int(round(2 * self.sigma.max())) else: self.step = step self.V = np.zeros((3, n, p), float) self.W = np.zeros((3, n, p), int) self.I = np.arange(p) * np.ones((n, p), int) self.XYZ_vol = np.zeros(XYZ.max(axis=1) + 2, int) - 1 self.XYZ_vol[XYZ[0], XYZ[1], XYZ[2]] = np.arange(p) self.init_displacement_blocks() self.compute_inner_blocks() self.U = np.zeros((3, n, len(self.block)), float) def init_displacement_blocks(self): """ Called by class constructor """ XYZ = self.XYZ # displacement kernel sigma = self.sigma.max() #r = int(round(2 * sigma)) d = int(round(6 * sigma)) block_dim = (\ self.XYZ.max(axis=1)+1 - \ self.XYZ.min(axis=1)).clip(1,d) #kernel = np.zeros(d * np.ones(3), float) kernel = np.zeros(block_dim, float) kernel[block_dim[0]/2-1:block_dim[0]/2+1, block_dim[1]/2-1:block_dim[1]/2+1, block_dim[2]/2-1:block_dim[2]/2+1] += 1 kernel = gaussian_filter(kernel.squeeze(), sigma, mode='constant') kernel = kernel.reshape(block_dim) kernel /= kernel.max() # displacement 'blocks' self.block = [] self.weights = [] mask_vol = np.zeros(XYZ.max(axis=1) + 2, int) - 1 mask_vol[list(XYZ[:, self.mask])] = self.mask Xm, Ym, Zm = XYZ.min(axis=1).astype(int) XM, YM, ZM = XYZ.max(axis=1).clip(1,np.inf).astype(int) for i in xrange(Xm, XM, self.step): for j in xrange(Ym, YM, self.step): for k in xrange(Zm, ZM, self.step): block_vol = mask_vol[i:i + d, j:j + d, k:k + d] XYZ_block = np.array( np.where( 
block_vol > -1 ) ) if XYZ_block.size > 0 \ and (kernel[list(XYZ_block)] > 0.05).sum() == (kernel > 0.05).sum(): #print i,j,k self.block.append(block_vol[XYZ_block[0], XYZ_block[1], XYZ_block[2]]) self.weights.append(kernel[XYZ_block[0], XYZ_block[1], XYZ_block[2]]) def compute_inner_blocks(self): """ Generate self.inner_blocks, index of blocks which are "far from" the borders of the lattice. """ XYZ = self.XYZ sigma = self.sigma.max() mask_vol = np.zeros(XYZ.max(axis=1) + 1, int) mask_vol[XYZ[0], XYZ[1], XYZ[2]] += 1 mask_vol = binary_erosion(mask_vol.squeeze(), iterations=int(round(sigma))).astype(int) mask_vol = mask_vol.reshape(XYZ.max(axis=1) + 1).astype(int) inner_mask = mask_vol[XYZ[0], XYZ[1], XYZ[2]] inner_blocks = [] for i in xrange(len(self.block)): if inner_mask[self.block[i]].min() == 1: inner_blocks.append(i) self.inner_blocks = np.array(inner_blocks) def sample(self, i, b, proposal='prior', proposal_std=None, proposal_mean=None): """ Generates U, V, L, W, I, where U, V, W, I are proposals for self.U[:,i,b], self.V[:,i,block], self.W[:,i,L], self.I[i,L] if block = self.block[b]. W and I are given only in those voxels, indexed by L, where they differ from current values. Proposal is either 'prior', 'rand_walk' or 'fixed' """ block = self.block[b] # Current values Uc = self.U[:, i, b] Vc = self.V[:, i, block] Wc = self.W[:, i, block] Ic = self.I[i, block] # Proposals valid_proposal = False while not valid_proposal: if proposal == 'prior': U = np.random.randn(3) * proposal_std elif proposal == 'rand_walk': U = Uc + np.random.randn(3) * proposal_std else: U = proposal_mean + np.random.randn(3) * proposal_std V = Vc + (self.weights[b].reshape(1, -1) * (U - Uc).reshape(3,1)) #print U W = np.round(V).astype(int) L = np.where((W == Wc).prod(axis=0) == 0)[0] XYZ_W = np.clip(self.XYZ[:, block[L]] + W[:, L], self.XYZ_min, self.XYZ_max) I = self.XYZ_vol[XYZ_W[0], XYZ_W[1], XYZ_W[2]] #print (I == -1).sum() if len(L) == 0: valid_proposal = True elif min(I) > -1: valid_proposal = True return U, V, block[L], W[:, L], I def sample_all_blocks(self, proposal_std=None, proposal_mean=None): """ Generates U, V, W, I, proposals for self.U[:, i], self.V[:, i], self.W[:, i], self.I[i]. 
Proposal is either 'prior', 'rand_walk' or 'fixed' """ B = len(self.block) p = self.XYZ.shape[1] V = np.zeros((3, p), float) I = -np.ones(p, int) while min(I) == -1: U = np.random.randn(3, B) * proposal_std if proposal_mean != None: U += proposal_mean V *= 0 for b in xrange(B): V[:, self.block[b]] += self.weights[b].reshape(1, -1) * U[:, b].reshape(3,1) W = np.round(V).astype(int) XYZ_W = np.clip(self.XYZ + W, self.XYZ_min, self.XYZ_max) I = self.XYZ_vol[XYZ_W[0], XYZ_W[1], XYZ_W[2]] return U, V, W, I class gaussian_random_field(object): def __init__(self, XYZ, sigma, n=1): self.XYZ = XYZ self.sigma = sigma if np.isscalar(sigma): self.sigma = sigma * (XYZ.max(axis=1) > 1) self.n = n self.XYZ_vol = np.zeros(XYZ.max(axis=1) + 2, int) - 1 p = XYZ.shape[1] self.XYZ_vol[list(XYZ)] = np.arange(p) mask_vol = np.zeros(XYZ.max(axis=1) + 1, int) mask_vol[list(XYZ)] += 1 mask_vol = binary_erosion(mask_vol.squeeze(), iterations=int(round(1.5*self.sigma.max()))) mask_vol = mask_vol.reshape(XYZ.max(axis=1) + 1).astype(int) XYZ_mask = np.array(np.where(mask_vol > 0)) self.mask = self.XYZ_vol[XYZ_mask[0], XYZ_mask[1], XYZ_mask[2]] q = len(self.mask) dX, dY, dZ = XYZ.max(axis=1) + 1 self.U_vol = np.zeros((3, dX, dY, dZ), float) self.U_vol[:, XYZ_mask[0], XYZ_mask[1], XYZ_mask[2]] += 1 self.U_vol = square_gaussian_filter(self.U_vol, [0, self.sigma[0], self.sigma[1], self.sigma[2]], mode='constant') self.norm_coeff = 1 / np.sqrt(self.U_vol.max()) self.U = np.zeros((3, n, q), float) self.V = np.zeros((3, n, p), float) self.W = np.zeros((3, n, p), int) self.I = np.arange(p).reshape(1, p) * np.ones((n, 1), int) self.XYZ_min = self.XYZ.min(axis=1).reshape(3, 1) - 1 self.XYZ_max = self.XYZ.max(axis=1).reshape(3, 1) + 1 def sample(self, i, std): mask = self.mask q = len(mask) XYZ = self.XYZ sigma = self.sigma Wc = self.W[:, i] valid = False if np.isscalar(std): std = std * np.ones((3,1)) while not valid: U = np.random.randn(3, q) * std self.U_vol *= 0 self.U_vol[:, XYZ[0, mask], XYZ[1, mask], XYZ[2, mask]] = U self.U_vol = gaussian_filter(self.U_vol, [0, sigma[0], sigma[1], sigma[2]], mode='constant') V = self.U_vol[:, XYZ[0], XYZ[1], XYZ[2]] * self.norm_coeff W = np.round(V).astype(int) L = np.where((W == Wc).prod(axis=0) == 0)[0] XYZ_W = np.clip(XYZ[:, L] + W[:, L], self.XYZ_min, self.XYZ_max) I = self.XYZ_vol[XYZ_W[0], XYZ_W[1], XYZ_W[2]] if len(L) == 0: valid = True elif min(I) > -1: valid = True #self.U[:, i], self.V[:, i], self.W[:, i, L], self.I[i, L] = U, V, W[:, L], I return U, V, L, W[:, L], I nipy-0.3.0/nipy/labs/group/glm_twolevel.c000066400000000000000000010636331210344137400204040ustar00rootroot00000000000000/* Generated by Cython 0.17.4 on Sat Jan 12 17:27:36 2013 */ #define PY_SSIZE_T_CLEAN #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02040000 #error Cython requires Python 2.4+. 
#else #include /* For offsetof */ #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #endif #if PY_VERSION_HEX < 0x02050000 typedef int Py_ssize_t; #define PY_SSIZE_T_MAX INT_MAX #define PY_SSIZE_T_MIN INT_MIN #define PY_FORMAT_SIZE_T "" #define CYTHON_FORMAT_SSIZE_T "" #define PyInt_FromSsize_t(z) PyInt_FromLong(z) #define PyInt_AsSsize_t(o) __Pyx_PyInt_AsInt(o) #define PyNumber_Index(o) ((PyNumber_Check(o) && !PyFloat_Check(o)) ? PyNumber_Int(o) : \ (PyErr_Format(PyExc_TypeError, \ "expected index value, got %.200s", Py_TYPE(o)->tp_name), \ (PyObject*)0)) #define __Pyx_PyIndex_Check(o) (PyNumber_Check(o) && !PyFloat_Check(o) && \ !PyComplex_Check(o)) #define PyIndex_Check __Pyx_PyIndex_Check #define PyErr_WarnEx(category, message, stacklevel) PyErr_Warn(category, message) #define __PYX_BUILD_PY_SSIZE_T "i" #else #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #define __Pyx_PyIndex_Check PyIndex_Check #endif #if PY_VERSION_HEX < 0x02060000 #define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt) #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type) #define Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size) #define PyVarObject_HEAD_INIT(type, size) \ PyObject_HEAD_INIT(type) size, #define PyType_Modified(t) typedef struct { void *buf; PyObject *obj; Py_ssize_t len; Py_ssize_t itemsize; int readonly; int ndim; char *format; Py_ssize_t *shape; Py_ssize_t *strides; Py_ssize_t *suboffsets; void *internal; } Py_buffer; #define PyBUF_SIMPLE 0 #define PyBUF_WRITABLE 0x0001 #define PyBUF_FORMAT 0x0004 #define PyBUF_ND 0x0008 #define PyBUF_STRIDES (0x0010 | PyBUF_ND) #define PyBUF_C_CONTIGUOUS (0x0020 | PyBUF_STRIDES) #define PyBUF_F_CONTIGUOUS (0x0040 | PyBUF_STRIDES) #define PyBUF_ANY_CONTIGUOUS (0x0080 | PyBUF_STRIDES) #define PyBUF_INDIRECT (0x0100 | PyBUF_STRIDES) #define PyBUF_RECORDS (PyBUF_STRIDES | PyBUF_FORMAT | PyBUF_WRITABLE) #define PyBUF_FULL (PyBUF_INDIRECT | PyBUF_FORMAT | PyBUF_WRITABLE) typedef int (*getbufferproc)(PyObject *, Py_buffer *, int); typedef void (*releasebufferproc)(PyObject *, Py_buffer *); #endif #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \ PyCode_New(a, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #endif #if PY_MAJOR_VERSION < 3 && PY_MINOR_VERSION < 6 #define PyUnicode_FromString(s) PyUnicode_Decode(s, strlen(s), "UTF-8", "strict") #endif #if PY_MAJOR_VERSION >= 3 #define Py_TPFLAGS_CHECKTYPES 0 #define Py_TPFLAGS_HAVE_INDEX 0 #endif #if (PY_VERSION_HEX < 0x02060000) || (PY_MAJOR_VERSION >= 3) #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 
#define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ? \ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #else #define CYTHON_PEP393_ENABLED 0 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_READ(k, d, i) ((k=k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #endif #if PY_VERSION_HEX < 0x02060000 #define PyBytesObject PyStringObject #define PyBytes_Type PyString_Type #define PyBytes_Check PyString_Check #define PyBytes_CheckExact PyString_CheckExact #define PyBytes_FromString PyString_FromString #define PyBytes_FromStringAndSize PyString_FromStringAndSize #define PyBytes_FromFormat PyString_FromFormat #define PyBytes_DecodeEscape PyString_DecodeEscape #define PyBytes_AsString PyString_AsString #define PyBytes_AsStringAndSize PyString_AsStringAndSize #define PyBytes_Size PyString_Size #define PyBytes_AS_STRING PyString_AS_STRING #define PyBytes_GET_SIZE PyString_GET_SIZE #define PyBytes_Repr PyString_Repr #define PyBytes_Concat PyString_Concat #define PyBytes_ConcatAndDel PyString_ConcatAndDel #endif #if PY_VERSION_HEX < 0x02060000 #define PySet_Check(obj) PyObject_TypeCheck(obj, &PySet_Type) #define PyFrozenSet_Check(obj) PyObject_TypeCheck(obj, &PyFrozenSet_Type) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_VERSION_HEX < 0x03020000 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if (PY_MAJOR_VERSION < 3) || (PY_VERSION_HEX >= 0x03010300) #define __Pyx_PySequence_GetSlice(obj, a, b) PySequence_GetSlice(obj, a, b) #define __Pyx_PySequence_SetSlice(obj, a, b, value) PySequence_SetSlice(obj, a, b, value) #define __Pyx_PySequence_DelSlice(obj, a, b) PySequence_DelSlice(obj, a, b) #else #define __Pyx_PySequence_GetSlice(obj, a, b) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), (PyObject*)0) : \ (likely((obj)->ob_type->tp_as_mapping) ? 
(PySequence_GetSlice(obj, a, b)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object is unsliceable", (obj)->ob_type->tp_name), (PyObject*)0))) #define __Pyx_PySequence_SetSlice(obj, a, b, value) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_SetSlice(obj, a, b, value)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice assignment", (obj)->ob_type->tp_name), -1))) #define __Pyx_PySequence_DelSlice(obj, a, b) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_DelSlice(obj, a, b)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice deletion", (obj)->ob_type->tp_name), -1))) #endif #if PY_MAJOR_VERSION >= 3 #define PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func)) #endif #if PY_VERSION_HEX < 0x02050000 #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),((char *)(n))) #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),((char *)(n)),(a)) #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),((char *)(n))) #else #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),(n)) #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),(n),(a)) #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),(n)) #endif #if PY_VERSION_HEX < 0x02050000 #define __Pyx_NAMESTR(n) ((char *)(n)) #define __Pyx_DOCSTR(n) ((char *)(n)) #else #define __Pyx_NAMESTR(n) (n) #define __Pyx_DOCSTR(n) (n) #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include #define __PYX_HAVE__nipy__labs__group__glm_twolevel #define __PYX_HAVE_API__nipy__labs__group__glm_twolevel #include "stdio.h" #include "stdlib.h" #include "numpy/arrayobject.h" #include "numpy/ufuncobject.h" #include "fff_base.h" #include "fff_vector.h" #include "fff_matrix.h" #include "fff_array.h" #include "fffpy.h" #include "fff_glm_twolevel.h" #ifdef _OPENMP #include #endif /* _OPENMP */ #ifdef PYREX_WITHOUT_ASSERTIONS #define CYTHON_WITHOUT_ASSERTIONS #endif /* inline attribute */ #ifndef CYTHON_INLINE #if defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif /* unused attribute */ #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif typedef struct {PyObject **p; char *s; const long n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; /*proto*/ /* Type Conversion Predeclarations */ #define 
#define __Pyx_PyBytes_FromUString(s) PyBytes_FromString((char*)s) #define __Pyx_PyBytes_AsUString(s) ((unsigned char*) PyBytes_AsString(s)) #define __Pyx_Owned_Py_None(b) (Py_INCREF(Py_None), Py_None) #define __Pyx_PyBool_FromLong(b) ((b) ? (Py_INCREF(Py_True), Py_True) : (Py_INCREF(Py_False), Py_False)) static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x); static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject*); #if CYTHON_COMPILING_IN_CPYTHON #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #ifdef __GNUC__ /* Test for GCC > 2.95 */ #if __GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* __GNUC__ > 2 ... */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ > 2 ... */ #else /* __GNUC__ */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static PyObject *__pyx_m; static PyObject *__pyx_b; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= __FILE__; static const char *__pyx_filename; #if !defined(CYTHON_CCOMPLEX) #if defined(__cplusplus) #define CYTHON_CCOMPLEX 1 #elif defined(_Complex_I) #define CYTHON_CCOMPLEX 1 #else #define CYTHON_CCOMPLEX 0 #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus #include <complex> #else #include <complex.h> #endif #endif #if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__) #undef _Complex_I #define _Complex_I 1.0fj #endif static const char *__pyx_f[] = { "glm_twolevel.pyx", "numpy.pxd", "type.pxd", }; /* "numpy.pxd":723 * # in Cython to enable them only on the right systems.
* * ctypedef npy_int8 int8_t # <<<<<<<<<<<<<< * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t */ typedef npy_int8 __pyx_t_5numpy_int8_t; /* "numpy.pxd":724 * * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t # <<<<<<<<<<<<<< * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t */ typedef npy_int16 __pyx_t_5numpy_int16_t; /* "numpy.pxd":725 * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t # <<<<<<<<<<<<<< * ctypedef npy_int64 int64_t * #ctypedef npy_int96 int96_t */ typedef npy_int32 __pyx_t_5numpy_int32_t; /* "numpy.pxd":726 * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t # <<<<<<<<<<<<<< * #ctypedef npy_int96 int96_t * #ctypedef npy_int128 int128_t */ typedef npy_int64 __pyx_t_5numpy_int64_t; /* "numpy.pxd":730 * #ctypedef npy_int128 int128_t * * ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<< * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t */ typedef npy_uint8 __pyx_t_5numpy_uint8_t; /* "numpy.pxd":731 * * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<< * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t */ typedef npy_uint16 __pyx_t_5numpy_uint16_t; /* "numpy.pxd":732 * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<< * ctypedef npy_uint64 uint64_t * #ctypedef npy_uint96 uint96_t */ typedef npy_uint32 __pyx_t_5numpy_uint32_t; /* "numpy.pxd":733 * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<< * #ctypedef npy_uint96 uint96_t * #ctypedef npy_uint128 uint128_t */ typedef npy_uint64 __pyx_t_5numpy_uint64_t; /* "numpy.pxd":737 * #ctypedef npy_uint128 uint128_t * * ctypedef npy_float32 float32_t # <<<<<<<<<<<<<< * ctypedef npy_float64 float64_t * #ctypedef npy_float80 float80_t */ typedef npy_float32 __pyx_t_5numpy_float32_t; /* "numpy.pxd":738 * * ctypedef npy_float32 float32_t * ctypedef npy_float64 float64_t # <<<<<<<<<<<<<< * #ctypedef npy_float80 float80_t * #ctypedef npy_float128 float128_t */ typedef npy_float64 __pyx_t_5numpy_float64_t; /* "numpy.pxd":747 * # The int types are mapped a bit surprising -- * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t # <<<<<<<<<<<<<< * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t */ typedef npy_long __pyx_t_5numpy_int_t; /* "numpy.pxd":748 * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t * ctypedef npy_longlong long_t # <<<<<<<<<<<<<< * ctypedef npy_longlong longlong_t * */ typedef npy_longlong __pyx_t_5numpy_long_t; /* "numpy.pxd":749 * ctypedef npy_long int_t * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<< * * ctypedef npy_ulong uint_t */ typedef npy_longlong __pyx_t_5numpy_longlong_t; /* "numpy.pxd":751 * ctypedef npy_longlong longlong_t * * ctypedef npy_ulong uint_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t */ typedef npy_ulong __pyx_t_5numpy_uint_t; /* "numpy.pxd":752 * * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulonglong_t * */ typedef npy_ulonglong __pyx_t_5numpy_ulong_t; /* "numpy.pxd":753 * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<< * * ctypedef npy_intp intp_t */ typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t; /* "numpy.pxd":755 * ctypedef npy_ulonglong ulonglong_t * * ctypedef npy_intp intp_t # 
<<<<<<<<<<<<<< * ctypedef npy_uintp uintp_t * */ typedef npy_intp __pyx_t_5numpy_intp_t; /* "numpy.pxd":756 * * ctypedef npy_intp intp_t * ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<< * * ctypedef npy_double float_t */ typedef npy_uintp __pyx_t_5numpy_uintp_t; /* "numpy.pxd":758 * ctypedef npy_uintp uintp_t * * ctypedef npy_double float_t # <<<<<<<<<<<<<< * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t */ typedef npy_double __pyx_t_5numpy_float_t; /* "numpy.pxd":759 * * ctypedef npy_double float_t * ctypedef npy_double double_t # <<<<<<<<<<<<<< * ctypedef npy_longdouble longdouble_t * */ typedef npy_double __pyx_t_5numpy_double_t; /* "numpy.pxd":760 * ctypedef npy_double float_t * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cfloat cfloat_t */ typedef npy_longdouble __pyx_t_5numpy_longdouble_t; /* "fff.pxd":9 * * # Redefine size_t * ctypedef unsigned long int size_t # <<<<<<<<<<<<<< * * */ typedef unsigned long __pyx_t_3fff_size_t; #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< float > __pyx_t_float_complex; #else typedef float _Complex __pyx_t_float_complex; #endif #else typedef struct { float real, imag; } __pyx_t_float_complex; #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< double > __pyx_t_double_complex; #else typedef double _Complex __pyx_t_double_complex; #endif #else typedef struct { double real, imag; } __pyx_t_double_complex; #endif /*--- Type declarations ---*/ /* "numpy.pxd":762 * ctypedef npy_longdouble longdouble_t * * ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<< * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t */ typedef npy_cfloat __pyx_t_5numpy_cfloat_t; /* "numpy.pxd":763 * * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<< * ctypedef npy_clongdouble clongdouble_t * */ typedef npy_cdouble __pyx_t_5numpy_cdouble_t; /* "numpy.pxd":764 * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cdouble complex_t */ typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t; /* "numpy.pxd":766 * ctypedef npy_clongdouble clongdouble_t * * ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew1(a): */ typedef npy_cdouble __pyx_t_5numpy_complex_t; #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); /*proto*/ #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define __Pyx_RefNannySetupContext(name, acquire_gil) \ if (acquire_gil) { \ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \ PyGILState_Release(__pyx_gilstate_save); \ } else { \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil) \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext() \ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define 
__Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif /* CYTHON_REFNANNY */ #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name); /*proto*/ static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /*proto*/ static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /*proto*/ static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[], \ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, \ const char* function_name); /*proto*/ static int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, const char *name, int exact); /*proto*/ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /*proto*/ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); if (likely(L->allocated > len)) { Py_INCREF(x); PyList_SET_ITEM(list, len, x); Py_SIZE(list) = len+1; return 0; } return PyList_Append(list, x); } #else #define __Pyx_PyList_Append(L,x) PyList_Append(L,x) #endif #define __Pyx_SetItemInt(o, i, v, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? \ __Pyx_SetItemInt_Fast(o, i, v) : \ __Pyx_SetItemInt_Generic(o, to_py_func(i), v)) static CYTHON_INLINE int __Pyx_SetItemInt_Generic(PyObject *o, PyObject *j, PyObject *v) { int r; if (!j) return -1; r = PyObject_SetItem(o, j, v); Py_DECREF(j); return r; } static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObject *v) { #if CYTHON_COMPILING_IN_CPYTHON if (PyList_CheckExact(o)) { Py_ssize_t n = (likely(i >= 0)) ? 
i : i + PyList_GET_SIZE(o); if (likely((n >= 0) & (n < PyList_GET_SIZE(o)))) { PyObject* old = PyList_GET_ITEM(o, n); Py_INCREF(v); PyList_SET_ITEM(o, n, v); Py_DECREF(old); return 1; } } else { /* inlined PySequence_SetItem() */ PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; if (likely(m && m->sq_ass_item)) { if (unlikely(i < 0) && likely(m->sq_length)) { Py_ssize_t l = m->sq_length(o); if (unlikely(l < 0)) return -1; i += l; } return m->sq_ass_item(o, i, v); } } #else #if CYTHON_COMPILING_IN_PYPY if (PySequence_Check(o) && !PyDict_Check(o)) { #else if (PySequence_Check(o)) { #endif return PySequence_SetItem(o, i, v); } #endif return __Pyx_SetItemInt_Generic(o, PyInt_FromSsize_t(i), v); } static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); static CYTHON_INLINE int __Pyx_IterFinish(void); /*proto*/ static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected); /*proto*/ static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb); /*proto*/ static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb); /*proto*/ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /*proto*/ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level); /*proto*/ static CYTHON_INLINE PyObject *__Pyx_PyInt_to_py_Py_intptr_t(Py_intptr_t); #if CYTHON_CCOMPLEX #ifdef __cplusplus #define __Pyx_CREAL(z) ((z).real()) #define __Pyx_CIMAG(z) ((z).imag()) #else #define __Pyx_CREAL(z) (__real__(z)) #define __Pyx_CIMAG(z) (__imag__(z)) #endif #else #define __Pyx_CREAL(z) ((z).real) #define __Pyx_CIMAG(z) ((z).imag) #endif #if defined(_WIN32) && defined(__cplusplus) && CYTHON_CCOMPLEX #define __Pyx_SET_CREAL(z,x) ((z).real(x)) #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) #else #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) #endif static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float); #if CYTHON_CCOMPLEX #define __Pyx_c_eqf(a, b) ((a)==(b)) #define __Pyx_c_sumf(a, b) ((a)+(b)) #define __Pyx_c_difff(a, b) ((a)-(b)) #define __Pyx_c_prodf(a, b) ((a)*(b)) #define __Pyx_c_quotf(a, b) ((a)/(b)) #define __Pyx_c_negf(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zerof(z) ((z)==(float)0) #define __Pyx_c_conjf(z) (::std::conj(z)) #if 1 #define __Pyx_c_absf(z) (::std::abs(z)) #define __Pyx_c_powf(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zerof(z) ((z)==0) #define __Pyx_c_conjf(z) (conjf(z)) #if 1 #define __Pyx_c_absf(z) (cabsf(z)) #define __Pyx_c_powf(a, b) (cpowf(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex); static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex); #if 1 static CYTHON_INLINE float 
__Pyx_c_absf(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex, __pyx_t_float_complex); #endif #endif static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double); #if CYTHON_CCOMPLEX #define __Pyx_c_eq(a, b) ((a)==(b)) #define __Pyx_c_sum(a, b) ((a)+(b)) #define __Pyx_c_diff(a, b) ((a)-(b)) #define __Pyx_c_prod(a, b) ((a)*(b)) #define __Pyx_c_quot(a, b) ((a)/(b)) #define __Pyx_c_neg(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zero(z) ((z)==(double)0) #define __Pyx_c_conj(z) (::std::conj(z)) #if 1 #define __Pyx_c_abs(z) (::std::abs(z)) #define __Pyx_c_pow(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zero(z) ((z)==0) #define __Pyx_c_conj(z) (conj(z)) #if 1 #define __Pyx_c_abs(z) (cabs(z)) #define __Pyx_c_pow(a, b) (cpow(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex); static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex); #if 1 static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex, __pyx_t_double_complex); #endif #endif static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject *); static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject *); static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject *); static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject *); static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject *); static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject *); static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject *); static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject *); static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject *); static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject *); static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject *); static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject *); static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject *); static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject *); static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject *); static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject *); static int __Pyx_check_binary_version(void); #if !defined(__Pyx_PyIdentifier_FromString) #if PY_MAJOR_VERSION < 3 #define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s) #else #define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s) #endif #endif static PyObject *__Pyx_ImportModule(const char *name); /*proto*/ static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict); /*proto*/ typedef struct { int code_line; PyCodeObject* code_object; } __Pyx_CodeObjectCacheEntry; struct __Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct 
__Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); /*proto*/ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/ /* Module declarations from 'cpython.buffer' */ /* Module declarations from 'cpython.ref' */ /* Module declarations from 'libc.stdio' */ /* Module declarations from 'cpython.object' */ /* Module declarations from '__builtin__' */ /* Module declarations from 'cpython.type' */ static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0; /* Module declarations from 'libc.stdlib' */ /* Module declarations from 'numpy' */ /* Module declarations from 'numpy' */ static PyTypeObject *__pyx_ptype_5numpy_dtype = 0; static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0; static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0; static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0; static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0; static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/ /* Module declarations from 'fff' */ /* Module declarations from 'nipy.labs.group.glm_twolevel' */ #define __Pyx_MODULE_NAME "nipy.labs.group.glm_twolevel" int __pyx_module_is_main_nipy__labs__group__glm_twolevel = 0; /* Implementation of 'nipy.labs.group.glm_twolevel' */ static PyObject *__pyx_builtin_range; static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_RuntimeError; static PyObject *__pyx_pf_4nipy_4labs_5group_12glm_twolevel_em(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_Y, PyArrayObject *__pyx_v_VY, PyArrayObject *__pyx_v_X, PyArrayObject *__pyx_v_C, int __pyx_v_axis, int __pyx_v_niter); /* proto */ static PyObject *__pyx_pf_4nipy_4labs_5group_12glm_twolevel_2log_likelihood(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_Y, PyObject *__pyx_v_VY, PyObject *__pyx_v_X, PyObject *__pyx_v_B, PyObject *__pyx_v_S2, int __pyx_v_axis); /* proto */ static PyObject *__pyx_pf_4nipy_4labs_5group_12glm_twolevel_4log_likelihood_ratio(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_Y, PyObject *__pyx_v_VY, PyObject *__pyx_v_X, PyObject *__pyx_v_C, int __pyx_v_axis, int __pyx_v_niter); /* proto */ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */ static char __pyx_k_3[] = "ndarray is not C contiguous"; static char __pyx_k_5[] = "ndarray is not Fortran contiguous"; static char __pyx_k_7[] = "Non-native byte order not supported"; static char __pyx_k_9[] = "unknown dtype code in numpy.pxd (%d)"; static char __pyx_k_10[] = "Format string allocated too short, see comment in numpy.pxd"; static char __pyx_k_13[] = "Format string allocated too short."; static char __pyx_k_15[] = "\nTwo-level general linear model for group analyses.\n\nAuthor: Alexis Roche, 2008.\n"; static char __pyx_k_16[] = "0.1"; static char __pyx_k_19[] = "/Users/mb312/dev_trees/nipy/nipy/labs/group/glm_twolevel.pyx"; static char __pyx_k_20[] = "nipy.labs.group.glm_twolevel"; static char __pyx_k_25[] = "log_likelihood_ratio"; static char __pyx_k__A[] = "A"; static char __pyx_k__B[] = "B"; static char 
__pyx_k__C[] = "C"; static char __pyx_k__H[] = "H"; static char __pyx_k__I[] = "I"; static char __pyx_k__L[] = "L"; static char __pyx_k__O[] = "O"; static char __pyx_k__P[] = "P"; static char __pyx_k__Q[] = "Q"; static char __pyx_k__X[] = "X"; static char __pyx_k__Y[] = "Y"; static char __pyx_k__b[] = "b"; static char __pyx_k__d[] = "d"; static char __pyx_k__f[] = "f"; static char __pyx_k__g[] = "g"; static char __pyx_k__h[] = "h"; static char __pyx_k__i[] = "i"; static char __pyx_k__l[] = "l"; static char __pyx_k__n[] = "n"; static char __pyx_k__p[] = "p"; static char __pyx_k__q[] = "q"; static char __pyx_k__x[] = "x"; static char __pyx_k__y[] = "y"; static char __pyx_k__LL[] = "LL"; static char __pyx_k__S2[] = "S2"; static char __pyx_k__VY[] = "VY"; static char __pyx_k__Zd[] = "Zd"; static char __pyx_k__Zf[] = "Zf"; static char __pyx_k__Zg[] = "Zg"; static char __pyx_k__em[] = "em"; static char __pyx_k__ll[] = "ll"; static char __pyx_k__np[] = "np"; static char __pyx_k__s2[] = "s2"; static char __pyx_k__vy[] = "vy"; static char __pyx_k__PpX[] = "PpX"; static char __pyx_k__dot[] = "dot"; static char __pyx_k__eye[] = "eye"; static char __pyx_k__inv[] = "inv"; static char __pyx_k__lda[] = "lda"; static char __pyx_k__ll0[] = "ll0"; static char __pyx_k__ppx[] = "ppx"; static char __pyx_k__tmp[] = "tmp"; static char __pyx_k__axis[] = "axis"; static char __pyx_k__dims[] = "dims"; static char __pyx_k__ndim[] = "ndim"; static char __pyx_k__pinv[] = "pinv"; static char __pyx_k__multi[] = "multi"; static char __pyx_k__niter[] = "niter"; static char __pyx_k__numpy[] = "numpy"; static char __pyx_k__range[] = "range"; static char __pyx_k__shape[] = "shape"; static char __pyx_k__zeros[] = "zeros"; static char __pyx_k__linalg[] = "linalg"; static char __pyx_k__maximum[] = "maximum"; static char __pyx_k____main__[] = "__main__"; static char __pyx_k____test__[] = "__test__"; static char __pyx_k__DEF_NITER[] = "DEF_NITER"; static char __pyx_k__transpose[] = "transpose"; static char __pyx_k__ValueError[] = "ValueError"; static char __pyx_k____version__[] = "__version__"; static char __pyx_k__RuntimeError[] = "RuntimeError"; static char __pyx_k__log_likelihood[] = "log_likelihood"; static PyObject *__pyx_kp_u_10; static PyObject *__pyx_kp_u_13; static PyObject *__pyx_kp_s_16; static PyObject *__pyx_kp_s_19; static PyObject *__pyx_n_s_20; static PyObject *__pyx_n_s_25; static PyObject *__pyx_kp_u_3; static PyObject *__pyx_kp_u_5; static PyObject *__pyx_kp_u_7; static PyObject *__pyx_kp_u_9; static PyObject *__pyx_n_s__A; static PyObject *__pyx_n_s__B; static PyObject *__pyx_n_s__C; static PyObject *__pyx_n_s__DEF_NITER; static PyObject *__pyx_n_s__LL; static PyObject *__pyx_n_s__P; static PyObject *__pyx_n_s__PpX; static PyObject *__pyx_n_s__RuntimeError; static PyObject *__pyx_n_s__S2; static PyObject *__pyx_n_s__VY; static PyObject *__pyx_n_s__ValueError; static PyObject *__pyx_n_s__X; static PyObject *__pyx_n_s__Y; static PyObject *__pyx_n_s____main__; static PyObject *__pyx_n_s____test__; static PyObject *__pyx_n_s____version__; static PyObject *__pyx_n_s__axis; static PyObject *__pyx_n_s__b; static PyObject *__pyx_n_s__dims; static PyObject *__pyx_n_s__dot; static PyObject *__pyx_n_s__em; static PyObject *__pyx_n_s__eye; static PyObject *__pyx_n_s__i; static PyObject *__pyx_n_s__inv; static PyObject *__pyx_n_s__lda; static PyObject *__pyx_n_s__linalg; static PyObject *__pyx_n_s__ll; static PyObject *__pyx_n_s__ll0; static PyObject *__pyx_n_s__log_likelihood; static PyObject *__pyx_n_s__maximum; static 
PyObject *__pyx_n_s__multi; static PyObject *__pyx_n_s__n; static PyObject *__pyx_n_s__ndim; static PyObject *__pyx_n_s__niter; static PyObject *__pyx_n_s__np; static PyObject *__pyx_n_s__numpy; static PyObject *__pyx_n_s__p; static PyObject *__pyx_n_s__pinv; static PyObject *__pyx_n_s__ppx; static PyObject *__pyx_n_s__range; static PyObject *__pyx_n_s__s2; static PyObject *__pyx_n_s__shape; static PyObject *__pyx_n_s__tmp; static PyObject *__pyx_n_s__transpose; static PyObject *__pyx_n_s__vy; static PyObject *__pyx_n_s__x; static PyObject *__pyx_n_s__y; static PyObject *__pyx_n_s__zeros; static PyObject *__pyx_int_1; static PyObject *__pyx_int_2; static PyObject *__pyx_int_15; static int __pyx_k_1; static int __pyx_k_2; static PyObject *__pyx_k_tuple_4; static PyObject *__pyx_k_tuple_6; static PyObject *__pyx_k_tuple_8; static PyObject *__pyx_k_tuple_11; static PyObject *__pyx_k_tuple_12; static PyObject *__pyx_k_tuple_14; static PyObject *__pyx_k_tuple_17; static PyObject *__pyx_k_tuple_21; static PyObject *__pyx_k_tuple_23; static PyObject *__pyx_k_codeobj_18; static PyObject *__pyx_k_codeobj_22; static PyObject *__pyx_k_codeobj_24; /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_5group_12glm_twolevel_1em(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_4labs_5group_12glm_twolevel_em[] = "\n b, s2 = em(y, vy, X, C=None, axis=0, niter=DEF_NITER).\n\n Maximum likelihood regression in a mixed-effect GLM using the\n EM algorithm.\n\n C is the contrast matrix. Conventionally, C is p x q where p\n is the number of regressors. \n \n OUTPUT: beta, s2\n beta -- array of parameter estimates\n s2 -- array of squared scale parameters.\n \n REFERENCE:\n Keller and Roche, ISBI 2008.\n "; static PyMethodDef __pyx_mdef_4nipy_4labs_5group_12glm_twolevel_1em = {__Pyx_NAMESTR("em"), (PyCFunction)__pyx_pw_4nipy_4labs_5group_12glm_twolevel_1em, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4nipy_4labs_5group_12glm_twolevel_em)}; static PyObject *__pyx_pw_4nipy_4labs_5group_12glm_twolevel_1em(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_Y = 0; PyArrayObject *__pyx_v_VY = 0; PyArrayObject *__pyx_v_X = 0; PyArrayObject *__pyx_v_C = 0; int __pyx_v_axis; int __pyx_v_niter; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("em (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__Y,&__pyx_n_s__VY,&__pyx_n_s__X,&__pyx_n_s__C,&__pyx_n_s__axis,&__pyx_n_s__niter,0}; PyObject* values[6] = {0,0,0,0,0,0}; /* "nipy/labs/group/glm_twolevel.pyx":46 * DEF_NITER = 2 * * def em(ndarray Y, ndarray VY, ndarray X, ndarray C=None, int axis=0, int niter=DEF_NITER): # <<<<<<<<<<<<<< * """ * b, s2 = em(y, vy, X, C=None, axis=0, niter=DEF_NITER). 
*/ values[3] = (PyObject *)((PyArrayObject *)Py_None); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__Y)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__VY)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("em", 0, 3, 6, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 46; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__X)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("em", 0, 3, 6, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 46; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__C); if (value) { values[3] = value; kw_args--; } } case 4: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__axis); if (value) { values[4] = value; kw_args--; } } case 5: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__niter); if (value) { values[5] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "em") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 46; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_Y = ((PyArrayObject *)values[0]); __pyx_v_VY = ((PyArrayObject *)values[1]); __pyx_v_X = ((PyArrayObject *)values[2]); __pyx_v_C = ((PyArrayObject *)values[3]); if (values[4]) { __pyx_v_axis = __Pyx_PyInt_AsInt(values[4]); if (unlikely((__pyx_v_axis == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 46; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else { __pyx_v_axis = ((int)0); } if (values[5]) { __pyx_v_niter = __Pyx_PyInt_AsInt(values[5]); if (unlikely((__pyx_v_niter == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 46; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else { __pyx_v_niter = __pyx_k_1; } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("em", 0, 3, 6, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 46; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.labs.group.glm_twolevel.em", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_Y), __pyx_ptype_5numpy_ndarray, 1, "Y", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 46; __pyx_clineno = 
__LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_VY), __pyx_ptype_5numpy_ndarray, 1, "VY", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 46; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_X), __pyx_ptype_5numpy_ndarray, 1, "X", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 46; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_C), __pyx_ptype_5numpy_ndarray, 1, "C", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 46; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_4nipy_4labs_5group_12glm_twolevel_em(__pyx_self, __pyx_v_Y, __pyx_v_VY, __pyx_v_X, __pyx_v_C, __pyx_v_axis, __pyx_v_niter); goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4nipy_4labs_5group_12glm_twolevel_em(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_Y, PyArrayObject *__pyx_v_VY, PyArrayObject *__pyx_v_X, PyArrayObject *__pyx_v_C, int __pyx_v_axis, int __pyx_v_niter) { size_t __pyx_v_n; size_t __pyx_v_p; fff_vector *__pyx_v_y; fff_vector *__pyx_v_vy; fff_vector *__pyx_v_b; fff_vector *__pyx_v_s2; fff_matrix *__pyx_v_x; fff_matrix *__pyx_v_ppx; fff_glm_twolevel_EM *__pyx_v_em; fffpy_multi_iterator *__pyx_v_multi; PyObject *__pyx_v_PpX = NULL; PyObject *__pyx_v_A = NULL; PyObject *__pyx_v_B = NULL; PyObject *__pyx_v_P = NULL; PyObject *__pyx_v_dims = NULL; PyObject *__pyx_v_S2 = NULL; int __pyx_v_i; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations size_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; int __pyx_t_9; int __pyx_t_10; double __pyx_t_11; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("em", 0); /* "nipy/labs/group/glm_twolevel.pyx":70 * * # View on design matrix * x = fff_matrix_fromPyArray(X) # <<<<<<<<<<<<<< * * # Number of observations / regressors */ __pyx_v_x = fff_matrix_fromPyArray(__pyx_v_X); /* "nipy/labs/group/glm_twolevel.pyx":73 * * # Number of observations / regressors * n = x.size1 # <<<<<<<<<<<<<< * p = x.size2 * */ __pyx_t_1 = __pyx_v_x->size1; __pyx_v_n = __pyx_t_1; /* "nipy/labs/group/glm_twolevel.pyx":74 * # Number of observations / regressors * n = x.size1 * p = x.size2 # <<<<<<<<<<<<<< * * # Compute the projected pseudo-inverse matrix */ __pyx_t_1 = __pyx_v_x->size2; __pyx_v_p = __pyx_t_1; /* "nipy/labs/group/glm_twolevel.pyx":77 * * # Compute the projected pseudo-inverse matrix * if C == None: # <<<<<<<<<<<<<< * PpX = np.linalg.pinv(X) * else: */ __pyx_t_2 = PyObject_RichCompare(((PyObject *)__pyx_v_C), Py_None, Py_EQ); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 77; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_3 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 77; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (__pyx_t_3) { /* "nipy/labs/group/glm_twolevel.pyx":78 * # Compute the projected pseudo-inverse matrix * if C == None: * PpX = np.linalg.pinv(X) # <<<<<<<<<<<<<< * else: * A = np.linalg.inv(np.dot(X.transpose(), X)) # (p,p) */ __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; 
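/*
 * Editor's note -- not part of the Cython output. The wrapper and argument
 * checks above expose the Python-level entry point documented in
 * __pyx_doc_4nipy_4labs_5group_12glm_twolevel_em. Going by that docstring and
 * the declarations earlier in this file, the module provides em(),
 * log_likelihood() and log_likelihood_ratio(), and a typical call is roughly
 *     beta, s2 = glm_twolevel.em(Y, VY, X, C=None, axis=0, niter=2)
 * where Y presumably holds the first-level effect estimates, VY their
 * first-level variances, X is the n x p design matrix and C an optional
 * p x q contrast/constraint matrix; the array names are illustrative, and the
 * default niter comes from DEF_NITER = 2 in the quoted source.
 */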
__pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__linalg); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__pinv); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_INCREF(((PyObject *)__pyx_v_X)); PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_v_X)); __Pyx_GIVEREF(((PyObject *)__pyx_v_X)); __pyx_t_5 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; __pyx_v_PpX = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L3; } /*else*/ { /* "nipy/labs/group/glm_twolevel.pyx":80 * PpX = np.linalg.pinv(X) * else: * A = np.linalg.inv(np.dot(X.transpose(), X)) # (p,p) # <<<<<<<<<<<<<< * B = np.linalg.inv(np.dot(np.dot(C.transpose(), A), C)) # (q,q) * P = np.eye(p) - np.dot(np.dot(np.dot(A, C), B), C.transpose()) # (p,p) */ __pyx_t_5 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 80; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__linalg); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 80; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__inv); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 80; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 80; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_2 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__dot); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 80; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyObject_GetAttr(((PyObject *)__pyx_v_X), __pyx_n_s__transpose); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 80; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 80; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 80; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_6); __Pyx_GIVEREF(__pyx_t_6); __Pyx_INCREF(((PyObject *)__pyx_v_X)); PyTuple_SET_ITEM(__pyx_t_4, 1, ((PyObject *)__pyx_v_X)); __Pyx_GIVEREF(((PyObject *)__pyx_v_X)); 
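/*
 * Editor's note -- not part of the Cython output. For readers tracking the
 * quoted source (glm_twolevel.pyx lines 77-83): with A = inv(X' X),
 * B = inv(C' A C) and P = I_p - A C B C', the matrix PpX = P A X' (p x n)
 * applied to y gives the least-squares coefficient estimate subject to the
 * linear constraint C' b = 0, since the constrained minimiser of
 * ||y - X b||^2 is b = [I - A C inv(C' A C) C'] A X' y. When C is None the
 * unconstrained pseudo-inverse pinv(X) is used instead. Reading this as the
 * constrained (null-model) estimator is an editorial interpretation of the
 * code, not a statement taken from the original authors.
 */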
__pyx_t_6 = 0; __pyx_t_6 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 80; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 80; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_6); __Pyx_GIVEREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 80; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; __pyx_v_A = __pyx_t_6; __pyx_t_6 = 0; /* "nipy/labs/group/glm_twolevel.pyx":81 * else: * A = np.linalg.inv(np.dot(X.transpose(), X)) # (p,p) * B = np.linalg.inv(np.dot(np.dot(C.transpose(), A), C)) # (q,q) # <<<<<<<<<<<<<< * P = np.eye(p) - np.dot(np.dot(np.dot(A, C), B), C.transpose()) # (p,p) * PpX = np.dot(np.dot(P, A), X.transpose()) # (p,n) */ __pyx_t_6 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 81; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __pyx_t_4 = PyObject_GetAttr(__pyx_t_6, __pyx_n_s__linalg); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 81; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__inv); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 81; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 81; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__dot); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 81; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 81; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_2 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__dot); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 81; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyObject_GetAttr(((PyObject *)__pyx_v_C), __pyx_n_s__transpose); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 81; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_7 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 81; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 81; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_7); 
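/*
 * Editor's note -- not part of the Cython output. Each numpy expression in
 * the quoted source expands to the same generated pattern seen here: a
 * __Pyx_GetName lookup of the module-level name "np", PyObject_GetAttr calls
 * for the attribute chain (e.g. linalg, inv, dot), a PyTuple_New/SET_ITEM
 * block to build the argument tuple, and a PyObject_Call, each followed by
 * an error check that jumps to __pyx_L1_error. The __Pyx_GOTREF/__Pyx_GIVEREF
 * macros are no-ops unless CYTHON_REFNANNY is enabled (see their definitions
 * earlier in this file), while __Pyx_DECREF falls back to Py_DECREF.
 */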
__Pyx_GIVEREF(__pyx_t_7); __Pyx_INCREF(__pyx_v_A); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_v_A); __Pyx_GIVEREF(__pyx_v_A); __pyx_t_7 = 0; __pyx_t_7 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 81; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 81; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_7); __Pyx_GIVEREF(__pyx_t_7); __Pyx_INCREF(((PyObject *)__pyx_v_C)); PyTuple_SET_ITEM(__pyx_t_4, 1, ((PyObject *)__pyx_v_C)); __Pyx_GIVEREF(((PyObject *)__pyx_v_C)); __pyx_t_7 = 0; __pyx_t_7 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 81; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 81; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_7); __Pyx_GIVEREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = PyObject_Call(__pyx_t_6, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 81; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; __pyx_v_B = __pyx_t_7; __pyx_t_7 = 0; /* "nipy/labs/group/glm_twolevel.pyx":82 * A = np.linalg.inv(np.dot(X.transpose(), X)) # (p,p) * B = np.linalg.inv(np.dot(np.dot(C.transpose(), A), C)) # (q,q) * P = np.eye(p) - np.dot(np.dot(np.dot(A, C), B), C.transpose()) # (p,p) # <<<<<<<<<<<<<< * PpX = np.dot(np.dot(P, A), X.transpose()) # (p,n) * ppx = fff_matrix_fromPyArray(PpX) */ __pyx_t_7 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 82; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_7); __pyx_t_4 = PyObject_GetAttr(__pyx_t_7, __pyx_n_s__eye); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 82; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = __Pyx_PyInt_FromSize_t(__pyx_v_p); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 82; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_7); __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 82; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_7); __Pyx_GIVEREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_6), NULL); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 82; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_6)); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 82; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __pyx_t_4 = PyObject_GetAttr(__pyx_t_6, 
__pyx_n_s__dot); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 82; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 82; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __pyx_t_5 = PyObject_GetAttr(__pyx_t_6, __pyx_n_s__dot); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 82; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 82; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __pyx_t_2 = PyObject_GetAttr(__pyx_t_6, __pyx_n_s__dot); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 82; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 82; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __Pyx_INCREF(__pyx_v_A); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_v_A); __Pyx_GIVEREF(__pyx_v_A); __Pyx_INCREF(((PyObject *)__pyx_v_C)); PyTuple_SET_ITEM(__pyx_t_6, 1, ((PyObject *)__pyx_v_C)); __Pyx_GIVEREF(((PyObject *)__pyx_v_C)); __pyx_t_8 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_6), NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 82; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_6)); __pyx_t_6 = 0; __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 82; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_8); __Pyx_GIVEREF(__pyx_t_8); __Pyx_INCREF(__pyx_v_B); PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_v_B); __Pyx_GIVEREF(__pyx_v_B); __pyx_t_8 = 0; __pyx_t_8 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_6), NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 82; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_6)); __pyx_t_6 = 0; __pyx_t_6 = PyObject_GetAttr(((PyObject *)__pyx_v_C), __pyx_n_s__transpose); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 82; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __pyx_t_5 = PyObject_Call(__pyx_t_6, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 82; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 82; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_8); __Pyx_GIVEREF(__pyx_t_8); PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __pyx_t_8 = 0; __pyx_t_5 = 0; __pyx_t_5 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_6), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 82; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); 
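/*
 * Editor's note -- not part of the Cython output. Once P and PpX are built,
 * the remainder of em() (quoted source lines 84-109, below) converts PpX to
 * an fff_matrix, allocates the output arrays B and S2 with the `axis`
 * dimension set to p and 1 respectively, creates an fffpy multi-iterator
 * that walks Y, VY, B and S2 jointly along `axis`, and calls
 * fff_glm_twolevel_EM_init/_run on one 1-D slice at a time (typically one
 * voxel's group data). The underlying mixed-effect model is presumably
 * y ~ N(X b, diag(vy) + s2 I), i.e. known first-level variances vy plus a
 * common second-level variance s2 estimated by EM; that reading follows the
 * docstring and the Keller & Roche (ISBI 2008) reference rather than
 * anything stated explicitly in this file.
 */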
__pyx_t_4 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_6)); __pyx_t_6 = 0; __pyx_t_6 = PyNumber_Subtract(__pyx_t_7, __pyx_t_5); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 82; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_P = __pyx_t_6; __pyx_t_6 = 0; /* "nipy/labs/group/glm_twolevel.pyx":83 * B = np.linalg.inv(np.dot(np.dot(C.transpose(), A), C)) # (q,q) * P = np.eye(p) - np.dot(np.dot(np.dot(A, C), B), C.transpose()) # (p,p) * PpX = np.dot(np.dot(P, A), X.transpose()) # (p,n) # <<<<<<<<<<<<<< * ppx = fff_matrix_fromPyArray(PpX) * */ __pyx_t_6 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __pyx_t_5 = PyObject_GetAttr(__pyx_t_6, __pyx_n_s__dot); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = PyObject_GetAttr(__pyx_t_6, __pyx_n_s__dot); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __Pyx_INCREF(__pyx_v_P); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_v_P); __Pyx_GIVEREF(__pyx_v_P); __Pyx_INCREF(__pyx_v_A); PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_v_A); __Pyx_GIVEREF(__pyx_v_A); __pyx_t_4 = PyObject_Call(__pyx_t_7, ((PyObject *)__pyx_t_6), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_6)); __pyx_t_6 = 0; __pyx_t_6 = PyObject_GetAttr(((PyObject *)__pyx_v_X), __pyx_n_s__transpose); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = PyObject_Call(__pyx_t_6, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_t_7); __Pyx_GIVEREF(__pyx_t_7); __pyx_t_4 = 0; __pyx_t_7 = 0; __pyx_t_7 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_6), NULL); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_6)); __pyx_t_6 = 0; __pyx_v_PpX = __pyx_t_7; __pyx_t_7 = 0; } __pyx_L3:; /* "nipy/labs/group/glm_twolevel.pyx":84 * P = np.eye(p) - np.dot(np.dot(np.dot(A, C), B), C.transpose()) # (p,p) * 
PpX = np.dot(np.dot(P, A), X.transpose()) # (p,n) * ppx = fff_matrix_fromPyArray(PpX) # <<<<<<<<<<<<<< * * # Allocate output arrays */ if (!(likely(((__pyx_v_PpX) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_PpX, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_7 = __pyx_v_PpX; __Pyx_INCREF(__pyx_t_7); __pyx_v_ppx = fff_matrix_fromPyArray(((PyArrayObject *)__pyx_t_7)); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "nipy/labs/group/glm_twolevel.pyx":87 * * # Allocate output arrays * dims = [Y.shape[i] for i in range(Y.ndim)] # <<<<<<<<<<<<<< * dims[axis] = p * B = np.zeros(dims) */ __pyx_t_7 = PyList_New(0); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 87; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_7); __pyx_t_9 = __pyx_v_Y->nd; for (__pyx_t_10 = 0; __pyx_t_10 < __pyx_t_9; __pyx_t_10+=1) { __pyx_v_i = __pyx_t_10; __pyx_t_6 = __Pyx_PyInt_to_py_Py_intptr_t((__pyx_v_Y->dimensions[__pyx_v_i])); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 87; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); if (unlikely(__Pyx_PyList_Append(__pyx_t_7, (PyObject*)__pyx_t_6))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 87; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } __pyx_t_6 = ((PyObject *)__pyx_t_7); __Pyx_INCREF(__pyx_t_6); __Pyx_DECREF(((PyObject *)__pyx_t_7)); __pyx_t_7 = 0; __pyx_v_dims = ((PyObject*)__pyx_t_6); __pyx_t_6 = 0; /* "nipy/labs/group/glm_twolevel.pyx":88 * # Allocate output arrays * dims = [Y.shape[i] for i in range(Y.ndim)] * dims[axis] = p # <<<<<<<<<<<<<< * B = np.zeros(dims) * dims[axis] = 1 */ __pyx_t_6 = __Pyx_PyInt_FromSize_t(__pyx_v_p); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 88; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); if (__Pyx_SetItemInt(((PyObject *)__pyx_v_dims), __pyx_v_axis, __pyx_t_6, sizeof(int), PyInt_FromLong) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 88; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /* "nipy/labs/group/glm_twolevel.pyx":89 * dims = [Y.shape[i] for i in range(Y.ndim)] * dims[axis] = p * B = np.zeros(dims) # <<<<<<<<<<<<<< * dims[axis] = 1 * S2 = np.zeros(dims) */ __pyx_t_6 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 89; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = PyObject_GetAttr(__pyx_t_6, __pyx_n_s__zeros); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 89; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 89; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __Pyx_INCREF(((PyObject *)__pyx_v_dims)); PyTuple_SET_ITEM(__pyx_t_6, 0, ((PyObject *)__pyx_v_dims)); __Pyx_GIVEREF(((PyObject *)__pyx_v_dims)); __pyx_t_5 = PyObject_Call(__pyx_t_7, ((PyObject *)__pyx_t_6), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 89; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_6)); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_v_B); __pyx_v_B = __pyx_t_5; __pyx_t_5 = 0; /* 
"nipy/labs/group/glm_twolevel.pyx":90 * dims[axis] = p * B = np.zeros(dims) * dims[axis] = 1 # <<<<<<<<<<<<<< * S2 = np.zeros(dims) * */ if (__Pyx_SetItemInt(((PyObject *)__pyx_v_dims), __pyx_v_axis, __pyx_int_1, sizeof(int), PyInt_FromLong) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 90; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/group/glm_twolevel.pyx":91 * B = np.zeros(dims) * dims[axis] = 1 * S2 = np.zeros(dims) # <<<<<<<<<<<<<< * * # Local structs */ __pyx_t_5 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__zeros); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(((PyObject *)__pyx_v_dims)); PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_v_dims)); __Pyx_GIVEREF(((PyObject *)__pyx_v_dims)); __pyx_t_7 = PyObject_Call(__pyx_t_6, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; __pyx_v_S2 = __pyx_t_7; __pyx_t_7 = 0; /* "nipy/labs/group/glm_twolevel.pyx":94 * * # Local structs * em = fff_glm_twolevel_EM_new(n, p) # <<<<<<<<<<<<<< * * # Create a new array iterator */ __pyx_v_em = fff_glm_twolevel_EM_new(__pyx_v_n, __pyx_v_p); /* "nipy/labs/group/glm_twolevel.pyx":98 * # Create a new array iterator * multi = fffpy_multi_iterator_new(4, axis, Y, VY, * B, S2) # <<<<<<<<<<<<<< * * # Create views */ __pyx_v_multi = fffpy_multi_iterator_new(4, __pyx_v_axis, ((void *)__pyx_v_Y), ((void *)__pyx_v_VY), ((void *)__pyx_v_B), ((void *)__pyx_v_S2)); /* "nipy/labs/group/glm_twolevel.pyx":101 * * # Create views * y = multi.vector[0] # <<<<<<<<<<<<<< * vy = multi.vector[1] * b = multi.vector[2] */ __pyx_v_y = (__pyx_v_multi->vector[0]); /* "nipy/labs/group/glm_twolevel.pyx":102 * # Create views * y = multi.vector[0] * vy = multi.vector[1] # <<<<<<<<<<<<<< * b = multi.vector[2] * s2 = multi.vector[3] */ __pyx_v_vy = (__pyx_v_multi->vector[1]); /* "nipy/labs/group/glm_twolevel.pyx":103 * y = multi.vector[0] * vy = multi.vector[1] * b = multi.vector[2] # <<<<<<<<<<<<<< * s2 = multi.vector[3] * */ __pyx_v_b = (__pyx_v_multi->vector[2]); /* "nipy/labs/group/glm_twolevel.pyx":104 * vy = multi.vector[1] * b = multi.vector[2] * s2 = multi.vector[3] # <<<<<<<<<<<<<< * * # Loop */ __pyx_v_s2 = (__pyx_v_multi->vector[3]); /* "nipy/labs/group/glm_twolevel.pyx":107 * * # Loop * while(multi.index < multi.size): # <<<<<<<<<<<<<< * fff_glm_twolevel_EM_init(em) * fff_glm_twolevel_EM_run(em, y, vy, x, ppx, niter) */ while (1) { __pyx_t_3 = (__pyx_v_multi->index < __pyx_v_multi->size); if (!__pyx_t_3) break; /* "nipy/labs/group/glm_twolevel.pyx":108 * # Loop * while(multi.index < multi.size): * fff_glm_twolevel_EM_init(em) # <<<<<<<<<<<<<< * fff_glm_twolevel_EM_run(em, y, vy, x, ppx, niter) * fff_vector_memcpy(b, em.b) */ fff_glm_twolevel_EM_init(__pyx_v_em); /* "nipy/labs/group/glm_twolevel.pyx":109 * while(multi.index < multi.size): * fff_glm_twolevel_EM_init(em) * 
fff_glm_twolevel_EM_run(em, y, vy, x, ppx, niter) # <<<<<<<<<<<<<< * fff_vector_memcpy(b, em.b) * s2.data[0] = em.s2 */ fff_glm_twolevel_EM_run(__pyx_v_em, __pyx_v_y, __pyx_v_vy, __pyx_v_x, __pyx_v_ppx, __pyx_v_niter); /* "nipy/labs/group/glm_twolevel.pyx":110 * fff_glm_twolevel_EM_init(em) * fff_glm_twolevel_EM_run(em, y, vy, x, ppx, niter) * fff_vector_memcpy(b, em.b) # <<<<<<<<<<<<<< * s2.data[0] = em.s2 * fffpy_multi_iterator_update(multi) */ fff_vector_memcpy(__pyx_v_b, __pyx_v_em->b); /* "nipy/labs/group/glm_twolevel.pyx":111 * fff_glm_twolevel_EM_run(em, y, vy, x, ppx, niter) * fff_vector_memcpy(b, em.b) * s2.data[0] = em.s2 # <<<<<<<<<<<<<< * fffpy_multi_iterator_update(multi) * */ __pyx_t_11 = __pyx_v_em->s2; (__pyx_v_s2->data[0]) = __pyx_t_11; /* "nipy/labs/group/glm_twolevel.pyx":112 * fff_vector_memcpy(b, em.b) * s2.data[0] = em.s2 * fffpy_multi_iterator_update(multi) # <<<<<<<<<<<<<< * * # Free memory */ fffpy_multi_iterator_update(__pyx_v_multi); } /* "nipy/labs/group/glm_twolevel.pyx":115 * * # Free memory * fff_matrix_delete(x) # <<<<<<<<<<<<<< * fff_matrix_delete(ppx) * fffpy_multi_iterator_delete(multi) */ fff_matrix_delete(__pyx_v_x); /* "nipy/labs/group/glm_twolevel.pyx":116 * # Free memory * fff_matrix_delete(x) * fff_matrix_delete(ppx) # <<<<<<<<<<<<<< * fffpy_multi_iterator_delete(multi) * fff_glm_twolevel_EM_delete(em) */ fff_matrix_delete(__pyx_v_ppx); /* "nipy/labs/group/glm_twolevel.pyx":117 * fff_matrix_delete(x) * fff_matrix_delete(ppx) * fffpy_multi_iterator_delete(multi) # <<<<<<<<<<<<<< * fff_glm_twolevel_EM_delete(em) * */ fffpy_multi_iterator_delete(__pyx_v_multi); /* "nipy/labs/group/glm_twolevel.pyx":118 * fff_matrix_delete(ppx) * fffpy_multi_iterator_delete(multi) * fff_glm_twolevel_EM_delete(em) # <<<<<<<<<<<<<< * * # Return */ fff_glm_twolevel_EM_delete(__pyx_v_em); /* "nipy/labs/group/glm_twolevel.pyx":121 * * # Return * return B, S2 # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_7 = PyTuple_New(2); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 121; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_7); __Pyx_INCREF(__pyx_v_B); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_v_B); __Pyx_GIVEREF(__pyx_v_B); __Pyx_INCREF(__pyx_v_S2); PyTuple_SET_ITEM(__pyx_t_7, 1, __pyx_v_S2); __Pyx_GIVEREF(__pyx_v_S2); __pyx_r = ((PyObject *)__pyx_t_7); __pyx_t_7 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("nipy.labs.group.glm_twolevel.em", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_PpX); __Pyx_XDECREF(__pyx_v_A); __Pyx_XDECREF(__pyx_v_B); __Pyx_XDECREF(__pyx_v_P); __Pyx_XDECREF(__pyx_v_dims); __Pyx_XDECREF(__pyx_v_S2); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_5group_12glm_twolevel_3log_likelihood(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_4labs_5group_12glm_twolevel_2log_likelihood[] = "\n ll = log_likelihood(y, vy, X, b, s2, axis=0)\n Log likelihood in a mixed-effect GLM.\n OUTPUT: array\n REFERENCE:\n Keller and Roche, ISBI 2008.\n "; static PyMethodDef __pyx_mdef_4nipy_4labs_5group_12glm_twolevel_3log_likelihood = {__Pyx_NAMESTR("log_likelihood"), 
(PyCFunction)__pyx_pw_4nipy_4labs_5group_12glm_twolevel_3log_likelihood, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4nipy_4labs_5group_12glm_twolevel_2log_likelihood)}; static PyObject *__pyx_pw_4nipy_4labs_5group_12glm_twolevel_3log_likelihood(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_Y = 0; PyObject *__pyx_v_VY = 0; PyObject *__pyx_v_X = 0; PyObject *__pyx_v_B = 0; PyObject *__pyx_v_S2 = 0; int __pyx_v_axis; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("log_likelihood (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__Y,&__pyx_n_s__VY,&__pyx_n_s__X,&__pyx_n_s__B,&__pyx_n_s__S2,&__pyx_n_s__axis,0}; PyObject* values[6] = {0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__Y)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__VY)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("log_likelihood", 0, 5, 6, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 126; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__X)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("log_likelihood", 0, 5, 6, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 126; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__B)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("log_likelihood", 0, 5, 6, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 126; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__S2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("log_likelihood", 0, 5, 6, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 126; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__axis); if (value) { values[5] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "log_likelihood") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 126; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_Y = values[0]; __pyx_v_VY = values[1]; __pyx_v_X = values[2]; __pyx_v_B = values[3]; __pyx_v_S2 = values[4]; if (values[5]) { __pyx_v_axis = __Pyx_PyInt_AsInt(values[5]); if (unlikely((__pyx_v_axis == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 126; __pyx_clineno = __LINE__; goto 
__pyx_L3_error;} } else { __pyx_v_axis = ((int)0); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("log_likelihood", 0, 5, 6, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 126; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.labs.group.glm_twolevel.log_likelihood", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4nipy_4labs_5group_12glm_twolevel_2log_likelihood(__pyx_self, __pyx_v_Y, __pyx_v_VY, __pyx_v_X, __pyx_v_B, __pyx_v_S2, __pyx_v_axis); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/labs/group/glm_twolevel.pyx":126 * * * def log_likelihood(Y, VY, X, B, S2, int axis=0): # <<<<<<<<<<<<<< * """ * ll = log_likelihood(y, vy, X, b, s2, axis=0) */ static PyObject *__pyx_pf_4nipy_4labs_5group_12glm_twolevel_2log_likelihood(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_Y, PyObject *__pyx_v_VY, PyObject *__pyx_v_X, PyObject *__pyx_v_B, PyObject *__pyx_v_S2, int __pyx_v_axis) { fff_vector *__pyx_v_y; fff_vector *__pyx_v_vy; fff_vector *__pyx_v_b; fff_vector *__pyx_v_s2; fff_vector *__pyx_v_ll; fff_vector *__pyx_v_tmp; fff_matrix *__pyx_v_x; fffpy_multi_iterator *__pyx_v_multi; PyObject *__pyx_v_dims = NULL; PyObject *__pyx_v_LL = NULL; PyObject *__pyx_v_i = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; Py_ssize_t __pyx_t_4; PyObject *(*__pyx_t_5)(PyObject *); PyObject *__pyx_t_6 = NULL; int __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("log_likelihood", 0); /* "nipy/labs/group/glm_twolevel.pyx":139 * * # Allocate output array * dims = [Y.shape[i] for i in range(Y.ndim)] # <<<<<<<<<<<<<< * dims[axis] = 1 * LL = np.zeros(dims) */ __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 139; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_GetAttr(__pyx_v_Y, __pyx_n_s__ndim); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 139; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 139; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyObject_Call(__pyx_builtin_range, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 139; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; if (PyList_CheckExact(__pyx_t_2) || PyTuple_CheckExact(__pyx_t_2)) { __pyx_t_3 = __pyx_t_2; __Pyx_INCREF(__pyx_t_3); __pyx_t_4 = 0; __pyx_t_5 = NULL; } else { __pyx_t_4 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_t_2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 139; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = Py_TYPE(__pyx_t_3)->tp_iternext; } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; for (;;) { if (!__pyx_t_5 && PyList_CheckExact(__pyx_t_3)) { if (__pyx_t_4 >= PyList_GET_SIZE(__pyx_t_3)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_2 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_4); __Pyx_INCREF(__pyx_t_2); 
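/* Editorial note (not Cython output): the surrounding block is the generated
 * expansion of the comprehension `dims = [Y.shape[i] for i in range(Y.ndim)]`
 * quoted from the .pyx source above.  At the Python level, log_likelihood(Y,
 * VY, X, B, S2, axis=0) returns a per-voxel log-likelihood for the two-level
 * GLM (Keller and Roche, ISBI 2008, per the docstring).  Under the usual
 * Gaussian mixed-effects formulation, which is what the wrapped C routine
 * fff_glm_twolevel_log_likelihood appears to evaluate, this would be
 *
 *   ll = -0.5 * sum_i [ log(2*pi*(s2 + vy_i)) + (y_i - x_i'b)^2 / (s2 + vy_i) ]
 *
 * with y_i the per-subject effect, vy_i its first-level variance, b the
 * second-level effect and s2 the second-level variance.  The exact form,
 * including constant terms, is an assumption; the C routine is authoritative.
 */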
__pyx_t_4++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 139; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else __pyx_t_2 = PySequence_ITEM(__pyx_t_3, __pyx_t_4); __pyx_t_4++; if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 139; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else if (!__pyx_t_5 && PyTuple_CheckExact(__pyx_t_3)) { if (__pyx_t_4 >= PyTuple_GET_SIZE(__pyx_t_3)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_2 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_4); __Pyx_INCREF(__pyx_t_2); __pyx_t_4++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 139; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else __pyx_t_2 = PySequence_ITEM(__pyx_t_3, __pyx_t_4); __pyx_t_4++; if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 139; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else { __pyx_t_2 = __pyx_t_5(__pyx_t_3); if (unlikely(!__pyx_t_2)) { if (PyErr_Occurred()) { if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) PyErr_Clear(); else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 139; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } break; } __Pyx_GOTREF(__pyx_t_2); } __Pyx_XDECREF(__pyx_v_i); __pyx_v_i = __pyx_t_2; __pyx_t_2 = 0; __pyx_t_2 = PyObject_GetAttr(__pyx_v_Y, __pyx_n_s__shape); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 139; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_6 = PyObject_GetItem(__pyx_t_2, __pyx_v_i); if (!__pyx_t_6) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 139; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(__Pyx_PyList_Append(__pyx_t_1, (PyObject*)__pyx_t_6))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 139; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = ((PyObject *)__pyx_t_1); __Pyx_INCREF(__pyx_t_3); __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __pyx_v_dims = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "nipy/labs/group/glm_twolevel.pyx":140 * # Allocate output array * dims = [Y.shape[i] for i in range(Y.ndim)] * dims[axis] = 1 # <<<<<<<<<<<<<< * LL = np.zeros(dims) * */ if (__Pyx_SetItemInt(((PyObject *)__pyx_v_dims), __pyx_v_axis, __pyx_int_1, sizeof(int), PyInt_FromLong) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 140; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/group/glm_twolevel.pyx":141 * dims = [Y.shape[i] for i in range(Y.ndim)] * dims[axis] = 1 * LL = np.zeros(dims) # <<<<<<<<<<<<<< * * # View on design matrix */ __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 141; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_1 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__zeros); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 141; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 141; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(((PyObject *)__pyx_v_dims)); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_dims)); __Pyx_GIVEREF(((PyObject *)__pyx_v_dims)); __pyx_t_6 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 141; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __pyx_v_LL = __pyx_t_6; __pyx_t_6 = 0; /* "nipy/labs/group/glm_twolevel.pyx":144 * * # View on design matrix * x = fff_matrix_fromPyArray(X) # <<<<<<<<<<<<<< * * # Local structure */ if (!(likely(((__pyx_v_X) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_X, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 144; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_6 = __pyx_v_X; __Pyx_INCREF(__pyx_t_6); __pyx_v_x = fff_matrix_fromPyArray(((PyArrayObject *)__pyx_t_6)); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /* "nipy/labs/group/glm_twolevel.pyx":147 * * # Local structure * tmp = fff_vector_new(x.size1) # <<<<<<<<<<<<<< * * # Multi iterator */ __pyx_v_tmp = fff_vector_new(__pyx_v_x->size1); /* "nipy/labs/group/glm_twolevel.pyx":151 * # Multi iterator * multi = fffpy_multi_iterator_new(5, axis, Y, VY, * B, S2, LL) # <<<<<<<<<<<<<< * * # View on iterable arrays */ __pyx_v_multi = fffpy_multi_iterator_new(5, __pyx_v_axis, ((void *)__pyx_v_Y), ((void *)__pyx_v_VY), ((void *)__pyx_v_B), ((void *)__pyx_v_S2), ((void *)__pyx_v_LL)); /* "nipy/labs/group/glm_twolevel.pyx":154 * * # View on iterable arrays * y = multi.vector[0] # <<<<<<<<<<<<<< * vy = multi.vector[1] * b = multi.vector[2] */ __pyx_v_y = (__pyx_v_multi->vector[0]); /* "nipy/labs/group/glm_twolevel.pyx":155 * # View on iterable arrays * y = multi.vector[0] * vy = multi.vector[1] # <<<<<<<<<<<<<< * b = multi.vector[2] * s2 = multi.vector[3] */ __pyx_v_vy = (__pyx_v_multi->vector[1]); /* "nipy/labs/group/glm_twolevel.pyx":156 * y = multi.vector[0] * vy = multi.vector[1] * b = multi.vector[2] # <<<<<<<<<<<<<< * s2 = multi.vector[3] * ll = multi.vector[4] */ __pyx_v_b = (__pyx_v_multi->vector[2]); /* "nipy/labs/group/glm_twolevel.pyx":157 * vy = multi.vector[1] * b = multi.vector[2] * s2 = multi.vector[3] # <<<<<<<<<<<<<< * ll = multi.vector[4] * */ __pyx_v_s2 = (__pyx_v_multi->vector[3]); /* "nipy/labs/group/glm_twolevel.pyx":158 * b = multi.vector[2] * s2 = multi.vector[3] * ll = multi.vector[4] # <<<<<<<<<<<<<< * * # Loop */ __pyx_v_ll = (__pyx_v_multi->vector[4]); /* "nipy/labs/group/glm_twolevel.pyx":161 * * # Loop * while(multi.index < multi.size): # <<<<<<<<<<<<<< * ll.data[0] = fff_glm_twolevel_log_likelihood(y, vy, x, b, s2.data[0], tmp) * fffpy_multi_iterator_update(multi) */ while (1) { __pyx_t_7 = (__pyx_v_multi->index < __pyx_v_multi->size); if (!__pyx_t_7) break; /* "nipy/labs/group/glm_twolevel.pyx":162 * # Loop * while(multi.index < multi.size): * ll.data[0] = fff_glm_twolevel_log_likelihood(y, vy, x, b, s2.data[0], tmp) # <<<<<<<<<<<<<< * fffpy_multi_iterator_update(multi) * */ (__pyx_v_ll->data[0]) = fff_glm_twolevel_log_likelihood(__pyx_v_y, __pyx_v_vy, __pyx_v_x, __pyx_v_b, (__pyx_v_s2->data[0]), __pyx_v_tmp); /* "nipy/labs/group/glm_twolevel.pyx":163 * while(multi.index < multi.size): * ll.data[0] = fff_glm_twolevel_log_likelihood(y, vy, x, b, s2.data[0], tmp) * fffpy_multi_iterator_update(multi) # <<<<<<<<<<<<<< * * # Free memory */ fffpy_multi_iterator_update(__pyx_v_multi); } /* "nipy/labs/group/glm_twolevel.pyx":166 * * # Free memory * fff_matrix_delete(x) # <<<<<<<<<<<<<< * fff_vector_delete(tmp) * fffpy_multi_iterator_delete(multi) */ fff_matrix_delete(__pyx_v_x); /* "nipy/labs/group/glm_twolevel.pyx":167 * # Free memory * fff_matrix_delete(x) * fff_vector_delete(tmp) # <<<<<<<<<<<<<< * 
fffpy_multi_iterator_delete(multi) * */ fff_vector_delete(__pyx_v_tmp); /* "nipy/labs/group/glm_twolevel.pyx":168 * fff_matrix_delete(x) * fff_vector_delete(tmp) * fffpy_multi_iterator_delete(multi) # <<<<<<<<<<<<<< * * # Return */ fffpy_multi_iterator_delete(__pyx_v_multi); /* "nipy/labs/group/glm_twolevel.pyx":171 * * # Return * return LL # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_LL); __pyx_r = __pyx_v_LL; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("nipy.labs.group.glm_twolevel.log_likelihood", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_dims); __Pyx_XDECREF(__pyx_v_LL); __Pyx_XDECREF(__pyx_v_i); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_5group_12glm_twolevel_5log_likelihood_ratio(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_4labs_5group_12glm_twolevel_4log_likelihood_ratio[] = "\n lda = em(y, vy, X, C, axis=0, niter=DEF_NITER).\n "; static PyMethodDef __pyx_mdef_4nipy_4labs_5group_12glm_twolevel_5log_likelihood_ratio = {__Pyx_NAMESTR("log_likelihood_ratio"), (PyCFunction)__pyx_pw_4nipy_4labs_5group_12glm_twolevel_5log_likelihood_ratio, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4nipy_4labs_5group_12glm_twolevel_4log_likelihood_ratio)}; static PyObject *__pyx_pw_4nipy_4labs_5group_12glm_twolevel_5log_likelihood_ratio(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_Y = 0; PyObject *__pyx_v_VY = 0; PyObject *__pyx_v_X = 0; PyObject *__pyx_v_C = 0; int __pyx_v_axis; int __pyx_v_niter; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("log_likelihood_ratio (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__Y,&__pyx_n_s__VY,&__pyx_n_s__X,&__pyx_n_s__C,&__pyx_n_s__axis,&__pyx_n_s__niter,0}; PyObject* values[6] = {0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__Y)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__VY)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("log_likelihood_ratio", 0, 4, 6, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__X)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("log_likelihood_ratio", 0, 4, 6, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__C)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("log_likelihood_ratio", 0, 4, 6, 3); {__pyx_filename = __pyx_f[0]; 
__pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__axis); if (value) { values[4] = value; kw_args--; } } case 5: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__niter); if (value) { values[5] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "log_likelihood_ratio") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_Y = values[0]; __pyx_v_VY = values[1]; __pyx_v_X = values[2]; __pyx_v_C = values[3]; if (values[4]) { __pyx_v_axis = __Pyx_PyInt_AsInt(values[4]); if (unlikely((__pyx_v_axis == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else { __pyx_v_axis = ((int)0); } if (values[5]) { __pyx_v_niter = __Pyx_PyInt_AsInt(values[5]); if (unlikely((__pyx_v_niter == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else { __pyx_v_niter = __pyx_k_2; } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("log_likelihood_ratio", 0, 4, 6, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.labs.group.glm_twolevel.log_likelihood_ratio", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4nipy_4labs_5group_12glm_twolevel_4log_likelihood_ratio(__pyx_self, __pyx_v_Y, __pyx_v_VY, __pyx_v_X, __pyx_v_C, __pyx_v_axis, __pyx_v_niter); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/labs/group/glm_twolevel.pyx":174 * * * def log_likelihood_ratio(Y, VY, X, C, int axis=0, int niter=DEF_NITER): # <<<<<<<<<<<<<< * """ * lda = em(y, vy, X, C, axis=0, niter=DEF_NITER). 
*/ static PyObject *__pyx_pf_4nipy_4labs_5group_12glm_twolevel_4log_likelihood_ratio(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_Y, PyObject *__pyx_v_VY, PyObject *__pyx_v_X, PyObject *__pyx_v_C, int __pyx_v_axis, int __pyx_v_niter) { PyObject *__pyx_v_B = NULL; PyObject *__pyx_v_S2 = NULL; PyObject *__pyx_v_ll0 = NULL; PyObject *__pyx_v_ll = NULL; PyObject *__pyx_v_lda = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *(*__pyx_t_5)(PyObject *); int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("log_likelihood_ratio", 0); /* "nipy/labs/group/glm_twolevel.pyx":180 * * # Constrained log-likelihood * B, S2 = em(Y, VY, X, C, axis, niter) # <<<<<<<<<<<<<< * ll0 = log_likelihood(Y, VY, X, B, S2, axis) * */ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__em); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyInt_FromLong(__pyx_v_axis); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyInt_FromLong(__pyx_v_niter); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(6); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_INCREF(__pyx_v_Y); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_v_Y); __Pyx_GIVEREF(__pyx_v_Y); __Pyx_INCREF(__pyx_v_VY); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_v_VY); __Pyx_GIVEREF(__pyx_v_VY); __Pyx_INCREF(__pyx_v_X); PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_v_X); __Pyx_GIVEREF(__pyx_v_X); __Pyx_INCREF(__pyx_v_C); PyTuple_SET_ITEM(__pyx_t_4, 3, __pyx_v_C); __Pyx_GIVEREF(__pyx_v_C); PyTuple_SET_ITEM(__pyx_t_4, 4, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_4, 5, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_2 = 0; __pyx_t_3 = 0; __pyx_t_3 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; if ((likely(PyTuple_CheckExact(__pyx_t_3))) || (PyList_CheckExact(__pyx_t_3))) { PyObject* sequence = __pyx_t_3; #if CYTHON_COMPILING_IN_CPYTHON Py_ssize_t size = Py_SIZE(sequence); #else Py_ssize_t size = PySequence_Size(sequence); #endif if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } #if CYTHON_COMPILING_IN_CPYTHON if (likely(PyTuple_CheckExact(sequence))) { __pyx_t_4 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_1 = PyTuple_GET_ITEM(sequence, 1); } else { __pyx_t_4 = PyList_GET_ITEM(sequence, 0); __pyx_t_1 = PyList_GET_ITEM(sequence, 1); } __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(__pyx_t_1); #else __pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_1)) 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else { Py_ssize_t index = -1; __pyx_t_2 = PyObject_GetIter(__pyx_t_3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_5 = Py_TYPE(__pyx_t_2)->tp_iternext; index = 0; __pyx_t_4 = __pyx_t_5(__pyx_t_2); if (unlikely(!__pyx_t_4)) goto __pyx_L3_unpacking_failed; __Pyx_GOTREF(__pyx_t_4); index = 1; __pyx_t_1 = __pyx_t_5(__pyx_t_2); if (unlikely(!__pyx_t_1)) goto __pyx_L3_unpacking_failed; __Pyx_GOTREF(__pyx_t_1); if (__Pyx_IternextUnpackEndCheck(__pyx_t_5(__pyx_t_2), 2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_5 = NULL; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; goto __pyx_L4_unpacking_done; __pyx_L3_unpacking_failed:; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_5 = NULL; if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_L4_unpacking_done:; } __pyx_v_B = __pyx_t_4; __pyx_t_4 = 0; __pyx_v_S2 = __pyx_t_1; __pyx_t_1 = 0; /* "nipy/labs/group/glm_twolevel.pyx":181 * # Constrained log-likelihood * B, S2 = em(Y, VY, X, C, axis, niter) * ll0 = log_likelihood(Y, VY, X, B, S2, axis) # <<<<<<<<<<<<<< * * # Unconstrained log-likelihood */ __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__log_likelihood); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 181; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_1 = PyInt_FromLong(__pyx_v_axis); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 181; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = PyTuple_New(6); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 181; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_INCREF(__pyx_v_Y); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_v_Y); __Pyx_GIVEREF(__pyx_v_Y); __Pyx_INCREF(__pyx_v_VY); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_v_VY); __Pyx_GIVEREF(__pyx_v_VY); __Pyx_INCREF(__pyx_v_X); PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_v_X); __Pyx_GIVEREF(__pyx_v_X); __Pyx_INCREF(__pyx_v_B); PyTuple_SET_ITEM(__pyx_t_4, 3, __pyx_v_B); __Pyx_GIVEREF(__pyx_v_B); __Pyx_INCREF(__pyx_v_S2); PyTuple_SET_ITEM(__pyx_t_4, 4, __pyx_v_S2); __Pyx_GIVEREF(__pyx_v_S2); PyTuple_SET_ITEM(__pyx_t_4, 5, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 181; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; __pyx_v_ll0 = __pyx_t_1; __pyx_t_1 = 0; /* "nipy/labs/group/glm_twolevel.pyx":184 * * # Unconstrained log-likelihood * B, S2 = em(Y, VY, X, None, axis, niter) # <<<<<<<<<<<<<< * ll = log_likelihood(Y, VY, X, B, S2, axis) * */ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__em); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 184; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = PyInt_FromLong(__pyx_v_axis); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 184; __pyx_clineno = __LINE__; goto 
__pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyInt_FromLong(__pyx_v_niter); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 184; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyTuple_New(6); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 184; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_v_Y); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_Y); __Pyx_GIVEREF(__pyx_v_Y); __Pyx_INCREF(__pyx_v_VY); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_v_VY); __Pyx_GIVEREF(__pyx_v_VY); __Pyx_INCREF(__pyx_v_X); PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_v_X); __Pyx_GIVEREF(__pyx_v_X); __Pyx_INCREF(Py_None); PyTuple_SET_ITEM(__pyx_t_2, 3, Py_None); __Pyx_GIVEREF(Py_None); PyTuple_SET_ITEM(__pyx_t_2, 4, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_2, 5, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_4 = 0; __pyx_t_3 = 0; __pyx_t_3 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 184; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; if ((likely(PyTuple_CheckExact(__pyx_t_3))) || (PyList_CheckExact(__pyx_t_3))) { PyObject* sequence = __pyx_t_3; #if CYTHON_COMPILING_IN_CPYTHON Py_ssize_t size = Py_SIZE(sequence); #else Py_ssize_t size = PySequence_Size(sequence); #endif if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 184; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } #if CYTHON_COMPILING_IN_CPYTHON if (likely(PyTuple_CheckExact(sequence))) { __pyx_t_2 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_1 = PyTuple_GET_ITEM(sequence, 1); } else { __pyx_t_2 = PyList_GET_ITEM(sequence, 0); __pyx_t_1 = PyList_GET_ITEM(sequence, 1); } __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(__pyx_t_1); #else __pyx_t_2 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 184; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 184; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else { Py_ssize_t index = -1; __pyx_t_4 = PyObject_GetIter(__pyx_t_3); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 184; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_5 = Py_TYPE(__pyx_t_4)->tp_iternext; index = 0; __pyx_t_2 = __pyx_t_5(__pyx_t_4); if (unlikely(!__pyx_t_2)) goto __pyx_L5_unpacking_failed; __Pyx_GOTREF(__pyx_t_2); index = 1; __pyx_t_1 = __pyx_t_5(__pyx_t_4); if (unlikely(!__pyx_t_1)) goto __pyx_L5_unpacking_failed; __Pyx_GOTREF(__pyx_t_1); if (__Pyx_IternextUnpackEndCheck(__pyx_t_5(__pyx_t_4), 2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 184; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_5 = NULL; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; goto __pyx_L6_unpacking_done; __pyx_L5_unpacking_failed:; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_5 = NULL; if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 184; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_L6_unpacking_done:; } __Pyx_DECREF(__pyx_v_B); 
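/* Editorial note (not Cython output): at this point the unconstrained fit
 * overwrites B and S2, mirroring the .pyx source quoted in the comments:
 *
 *   B, S2 = em(Y, VY, X, C, axis, niter)          # constrained fit (contrast C imposed)
 *   ll0   = log_likelihood(Y, VY, X, B, S2, axis)
 *   B, S2 = em(Y, VY, X, None, axis, niter)       # unconstrained fit
 *   ll    = log_likelihood(Y, VY, X, B, S2, axis)
 *   lda   = 2 * (ll - ll0)                        # -2 log likelihood ratio
 *
 * A minimal Python-level usage sketch follows.  It is illustrative only: the
 * argument names (Y, VY, X, C, B, S2, axis, niter) are taken from the wrapper
 * code in this file, while the shapes, the random data and the contrast are
 * hypothetical assumptions, not part of this module.
 *
 *   import numpy as np
 *   from nipy.labs.group import glm_twolevel
 *
 *   n, p, nvox = 20, 2, 1000                   # subjects, regressors, voxels (assumed)
 *   Y  = np.random.randn(n, nvox)              # per-subject effects
 *   VY = np.ones((n, nvox))                    # first-level variances
 *   X  = np.random.randn(n, p)                 # second-level design matrix
 *   C  = np.eye(p)[:, :1]                      # contrast to test (assumed shape (p, 1))
 *
 *   B, S2 = glm_twolevel.em(Y, VY, X, None)                        # mixed-effects fit
 *   ll    = glm_twolevel.log_likelihood(Y, VY, X, B, S2, axis=0)   # per-voxel log-likelihood
 *   lda   = glm_twolevel.log_likelihood_ratio(Y, VY, X, C, axis=0) # per-voxel LRT statistic
 */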
__pyx_v_B = __pyx_t_2; __pyx_t_2 = 0; __Pyx_DECREF(__pyx_v_S2); __pyx_v_S2 = __pyx_t_1; __pyx_t_1 = 0; /* "nipy/labs/group/glm_twolevel.pyx":185 * # Unconstrained log-likelihood * B, S2 = em(Y, VY, X, None, axis, niter) * ll = log_likelihood(Y, VY, X, B, S2, axis) # <<<<<<<<<<<<<< * * # -2 log R = 2*(ll-ll0) */ __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__log_likelihood); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_1 = PyInt_FromLong(__pyx_v_axis); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyTuple_New(6); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_v_Y); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_Y); __Pyx_GIVEREF(__pyx_v_Y); __Pyx_INCREF(__pyx_v_VY); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_v_VY); __Pyx_GIVEREF(__pyx_v_VY); __Pyx_INCREF(__pyx_v_X); PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_v_X); __Pyx_GIVEREF(__pyx_v_X); __Pyx_INCREF(__pyx_v_B); PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_v_B); __Pyx_GIVEREF(__pyx_v_B); __Pyx_INCREF(__pyx_v_S2); PyTuple_SET_ITEM(__pyx_t_2, 4, __pyx_v_S2); __Pyx_GIVEREF(__pyx_v_S2); PyTuple_SET_ITEM(__pyx_t_2, 5, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; __pyx_v_ll = __pyx_t_1; __pyx_t_1 = 0; /* "nipy/labs/group/glm_twolevel.pyx":188 * * # -2 log R = 2*(ll-ll0) * lda = 2*(ll-ll0) # <<<<<<<<<<<<<< * return np.maximum(lda, 0.0) */ __pyx_t_1 = PyNumber_Subtract(__pyx_v_ll, __pyx_v_ll0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 188; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyNumber_Multiply(__pyx_int_2, __pyx_t_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 188; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_lda = __pyx_t_2; __pyx_t_2 = 0; /* "nipy/labs/group/glm_twolevel.pyx":189 * # -2 log R = 2*(ll-ll0) * lda = 2*(ll-ll0) * return np.maximum(lda, 0.0) # <<<<<<<<<<<<<< */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 189; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__maximum); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 189; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 189; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 189; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_lda); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_lda); __Pyx_GIVEREF(__pyx_v_lda); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2); 
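/* Editorial note (not Cython output): the call being assembled here is
 * np.maximum(lda, 0.0) from the .pyx source.  Because the unconstrained model
 * nests the constrained one, 2*(ll - ll0) is non-negative in exact arithmetic;
 * the clipping at zero presumably guards against small negative values when
 * the fixed number of EM iterations has not fully converged either fit.
 */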
__Pyx_GIVEREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 189; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("nipy.labs.group.glm_twolevel.log_likelihood_ratio", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_B); __Pyx_XDECREF(__pyx_v_S2); __Pyx_XDECREF(__pyx_v_ll0); __Pyx_XDECREF(__pyx_v_ll); __Pyx_XDECREF(__pyx_v_lda); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_pf_5numpy_7ndarray___getbuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":194 * # experimental exception made for __getbuffer__ and __releasebuffer__ * # -- the details of this may change. * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< * # This implementation of getbuffer is geared towards Cython * # requirements, and does not yet fullfill the PEP. 
*/ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_v_copy_shape; int __pyx_v_i; int __pyx_v_ndim; int __pyx_v_endian_detector; int __pyx_v_little_endian; int __pyx_v_t; char *__pyx_v_f; PyArray_Descr *__pyx_v_descr = 0; int __pyx_v_offset; int __pyx_v_hasfields; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; char *__pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getbuffer__", 0); if (__pyx_v_info != NULL) { __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); } /* "numpy.pxd":200 * # of flags * * if info == NULL: return # <<<<<<<<<<<<<< * * cdef int copy_shape, i, ndim */ __pyx_t_1 = (__pyx_v_info == NULL); if (__pyx_t_1) { __pyx_r = 0; goto __pyx_L0; goto __pyx_L3; } __pyx_L3:; /* "numpy.pxd":203 * * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((&endian_detector)[0] != 0) * */ __pyx_v_endian_detector = 1; /* "numpy.pxd":204 * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * * ndim = PyArray_NDIM(self) */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "numpy.pxd":206 * cdef bint little_endian = ((&endian_detector)[0] != 0) * * ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<< * * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_v_ndim = PyArray_NDIM(__pyx_v_self); /* "numpy.pxd":208 * ndim = PyArray_NDIM(self) * * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * copy_shape = 1 * else: */ __pyx_t_1 = ((sizeof(npy_intp)) != (sizeof(Py_ssize_t))); if (__pyx_t_1) { /* "numpy.pxd":209 * * if sizeof(npy_intp) != sizeof(Py_ssize_t): * copy_shape = 1 # <<<<<<<<<<<<<< * else: * copy_shape = 0 */ __pyx_v_copy_shape = 1; goto __pyx_L4; } /*else*/ { /* "numpy.pxd":211 * copy_shape = 1 * else: * copy_shape = 0 # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) */ __pyx_v_copy_shape = 0; } __pyx_L4:; /* "numpy.pxd":213 * copy_shape = 0 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS); if (__pyx_t_1) { /* "numpy.pxd":214 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not C contiguous") * */ __pyx_t_2 = (!PyArray_CHKFLAGS(__pyx_v_self, NPY_C_CONTIGUOUS)); __pyx_t_3 = __pyx_t_2; } else { __pyx_t_3 = __pyx_t_1; } if (__pyx_t_3) { /* "numpy.pxd":215 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_4), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; 
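/* Editorial note (not Cython output): from here onward the code comes from
 * Cython's bundled numpy.pxd rather than from glm_twolevel.pyx.  It provides
 * ndarray.__getbuffer__ / __releasebuffer__ for the buffer protocol (PEP 3118,
 * which the pxd comments note is not yet fully implemented): contiguity
 * checks, export of shape/stride/itemsize information, and the mapping from
 * numpy dtype codes to struct-style format characters ("b", "B", "h", ...,
 * "Zf", "Zd", "O") seen in the branches below.
 */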
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L5; } __pyx_L5:; /* "numpy.pxd":217 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ __pyx_t_3 = ((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS); if (__pyx_t_3) { /* "numpy.pxd":218 * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not Fortran contiguous") * */ __pyx_t_1 = (!PyArray_CHKFLAGS(__pyx_v_self, NPY_F_CONTIGUOUS)); __pyx_t_2 = __pyx_t_1; } else { __pyx_t_2 = __pyx_t_3; } if (__pyx_t_2) { /* "numpy.pxd":219 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_6), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L6; } __pyx_L6:; /* "numpy.pxd":221 * raise ValueError(u"ndarray is not Fortran contiguous") * * info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<< * info.ndim = ndim * if copy_shape: */ __pyx_v_info->buf = PyArray_DATA(__pyx_v_self); /* "numpy.pxd":222 * * info.buf = PyArray_DATA(self) * info.ndim = ndim # <<<<<<<<<<<<<< * if copy_shape: * # Allocate new buffer for strides and shape info. */ __pyx_v_info->ndim = __pyx_v_ndim; /* "numpy.pxd":223 * info.buf = PyArray_DATA(self) * info.ndim = ndim * if copy_shape: # <<<<<<<<<<<<<< * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. */ if (__pyx_v_copy_shape) { /* "numpy.pxd":226 * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. * info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) # <<<<<<<<<<<<<< * info.shape = info.strides + ndim * for i in range(ndim): */ __pyx_v_info->strides = ((Py_ssize_t *)malloc((((sizeof(Py_ssize_t)) * ((size_t)__pyx_v_ndim)) * 2))); /* "numpy.pxd":227 * # This is allocated as one block, strides first. 
* info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) * info.shape = info.strides + ndim # <<<<<<<<<<<<<< * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] */ __pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim); /* "numpy.pxd":228 * info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) * info.shape = info.strides + ndim * for i in range(ndim): # <<<<<<<<<<<<<< * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] */ __pyx_t_5 = __pyx_v_ndim; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "numpy.pxd":229 * info.shape = info.strides + ndim * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<< * info.shape[i] = PyArray_DIMS(self)[i] * else: */ (__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]); /* "numpy.pxd":230 * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<< * else: * info.strides = PyArray_STRIDES(self) */ (__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]); } goto __pyx_L7; } /*else*/ { /* "numpy.pxd":232 * info.shape[i] = PyArray_DIMS(self)[i] * else: * info.strides = PyArray_STRIDES(self) # <<<<<<<<<<<<<< * info.shape = PyArray_DIMS(self) * info.suboffsets = NULL */ __pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self)); /* "numpy.pxd":233 * else: * info.strides = PyArray_STRIDES(self) * info.shape = PyArray_DIMS(self) # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) */ __pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(__pyx_v_self)); } __pyx_L7:; /* "numpy.pxd":234 * info.strides = PyArray_STRIDES(self) * info.shape = PyArray_DIMS(self) * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) */ __pyx_v_info->suboffsets = NULL; /* "numpy.pxd":235 * info.shape = PyArray_DIMS(self) * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<< * info.readonly = not PyArray_ISWRITEABLE(self) * */ __pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self); /* "numpy.pxd":236 * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<< * * cdef int t */ __pyx_v_info->readonly = (!PyArray_ISWRITEABLE(__pyx_v_self)); /* "numpy.pxd":239 * * cdef int t * cdef char* f = NULL # <<<<<<<<<<<<<< * cdef dtype descr = self.descr * cdef list stack */ __pyx_v_f = NULL; /* "numpy.pxd":240 * cdef int t * cdef char* f = NULL * cdef dtype descr = self.descr # <<<<<<<<<<<<<< * cdef list stack * cdef int offset */ __pyx_t_4 = ((PyObject *)__pyx_v_self->descr); __Pyx_INCREF(__pyx_t_4); __pyx_v_descr = ((PyArray_Descr *)__pyx_t_4); __pyx_t_4 = 0; /* "numpy.pxd":244 * cdef int offset * * cdef bint hasfields = PyDataType_HASFIELDS(descr) # <<<<<<<<<<<<<< * * if not hasfields and not copy_shape: */ __pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr); /* "numpy.pxd":246 * cdef bint hasfields = PyDataType_HASFIELDS(descr) * * if not hasfields and not copy_shape: # <<<<<<<<<<<<<< * # do not call releasebuffer * info.obj = None */ __pyx_t_2 = (!__pyx_v_hasfields); if (__pyx_t_2) { __pyx_t_3 = (!__pyx_v_copy_shape); __pyx_t_1 = __pyx_t_3; } else { __pyx_t_1 = __pyx_t_2; } if (__pyx_t_1) { /* "numpy.pxd":248 * if not hasfields and not copy_shape: * # do not call releasebuffer * info.obj = None # <<<<<<<<<<<<<< * else: * # need to call 
releasebuffer */ __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = Py_None; goto __pyx_L10; } /*else*/ { /* "numpy.pxd":251 * else: * # need to call releasebuffer * info.obj = self # <<<<<<<<<<<<<< * * if not hasfields: */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); } __pyx_L10:; /* "numpy.pxd":253 * info.obj = self * * if not hasfields: # <<<<<<<<<<<<<< * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or */ __pyx_t_1 = (!__pyx_v_hasfields); if (__pyx_t_1) { /* "numpy.pxd":254 * * if not hasfields: * t = descr.type_num # <<<<<<<<<<<<<< * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): */ __pyx_t_5 = __pyx_v_descr->type_num; __pyx_v_t = __pyx_t_5; /* "numpy.pxd":255 * if not hasfields: * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_1 = (__pyx_v_descr->byteorder == '>'); if (__pyx_t_1) { __pyx_t_2 = __pyx_v_little_endian; } else { __pyx_t_2 = __pyx_t_1; } if (!__pyx_t_2) { /* "numpy.pxd":256 * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" */ __pyx_t_1 = (__pyx_v_descr->byteorder == '<'); if (__pyx_t_1) { __pyx_t_3 = (!__pyx_v_little_endian); __pyx_t_7 = __pyx_t_3; } else { __pyx_t_7 = __pyx_t_1; } __pyx_t_1 = __pyx_t_7; } else { __pyx_t_1 = __pyx_t_2; } if (__pyx_t_1) { /* "numpy.pxd":257 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_8), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L12; } __pyx_L12:; /* "numpy.pxd":258 * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" */ __pyx_t_1 = (__pyx_v_t == NPY_BYTE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__b; goto __pyx_L13; } /* "numpy.pxd":259 * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" */ __pyx_t_1 = (__pyx_v_t == NPY_UBYTE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__B; goto __pyx_L13; } /* "numpy.pxd":260 * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" */ __pyx_t_1 = (__pyx_v_t == NPY_SHORT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__h; goto __pyx_L13; } /* "numpy.pxd":261 * elif t == NPY_UBYTE: f = "B" * elif t == 
NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" */ __pyx_t_1 = (__pyx_v_t == NPY_USHORT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__H; goto __pyx_L13; } /* "numpy.pxd":262 * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" */ __pyx_t_1 = (__pyx_v_t == NPY_INT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__i; goto __pyx_L13; } /* "numpy.pxd":263 * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" */ __pyx_t_1 = (__pyx_v_t == NPY_UINT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__I; goto __pyx_L13; } /* "numpy.pxd":264 * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" */ __pyx_t_1 = (__pyx_v_t == NPY_LONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__l; goto __pyx_L13; } /* "numpy.pxd":265 * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" */ __pyx_t_1 = (__pyx_v_t == NPY_ULONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__L; goto __pyx_L13; } /* "numpy.pxd":266 * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" */ __pyx_t_1 = (__pyx_v_t == NPY_LONGLONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__q; goto __pyx_L13; } /* "numpy.pxd":267 * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" */ __pyx_t_1 = (__pyx_v_t == NPY_ULONGLONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Q; goto __pyx_L13; } /* "numpy.pxd":268 * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" */ __pyx_t_1 = (__pyx_v_t == NPY_FLOAT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__f; goto __pyx_L13; } /* "numpy.pxd":269 * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" */ __pyx_t_1 = (__pyx_v_t == NPY_DOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__d; goto __pyx_L13; } /* "numpy.pxd":270 * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" */ __pyx_t_1 = (__pyx_v_t == NPY_LONGDOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__g; goto __pyx_L13; } /* "numpy.pxd":271 * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" */ __pyx_t_1 = (__pyx_v_t == NPY_CFLOAT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Zf; goto __pyx_L13; } /* "numpy.pxd":272 * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" */ __pyx_t_1 = (__pyx_v_t == NPY_CDOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Zd; goto __pyx_L13; } /* "numpy.pxd":273 * elif t 
== NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f = "O" * else: */ __pyx_t_1 = (__pyx_v_t == NPY_CLONGDOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Zg; goto __pyx_L13; } /* "numpy.pxd":274 * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ __pyx_t_1 = (__pyx_v_t == NPY_OBJECT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__O; goto __pyx_L13; } /*else*/ { /* "numpy.pxd":276 * elif t == NPY_OBJECT: f = "O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * info.format = f * return */ __pyx_t_4 = PyInt_FromLong(__pyx_v_t); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_8 = PyNumber_Remainder(((PyObject *)__pyx_kp_u_9), __pyx_t_4); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_8)); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_t_8)); __Pyx_GIVEREF(((PyObject *)__pyx_t_8)); __pyx_t_8 = 0; __pyx_t_8 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_8, 0, 0, 0); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L13:; /* "numpy.pxd":277 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f # <<<<<<<<<<<<<< * return * else: */ __pyx_v_info->format = __pyx_v_f; /* "numpy.pxd":278 * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f * return # <<<<<<<<<<<<<< * else: * info.format = stdlib.malloc(_buffer_format_string_len) */ __pyx_r = 0; goto __pyx_L0; goto __pyx_L11; } /*else*/ { /* "numpy.pxd":280 * return * else: * info.format = stdlib.malloc(_buffer_format_string_len) # <<<<<<<<<<<<<< * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 */ __pyx_v_info->format = ((char *)malloc(255)); /* "numpy.pxd":281 * else: * info.format = stdlib.malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment # <<<<<<<<<<<<<< * offset = 0 * f = _util_dtypestring(descr, info.format + 1, */ (__pyx_v_info->format[0]) = '^'; /* "numpy.pxd":282 * info.format = stdlib.malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 # <<<<<<<<<<<<<< * f = _util_dtypestring(descr, info.format + 1, * info.format + _buffer_format_string_len, */ __pyx_v_offset = 0; /* "numpy.pxd":285 * f = _util_dtypestring(descr, info.format + 1, * info.format + _buffer_format_string_len, * &offset) # <<<<<<<<<<<<<< * f[0] = c'\0' # Terminate format string * */ __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 255), (&__pyx_v_offset)); if (unlikely(__pyx_t_9 == NULL)) 
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 283; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_f = __pyx_t_9; /* "numpy.pxd":286 * info.format + _buffer_format_string_len, * &offset) * f[0] = c'\0' # Terminate format string # <<<<<<<<<<<<<< * * def __releasebuffer__(ndarray self, Py_buffer* info): */ (__pyx_v_f[0]) = '\x00'; } __pyx_L11:; __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { __Pyx_GOTREF(Py_None); __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; } __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_descr); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__releasebuffer__ (wrapper)", 0); __pyx_pf_5numpy_7ndarray_2__releasebuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info)); __Pyx_RefNannyFinishContext(); } /* "numpy.pxd":288 * f[0] = c'\0' # Terminate format string * * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< * if PyArray_HASFIELDS(self): * stdlib.free(info.format) */ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__releasebuffer__", 0); /* "numpy.pxd":289 * * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_t_1 = PyArray_HASFIELDS(__pyx_v_self); if (__pyx_t_1) { /* "numpy.pxd":290 * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): * stdlib.free(info.format) # <<<<<<<<<<<<<< * if sizeof(npy_intp) != sizeof(Py_ssize_t): * stdlib.free(info.strides) */ free(__pyx_v_info->format); goto __pyx_L3; } __pyx_L3:; /* "numpy.pxd":291 * if PyArray_HASFIELDS(self): * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * stdlib.free(info.strides) * # info.shape was stored after info.strides in the same block */ __pyx_t_1 = ((sizeof(npy_intp)) != (sizeof(Py_ssize_t))); if (__pyx_t_1) { /* "numpy.pxd":292 * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): * stdlib.free(info.strides) # <<<<<<<<<<<<<< * # info.shape was stored after info.strides in the same block * */ free(__pyx_v_info->strides); goto __pyx_L4; } __pyx_L4:; __Pyx_RefNannyFinishContext(); } /* "numpy.pxd":768 * ctypedef npy_cdouble complex_t * * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(1, a) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0); /* "numpy.pxd":769 * * cdef inline object PyArray_MultiIterNew1(a): * 
return PyArray_MultiIterNew(1, a) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew2(a, b): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 769; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":771 * return PyArray_MultiIterNew(1, a) * * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(2, a, b) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0); /* "numpy.pxd":772 * * cdef inline object PyArray_MultiIterNew2(a, b): * return PyArray_MultiIterNew(2, a, b) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew3(a, b, c): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 772; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":774 * return PyArray_MultiIterNew(2, a, b) * * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(3, a, b, c) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0); /* "numpy.pxd":775 * * cdef inline object PyArray_MultiIterNew3(a, b, c): * return PyArray_MultiIterNew(3, a, b, c) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 775; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":777 * return PyArray_MultiIterNew(3, a, b, c) * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(4, a, b, c, d) * */ static CYTHON_INLINE PyObject 
*__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0); /* "numpy.pxd":778 * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): * return PyArray_MultiIterNew(4, a, b, c, d) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 778; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":780 * return PyArray_MultiIterNew(4, a, b, c, d) * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(5, a, b, c, d, e) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0); /* "numpy.pxd":781 * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): * return PyArray_MultiIterNew(5, a, b, c, d, e) # <<<<<<<<<<<<<< * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 781; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":783 * return PyArray_MultiIterNew(5, a, b, c, d, e) * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< * # Recursive utility function used in __getbuffer__ to get format * # string. The new location in the format string is returned. 
*/ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) { PyArray_Descr *__pyx_v_child = 0; int __pyx_v_endian_detector; int __pyx_v_little_endian; PyObject *__pyx_v_fields = 0; PyObject *__pyx_v_childname = NULL; PyObject *__pyx_v_new_offset = NULL; PyObject *__pyx_v_t = NULL; char *__pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *(*__pyx_t_6)(PyObject *); int __pyx_t_7; int __pyx_t_8; int __pyx_t_9; int __pyx_t_10; long __pyx_t_11; char *__pyx_t_12; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_util_dtypestring", 0); /* "numpy.pxd":790 * cdef int delta_offset * cdef tuple i * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((&endian_detector)[0] != 0) * cdef tuple fields */ __pyx_v_endian_detector = 1; /* "numpy.pxd":791 * cdef tuple i * cdef int endian_detector = 1 * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * cdef tuple fields * */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "numpy.pxd":794 * cdef tuple fields * * for childname in descr.names: # <<<<<<<<<<<<<< * fields = descr.fields[childname] * child, new_offset = fields */ if (unlikely(((PyObject *)__pyx_v_descr->names) == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_1 = ((PyObject *)__pyx_v_descr->names); __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; for (;;) { if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif __Pyx_XDECREF(__pyx_v_childname); __pyx_v_childname = __pyx_t_3; __pyx_t_3 = 0; /* "numpy.pxd":795 * * for childname in descr.names: * fields = descr.fields[childname] # <<<<<<<<<<<<<< * child, new_offset = fields * */ __pyx_t_3 = PyObject_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (!__pyx_t_3) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected tuple, got %.200s", Py_TYPE(__pyx_t_3)->tp_name), 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_XDECREF(((PyObject *)__pyx_v_fields)); __pyx_v_fields = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "numpy.pxd":796 * for childname in descr.names: * fields = descr.fields[childname] * child, new_offset = fields # <<<<<<<<<<<<<< * * if (end - f) - (new_offset - offset[0]) < 15: */ if (likely(PyTuple_CheckExact(((PyObject *)__pyx_v_fields)))) { PyObject* sequence = ((PyObject *)__pyx_v_fields); #if CYTHON_COMPILING_IN_CPYTHON Py_ssize_t size = Py_SIZE(sequence); #else Py_ssize_t size = PySequence_Size(sequence); #endif if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else 
if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); #else __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else if (1) { __Pyx_RaiseNoneNotIterableError(); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } else { Py_ssize_t index = -1; __pyx_t_5 = PyObject_GetIter(((PyObject *)__pyx_v_fields)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = Py_TYPE(__pyx_t_5)->tp_iternext; index = 0; __pyx_t_3 = __pyx_t_6(__pyx_t_5); if (unlikely(!__pyx_t_3)) goto __pyx_L5_unpacking_failed; __Pyx_GOTREF(__pyx_t_3); index = 1; __pyx_t_4 = __pyx_t_6(__pyx_t_5); if (unlikely(!__pyx_t_4)) goto __pyx_L5_unpacking_failed; __Pyx_GOTREF(__pyx_t_4); if (__Pyx_IternextUnpackEndCheck(__pyx_t_6(__pyx_t_5), 2) < 0) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_6 = NULL; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L6_unpacking_done; __pyx_L5_unpacking_failed:; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_6 = NULL; if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_L6_unpacking_done:; } if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_XDECREF(((PyObject *)__pyx_v_child)); __pyx_v_child = ((PyArray_Descr *)__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_v_new_offset); __pyx_v_new_offset = __pyx_t_4; __pyx_t_4 = 0; /* "numpy.pxd":798 * child, new_offset = fields * * if (end - f) - (new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * */ __pyx_t_4 = PyInt_FromLong((__pyx_v_end - __pyx_v_f)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyInt_FromLong((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_3); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyNumber_Subtract(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyObject_RichCompare(__pyx_t_3, __pyx_int_15, Py_LT); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = 
__pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { /* "numpy.pxd":799 * * if (end - f) - (new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_t_5 = PyObject_Call(__pyx_builtin_RuntimeError, ((PyObject *)__pyx_k_tuple_11), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L7; } __pyx_L7:; /* "numpy.pxd":801 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_7 = (__pyx_v_child->byteorder == '>'); if (__pyx_t_7) { __pyx_t_8 = __pyx_v_little_endian; } else { __pyx_t_8 = __pyx_t_7; } if (!__pyx_t_8) { /* "numpy.pxd":802 * * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * # One could encode it in the format string and have Cython */ __pyx_t_7 = (__pyx_v_child->byteorder == '<'); if (__pyx_t_7) { __pyx_t_9 = (!__pyx_v_little_endian); __pyx_t_10 = __pyx_t_9; } else { __pyx_t_10 = __pyx_t_7; } __pyx_t_7 = __pyx_t_10; } else { __pyx_t_7 = __pyx_t_8; } if (__pyx_t_7) { /* "numpy.pxd":803 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_12), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L8; } __pyx_L8:; /* "numpy.pxd":813 * * # Output padding bytes * while offset[0] < new_offset: # <<<<<<<<<<<<<< * f[0] = 120 # "x"; pad byte * f += 1 */ while (1) { __pyx_t_5 = PyInt_FromLong((__pyx_v_offset[0])); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_t_5, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if 
(!__pyx_t_7) break; /* "numpy.pxd":814 * # Output padding bytes * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<< * f += 1 * offset[0] += 1 */ (__pyx_v_f[0]) = 120; /* "numpy.pxd":815 * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte * f += 1 # <<<<<<<<<<<<<< * offset[0] += 1 * */ __pyx_v_f = (__pyx_v_f + 1); /* "numpy.pxd":816 * f[0] = 120 # "x"; pad byte * f += 1 * offset[0] += 1 # <<<<<<<<<<<<<< * * offset[0] += child.itemsize */ __pyx_t_11 = 0; (__pyx_v_offset[__pyx_t_11]) = ((__pyx_v_offset[__pyx_t_11]) + 1); } /* "numpy.pxd":818 * offset[0] += 1 * * offset[0] += child.itemsize # <<<<<<<<<<<<<< * * if not PyDataType_HASFIELDS(child): */ __pyx_t_11 = 0; (__pyx_v_offset[__pyx_t_11]) = ((__pyx_v_offset[__pyx_t_11]) + __pyx_v_child->elsize); /* "numpy.pxd":820 * offset[0] += child.itemsize * * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< * t = child.type_num * if end - f < 5: */ __pyx_t_7 = (!PyDataType_HASFIELDS(__pyx_v_child)); if (__pyx_t_7) { /* "numpy.pxd":821 * * if not PyDataType_HASFIELDS(child): * t = child.type_num # <<<<<<<<<<<<<< * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") */ __pyx_t_3 = PyInt_FromLong(__pyx_v_child->type_num); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 821; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_XDECREF(__pyx_v_t); __pyx_v_t = __pyx_t_3; __pyx_t_3 = 0; /* "numpy.pxd":822 * if not PyDataType_HASFIELDS(child): * t = child.type_num * if end - f < 5: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short.") * */ __pyx_t_7 = ((__pyx_v_end - __pyx_v_f) < 5); if (__pyx_t_7) { /* "numpy.pxd":823 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_t_3 = PyObject_Call(__pyx_builtin_RuntimeError, ((PyObject *)__pyx_k_tuple_14), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L12; } __pyx_L12:; /* "numpy.pxd":826 * * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" */ __pyx_t_3 = PyInt_FromLong(NPY_BYTE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 98; goto __pyx_L13; } /* "numpy.pxd":827 * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" */ __pyx_t_5 = PyInt_FromLong(NPY_UBYTE); 
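/* Note on the integer literals in this branch chain: because of the
 * Cython "ticket #99" workaround quoted above, the generated code stores
 * raw ASCII codes rather than character constants.  They are the usual
 * struct-module format characters: 98 == 'b', 66 == 'B', 104 == 'h',
 * 72 == 'H', 105 == 'i', 73 == 'I', 108 == 'l', 76 == 'L', 113 == 'q',
 * 81 == 'Q', 102 == 'f', 100 == 'd', 103 == 'g', 90 == 'Z' (complex
 * prefix) and 79 == 'O'. */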
if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 66; goto __pyx_L13; } /* "numpy.pxd":828 * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" */ __pyx_t_3 = PyInt_FromLong(NPY_SHORT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 104; goto __pyx_L13; } /* "numpy.pxd":829 * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" */ __pyx_t_5 = PyInt_FromLong(NPY_USHORT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 72; goto __pyx_L13; } /* "numpy.pxd":830 * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" */ __pyx_t_3 = PyInt_FromLong(NPY_INT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 105; goto __pyx_L13; } /* "numpy.pxd":831 * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t 
== NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" */ __pyx_t_5 = PyInt_FromLong(NPY_UINT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 73; goto __pyx_L13; } /* "numpy.pxd":832 * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" */ __pyx_t_3 = PyInt_FromLong(NPY_LONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 108; goto __pyx_L13; } /* "numpy.pxd":833 * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" */ __pyx_t_5 = PyInt_FromLong(NPY_ULONG); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 76; goto __pyx_L13; } /* "numpy.pxd":834 * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" */ __pyx_t_3 = PyInt_FromLong(NPY_LONGLONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; 
__pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 113; goto __pyx_L13; } /* "numpy.pxd":835 * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" */ __pyx_t_5 = PyInt_FromLong(NPY_ULONGLONG); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 81; goto __pyx_L13; } /* "numpy.pxd":836 * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" */ __pyx_t_3 = PyInt_FromLong(NPY_FLOAT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 102; goto __pyx_L13; } /* "numpy.pxd":837 * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf */ __pyx_t_5 = PyInt_FromLong(NPY_DOUBLE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 100; goto __pyx_L13; } /* "numpy.pxd":838 * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd */ __pyx_t_3 = PyInt_FromLong(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = 
PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 103; goto __pyx_L13; } /* "numpy.pxd":839 * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg */ __pyx_t_5 = PyInt_FromLong(NPY_CFLOAT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 102; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L13; } /* "numpy.pxd":840 * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" */ __pyx_t_3 = PyInt_FromLong(NPY_CDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 100; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L13; } /* "numpy.pxd":841 * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: */ __pyx_t_5 = PyInt_FromLong(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 103; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L13; } /* "numpy.pxd":842 * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ __pyx_t_3 = PyInt_FromLong(NPY_OBJECT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 79; goto __pyx_L13; } /*else*/ { /* "numpy.pxd":844 * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * f += 1 * else: */ __pyx_t_5 = PyNumber_Remainder(((PyObject *)__pyx_kp_u_9), __pyx_v_t); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_5)); __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_t_5)); __Pyx_GIVEREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L13:; /* "numpy.pxd":845 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * f += 1 # <<<<<<<<<<<<<< * else: * # Cython ignores struct boundary information ("T{...}"), */ __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L11; } /*else*/ { /* "numpy.pxd":849 * # Cython ignores struct boundary information ("T{...}"), * # so don't output it * f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<< * return f * */ __pyx_t_12 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_12 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 849; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_f = __pyx_t_12; } __pyx_L11:; } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "numpy.pxd":850 * # so don't output it * f = _util_dtypestring(child, f, end, offset) * return f # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_f; goto __pyx_L0; __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_child); __Pyx_XDECREF(__pyx_v_fields); 
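/* Cleanup: release the Python temporaries (child dtype, fields tuple,
 * field name, new offset and type-code object) created while
 * _util_dtypestring recursively turned a structured dtype into a
 * buffer-protocol format string. */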
__Pyx_XDECREF(__pyx_v_childname); __Pyx_XDECREF(__pyx_v_new_offset); __Pyx_XDECREF(__pyx_v_t); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":965 * * * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< * cdef PyObject* baseptr * if base is None: */ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) { PyObject *__pyx_v_baseptr; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("set_array_base", 0); /* "numpy.pxd":967 * cdef inline void set_array_base(ndarray arr, object base): * cdef PyObject* baseptr * if base is None: # <<<<<<<<<<<<<< * baseptr = NULL * else: */ __pyx_t_1 = (__pyx_v_base == Py_None); if (__pyx_t_1) { /* "numpy.pxd":968 * cdef PyObject* baseptr * if base is None: * baseptr = NULL # <<<<<<<<<<<<<< * else: * Py_INCREF(base) # important to do this before decref below! */ __pyx_v_baseptr = NULL; goto __pyx_L3; } /*else*/ { /* "numpy.pxd":970 * baseptr = NULL * else: * Py_INCREF(base) # important to do this before decref below! # <<<<<<<<<<<<<< * baseptr = base * Py_XDECREF(arr.base) */ Py_INCREF(__pyx_v_base); /* "numpy.pxd":971 * else: * Py_INCREF(base) # important to do this before decref below! * baseptr = base # <<<<<<<<<<<<<< * Py_XDECREF(arr.base) * arr.base = baseptr */ __pyx_v_baseptr = ((PyObject *)__pyx_v_base); } __pyx_L3:; /* "numpy.pxd":972 * Py_INCREF(base) # important to do this before decref below! * baseptr = base * Py_XDECREF(arr.base) # <<<<<<<<<<<<<< * arr.base = baseptr * */ Py_XDECREF(__pyx_v_arr->base); /* "numpy.pxd":973 * baseptr = base * Py_XDECREF(arr.base) * arr.base = baseptr # <<<<<<<<<<<<<< * * cdef inline object get_array_base(ndarray arr): */ __pyx_v_arr->base = __pyx_v_baseptr; __Pyx_RefNannyFinishContext(); } /* "numpy.pxd":975 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("get_array_base", 0); /* "numpy.pxd":976 * * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: # <<<<<<<<<<<<<< * return None * else: */ __pyx_t_1 = (__pyx_v_arr->base == NULL); if (__pyx_t_1) { /* "numpy.pxd":977 * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: * return None # <<<<<<<<<<<<<< * else: * return arr.base */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_None); __pyx_r = Py_None; goto __pyx_L0; goto __pyx_L3; } /*else*/ { /* "numpy.pxd":979 * return None * else: * return arr.base # <<<<<<<<<<<<<< */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_arr->base)); __pyx_r = ((PyObject *)__pyx_v_arr->base); goto __pyx_L0; } __pyx_L3:; __pyx_r = Py_None; __Pyx_INCREF(Py_None); __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyMethodDef __pyx_methods[] = { {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 static struct PyModuleDef __pyx_moduledef = { PyModuleDef_HEAD_INIT, __Pyx_NAMESTR("glm_twolevel"), __Pyx_DOCSTR(__pyx_k_15), /* m_doc */ -1, /* m_size */ __pyx_methods /* m_methods */, NULL, /* m_reload */ NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_kp_u_10, __pyx_k_10, sizeof(__pyx_k_10), 0, 1, 0, 0}, {&__pyx_kp_u_13, __pyx_k_13, sizeof(__pyx_k_13), 0, 1, 0, 0}, {&__pyx_kp_s_16, __pyx_k_16, sizeof(__pyx_k_16), 0, 0, 
1, 0}, {&__pyx_kp_s_19, __pyx_k_19, sizeof(__pyx_k_19), 0, 0, 1, 0}, {&__pyx_n_s_20, __pyx_k_20, sizeof(__pyx_k_20), 0, 0, 1, 1}, {&__pyx_n_s_25, __pyx_k_25, sizeof(__pyx_k_25), 0, 0, 1, 1}, {&__pyx_kp_u_3, __pyx_k_3, sizeof(__pyx_k_3), 0, 1, 0, 0}, {&__pyx_kp_u_5, __pyx_k_5, sizeof(__pyx_k_5), 0, 1, 0, 0}, {&__pyx_kp_u_7, __pyx_k_7, sizeof(__pyx_k_7), 0, 1, 0, 0}, {&__pyx_kp_u_9, __pyx_k_9, sizeof(__pyx_k_9), 0, 1, 0, 0}, {&__pyx_n_s__A, __pyx_k__A, sizeof(__pyx_k__A), 0, 0, 1, 1}, {&__pyx_n_s__B, __pyx_k__B, sizeof(__pyx_k__B), 0, 0, 1, 1}, {&__pyx_n_s__C, __pyx_k__C, sizeof(__pyx_k__C), 0, 0, 1, 1}, {&__pyx_n_s__DEF_NITER, __pyx_k__DEF_NITER, sizeof(__pyx_k__DEF_NITER), 0, 0, 1, 1}, {&__pyx_n_s__LL, __pyx_k__LL, sizeof(__pyx_k__LL), 0, 0, 1, 1}, {&__pyx_n_s__P, __pyx_k__P, sizeof(__pyx_k__P), 0, 0, 1, 1}, {&__pyx_n_s__PpX, __pyx_k__PpX, sizeof(__pyx_k__PpX), 0, 0, 1, 1}, {&__pyx_n_s__RuntimeError, __pyx_k__RuntimeError, sizeof(__pyx_k__RuntimeError), 0, 0, 1, 1}, {&__pyx_n_s__S2, __pyx_k__S2, sizeof(__pyx_k__S2), 0, 0, 1, 1}, {&__pyx_n_s__VY, __pyx_k__VY, sizeof(__pyx_k__VY), 0, 0, 1, 1}, {&__pyx_n_s__ValueError, __pyx_k__ValueError, sizeof(__pyx_k__ValueError), 0, 0, 1, 1}, {&__pyx_n_s__X, __pyx_k__X, sizeof(__pyx_k__X), 0, 0, 1, 1}, {&__pyx_n_s__Y, __pyx_k__Y, sizeof(__pyx_k__Y), 0, 0, 1, 1}, {&__pyx_n_s____main__, __pyx_k____main__, sizeof(__pyx_k____main__), 0, 0, 1, 1}, {&__pyx_n_s____test__, __pyx_k____test__, sizeof(__pyx_k____test__), 0, 0, 1, 1}, {&__pyx_n_s____version__, __pyx_k____version__, sizeof(__pyx_k____version__), 0, 0, 1, 1}, {&__pyx_n_s__axis, __pyx_k__axis, sizeof(__pyx_k__axis), 0, 0, 1, 1}, {&__pyx_n_s__b, __pyx_k__b, sizeof(__pyx_k__b), 0, 0, 1, 1}, {&__pyx_n_s__dims, __pyx_k__dims, sizeof(__pyx_k__dims), 0, 0, 1, 1}, {&__pyx_n_s__dot, __pyx_k__dot, sizeof(__pyx_k__dot), 0, 0, 1, 1}, {&__pyx_n_s__em, __pyx_k__em, sizeof(__pyx_k__em), 0, 0, 1, 1}, {&__pyx_n_s__eye, __pyx_k__eye, sizeof(__pyx_k__eye), 0, 0, 1, 1}, {&__pyx_n_s__i, __pyx_k__i, sizeof(__pyx_k__i), 0, 0, 1, 1}, {&__pyx_n_s__inv, __pyx_k__inv, sizeof(__pyx_k__inv), 0, 0, 1, 1}, {&__pyx_n_s__lda, __pyx_k__lda, sizeof(__pyx_k__lda), 0, 0, 1, 1}, {&__pyx_n_s__linalg, __pyx_k__linalg, sizeof(__pyx_k__linalg), 0, 0, 1, 1}, {&__pyx_n_s__ll, __pyx_k__ll, sizeof(__pyx_k__ll), 0, 0, 1, 1}, {&__pyx_n_s__ll0, __pyx_k__ll0, sizeof(__pyx_k__ll0), 0, 0, 1, 1}, {&__pyx_n_s__log_likelihood, __pyx_k__log_likelihood, sizeof(__pyx_k__log_likelihood), 0, 0, 1, 1}, {&__pyx_n_s__maximum, __pyx_k__maximum, sizeof(__pyx_k__maximum), 0, 0, 1, 1}, {&__pyx_n_s__multi, __pyx_k__multi, sizeof(__pyx_k__multi), 0, 0, 1, 1}, {&__pyx_n_s__n, __pyx_k__n, sizeof(__pyx_k__n), 0, 0, 1, 1}, {&__pyx_n_s__ndim, __pyx_k__ndim, sizeof(__pyx_k__ndim), 0, 0, 1, 1}, {&__pyx_n_s__niter, __pyx_k__niter, sizeof(__pyx_k__niter), 0, 0, 1, 1}, {&__pyx_n_s__np, __pyx_k__np, sizeof(__pyx_k__np), 0, 0, 1, 1}, {&__pyx_n_s__numpy, __pyx_k__numpy, sizeof(__pyx_k__numpy), 0, 0, 1, 1}, {&__pyx_n_s__p, __pyx_k__p, sizeof(__pyx_k__p), 0, 0, 1, 1}, {&__pyx_n_s__pinv, __pyx_k__pinv, sizeof(__pyx_k__pinv), 0, 0, 1, 1}, {&__pyx_n_s__ppx, __pyx_k__ppx, sizeof(__pyx_k__ppx), 0, 0, 1, 1}, {&__pyx_n_s__range, __pyx_k__range, sizeof(__pyx_k__range), 0, 0, 1, 1}, {&__pyx_n_s__s2, __pyx_k__s2, sizeof(__pyx_k__s2), 0, 0, 1, 1}, {&__pyx_n_s__shape, __pyx_k__shape, sizeof(__pyx_k__shape), 0, 0, 1, 1}, {&__pyx_n_s__tmp, __pyx_k__tmp, sizeof(__pyx_k__tmp), 0, 0, 1, 1}, {&__pyx_n_s__transpose, __pyx_k__transpose, sizeof(__pyx_k__transpose), 0, 0, 1, 1}, {&__pyx_n_s__vy, 
__pyx_k__vy, sizeof(__pyx_k__vy), 0, 0, 1, 1}, {&__pyx_n_s__x, __pyx_k__x, sizeof(__pyx_k__x), 0, 0, 1, 1}, {&__pyx_n_s__y, __pyx_k__y, sizeof(__pyx_k__y), 0, 0, 1, 1}, {&__pyx_n_s__zeros, __pyx_k__zeros, sizeof(__pyx_k__zeros), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_range = __Pyx_GetName(__pyx_b, __pyx_n_s__range); if (!__pyx_builtin_range) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 87; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_ValueError = __Pyx_GetName(__pyx_b, __pyx_n_s__ValueError); if (!__pyx_builtin_ValueError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_RuntimeError = __Pyx_GetName(__pyx_b, __pyx_n_s__RuntimeError); if (!__pyx_builtin_RuntimeError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} return 0; __pyx_L1_error:; return -1; } static int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "numpy.pxd":215 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_k_tuple_4 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_4); __Pyx_INCREF(((PyObject *)__pyx_kp_u_3)); PyTuple_SET_ITEM(__pyx_k_tuple_4, 0, ((PyObject *)__pyx_kp_u_3)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_3)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_4)); /* "numpy.pxd":219 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_k_tuple_6 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_6); __Pyx_INCREF(((PyObject *)__pyx_kp_u_5)); PyTuple_SET_ITEM(__pyx_k_tuple_6, 0, ((PyObject *)__pyx_kp_u_5)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_5)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_6)); /* "numpy.pxd":257 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_k_tuple_8 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_8); __Pyx_INCREF(((PyObject *)__pyx_kp_u_7)); PyTuple_SET_ITEM(__pyx_k_tuple_8, 0, ((PyObject *)__pyx_kp_u_7)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_7)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_8)); /* "numpy.pxd":799 * * if (end - f) - (new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_k_tuple_11 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_11)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_11); __Pyx_INCREF(((PyObject *)__pyx_kp_u_10)); 
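/* Each __pyx_k_tuple_N built in __Pyx_InitCachedConstants is a 1-tuple
 * caching the constant argument of an exception raised in numpy.pxd
 * (e.g. the "Format string allocated too short" RuntimeError), so the
 * argument tuple is created once at module initialisation rather than on
 * every raise. */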
PyTuple_SET_ITEM(__pyx_k_tuple_11, 0, ((PyObject *)__pyx_kp_u_10)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_10)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_11)); /* "numpy.pxd":803 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_k_tuple_12 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_12)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_12); __Pyx_INCREF(((PyObject *)__pyx_kp_u_7)); PyTuple_SET_ITEM(__pyx_k_tuple_12, 0, ((PyObject *)__pyx_kp_u_7)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_7)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_12)); /* "numpy.pxd":823 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_k_tuple_14 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_14)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_14); __Pyx_INCREF(((PyObject *)__pyx_kp_u_13)); PyTuple_SET_ITEM(__pyx_k_tuple_14, 0, ((PyObject *)__pyx_kp_u_13)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_13)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_14)); /* "nipy/labs/group/glm_twolevel.pyx":46 * DEF_NITER = 2 * * def em(ndarray Y, ndarray VY, ndarray X, ndarray C=None, int axis=0, int niter=DEF_NITER): # <<<<<<<<<<<<<< * """ * b, s2 = em(y, vy, X, C=None, axis=0, niter=DEF_NITER). */ __pyx_k_tuple_17 = PyTuple_New(23); if (unlikely(!__pyx_k_tuple_17)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 46; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_17); __Pyx_INCREF(((PyObject *)__pyx_n_s__Y)); PyTuple_SET_ITEM(__pyx_k_tuple_17, 0, ((PyObject *)__pyx_n_s__Y)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Y)); __Pyx_INCREF(((PyObject *)__pyx_n_s__VY)); PyTuple_SET_ITEM(__pyx_k_tuple_17, 1, ((PyObject *)__pyx_n_s__VY)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__VY)); __Pyx_INCREF(((PyObject *)__pyx_n_s__X)); PyTuple_SET_ITEM(__pyx_k_tuple_17, 2, ((PyObject *)__pyx_n_s__X)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__X)); __Pyx_INCREF(((PyObject *)__pyx_n_s__C)); PyTuple_SET_ITEM(__pyx_k_tuple_17, 3, ((PyObject *)__pyx_n_s__C)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__C)); __Pyx_INCREF(((PyObject *)__pyx_n_s__axis)); PyTuple_SET_ITEM(__pyx_k_tuple_17, 4, ((PyObject *)__pyx_n_s__axis)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__axis)); __Pyx_INCREF(((PyObject *)__pyx_n_s__niter)); PyTuple_SET_ITEM(__pyx_k_tuple_17, 5, ((PyObject *)__pyx_n_s__niter)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__niter)); __Pyx_INCREF(((PyObject *)__pyx_n_s__n)); PyTuple_SET_ITEM(__pyx_k_tuple_17, 6, ((PyObject *)__pyx_n_s__n)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__n)); __Pyx_INCREF(((PyObject *)__pyx_n_s__p)); PyTuple_SET_ITEM(__pyx_k_tuple_17, 7, ((PyObject *)__pyx_n_s__p)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__p)); __Pyx_INCREF(((PyObject *)__pyx_n_s__y)); PyTuple_SET_ITEM(__pyx_k_tuple_17, 8, ((PyObject *)__pyx_n_s__y)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__y)); __Pyx_INCREF(((PyObject *)__pyx_n_s__vy)); PyTuple_SET_ITEM(__pyx_k_tuple_17, 9, ((PyObject *)__pyx_n_s__vy)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__vy)); __Pyx_INCREF(((PyObject *)__pyx_n_s__b)); PyTuple_SET_ITEM(__pyx_k_tuple_17, 10, 
((PyObject *)__pyx_n_s__b)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__b)); __Pyx_INCREF(((PyObject *)__pyx_n_s__s2)); PyTuple_SET_ITEM(__pyx_k_tuple_17, 11, ((PyObject *)__pyx_n_s__s2)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__s2)); __Pyx_INCREF(((PyObject *)__pyx_n_s__x)); PyTuple_SET_ITEM(__pyx_k_tuple_17, 12, ((PyObject *)__pyx_n_s__x)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__x)); __Pyx_INCREF(((PyObject *)__pyx_n_s__ppx)); PyTuple_SET_ITEM(__pyx_k_tuple_17, 13, ((PyObject *)__pyx_n_s__ppx)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__ppx)); __Pyx_INCREF(((PyObject *)__pyx_n_s__em)); PyTuple_SET_ITEM(__pyx_k_tuple_17, 14, ((PyObject *)__pyx_n_s__em)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__em)); __Pyx_INCREF(((PyObject *)__pyx_n_s__multi)); PyTuple_SET_ITEM(__pyx_k_tuple_17, 15, ((PyObject *)__pyx_n_s__multi)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__multi)); __Pyx_INCREF(((PyObject *)__pyx_n_s__PpX)); PyTuple_SET_ITEM(__pyx_k_tuple_17, 16, ((PyObject *)__pyx_n_s__PpX)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__PpX)); __Pyx_INCREF(((PyObject *)__pyx_n_s__A)); PyTuple_SET_ITEM(__pyx_k_tuple_17, 17, ((PyObject *)__pyx_n_s__A)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__A)); __Pyx_INCREF(((PyObject *)__pyx_n_s__B)); PyTuple_SET_ITEM(__pyx_k_tuple_17, 18, ((PyObject *)__pyx_n_s__B)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__B)); __Pyx_INCREF(((PyObject *)__pyx_n_s__P)); PyTuple_SET_ITEM(__pyx_k_tuple_17, 19, ((PyObject *)__pyx_n_s__P)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__P)); __Pyx_INCREF(((PyObject *)__pyx_n_s__dims)); PyTuple_SET_ITEM(__pyx_k_tuple_17, 20, ((PyObject *)__pyx_n_s__dims)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__dims)); __Pyx_INCREF(((PyObject *)__pyx_n_s__S2)); PyTuple_SET_ITEM(__pyx_k_tuple_17, 21, ((PyObject *)__pyx_n_s__S2)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__S2)); __Pyx_INCREF(((PyObject *)__pyx_n_s__i)); PyTuple_SET_ITEM(__pyx_k_tuple_17, 22, ((PyObject *)__pyx_n_s__i)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__i)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_17)); __pyx_k_codeobj_18 = (PyObject*)__Pyx_PyCode_New(6, 0, 23, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_17, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_19, __pyx_n_s__em, 46, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_18)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 46; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/group/glm_twolevel.pyx":126 * * * def log_likelihood(Y, VY, X, B, S2, int axis=0): # <<<<<<<<<<<<<< * """ * ll = log_likelihood(y, vy, X, b, s2, axis=0) */ __pyx_k_tuple_21 = PyTuple_New(17); if (unlikely(!__pyx_k_tuple_21)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 126; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_21); __Pyx_INCREF(((PyObject *)__pyx_n_s__Y)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 0, ((PyObject *)__pyx_n_s__Y)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Y)); __Pyx_INCREF(((PyObject *)__pyx_n_s__VY)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 1, ((PyObject *)__pyx_n_s__VY)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__VY)); __Pyx_INCREF(((PyObject *)__pyx_n_s__X)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 2, ((PyObject *)__pyx_n_s__X)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__X)); __Pyx_INCREF(((PyObject *)__pyx_n_s__B)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 3, ((PyObject *)__pyx_n_s__B)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__B)); __Pyx_INCREF(((PyObject *)__pyx_n_s__S2)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 4, ((PyObject *)__pyx_n_s__S2)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__S2)); __Pyx_INCREF(((PyObject *)__pyx_n_s__axis)); 
PyTuple_SET_ITEM(__pyx_k_tuple_21, 5, ((PyObject *)__pyx_n_s__axis)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__axis)); __Pyx_INCREF(((PyObject *)__pyx_n_s__y)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 6, ((PyObject *)__pyx_n_s__y)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__y)); __Pyx_INCREF(((PyObject *)__pyx_n_s__vy)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 7, ((PyObject *)__pyx_n_s__vy)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__vy)); __Pyx_INCREF(((PyObject *)__pyx_n_s__b)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 8, ((PyObject *)__pyx_n_s__b)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__b)); __Pyx_INCREF(((PyObject *)__pyx_n_s__s2)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 9, ((PyObject *)__pyx_n_s__s2)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__s2)); __Pyx_INCREF(((PyObject *)__pyx_n_s__ll)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 10, ((PyObject *)__pyx_n_s__ll)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__ll)); __Pyx_INCREF(((PyObject *)__pyx_n_s__tmp)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 11, ((PyObject *)__pyx_n_s__tmp)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__tmp)); __Pyx_INCREF(((PyObject *)__pyx_n_s__x)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 12, ((PyObject *)__pyx_n_s__x)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__x)); __Pyx_INCREF(((PyObject *)__pyx_n_s__multi)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 13, ((PyObject *)__pyx_n_s__multi)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__multi)); __Pyx_INCREF(((PyObject *)__pyx_n_s__dims)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 14, ((PyObject *)__pyx_n_s__dims)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__dims)); __Pyx_INCREF(((PyObject *)__pyx_n_s__LL)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 15, ((PyObject *)__pyx_n_s__LL)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__LL)); __Pyx_INCREF(((PyObject *)__pyx_n_s__i)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 16, ((PyObject *)__pyx_n_s__i)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__i)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_21)); __pyx_k_codeobj_22 = (PyObject*)__Pyx_PyCode_New(6, 0, 17, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_21, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_19, __pyx_n_s__log_likelihood, 126, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_22)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 126; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/group/glm_twolevel.pyx":174 * * * def log_likelihood_ratio(Y, VY, X, C, int axis=0, int niter=DEF_NITER): # <<<<<<<<<<<<<< * """ * lda = em(y, vy, X, C, axis=0, niter=DEF_NITER). 
*/ __pyx_k_tuple_23 = PyTuple_New(11); if (unlikely(!__pyx_k_tuple_23)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_23); __Pyx_INCREF(((PyObject *)__pyx_n_s__Y)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 0, ((PyObject *)__pyx_n_s__Y)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Y)); __Pyx_INCREF(((PyObject *)__pyx_n_s__VY)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 1, ((PyObject *)__pyx_n_s__VY)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__VY)); __Pyx_INCREF(((PyObject *)__pyx_n_s__X)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 2, ((PyObject *)__pyx_n_s__X)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__X)); __Pyx_INCREF(((PyObject *)__pyx_n_s__C)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 3, ((PyObject *)__pyx_n_s__C)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__C)); __Pyx_INCREF(((PyObject *)__pyx_n_s__axis)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 4, ((PyObject *)__pyx_n_s__axis)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__axis)); __Pyx_INCREF(((PyObject *)__pyx_n_s__niter)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 5, ((PyObject *)__pyx_n_s__niter)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__niter)); __Pyx_INCREF(((PyObject *)__pyx_n_s__B)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 6, ((PyObject *)__pyx_n_s__B)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__B)); __Pyx_INCREF(((PyObject *)__pyx_n_s__S2)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 7, ((PyObject *)__pyx_n_s__S2)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__S2)); __Pyx_INCREF(((PyObject *)__pyx_n_s__ll0)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 8, ((PyObject *)__pyx_n_s__ll0)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__ll0)); __Pyx_INCREF(((PyObject *)__pyx_n_s__ll)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 9, ((PyObject *)__pyx_n_s__ll)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__ll)); __Pyx_INCREF(((PyObject *)__pyx_n_s__lda)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 10, ((PyObject *)__pyx_n_s__lda)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__lda)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_23)); __pyx_k_codeobj_24 = (PyObject*)__Pyx_PyCode_New(6, 0, 11, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_23, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_19, __pyx_n_s_25, 174, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_24)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_InitGlobals(void) { if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __pyx_int_2 = PyInt_FromLong(2); if (unlikely(!__pyx_int_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __pyx_int_15 = PyInt_FromLong(15); if (unlikely(!__pyx_int_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; return 0; __pyx_L1_error:; return -1; } #if PY_MAJOR_VERSION < 3 PyMODINIT_FUNC initglm_twolevel(void); /*proto*/ PyMODINIT_FUNC initglm_twolevel(void) #else PyMODINIT_FUNC PyInit_glm_twolevel(void); /*proto*/ PyMODINIT_FUNC PyInit_glm_twolevel(void) #endif { PyObject *__pyx_t_1 = NULL; int __pyx_t_2; __Pyx_RefNannyDeclarations #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = 
__Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit_glm_twolevel(void)", 0); if ( __Pyx_check_binary_version() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #ifdef __Pyx_CyFunction_USED if (__Pyx_CyFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? */ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4(__Pyx_NAMESTR("glm_twolevel"), __pyx_methods, __Pyx_DOCSTR(__pyx_k_15), 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (!PyDict_GetItemString(modules, "nipy.labs.group.glm_twolevel")) { if (unlikely(PyDict_SetItemString(modules, "nipy.labs.group.glm_twolevel", __pyx_m) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } } #endif __pyx_b = PyImport_AddModule(__Pyx_NAMESTR(__Pyx_BUILTIN_MODULE_NAME)); if (unlikely(!__pyx_b)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #if CYTHON_COMPILING_IN_PYPY Py_INCREF(__pyx_b); #endif if (__Pyx_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; /*--- Initialize various global constants etc. 
---*/ if (unlikely(__Pyx_InitGlobals() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (__pyx_module_is_main_nipy__labs__group__glm_twolevel) { if (__Pyx_SetAttrString(__pyx_m, "__name__", __pyx_n_s____main__) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; } /*--- Builtin init code ---*/ if (unlikely(__Pyx_InitCachedBuiltins() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Constants init code ---*/ if (unlikely(__Pyx_InitCachedConstants() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Global init code ---*/ /*--- Variable export code ---*/ /*--- Function export code ---*/ /*--- Type init code ---*/ /*--- Type import code ---*/ __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "type", #if CYTHON_COMPILING_IN_PYPY sizeof(PyTypeObject), #else sizeof(PyHeapTypeObject), #endif 0); if (unlikely(!__pyx_ptype_7cpython_4type_type)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_5numpy_dtype)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 155; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 169; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 178; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 861; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Variable import code ---*/ /*--- Function import code ---*/ /*--- Execution code ---*/ /* "nipy/labs/group/glm_twolevel.pyx":8 * """ * * __version__ = '0.1' # <<<<<<<<<<<<<< * * # Includes */ if (PyObject_SetAttr(__pyx_m, __pyx_n_s____version__, ((PyObject *)__pyx_kp_s_16)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 8; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/group/glm_twolevel.pyx":39 * * # Initialize numpy * fffpy_import_array() # <<<<<<<<<<<<<< * import_array() * import numpy as np */ fffpy_import_array(); /* "nipy/labs/group/glm_twolevel.pyx":40 * # Initialize numpy * fffpy_import_array() * import_array() # <<<<<<<<<<<<<< * import numpy as np * */ import_array(); /* "nipy/labs/group/glm_twolevel.pyx":41 * fffpy_import_array() * import_array() * import numpy as np # <<<<<<<<<<<<<< * * # Constants */ __pyx_t_1 = __Pyx_Import(((PyObject *)__pyx_n_s__numpy), 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 41; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__np, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 41; 
__pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/group/glm_twolevel.pyx":44 * * # Constants * DEF_NITER = 2 # <<<<<<<<<<<<<< * * def em(ndarray Y, ndarray VY, ndarray X, ndarray C=None, int axis=0, int niter=DEF_NITER): */ if (PyObject_SetAttr(__pyx_m, __pyx_n_s__DEF_NITER, __pyx_int_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 44; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/group/glm_twolevel.pyx":46 * DEF_NITER = 2 * * def em(ndarray Y, ndarray VY, ndarray X, ndarray C=None, int axis=0, int niter=DEF_NITER): # <<<<<<<<<<<<<< * """ * b, s2 = em(y, vy, X, C=None, axis=0, niter=DEF_NITER). */ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__DEF_NITER); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 46; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyInt_AsInt(__pyx_t_1); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 46; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_k_1 = __pyx_t_2; __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_5group_12glm_twolevel_1em, NULL, __pyx_n_s_20); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 46; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__em, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 46; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/group/glm_twolevel.pyx":126 * * * def log_likelihood(Y, VY, X, B, S2, int axis=0): # <<<<<<<<<<<<<< * """ * ll = log_likelihood(y, vy, X, b, s2, axis=0) */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_5group_12glm_twolevel_3log_likelihood, NULL, __pyx_n_s_20); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 126; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__log_likelihood, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 126; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/group/glm_twolevel.pyx":174 * * * def log_likelihood_ratio(Y, VY, X, C, int axis=0, int niter=DEF_NITER): # <<<<<<<<<<<<<< * """ * lda = em(y, vy, X, C, axis=0, niter=DEF_NITER). */ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__DEF_NITER); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyInt_AsInt(__pyx_t_1); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_k_2 = __pyx_t_2; __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_5group_12glm_twolevel_5log_likelihood_ratio, NULL, __pyx_n_s_20); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s_25, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/group/glm_twolevel.pyx":1 * # -*- Mode: Python -*- Not really, but the syntax is close enough # <<<<<<<<<<<<<< * """ * Two-level general linear model for group analyses. 
*/ __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_1)); if (PyObject_SetAttr(__pyx_m, __pyx_n_s____test__, ((PyObject *)__pyx_t_1)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; /* "numpy.pxd":975 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); if (__pyx_m) { __Pyx_AddTraceback("init nipy.labs.group.glm_twolevel", __pyx_clineno, __pyx_lineno, __pyx_filename); Py_DECREF(__pyx_m); __pyx_m = 0; } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init nipy.labs.group.glm_twolevel"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if PY_MAJOR_VERSION < 3 return; #else return __pyx_m; #endif } /* Runtime support code */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule((char *)modname); if (!m) goto end; p = PyObject_GetAttrString(m, (char *)"RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* CYTHON_REFNANNY */ static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name) { PyObject *result; result = PyObject_GetAttr(dict, name); if (!result) { if (dict != __pyx_b) { PyErr_Clear(); result = PyObject_GetAttr(__pyx_b, name); } if (!result) { PyErr_SetObject(PyExc_NameError, name); } } return result; } static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%s() takes %s %" CYTHON_FORMAT_SSIZE_T "d positional argument%s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? 
"" : "s", num_found); } static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 
1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%s() got an unexpected keyword argument '%s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } static int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, const char *name, int exact) { if (!type) { PyErr_Format(PyExc_SystemError, "Missing type object"); return 0; } if (none_allowed && obj == Py_None) return 1; else if (exact) { if (Py_TYPE(obj) == type) return 1; } else { if (PyObject_TypeCheck(obj, type)) return 1; } PyErr_Format(PyExc_TypeError, "Argument '%s' has incorrect type (expected %s, got %s)", name, type->tp_name, Py_TYPE(obj)->tp_name); return 0; } static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { if (unlikely(!type)) { PyErr_Format(PyExc_SystemError, "Missing type object"); return 0; } if (likely(PyObject_TypeCheck(obj, type))) return 1; PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", Py_TYPE(obj)->tp_name, type->tp_name); return 0; } static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); } static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more than %" CYTHON_FORMAT_SSIZE_T "d value%s to unpack", index, (index == 1) ? 
"" : "s"); } static CYTHON_INLINE int __Pyx_IterFinish(void) { #if CYTHON_COMPILING_IN_CPYTHON PyThreadState *tstate = PyThreadState_GET(); PyObject* exc_type = tstate->curexc_type; if (unlikely(exc_type)) { if (likely(exc_type == PyExc_StopIteration) || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration)) { PyObject *exc_value, *exc_tb; exc_value = tstate->curexc_value; exc_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; Py_DECREF(exc_type); Py_XDECREF(exc_value); Py_XDECREF(exc_tb); return 0; } else { return -1; } } return 0; #else if (unlikely(PyErr_Occurred())) { if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) { PyErr_Clear(); return 0; } else { return -1; } } return 0; #endif } static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected) { if (unlikely(retval)) { Py_DECREF(retval); __Pyx_RaiseTooManyValuesError(expected); return -1; } else { return __Pyx_IterFinish(); } return 0; } static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb) { #if CYTHON_COMPILING_IN_CPYTHON PyObject *tmp_type, *tmp_value, *tmp_tb; PyThreadState *tstate = PyThreadState_GET(); tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_Restore(type, value, tb); #endif } static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb) { #if CYTHON_COMPILING_IN_CPYTHON PyThreadState *tstate = PyThreadState_GET(); *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(type, value, tb); #endif } #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } #if PY_VERSION_HEX < 0x02050000 if (PyClass_Check(type)) { #else if (PyType_Check(type)) { #endif #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; #if PY_VERSION_HEX < 0x02050000 if (PyInstance_Check(type)) { type = (PyObject*) ((PyInstanceObject*)type)->in_class; Py_INCREF(type); } else { type = 0; PyErr_SetString(PyExc_TypeError, "raise: exception must be an old-style class or instance"); goto raise_error; } #else type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } #endif } __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else /* Python 3+ */ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { 
PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyEval_CallObject(type, args); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } if (cause && cause != Py_None) { PyObject *fixed_cause; if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { PyThreadState *tstate = PyThreadState_GET(); PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } } bad: Py_XDECREF(owned_instance); return; } #endif static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); } static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level) { PyObject *py_import = 0; PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; py_import = __Pyx_GetAttrString(__pyx_b, "__import__"); if (!py_import) goto bad; if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; #if PY_VERSION_HEX >= 0x02050000 { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if (strchr(__Pyx_MODULE_NAME, '.')) { /* try package relative import first */ PyObject *py_level = PyInt_FromLong(1); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; /* try absolute import on failure */ } #endif if (!module) { PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); } } #else if (level>0) { PyErr_SetString(PyExc_RuntimeError, "Relative import is not supported for Python <=2.4."); goto bad; } module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, NULL); #endif bad: Py_XDECREF(empty_list); Py_XDECREF(py_import); Py_XDECREF(empty_dict); return module; } static CYTHON_INLINE PyObject *__Pyx_PyInt_to_py_Py_intptr_t(Py_intptr_t val) { const Py_intptr_t neg_one = (Py_intptr_t)-1, const_zero = (Py_intptr_t)0; const int is_unsigned = const_zero < 
neg_one; if ((sizeof(Py_intptr_t) == sizeof(char)) || (sizeof(Py_intptr_t) == sizeof(short))) { return PyInt_FromLong((long)val); } else if ((sizeof(Py_intptr_t) == sizeof(int)) || (sizeof(Py_intptr_t) == sizeof(long))) { if (is_unsigned) return PyLong_FromUnsignedLong((unsigned long)val); else return PyInt_FromLong((long)val); } else if (sizeof(Py_intptr_t) == sizeof(PY_LONG_LONG)) { if (is_unsigned) return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG)val); else return PyLong_FromLongLong((PY_LONG_LONG)val); } else { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; return _PyLong_FromByteArray(bytes, sizeof(Py_intptr_t), little, !is_unsigned); } } #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return ::std::complex< float >(x, y); } #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return x + y*(__pyx_t_float_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { __pyx_t_float_complex z; z.real = x; z.imag = y; return z; } #endif #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex a, __pyx_t_float_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float denom = b.real * b.real + b.imag * b.imag; z.real = (a.real * b.real + a.imag * b.imag) / denom; z.imag = (a.imag * b.real - a.real * b.imag) / denom; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrtf(z.real*z.real + z.imag*z.imag); #else return hypotf(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { float denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(a, a); case 3: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(z, a); case 4: z = __Pyx_c_prodf(a, a); return 
__Pyx_c_prodf(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } r = a.real; theta = 0; } else { r = __Pyx_c_absf(a); theta = atan2f(a.imag, a.real); } lnr = logf(r); z_r = expf(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cosf(z_theta); z.imag = z_r * sinf(z_theta); return z; } #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return ::std::complex< double >(x, y); } #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return x + y*(__pyx_t_double_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { __pyx_t_double_complex z; z.real = x; z.imag = y; return z; } #endif #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex a, __pyx_t_double_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double denom = b.real * b.real + b.imag * b.imag; z.real = (a.real * b.real + a.imag * b.imag) / denom; z.imag = (a.imag * b.real - a.real * b.imag) / denom; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrt(z.real*z.real + z.imag*z.imag); #else return hypot(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { double denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(a, a); case 3: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(z, a); case 4: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } r = a.real; theta = 0; } else { r = __Pyx_c_abs(a); theta = atan2(a.imag, a.real); } lnr = log(r); z_r = exp(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cos(z_theta); z.imag = z_r * sin(z_theta); return z; } #endif #endif static 
CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject* x) { const unsigned char neg_one = (unsigned char)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(unsigned char) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(unsigned char)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to unsigned char" : "value too large to convert to unsigned char"); } return (unsigned char)-1; } return (unsigned char)val; } return (unsigned char)__Pyx_PyInt_AsUnsignedLong(x); } static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject* x) { const unsigned short neg_one = (unsigned short)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(unsigned short) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(unsigned short)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to unsigned short" : "value too large to convert to unsigned short"); } return (unsigned short)-1; } return (unsigned short)val; } return (unsigned short)__Pyx_PyInt_AsUnsignedLong(x); } static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject* x) { const unsigned int neg_one = (unsigned int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(unsigned int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(unsigned int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to unsigned int" : "value too large to convert to unsigned int"); } return (unsigned int)-1; } return (unsigned int)val; } return (unsigned int)__Pyx_PyInt_AsUnsignedLong(x); } static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject* x) { const char neg_one = (char)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(char) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(char)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to char" : "value too large to convert to char"); } return (char)-1; } return (char)val; } return (char)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject* x) { const short neg_one = (short)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(short) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(short)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to short" : "value too large to convert to short"); } return (short)-1; } return (short)val; } return (short)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject* x) { const int neg_one = (int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? 
"can't convert negative value to int" : "value too large to convert to int"); } return (int)-1; } return (int)val; } return (int)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject* x) { const signed char neg_one = (signed char)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(signed char) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(signed char)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to signed char" : "value too large to convert to signed char"); } return (signed char)-1; } return (signed char)val; } return (signed char)__Pyx_PyInt_AsSignedLong(x); } static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject* x) { const signed short neg_one = (signed short)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(signed short) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(signed short)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to signed short" : "value too large to convert to signed short"); } return (signed short)-1; } return (signed short)val; } return (signed short)__Pyx_PyInt_AsSignedLong(x); } static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject* x) { const signed int neg_one = (signed int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(signed int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(signed int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to signed int" : "value too large to convert to signed int"); } return (signed int)-1; } return (signed int)val; } return (signed int)__Pyx_PyInt_AsSignedLong(x); } static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject* x) { const int neg_one = (int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? 
"can't convert negative value to int" : "value too large to convert to int"); } return (int)-1; } return (int)val; } return (int)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject* x) { const unsigned long neg_one = (unsigned long)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned long"); return (unsigned long)-1; } return (unsigned long)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned long"); return (unsigned long)-1; } return (unsigned long)PyLong_AsUnsignedLong(x); } else { return (unsigned long)PyLong_AsLong(x); } } else { unsigned long val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (unsigned long)-1; val = __Pyx_PyInt_AsUnsignedLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject* x) { const unsigned PY_LONG_LONG neg_one = (unsigned PY_LONG_LONG)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned PY_LONG_LONG"); return (unsigned PY_LONG_LONG)-1; } return (unsigned PY_LONG_LONG)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned PY_LONG_LONG"); return (unsigned PY_LONG_LONG)-1; } return (unsigned PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); } else { return (unsigned PY_LONG_LONG)PyLong_AsLongLong(x); } } else { unsigned PY_LONG_LONG val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (unsigned PY_LONG_LONG)-1; val = __Pyx_PyInt_AsUnsignedLongLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject* x) { const long neg_one = (long)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long)-1; } return (long)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long)-1; } return (long)PyLong_AsUnsignedLong(x); } else { return (long)PyLong_AsLong(x); } } else { long val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (long)-1; val = __Pyx_PyInt_AsLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject* x) { const PY_LONG_LONG neg_one = (PY_LONG_LONG)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to PY_LONG_LONG"); return (PY_LONG_LONG)-1; } return (PY_LONG_LONG)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { 
PyErr_SetString(PyExc_OverflowError, "can't convert negative value to PY_LONG_LONG"); return (PY_LONG_LONG)-1; } return (PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); } else { return (PY_LONG_LONG)PyLong_AsLongLong(x); } } else { PY_LONG_LONG val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (PY_LONG_LONG)-1; val = __Pyx_PyInt_AsLongLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject* x) { const signed long neg_one = (signed long)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed long"); return (signed long)-1; } return (signed long)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed long"); return (signed long)-1; } return (signed long)PyLong_AsUnsignedLong(x); } else { return (signed long)PyLong_AsLong(x); } } else { signed long val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (signed long)-1; val = __Pyx_PyInt_AsSignedLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject* x) { const signed PY_LONG_LONG neg_one = (signed PY_LONG_LONG)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed PY_LONG_LONG"); return (signed PY_LONG_LONG)-1; } return (signed PY_LONG_LONG)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed PY_LONG_LONG"); return (signed PY_LONG_LONG)-1; } return (signed PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); } else { return (signed PY_LONG_LONG)PyLong_AsLongLong(x); } } else { signed PY_LONG_LONG val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (signed PY_LONG_LONG)-1; val = __Pyx_PyInt_AsSignedLongLong(tmp); Py_DECREF(tmp); return val; } } static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); #if PY_VERSION_HEX < 0x02050000 return PyErr_Warn(NULL, message); #else return PyErr_WarnEx(NULL, message, 1); #endif } return 0; } #ifndef __PYX_HAVE_RT_ImportModule #define __PYX_HAVE_RT_ImportModule static PyObject *__Pyx_ImportModule(const char *name) { PyObject *py_name = 0; PyObject *py_module = 0; py_name = __Pyx_PyIdentifier_FromString(name); if (!py_name) goto bad; py_module = PyImport_Import(py_name); Py_DECREF(py_name); return py_module; bad: Py_XDECREF(py_name); return 0; } #endif #ifndef __PYX_HAVE_RT_ImportType #define __PYX_HAVE_RT_ImportType static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict) { PyObject *py_module = 0; PyObject *result = 0; PyObject *py_name = 0; char warning[200]; 
py_module = __Pyx_ImportModule(module_name); if (!py_module) goto bad; py_name = __Pyx_PyIdentifier_FromString(class_name); if (!py_name) goto bad; result = PyObject_GetAttr(py_module, py_name); Py_DECREF(py_name); py_name = 0; Py_DECREF(py_module); py_module = 0; if (!result) goto bad; if (!PyType_Check(result)) { PyErr_Format(PyExc_TypeError, "%s.%s is not a type object", module_name, class_name); goto bad; } if (!strict && (size_t)((PyTypeObject *)result)->tp_basicsize > size) { PyOS_snprintf(warning, sizeof(warning), "%s.%s size changed, may indicate binary incompatibility", module_name, class_name); #if PY_VERSION_HEX < 0x02050000 if (PyErr_Warn(NULL, warning) < 0) goto bad; #else if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; #endif } else if ((size_t)((PyTypeObject *)result)->tp_basicsize != size) { PyErr_Format(PyExc_ValueError, "%s.%s has the wrong size, try recompiling", module_name, class_name); goto bad; } return (PyTypeObject *)result; bad: Py_XDECREF(py_module); Py_XDECREF(result); return NULL; } #endif static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = (start + end) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, new_max*sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( 
const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, /*int argcount,*/ 0, /*int kwonlyargcount,*/ 0, /*int nlocals,*/ 0, /*int stacksize,*/ 0, /*int flags,*/ __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, /*int firstlineno,*/ __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_globals = 0; PyFrameObject *py_frame = 0; py_code = __pyx_find_code_object(c_line ? c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? c_line : py_line, py_code); } py_globals = PyModule_GetDict(__pyx_m); if (!py_globals) goto bad; py_frame = PyFrame_New( PyThreadState_GET(), /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ py_globals, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; py_frame->f_lineno = py_line; PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else /* Python 3+ has unicode identifiers */ if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; ++t; } return 0; } /* Type Conversion Functions */ static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x) { PyNumberMethods *m; const char *name = NULL; PyObject *res = NULL; #if PY_VERSION_HEX < 0x03000000 if (PyInt_Check(x) || PyLong_Check(x)) #else if (PyLong_Check(x)) #endif return Py_INCREF(x), x; m = Py_TYPE(x)->tp_as_number; #if PY_VERSION_HEX < 0x03000000 if (m && m->nb_int) { name = "int"; res = PyNumber_Int(x); } else if (m && m->nb_long) { name = "long"; res = PyNumber_Long(x); } #else if (m && m->nb_int) { name = 
"int"; res = PyNumber_Long(x); } #endif if (res) { #if PY_VERSION_HEX < 0x03000000 if (!PyInt_Check(res) && !PyLong_Check(res)) { #else if (!PyLong_Check(res)) { #endif PyErr_Format(PyExc_TypeError, "__%s__ returned non-%s (type %.200s)", name, name, Py_TYPE(res)->tp_name); Py_DECREF(res); return NULL; } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject* x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { #if PY_VERSION_HEX < 0x02050000 if (ival <= LONG_MAX) return PyInt_FromLong((long)ival); else { unsigned char *bytes = (unsigned char *) &ival; int one = 1; int little = (int)*(unsigned char*)&one; return _PyLong_FromByteArray(bytes, sizeof(size_t), little, 0); } #else return PyInt_FromSize_t(ival); #endif } static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject* x) { unsigned PY_LONG_LONG val = __Pyx_PyInt_AsUnsignedLongLong(x); if (unlikely(val == (unsigned PY_LONG_LONG)-1 && PyErr_Occurred())) { return (size_t)-1; } else if (unlikely(val != (unsigned PY_LONG_LONG)(size_t)val)) { PyErr_SetString(PyExc_OverflowError, "value too large to convert to size_t"); return (size_t)-1; } return (size_t)val; } #endif /* Py_PYTHON_H */ nipy-0.3.0/nipy/labs/group/glm_twolevel.pyx000066400000000000000000000120721210344137400207700ustar00rootroot00000000000000# -*- Mode: Python -*- Not really, but the syntax is close enough """ Two-level general linear model for group analyses. Author: Alexis Roche, 2008. """ __version__ = '0.1' # Includes from fff cimport * # Exports from fff_glm_twolevel.h cdef extern from "fff_glm_twolevel.h": ctypedef struct fff_glm_twolevel_EM: fff_vector* b double s2 fff_vector* z fff_vector* vz fff_glm_twolevel_EM* fff_glm_twolevel_EM_new(size_t n, size_t p) void fff_glm_twolevel_EM_delete(fff_glm_twolevel_EM* thisone) void fff_glm_twolevel_EM_init(fff_glm_twolevel_EM* em) void fff_glm_twolevel_EM_run(fff_glm_twolevel_EM* em, fff_vector* y, fff_vector* vy, fff_matrix* X, fff_matrix* PpiX, unsigned int niter) double fff_glm_twolevel_log_likelihood(fff_vector* y, fff_vector* vy, fff_matrix* X, fff_vector* b, double s2, fff_vector* tmp) # Initialize numpy fffpy_import_array() import_array() import numpy as np # Constants DEF_NITER = 2 def em(ndarray Y, ndarray VY, ndarray X, ndarray C=None, int axis=0, int niter=DEF_NITER): """ b, s2 = em(y, vy, X, C=None, axis=0, niter=DEF_NITER). Maximum likelihood regression in a mixed-effect GLM using the EM algorithm. C is the contrast matrix. Conventionally, C is p x q where p is the number of regressors. OUTPUT: beta, s2 beta -- array of parameter estimates s2 -- array of squared scale parameters. REFERENCE: Keller and Roche, ISBI 2008. 
""" cdef size_t n, p cdef fff_vector *y, *vy, *b, *s2 cdef fff_matrix *x, *ppx cdef fff_glm_twolevel_EM *em cdef fffpy_multi_iterator* multi # View on design matrix x = fff_matrix_fromPyArray(X) # Number of observations / regressors n = x.size1 p = x.size2 # Compute the projected pseudo-inverse matrix if C == None: PpX = np.linalg.pinv(X) else: A = np.linalg.inv(np.dot(X.transpose(), X)) # (p,p) B = np.linalg.inv(np.dot(np.dot(C.transpose(), A), C)) # (q,q) P = np.eye(p) - np.dot(np.dot(np.dot(A, C), B), C.transpose()) # (p,p) PpX = np.dot(np.dot(P, A), X.transpose()) # (p,n) ppx = fff_matrix_fromPyArray(PpX) # Allocate output arrays dims = [Y.shape[i] for i in range(Y.ndim)] dims[axis] = p B = np.zeros(dims) dims[axis] = 1 S2 = np.zeros(dims) # Local structs em = fff_glm_twolevel_EM_new(n, p) # Create a new array iterator multi = fffpy_multi_iterator_new(4, axis, Y, VY, B, S2) # Create views y = multi.vector[0] vy = multi.vector[1] b = multi.vector[2] s2 = multi.vector[3] # Loop while(multi.index < multi.size): fff_glm_twolevel_EM_init(em) fff_glm_twolevel_EM_run(em, y, vy, x, ppx, niter) fff_vector_memcpy(b, em.b) s2.data[0] = em.s2 fffpy_multi_iterator_update(multi) # Free memory fff_matrix_delete(x) fff_matrix_delete(ppx) fffpy_multi_iterator_delete(multi) fff_glm_twolevel_EM_delete(em) # Return return B, S2 def log_likelihood(Y, VY, X, B, S2, int axis=0): """ ll = log_likelihood(y, vy, X, b, s2, axis=0) Log likelihood in a mixed-effect GLM. OUTPUT: array REFERENCE: Keller and Roche, ISBI 2008. """ cdef fff_vector *y, *vy, *b, *s2, *ll, *tmp cdef fff_matrix *x cdef fffpy_multi_iterator* multi # Allocate output array dims = [Y.shape[i] for i in range(Y.ndim)] dims[axis] = 1 LL = np.zeros(dims) # View on design matrix x = fff_matrix_fromPyArray(X) # Local structure tmp = fff_vector_new(x.size1) # Multi iterator multi = fffpy_multi_iterator_new(5, axis, Y, VY, B, S2, LL) # View on iterable arrays y = multi.vector[0] vy = multi.vector[1] b = multi.vector[2] s2 = multi.vector[3] ll = multi.vector[4] # Loop while(multi.index < multi.size): ll.data[0] = fff_glm_twolevel_log_likelihood(y, vy, x, b, s2.data[0], tmp) fffpy_multi_iterator_update(multi) # Free memory fff_matrix_delete(x) fff_vector_delete(tmp) fffpy_multi_iterator_delete(multi) # Return return LL def log_likelihood_ratio(Y, VY, X, C, int axis=0, int niter=DEF_NITER): """ lda = em(y, vy, X, C, axis=0, niter=DEF_NITER). """ # Constrained log-likelihood B, S2 = em(Y, VY, X, C, axis, niter) ll0 = log_likelihood(Y, VY, X, B, S2, axis) # Unconstrained log-likelihood B, S2 = em(Y, VY, X, None, axis, niter) ll = log_likelihood(Y, VY, X, B, S2, axis) # -2 log R = 2*(ll-ll0) lda = 2*(ll-ll0) return np.maximum(lda, 0.0) nipy-0.3.0/nipy/labs/group/mixed_effects.py000066400000000000000000000055261210344137400207130ustar00rootroot00000000000000""" New generic implementation of multiple regression analysis under noisy measurements. """ import numpy as np _NITER = 2 TINY = float(np.finfo(np.double).tiny) def nonzero(x): """ Force strictly positive values. """ return np.maximum(x, TINY) def em(Y, VY, X, C=None, niter=_NITER, log_likelihood=False): """ Maximum likelihood regression in a mixed-effect linear model using the EM algorithm. Parameters ---------- Y : array Array of observations. VY : array C is the contrast matrix. Conventionally, C is p x q where p is the number of regressors. OUTPUT: beta, s2 beta -- array of parameter estimates s2 -- array of squared scale parameters. REFERENCE: Keller and Roche, ISBI 2008. 
""" # Number of observations, regressors and points nobs = X.shape[0] nreg = X.shape[1] npts = np.prod(Y.shape[1:]) # Reshape input array y = np.reshape(Y, (nobs, npts)) vy = np.reshape(VY, (nobs, npts)) # Compute the projected pseudo-inverse matrix if C == None: PpX = np.linalg.pinv(X) else: A = np.linalg.inv(np.dot(X.T, X)) # (nreg, nreg) B = np.linalg.inv(np.dot(np.dot(C.T, A), C)) # (q,q) P = np.eye(nreg) - np.dot(np.dot(np.dot(A, C), B), C.T) # (nreg, nreg) PpX = np.dot(np.dot(P, A), X.T) # (nreg, nobs) # Initialize outputs b = np.zeros((nreg, npts)) yfit = np.zeros((nobs, npts)) s2 = np.inf # EM loop it = 0 while it < niter: # E-step: posterior mean and variance of each "true" effect w1 = 1/nonzero(vy) w2 = 1/nonzero(s2) vz = 1/(w1+w2) z = vz*(w1*y + w2*yfit) # M-step: update effect and variance b = np.dot(PpX, z) yfit = np.dot(X, b) s2 = np.sum((z-yfit)**2 + vz, 0)/float(nobs) # Increase iteration number it += 1 # Ouput arrays B = np.reshape(b, [nreg] + list(Y.shape[1:])) S2 = np.reshape(s2, list(Y.shape[1:])) # Log-likelihood computation if not log_likelihood: return B, S2 else: return B, S2, _log_likelihood(y, vy, X, b, s2) def _log_likelihood(y, vy, X, b, s2): res = y - np.dot(X, b) w = nonzero(vy+s2) L = np.sum(np.log(w)+ res**2/w) L *= -0.5 return L def log_likelihood_ratio(Y, VY, X, C, niter=_NITER): """ Log-likelihood ratio statistic: 2*(log L - log L0) It is asymptotically distributed like a chi-square with rank(C) degrees of freedom under the null hypothesis H0: Cb = 0. """ # Constrained log-likelihood B, S2, ll0 = em(Y, VY, X, C, niter, log_likelihood=True) # Unconstrained log-likelihood B, S2, ll = em(Y, VY, X, None, niter, log_likelihood=True) # -2 log R = 2*(ll-ll0) return np.maximum(2*(ll-ll0), 0.0) nipy-0.3.0/nipy/labs/group/onesample.c000066400000000000000000011140641210344137400176620ustar00rootroot00000000000000/* Generated by Cython 0.17.4 on Sat Jan 12 17:27:37 2013 */ #define PY_SSIZE_T_CLEAN #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02040000 #error Cython requires Python 2.4+. #else #include /* For offsetof */ #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #endif #if PY_VERSION_HEX < 0x02050000 typedef int Py_ssize_t; #define PY_SSIZE_T_MAX INT_MAX #define PY_SSIZE_T_MIN INT_MIN #define PY_FORMAT_SIZE_T "" #define CYTHON_FORMAT_SSIZE_T "" #define PyInt_FromSsize_t(z) PyInt_FromLong(z) #define PyInt_AsSsize_t(o) __Pyx_PyInt_AsInt(o) #define PyNumber_Index(o) ((PyNumber_Check(o) && !PyFloat_Check(o)) ? 
PyNumber_Int(o) : \ (PyErr_Format(PyExc_TypeError, \ "expected index value, got %.200s", Py_TYPE(o)->tp_name), \ (PyObject*)0)) #define __Pyx_PyIndex_Check(o) (PyNumber_Check(o) && !PyFloat_Check(o) && \ !PyComplex_Check(o)) #define PyIndex_Check __Pyx_PyIndex_Check #define PyErr_WarnEx(category, message, stacklevel) PyErr_Warn(category, message) #define __PYX_BUILD_PY_SSIZE_T "i" #else #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #define __Pyx_PyIndex_Check PyIndex_Check #endif #if PY_VERSION_HEX < 0x02060000 #define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt) #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type) #define Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size) #define PyVarObject_HEAD_INIT(type, size) \ PyObject_HEAD_INIT(type) size, #define PyType_Modified(t) typedef struct { void *buf; PyObject *obj; Py_ssize_t len; Py_ssize_t itemsize; int readonly; int ndim; char *format; Py_ssize_t *shape; Py_ssize_t *strides; Py_ssize_t *suboffsets; void *internal; } Py_buffer; #define PyBUF_SIMPLE 0 #define PyBUF_WRITABLE 0x0001 #define PyBUF_FORMAT 0x0004 #define PyBUF_ND 0x0008 #define PyBUF_STRIDES (0x0010 | PyBUF_ND) #define PyBUF_C_CONTIGUOUS (0x0020 | PyBUF_STRIDES) #define PyBUF_F_CONTIGUOUS (0x0040 | PyBUF_STRIDES) #define PyBUF_ANY_CONTIGUOUS (0x0080 | PyBUF_STRIDES) #define PyBUF_INDIRECT (0x0100 | PyBUF_STRIDES) #define PyBUF_RECORDS (PyBUF_STRIDES | PyBUF_FORMAT | PyBUF_WRITABLE) #define PyBUF_FULL (PyBUF_INDIRECT | PyBUF_FORMAT | PyBUF_WRITABLE) typedef int (*getbufferproc)(PyObject *, Py_buffer *, int); typedef void (*releasebufferproc)(PyObject *, Py_buffer *); #endif #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \ PyCode_New(a, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #endif #if PY_MAJOR_VERSION < 3 && PY_MINOR_VERSION < 6 #define PyUnicode_FromString(s) PyUnicode_Decode(s, strlen(s), "UTF-8", "strict") #endif #if PY_MAJOR_VERSION >= 3 #define Py_TPFLAGS_CHECKTYPES 0 #define Py_TPFLAGS_HAVE_INDEX 0 #endif #if (PY_VERSION_HEX < 0x02060000) || (PY_MAJOR_VERSION >= 3) #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ? 
\ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #else #define CYTHON_PEP393_ENABLED 0 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_READ(k, d, i) ((k=k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #endif #if PY_VERSION_HEX < 0x02060000 #define PyBytesObject PyStringObject #define PyBytes_Type PyString_Type #define PyBytes_Check PyString_Check #define PyBytes_CheckExact PyString_CheckExact #define PyBytes_FromString PyString_FromString #define PyBytes_FromStringAndSize PyString_FromStringAndSize #define PyBytes_FromFormat PyString_FromFormat #define PyBytes_DecodeEscape PyString_DecodeEscape #define PyBytes_AsString PyString_AsString #define PyBytes_AsStringAndSize PyString_AsStringAndSize #define PyBytes_Size PyString_Size #define PyBytes_AS_STRING PyString_AS_STRING #define PyBytes_GET_SIZE PyString_GET_SIZE #define PyBytes_Repr PyString_Repr #define PyBytes_Concat PyString_Concat #define PyBytes_ConcatAndDel PyString_ConcatAndDel #endif #if PY_VERSION_HEX < 0x02060000 #define PySet_Check(obj) PyObject_TypeCheck(obj, &PySet_Type) #define PyFrozenSet_Check(obj) PyObject_TypeCheck(obj, &PyFrozenSet_Type) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_VERSION_HEX < 0x03020000 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if (PY_MAJOR_VERSION < 3) || (PY_VERSION_HEX >= 0x03010300) #define __Pyx_PySequence_GetSlice(obj, a, b) PySequence_GetSlice(obj, a, b) #define __Pyx_PySequence_SetSlice(obj, a, b, value) PySequence_SetSlice(obj, a, b, value) #define __Pyx_PySequence_DelSlice(obj, a, b) PySequence_DelSlice(obj, a, b) #else #define __Pyx_PySequence_GetSlice(obj, a, b) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), (PyObject*)0) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_GetSlice(obj, a, b)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object is unsliceable", (obj)->ob_type->tp_name), (PyObject*)0))) #define __Pyx_PySequence_SetSlice(obj, a, b, value) (unlikely(!(obj)) ? 
\ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_SetSlice(obj, a, b, value)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice assignment", (obj)->ob_type->tp_name), -1))) #define __Pyx_PySequence_DelSlice(obj, a, b) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_DelSlice(obj, a, b)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice deletion", (obj)->ob_type->tp_name), -1))) #endif #if PY_MAJOR_VERSION >= 3 #define PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func)) #endif #if PY_VERSION_HEX < 0x02050000 #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),((char *)(n))) #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),((char *)(n)),(a)) #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),((char *)(n))) #else #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),(n)) #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),(n),(a)) #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),(n)) #endif #if PY_VERSION_HEX < 0x02050000 #define __Pyx_NAMESTR(n) ((char *)(n)) #define __Pyx_DOCSTR(n) ((char *)(n)) #else #define __Pyx_NAMESTR(n) (n) #define __Pyx_DOCSTR(n) (n) #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include #define __PYX_HAVE__nipy__labs__group__onesample #define __PYX_HAVE_API__nipy__labs__group__onesample #include "stdio.h" #include "stdlib.h" #include "numpy/arrayobject.h" #include "numpy/ufuncobject.h" #include "fff_base.h" #include "fff_vector.h" #include "fff_matrix.h" #include "fff_array.h" #include "fffpy.h" #include "fff_onesample_stat.h" #ifdef _OPENMP #include #endif /* _OPENMP */ #ifdef PYREX_WITHOUT_ASSERTIONS #define CYTHON_WITHOUT_ASSERTIONS #endif /* inline attribute */ #ifndef CYTHON_INLINE #if defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif /* unused attribute */ #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif typedef struct {PyObject **p; char *s; const long n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; /*proto*/ /* Type Conversion Predeclarations */ #define __Pyx_PyBytes_FromUString(s) PyBytes_FromString((char*)s) #define __Pyx_PyBytes_AsUString(s) ((unsigned char*) PyBytes_AsString(s)) #define __Pyx_Owned_Py_None(b) (Py_INCREF(Py_None), Py_None) #define __Pyx_PyBool_FromLong(b) ((b) 
? (Py_INCREF(Py_True), Py_True) : (Py_INCREF(Py_False), Py_False)) static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x); static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject*); #if CYTHON_COMPILING_IN_CPYTHON #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #ifdef __GNUC__ /* Test for GCC > 2.95 */ #if __GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* __GNUC__ > 2 ... */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ > 2 ... */ #else /* __GNUC__ */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static PyObject *__pyx_m; static PyObject *__pyx_b; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= __FILE__; static const char *__pyx_filename; #if !defined(CYTHON_CCOMPLEX) #if defined(__cplusplus) #define CYTHON_CCOMPLEX 1 #elif defined(_Complex_I) #define CYTHON_CCOMPLEX 1 #else #define CYTHON_CCOMPLEX 0 #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus #include #else #include #endif #endif #if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__) #undef _Complex_I #define _Complex_I 1.0fj #endif static const char *__pyx_f[] = { "onesample.pyx", "numpy.pxd", "type.pxd", }; /* "numpy.pxd":723 * # in Cython to enable them only on the right systems. 
* * ctypedef npy_int8 int8_t # <<<<<<<<<<<<<< * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t */ typedef npy_int8 __pyx_t_5numpy_int8_t; /* "numpy.pxd":724 * * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t # <<<<<<<<<<<<<< * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t */ typedef npy_int16 __pyx_t_5numpy_int16_t; /* "numpy.pxd":725 * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t # <<<<<<<<<<<<<< * ctypedef npy_int64 int64_t * #ctypedef npy_int96 int96_t */ typedef npy_int32 __pyx_t_5numpy_int32_t; /* "numpy.pxd":726 * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t # <<<<<<<<<<<<<< * #ctypedef npy_int96 int96_t * #ctypedef npy_int128 int128_t */ typedef npy_int64 __pyx_t_5numpy_int64_t; /* "numpy.pxd":730 * #ctypedef npy_int128 int128_t * * ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<< * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t */ typedef npy_uint8 __pyx_t_5numpy_uint8_t; /* "numpy.pxd":731 * * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<< * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t */ typedef npy_uint16 __pyx_t_5numpy_uint16_t; /* "numpy.pxd":732 * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<< * ctypedef npy_uint64 uint64_t * #ctypedef npy_uint96 uint96_t */ typedef npy_uint32 __pyx_t_5numpy_uint32_t; /* "numpy.pxd":733 * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<< * #ctypedef npy_uint96 uint96_t * #ctypedef npy_uint128 uint128_t */ typedef npy_uint64 __pyx_t_5numpy_uint64_t; /* "numpy.pxd":737 * #ctypedef npy_uint128 uint128_t * * ctypedef npy_float32 float32_t # <<<<<<<<<<<<<< * ctypedef npy_float64 float64_t * #ctypedef npy_float80 float80_t */ typedef npy_float32 __pyx_t_5numpy_float32_t; /* "numpy.pxd":738 * * ctypedef npy_float32 float32_t * ctypedef npy_float64 float64_t # <<<<<<<<<<<<<< * #ctypedef npy_float80 float80_t * #ctypedef npy_float128 float128_t */ typedef npy_float64 __pyx_t_5numpy_float64_t; /* "numpy.pxd":747 * # The int types are mapped a bit surprising -- * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t # <<<<<<<<<<<<<< * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t */ typedef npy_long __pyx_t_5numpy_int_t; /* "numpy.pxd":748 * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t * ctypedef npy_longlong long_t # <<<<<<<<<<<<<< * ctypedef npy_longlong longlong_t * */ typedef npy_longlong __pyx_t_5numpy_long_t; /* "numpy.pxd":749 * ctypedef npy_long int_t * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<< * * ctypedef npy_ulong uint_t */ typedef npy_longlong __pyx_t_5numpy_longlong_t; /* "numpy.pxd":751 * ctypedef npy_longlong longlong_t * * ctypedef npy_ulong uint_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t */ typedef npy_ulong __pyx_t_5numpy_uint_t; /* "numpy.pxd":752 * * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulonglong_t * */ typedef npy_ulonglong __pyx_t_5numpy_ulong_t; /* "numpy.pxd":753 * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<< * * ctypedef npy_intp intp_t */ typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t; /* "numpy.pxd":755 * ctypedef npy_ulonglong ulonglong_t * * ctypedef npy_intp intp_t # 
<<<<<<<<<<<<<< * ctypedef npy_uintp uintp_t * */ typedef npy_intp __pyx_t_5numpy_intp_t; /* "numpy.pxd":756 * * ctypedef npy_intp intp_t * ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<< * * ctypedef npy_double float_t */ typedef npy_uintp __pyx_t_5numpy_uintp_t; /* "numpy.pxd":758 * ctypedef npy_uintp uintp_t * * ctypedef npy_double float_t # <<<<<<<<<<<<<< * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t */ typedef npy_double __pyx_t_5numpy_float_t; /* "numpy.pxd":759 * * ctypedef npy_double float_t * ctypedef npy_double double_t # <<<<<<<<<<<<<< * ctypedef npy_longdouble longdouble_t * */ typedef npy_double __pyx_t_5numpy_double_t; /* "numpy.pxd":760 * ctypedef npy_double float_t * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cfloat cfloat_t */ typedef npy_longdouble __pyx_t_5numpy_longdouble_t; /* "fff.pxd":9 * * # Redefine size_t * ctypedef unsigned long int size_t # <<<<<<<<<<<<<< * * */ typedef unsigned long __pyx_t_3fff_size_t; #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< float > __pyx_t_float_complex; #else typedef float _Complex __pyx_t_float_complex; #endif #else typedef struct { float real, imag; } __pyx_t_float_complex; #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< double > __pyx_t_double_complex; #else typedef double _Complex __pyx_t_double_complex; #endif #else typedef struct { double real, imag; } __pyx_t_double_complex; #endif /*--- Type declarations ---*/ /* "numpy.pxd":762 * ctypedef npy_longdouble longdouble_t * * ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<< * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t */ typedef npy_cfloat __pyx_t_5numpy_cfloat_t; /* "numpy.pxd":763 * * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<< * ctypedef npy_clongdouble clongdouble_t * */ typedef npy_cdouble __pyx_t_5numpy_cdouble_t; /* "numpy.pxd":764 * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cdouble complex_t */ typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t; /* "numpy.pxd":766 * ctypedef npy_clongdouble clongdouble_t * * ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew1(a): */ typedef npy_cdouble __pyx_t_5numpy_complex_t; #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); /*proto*/ #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define __Pyx_RefNannySetupContext(name, acquire_gil) \ if (acquire_gil) { \ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \ PyGILState_Release(__pyx_gilstate_save); \ } else { \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil) \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext() \ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define 
__Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif /* CYTHON_REFNANNY */ #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name); /*proto*/ static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /*proto*/ static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[], \ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, \ const char* function_name); /*proto*/ static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /*proto*/ static int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, const char *name, int exact); /*proto*/ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); if (likely(L->allocated > len)) { Py_INCREF(x); PyList_SET_ITEM(list, len, x); Py_SIZE(list) = len+1; return 0; } return PyList_Append(list, x); } #else #define __Pyx_PyList_Append(L,x) PyList_Append(L,x) #endif #define __Pyx_SetItemInt(o, i, v, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? \ __Pyx_SetItemInt_Fast(o, i, v) : \ __Pyx_SetItemInt_Generic(o, to_py_func(i), v)) static CYTHON_INLINE int __Pyx_SetItemInt_Generic(PyObject *o, PyObject *j, PyObject *v) { int r; if (!j) return -1; r = PyObject_SetItem(o, j, v); Py_DECREF(j); return r; } static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObject *v) { #if CYTHON_COMPILING_IN_CPYTHON if (PyList_CheckExact(o)) { Py_ssize_t n = (likely(i >= 0)) ? 
i : i + PyList_GET_SIZE(o); if (likely((n >= 0) & (n < PyList_GET_SIZE(o)))) { PyObject* old = PyList_GET_ITEM(o, n); Py_INCREF(v); PyList_SET_ITEM(o, n, v); Py_DECREF(old); return 1; } } else { /* inlined PySequence_SetItem() */ PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; if (likely(m && m->sq_ass_item)) { if (unlikely(i < 0) && likely(m->sq_length)) { Py_ssize_t l = m->sq_length(o); if (unlikely(l < 0)) return -1; i += l; } return m->sq_ass_item(o, i, v); } } #else #if CYTHON_COMPILING_IN_PYPY if (PySequence_Check(o) && !PyDict_Check(o)) { #else if (PySequence_Check(o)) { #endif return PySequence_SetItem(o, i, v); } #endif return __Pyx_SetItemInt_Generic(o, PyInt_FromSsize_t(i), v); } static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb); /*proto*/ static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb); /*proto*/ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /*proto*/ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); static CYTHON_INLINE int __Pyx_IterFinish(void); /*proto*/ static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected); /*proto*/ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /*proto*/ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level); /*proto*/ static CYTHON_INLINE PyObject *__Pyx_PyInt_to_py_Py_intptr_t(Py_intptr_t); #if CYTHON_CCOMPLEX #ifdef __cplusplus #define __Pyx_CREAL(z) ((z).real()) #define __Pyx_CIMAG(z) ((z).imag()) #else #define __Pyx_CREAL(z) (__real__(z)) #define __Pyx_CIMAG(z) (__imag__(z)) #endif #else #define __Pyx_CREAL(z) ((z).real) #define __Pyx_CIMAG(z) ((z).imag) #endif #if defined(_WIN32) && defined(__cplusplus) && CYTHON_CCOMPLEX #define __Pyx_SET_CREAL(z,x) ((z).real(x)) #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) #else #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) #endif static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float); #if CYTHON_CCOMPLEX #define __Pyx_c_eqf(a, b) ((a)==(b)) #define __Pyx_c_sumf(a, b) ((a)+(b)) #define __Pyx_c_difff(a, b) ((a)-(b)) #define __Pyx_c_prodf(a, b) ((a)*(b)) #define __Pyx_c_quotf(a, b) ((a)/(b)) #define __Pyx_c_negf(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zerof(z) ((z)==(float)0) #define __Pyx_c_conjf(z) (::std::conj(z)) #if 1 #define __Pyx_c_absf(z) (::std::abs(z)) #define __Pyx_c_powf(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zerof(z) ((z)==0) #define __Pyx_c_conjf(z) (conjf(z)) #if 1 #define __Pyx_c_absf(z) (cabsf(z)) #define __Pyx_c_powf(a, b) (cpowf(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex); static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex); static CYTHON_INLINE 
__pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex); #if 1 static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex, __pyx_t_float_complex); #endif #endif static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double); #if CYTHON_CCOMPLEX #define __Pyx_c_eq(a, b) ((a)==(b)) #define __Pyx_c_sum(a, b) ((a)+(b)) #define __Pyx_c_diff(a, b) ((a)-(b)) #define __Pyx_c_prod(a, b) ((a)*(b)) #define __Pyx_c_quot(a, b) ((a)/(b)) #define __Pyx_c_neg(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zero(z) ((z)==(double)0) #define __Pyx_c_conj(z) (::std::conj(z)) #if 1 #define __Pyx_c_abs(z) (::std::abs(z)) #define __Pyx_c_pow(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zero(z) ((z)==0) #define __Pyx_c_conj(z) (conj(z)) #if 1 #define __Pyx_c_abs(z) (cabs(z)) #define __Pyx_c_pow(a, b) (cpow(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex); static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex); #if 1 static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex, __pyx_t_double_complex); #endif #endif static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject *); static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject *); static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject *); static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject *); static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject *); static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject *); static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject *); static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject *); static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject *); static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject *); static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject *); static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject *); static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject *); static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject *); static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject *); static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject *); static int __Pyx_check_binary_version(void); #if !defined(__Pyx_PyIdentifier_FromString) #if PY_MAJOR_VERSION < 3 #define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s) #else #define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s) #endif #endif static PyObject *__Pyx_ImportModule(const char *name); /*proto*/ static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict); /*proto*/ typedef struct { int code_line; PyCodeObject* code_object; } __Pyx_CodeObjectCacheEntry; struct 
__Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); /*proto*/ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/ /* Module declarations from 'cpython.buffer' */ /* Module declarations from 'cpython.ref' */ /* Module declarations from 'libc.stdio' */ /* Module declarations from 'cpython.object' */ /* Module declarations from '__builtin__' */ /* Module declarations from 'cpython.type' */ static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0; /* Module declarations from 'libc.stdlib' */ /* Module declarations from 'numpy' */ /* Module declarations from 'numpy' */ static PyTypeObject *__pyx_ptype_5numpy_dtype = 0; static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0; static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0; static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0; static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0; static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/ /* Module declarations from 'fff' */ /* Module declarations from 'nipy.labs.group.onesample' */ #define __Pyx_MODULE_NAME "nipy.labs.group.onesample" int __pyx_module_is_main_nipy__labs__group__onesample = 0; /* Implementation of 'nipy.labs.group.onesample' */ static PyObject *__pyx_builtin_range; static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_RuntimeError; static PyObject *__pyx_pf_4nipy_4labs_5group_9onesample_stat(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_Y, PyObject *__pyx_v_id, double __pyx_v_base, int __pyx_v_axis, PyArrayObject *__pyx_v_Magics); /* proto */ static PyObject *__pyx_pf_4nipy_4labs_5group_9onesample_2stat_mfx(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_Y, PyArrayObject *__pyx_v_V, PyObject *__pyx_v_id, double __pyx_v_base, int __pyx_v_axis, PyArrayObject *__pyx_v_Magics, unsigned int __pyx_v_niter); /* proto */ static PyObject *__pyx_pf_4nipy_4labs_5group_9onesample_4pdf_fit_mfx(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_Y, PyArrayObject *__pyx_v_V, int __pyx_v_axis, int __pyx_v_niter, int __pyx_v_constraint, double __pyx_v_base); /* proto */ static PyObject *__pyx_pf_4nipy_4labs_5group_9onesample_6pdf_fit_gmfx(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_Y, PyArrayObject *__pyx_v_V, int __pyx_v_axis, int __pyx_v_niter, int __pyx_v_constraint, double __pyx_v_base); /* proto */ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */ static char __pyx_k_1[] = "ndarray is not C contiguous"; static char __pyx_k_3[] = "ndarray is not Fortran contiguous"; static char __pyx_k_5[] = "Non-native byte order not supported"; static char __pyx_k_7[] = "unknown dtype code in numpy.pxd (%d)"; static char __pyx_k_8[] = "Format string allocated too short, see comment in numpy.pxd"; static char __pyx_k_11[] = "Format string allocated too short."; static char __pyx_k_13[] = "\nRoutines for massively univariate random-effect 
and mixed-effect analysis.\n\nAuthor: Alexis Roche, 2008.\n"; static char __pyx_k_14[] = "0.1"; static char __pyx_k_17[] = "/Users/mb312/dev_trees/nipy/nipy/labs/group/onesample.pyx"; static char __pyx_k_18[] = "nipy.labs.group.onesample"; static char __pyx_k__B[] = "B"; static char __pyx_k__H[] = "H"; static char __pyx_k__I[] = "I"; static char __pyx_k__L[] = "L"; static char __pyx_k__O[] = "O"; static char __pyx_k__Q[] = "Q"; static char __pyx_k__T[] = "T"; static char __pyx_k__V[] = "V"; static char __pyx_k__W[] = "W"; static char __pyx_k__Y[] = "Y"; static char __pyx_k__Z[] = "Z"; static char __pyx_k__b[] = "b"; static char __pyx_k__d[] = "d"; static char __pyx_k__f[] = "f"; static char __pyx_k__g[] = "g"; static char __pyx_k__h[] = "h"; static char __pyx_k__i[] = "i"; static char __pyx_k__l[] = "l"; static char __pyx_k__n[] = "n"; static char __pyx_k__q[] = "q"; static char __pyx_k__t[] = "t"; static char __pyx_k__v[] = "v"; static char __pyx_k__w[] = "w"; static char __pyx_k__y[] = "y"; static char __pyx_k__z[] = "z"; static char __pyx_k__MU[] = "MU"; static char __pyx_k__S2[] = "S2"; static char __pyx_k__Zd[] = "Zd"; static char __pyx_k__Zf[] = "Zf"; static char __pyx_k__Zg[] = "Zg"; static char __pyx_k__id[] = "id"; static char __pyx_k__mu[] = "mu"; static char __pyx_k__np[] = "np"; static char __pyx_k__s2[] = "s2"; static char __pyx_k__yp[] = "yp"; static char __pyx_k__elr[] = "elr"; static char __pyx_k__idx[] = "idx"; static char __pyx_k__axis[] = "axis"; static char __pyx_k__base[] = "base"; static char __pyx_k__dims[] = "dims"; static char __pyx_k__mean[] = "mean"; static char __pyx_k__sign[] = "sign"; static char __pyx_k__simu[] = "simu"; static char __pyx_k__stat[] = "stat"; static char __pyx_k__grubb[] = "grubb"; static char __pyx_k__magic[] = "magic"; static char __pyx_k__multi[] = "multi"; static char __pyx_k__niter[] = "niter"; static char __pyx_k__nsimu[] = "nsimu"; static char __pyx_k__numpy[] = "numpy"; static char __pyx_k__range[] = "range"; static char __pyx_k__stats[] = "stats"; static char __pyx_k__tukey[] = "tukey"; static char __pyx_k__zeros[] = "zeros"; static char __pyx_k__Magics[] = "Magics"; static char __pyx_k__magics[] = "magics"; static char __pyx_k__median[] = "median"; static char __pyx_k__elr_mfx[] = "elr_mfx"; static char __pyx_k__laplace[] = "laplace"; static char __pyx_k__student[] = "student"; static char __pyx_k____main__[] = "__main__"; static char __pyx_k____test__[] = "__test__"; static char __pyx_k__mean_mfx[] = "mean_mfx"; static char __pyx_k__sign_mfx[] = "sign_mfx"; static char __pyx_k__stat_mfx[] = "stat_mfx"; static char __pyx_k__wilcoxon[] = "wilcoxon"; static char __pyx_k__flag_stat[] = "flag_stat"; static char __pyx_k__nsimu_max[] = "nsimu_max"; static char __pyx_k__ValueError[] = "ValueError"; static char __pyx_k__constraint[] = "constraint"; static char __pyx_k__median_mfx[] = "median_mfx"; static char __pyx_k____version__[] = "__version__"; static char __pyx_k__pdf_fit_mfx[] = "pdf_fit_mfx"; static char __pyx_k__student_mfx[] = "student_mfx"; static char __pyx_k__RuntimeError[] = "RuntimeError"; static char __pyx_k__pdf_fit_gmfx[] = "pdf_fit_gmfx"; static char __pyx_k__wilcoxon_mfx[] = "wilcoxon_mfx"; static char __pyx_k__mean_gauss_mfx[] = "mean_gauss_mfx"; static PyObject *__pyx_kp_u_1; static PyObject *__pyx_kp_u_11; static PyObject *__pyx_kp_s_14; static PyObject *__pyx_kp_s_17; static PyObject *__pyx_n_s_18; static PyObject *__pyx_kp_u_3; static PyObject *__pyx_kp_u_5; static PyObject *__pyx_kp_u_7; static PyObject 
*__pyx_kp_u_8; static PyObject *__pyx_n_s__MU; static PyObject *__pyx_n_s__Magics; static PyObject *__pyx_n_s__RuntimeError; static PyObject *__pyx_n_s__S2; static PyObject *__pyx_n_s__T; static PyObject *__pyx_n_s__V; static PyObject *__pyx_n_s__ValueError; static PyObject *__pyx_n_s__W; static PyObject *__pyx_n_s__Y; static PyObject *__pyx_n_s__Z; static PyObject *__pyx_n_s____main__; static PyObject *__pyx_n_s____test__; static PyObject *__pyx_n_s____version__; static PyObject *__pyx_n_s__axis; static PyObject *__pyx_n_s__base; static PyObject *__pyx_n_s__constraint; static PyObject *__pyx_n_s__dims; static PyObject *__pyx_n_s__elr; static PyObject *__pyx_n_s__elr_mfx; static PyObject *__pyx_n_s__flag_stat; static PyObject *__pyx_n_s__grubb; static PyObject *__pyx_n_s__i; static PyObject *__pyx_n_s__id; static PyObject *__pyx_n_s__idx; static PyObject *__pyx_n_s__laplace; static PyObject *__pyx_n_s__magic; static PyObject *__pyx_n_s__magics; static PyObject *__pyx_n_s__mean; static PyObject *__pyx_n_s__mean_gauss_mfx; static PyObject *__pyx_n_s__mean_mfx; static PyObject *__pyx_n_s__median; static PyObject *__pyx_n_s__median_mfx; static PyObject *__pyx_n_s__mu; static PyObject *__pyx_n_s__multi; static PyObject *__pyx_n_s__n; static PyObject *__pyx_n_s__niter; static PyObject *__pyx_n_s__np; static PyObject *__pyx_n_s__nsimu; static PyObject *__pyx_n_s__nsimu_max; static PyObject *__pyx_n_s__numpy; static PyObject *__pyx_n_s__pdf_fit_gmfx; static PyObject *__pyx_n_s__pdf_fit_mfx; static PyObject *__pyx_n_s__range; static PyObject *__pyx_n_s__s2; static PyObject *__pyx_n_s__sign; static PyObject *__pyx_n_s__sign_mfx; static PyObject *__pyx_n_s__simu; static PyObject *__pyx_n_s__stat; static PyObject *__pyx_n_s__stat_mfx; static PyObject *__pyx_n_s__stats; static PyObject *__pyx_n_s__student; static PyObject *__pyx_n_s__student_mfx; static PyObject *__pyx_n_s__t; static PyObject *__pyx_n_s__tukey; static PyObject *__pyx_n_s__v; static PyObject *__pyx_n_s__w; static PyObject *__pyx_n_s__wilcoxon; static PyObject *__pyx_n_s__wilcoxon_mfx; static PyObject *__pyx_n_s__y; static PyObject *__pyx_n_s__yp; static PyObject *__pyx_n_s__z; static PyObject *__pyx_n_s__zeros; static PyObject *__pyx_int_1; static PyObject *__pyx_int_15; static PyObject *__pyx_k_tuple_2; static PyObject *__pyx_k_tuple_4; static PyObject *__pyx_k_tuple_6; static PyObject *__pyx_k_tuple_9; static PyObject *__pyx_k_tuple_10; static PyObject *__pyx_k_tuple_12; static PyObject *__pyx_k_tuple_15; static PyObject *__pyx_k_tuple_19; static PyObject *__pyx_k_tuple_21; static PyObject *__pyx_k_tuple_23; static PyObject *__pyx_k_codeobj_16; static PyObject *__pyx_k_codeobj_20; static PyObject *__pyx_k_codeobj_22; static PyObject *__pyx_k_codeobj_24; /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_5group_9onesample_1stat(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_4labs_5group_9onesample_stat[] = "\n T = stat(Y, id='student', base=0.0, axis=0, magics=None).\n \n Compute a one-sample test statistic over a number of deterministic\n or random permutations. 
\n "; static PyMethodDef __pyx_mdef_4nipy_4labs_5group_9onesample_1stat = {__Pyx_NAMESTR("stat"), (PyCFunction)__pyx_pw_4nipy_4labs_5group_9onesample_1stat, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4nipy_4labs_5group_9onesample_stat)}; static PyObject *__pyx_pw_4nipy_4labs_5group_9onesample_1stat(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_Y = 0; PyObject *__pyx_v_id = 0; double __pyx_v_base; int __pyx_v_axis; PyArrayObject *__pyx_v_Magics = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("stat (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__Y,&__pyx_n_s__id,&__pyx_n_s__base,&__pyx_n_s__axis,&__pyx_n_s__Magics,0}; PyObject* values[5] = {0,0,0,0,0}; values[1] = ((PyObject *)__pyx_n_s__student); /* "nipy/labs/group/onesample.pyx":89 * # Test stat without mixed-effect correction * def stat(ndarray Y, id='student', double base=0.0, * int axis=0, ndarray Magics=None): # <<<<<<<<<<<<<< * """ * T = stat(Y, id='student', base=0.0, axis=0, magics=None). */ values[4] = (PyObject *)((PyArrayObject *)Py_None); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__Y)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__id); if (value) { values[1] = value; kw_args--; } } case 2: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__base); if (value) { values[2] = value; kw_args--; } } case 3: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__axis); if (value) { values[3] = value; kw_args--; } } case 4: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__Magics); if (value) { values[4] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "stat") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 88; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_Y = ((PyArrayObject *)values[0]); __pyx_v_id = values[1]; if (values[2]) { __pyx_v_base = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_base == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 88; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else { /* "nipy/labs/group/onesample.pyx":88 * * # Test stat without mixed-effect correction * def stat(ndarray Y, id='student', double base=0.0, # <<<<<<<<<<<<<< * int axis=0, ndarray Magics=None): * """ */ __pyx_v_base = ((double)0.0); } if (values[3]) { __pyx_v_axis = __Pyx_PyInt_AsInt(values[3]); if (unlikely((__pyx_v_axis == (int)-1) && 
PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 89; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else { __pyx_v_axis = ((int)0); } __pyx_v_Magics = ((PyArrayObject *)values[4]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("stat", 0, 1, 5, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 88; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.labs.group.onesample.stat", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_Y), __pyx_ptype_5numpy_ndarray, 1, "Y", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 88; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_Magics), __pyx_ptype_5numpy_ndarray, 1, "Magics", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 89; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_4nipy_4labs_5group_9onesample_stat(__pyx_self, __pyx_v_Y, __pyx_v_id, __pyx_v_base, __pyx_v_axis, __pyx_v_Magics); goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4nipy_4labs_5group_9onesample_stat(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_Y, PyObject *__pyx_v_id, double __pyx_v_base, int __pyx_v_axis, PyArrayObject *__pyx_v_Magics) { fff_vector *__pyx_v_y; fff_vector *__pyx_v_t; fff_vector *__pyx_v_magics; fff_vector *__pyx_v_yp; fff_onesample_stat *__pyx_v_stat; fff_onesample_stat_flag __pyx_v_flag_stat; unsigned int __pyx_v_n; unsigned long __pyx_v_simu; unsigned long __pyx_v_nsimu; unsigned long __pyx_v_idx; double __pyx_v_magic; fffpy_multi_iterator *__pyx_v_multi; PyObject *__pyx_v_dims = NULL; PyObject *__pyx_v_T = NULL; int __pyx_v_i; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; fff_onesample_stat_flag __pyx_t_3; int __pyx_t_4; size_t __pyx_t_5; int __pyx_t_6; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; unsigned long __pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("stat", 0); /* "nipy/labs/group/onesample.pyx":98 * cdef fff_vector *y, *t, *magics, *yp * cdef fff_onesample_stat* stat * cdef fff_onesample_stat_flag flag_stat = stats[id] # <<<<<<<<<<<<<< * cdef unsigned int n * cdef unsigned long int simu, nsimu, idx */ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__stats); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 98; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_GetItem(__pyx_t_1, __pyx_v_id); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 98; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_3 = ((fff_onesample_stat_flag)PyInt_AsLong(__pyx_t_2)); if (unlikely(PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 98; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v_flag_stat = __pyx_t_3; /* "nipy/labs/group/onesample.pyx":105 * * # Get number of observations * n = Y.shape[axis] # <<<<<<<<<<<<<< * * # Read out magic numbers */ __pyx_v_n = ((unsigned int)(__pyx_v_Y->dimensions[__pyx_v_axis])); /* "nipy/labs/group/onesample.pyx":108 * * # Read out magic numbers * if Magics == None: # <<<<<<<<<<<<<< * magics = 
fff_vector_new(1) * magics.data[0] = 0 ## Just to make sure */ __pyx_t_2 = PyObject_RichCompare(((PyObject *)__pyx_v_Magics), Py_None, Py_EQ); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 108; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 108; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (__pyx_t_4) { /* "nipy/labs/group/onesample.pyx":109 * # Read out magic numbers * if Magics == None: * magics = fff_vector_new(1) # <<<<<<<<<<<<<< * magics.data[0] = 0 ## Just to make sure * else: */ __pyx_v_magics = fff_vector_new(1); /* "nipy/labs/group/onesample.pyx":110 * if Magics == None: * magics = fff_vector_new(1) * magics.data[0] = 0 ## Just to make sure # <<<<<<<<<<<<<< * else: * magics = fff_vector_fromPyArray(Magics) */ (__pyx_v_magics->data[0]) = 0.0; goto __pyx_L3; } /*else*/ { /* "nipy/labs/group/onesample.pyx":112 * magics.data[0] = 0 ## Just to make sure * else: * magics = fff_vector_fromPyArray(Magics) # <<<<<<<<<<<<<< * * # Create output array */ __pyx_v_magics = fff_vector_fromPyArray(__pyx_v_Magics); } __pyx_L3:; /* "nipy/labs/group/onesample.pyx":115 * * # Create output array * nsimu = magics.size # <<<<<<<<<<<<<< * dims = [Y.shape[i] for i in range(Y.ndim)] * dims[axis] = nsimu */ __pyx_t_5 = __pyx_v_magics->size; __pyx_v_nsimu = __pyx_t_5; /* "nipy/labs/group/onesample.pyx":116 * # Create output array * nsimu = magics.size * dims = [Y.shape[i] for i in range(Y.ndim)] # <<<<<<<<<<<<<< * dims[axis] = nsimu * T = np.zeros(dims) */ __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_6 = __pyx_v_Y->nd; for (__pyx_t_7 = 0; __pyx_t_7 < __pyx_t_6; __pyx_t_7+=1) { __pyx_v_i = __pyx_t_7; __pyx_t_1 = __Pyx_PyInt_to_py_Py_intptr_t((__pyx_v_Y->dimensions[__pyx_v_i])); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (unlikely(__Pyx_PyList_Append(__pyx_t_2, (PyObject*)__pyx_t_1))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } __pyx_t_1 = ((PyObject *)__pyx_t_2); __Pyx_INCREF(__pyx_t_1); __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; __pyx_v_dims = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/group/onesample.pyx":117 * nsimu = magics.size * dims = [Y.shape[i] for i in range(Y.ndim)] * dims[axis] = nsimu # <<<<<<<<<<<<<< * T = np.zeros(dims) * */ __pyx_t_1 = PyLong_FromUnsignedLong(__pyx_v_nsimu); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 117; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (__Pyx_SetItemInt(((PyObject *)__pyx_v_dims), __pyx_v_axis, __pyx_t_1, sizeof(int), PyInt_FromLong) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 117; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/group/onesample.pyx":118 * dims = [Y.shape[i] for i in range(Y.ndim)] * dims[axis] = nsimu * T = np.zeros(dims) # <<<<<<<<<<<<<< * * # Create local structure */ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 118; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
__Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__zeros); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 118; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 118; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)__pyx_v_dims)); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_v_dims)); __Pyx_GIVEREF(((PyObject *)__pyx_v_dims)); __pyx_t_8 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 118; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __pyx_v_T = __pyx_t_8; __pyx_t_8 = 0; /* "nipy/labs/group/onesample.pyx":121 * * # Create local structure * stat = fff_onesample_stat_new(n, flag_stat, base) # <<<<<<<<<<<<<< * yp = fff_vector_new(n) * */ __pyx_v_stat = fff_onesample_stat_new(__pyx_v_n, __pyx_v_flag_stat, __pyx_v_base); /* "nipy/labs/group/onesample.pyx":122 * # Create local structure * stat = fff_onesample_stat_new(n, flag_stat, base) * yp = fff_vector_new(n) # <<<<<<<<<<<<<< * * # Multi-iterator */ __pyx_v_yp = fff_vector_new(__pyx_v_n); /* "nipy/labs/group/onesample.pyx":125 * * # Multi-iterator * multi = fffpy_multi_iterator_new(2, axis, Y, T) # <<<<<<<<<<<<<< * * # Vector views */ __pyx_v_multi = fffpy_multi_iterator_new(2, __pyx_v_axis, ((void *)__pyx_v_Y), ((void *)__pyx_v_T)); /* "nipy/labs/group/onesample.pyx":128 * * # Vector views * y = multi.vector[0] # <<<<<<<<<<<<<< * t = multi.vector[1] * */ __pyx_v_y = (__pyx_v_multi->vector[0]); /* "nipy/labs/group/onesample.pyx":129 * # Vector views * y = multi.vector[0] * t = multi.vector[1] # <<<<<<<<<<<<<< * * # Loop */ __pyx_v_t = (__pyx_v_multi->vector[1]); /* "nipy/labs/group/onesample.pyx":132 * * # Loop * for simu from 0 <= simu < nsimu: # <<<<<<<<<<<<<< * * # Set the magic number */ __pyx_t_9 = __pyx_v_nsimu; for (__pyx_v_simu = 0; __pyx_v_simu < __pyx_t_9; __pyx_v_simu++) { /* "nipy/labs/group/onesample.pyx":135 * * # Set the magic number * magic = magics.data[simu*magics.stride] # <<<<<<<<<<<<<< * * # Reset the multi-iterator */ __pyx_v_magic = (__pyx_v_magics->data[(__pyx_v_simu * __pyx_v_magics->stride)]); /* "nipy/labs/group/onesample.pyx":138 * * # Reset the multi-iterator * fffpy_multi_iterator_reset(multi); # <<<<<<<<<<<<<< * * # Perform the loop */ fffpy_multi_iterator_reset(__pyx_v_multi); /* "nipy/labs/group/onesample.pyx":141 * * # Perform the loop * idx = simu*t.stride # <<<<<<<<<<<<<< * while(multi.index < multi.size): * fff_onesample_permute_signs(yp, y, magic) */ __pyx_v_idx = (__pyx_v_simu * __pyx_v_t->stride); /* "nipy/labs/group/onesample.pyx":142 * # Perform the loop * idx = simu*t.stride * while(multi.index < multi.size): # <<<<<<<<<<<<<< * fff_onesample_permute_signs(yp, y, magic) * t.data[idx] = fff_onesample_stat_eval(stat, yp) */ while (1) { __pyx_t_4 = (__pyx_v_multi->index < __pyx_v_multi->size); if (!__pyx_t_4) break; /* "nipy/labs/group/onesample.pyx":143 * idx = simu*t.stride * while(multi.index < multi.size): * fff_onesample_permute_signs(yp, y, magic) # <<<<<<<<<<<<<< * t.data[idx] = fff_onesample_stat_eval(stat, yp) * fffpy_multi_iterator_update(multi) */ fff_onesample_permute_signs(__pyx_v_yp, __pyx_v_y, __pyx_v_magic); /* 
"nipy/labs/group/onesample.pyx":144 * while(multi.index < multi.size): * fff_onesample_permute_signs(yp, y, magic) * t.data[idx] = fff_onesample_stat_eval(stat, yp) # <<<<<<<<<<<<<< * fffpy_multi_iterator_update(multi) * */ (__pyx_v_t->data[__pyx_v_idx]) = fff_onesample_stat_eval(__pyx_v_stat, __pyx_v_yp); /* "nipy/labs/group/onesample.pyx":145 * fff_onesample_permute_signs(yp, y, magic) * t.data[idx] = fff_onesample_stat_eval(stat, yp) * fffpy_multi_iterator_update(multi) # <<<<<<<<<<<<<< * * # Free memory */ fffpy_multi_iterator_update(__pyx_v_multi); } } /* "nipy/labs/group/onesample.pyx":148 * * # Free memory * fffpy_multi_iterator_delete(multi) # <<<<<<<<<<<<<< * fff_vector_delete(yp) * fff_vector_delete(magics) */ fffpy_multi_iterator_delete(__pyx_v_multi); /* "nipy/labs/group/onesample.pyx":149 * # Free memory * fffpy_multi_iterator_delete(multi) * fff_vector_delete(yp) # <<<<<<<<<<<<<< * fff_vector_delete(magics) * fff_onesample_stat_delete(stat) */ fff_vector_delete(__pyx_v_yp); /* "nipy/labs/group/onesample.pyx":150 * fffpy_multi_iterator_delete(multi) * fff_vector_delete(yp) * fff_vector_delete(magics) # <<<<<<<<<<<<<< * fff_onesample_stat_delete(stat) * */ fff_vector_delete(__pyx_v_magics); /* "nipy/labs/group/onesample.pyx":151 * fff_vector_delete(yp) * fff_vector_delete(magics) * fff_onesample_stat_delete(stat) # <<<<<<<<<<<<<< * * # Return */ fff_onesample_stat_delete(__pyx_v_stat); /* "nipy/labs/group/onesample.pyx":154 * * # Return * return T # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_T); __pyx_r = __pyx_v_T; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("nipy.labs.group.onesample.stat", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_dims); __Pyx_XDECREF(__pyx_v_T); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_5group_9onesample_3stat_mfx(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_4labs_5group_9onesample_2stat_mfx[] = "\n T = stat_mfx(Y, V, id='student_mfx', base=0.0, axis=0, magics=None, niter=5).\n \n Compute a one-sample test statistic, with mixed-effect correction,\n over a number of deterministic or random permutations.\n "; static PyMethodDef __pyx_mdef_4nipy_4labs_5group_9onesample_3stat_mfx = {__Pyx_NAMESTR("stat_mfx"), (PyCFunction)__pyx_pw_4nipy_4labs_5group_9onesample_3stat_mfx, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4nipy_4labs_5group_9onesample_2stat_mfx)}; static PyObject *__pyx_pw_4nipy_4labs_5group_9onesample_3stat_mfx(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_Y = 0; PyArrayObject *__pyx_v_V = 0; PyObject *__pyx_v_id = 0; double __pyx_v_base; int __pyx_v_axis; PyArrayObject *__pyx_v_Magics = 0; unsigned int __pyx_v_niter; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("stat_mfx (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__Y,&__pyx_n_s__V,&__pyx_n_s__id,&__pyx_n_s__base,&__pyx_n_s__axis,&__pyx_n_s__Magics,&__pyx_n_s__niter,0}; PyObject* values[7] = {0,0,0,0,0,0,0}; values[2] = ((PyObject *)__pyx_n_s__student_mfx); /* "nipy/labs/group/onesample.pyx":158 * * def stat_mfx(ndarray Y, ndarray V, id='student_mfx', double base=0.0, * int axis=0, ndarray Magics=None, unsigned int niter=5): # 
<<<<<<<<<<<<<< * """ * T = stat_mfx(Y, V, id='student_mfx', base=0.0, axis=0, magics=None, niter=5). */ values[5] = (PyObject *)((PyArrayObject *)Py_None); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__Y)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__V)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("stat_mfx", 0, 2, 7, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 157; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__id); if (value) { values[2] = value; kw_args--; } } case 3: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__base); if (value) { values[3] = value; kw_args--; } } case 4: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__axis); if (value) { values[4] = value; kw_args--; } } case 5: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__Magics); if (value) { values[5] = value; kw_args--; } } case 6: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__niter); if (value) { values[6] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "stat_mfx") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 157; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_Y = ((PyArrayObject *)values[0]); __pyx_v_V = ((PyArrayObject *)values[1]); __pyx_v_id = values[2]; if (values[3]) { __pyx_v_base = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_base == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 157; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else { /* "nipy/labs/group/onesample.pyx":157 * * * def stat_mfx(ndarray Y, ndarray V, id='student_mfx', double base=0.0, # <<<<<<<<<<<<<< * int axis=0, ndarray Magics=None, unsigned int niter=5): * """ */ __pyx_v_base = ((double)0.0); } if (values[4]) { __pyx_v_axis = __Pyx_PyInt_AsInt(values[4]); if (unlikely((__pyx_v_axis == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 158; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else { __pyx_v_axis = ((int)0); } __pyx_v_Magics = ((PyArrayObject *)values[5]); if (values[6]) { __pyx_v_niter = __Pyx_PyInt_AsUnsignedInt(values[6]); if (unlikely((__pyx_v_niter == (unsigned int)-1) && PyErr_Occurred())) 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 158; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else { __pyx_v_niter = ((unsigned int)5); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("stat_mfx", 0, 2, 7, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 157; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.labs.group.onesample.stat_mfx", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_Y), __pyx_ptype_5numpy_ndarray, 1, "Y", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 157; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_V), __pyx_ptype_5numpy_ndarray, 1, "V", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 157; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_Magics), __pyx_ptype_5numpy_ndarray, 1, "Magics", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 158; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_4nipy_4labs_5group_9onesample_2stat_mfx(__pyx_self, __pyx_v_Y, __pyx_v_V, __pyx_v_id, __pyx_v_base, __pyx_v_axis, __pyx_v_Magics, __pyx_v_niter); goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4nipy_4labs_5group_9onesample_2stat_mfx(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_Y, PyArrayObject *__pyx_v_V, PyObject *__pyx_v_id, double __pyx_v_base, int __pyx_v_axis, PyArrayObject *__pyx_v_Magics, unsigned int __pyx_v_niter) { fff_vector *__pyx_v_y; fff_vector *__pyx_v_v; fff_vector *__pyx_v_t; fff_vector *__pyx_v_magics; fff_vector *__pyx_v_yp; fff_onesample_stat_mfx *__pyx_v_stat; fff_onesample_stat_flag __pyx_v_flag_stat; int __pyx_v_n; unsigned long __pyx_v_simu; unsigned long __pyx_v_idx; double __pyx_v_magic; fffpy_multi_iterator *__pyx_v_multi; size_t __pyx_v_nsimu; PyObject *__pyx_v_dims = NULL; PyObject *__pyx_v_T = NULL; int __pyx_v_i; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; fff_onesample_stat_flag __pyx_t_3; int __pyx_t_4; size_t __pyx_t_5; int __pyx_t_6; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("stat_mfx", 0); /* "nipy/labs/group/onesample.pyx":167 * cdef fff_vector *y, *v, *t, *magics, *yp * cdef fff_onesample_stat_mfx* stat * cdef fff_onesample_stat_flag flag_stat = stats[id] # <<<<<<<<<<<<<< * cdef int n * cdef unsigned long int nsimu_max, simu, idx */ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__stats); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 167; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_GetItem(__pyx_t_1, __pyx_v_id); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 167; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_3 = ((fff_onesample_stat_flag)PyInt_AsLong(__pyx_t_2)); if (unlikely(PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 167; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v_flag_stat = __pyx_t_3; /* "nipy/labs/group/onesample.pyx":174 * * # Get number of observations * n = 
Y.shape[axis] # <<<<<<<<<<<<<< * * # Read out magic numbers */ __pyx_v_n = ((int)(__pyx_v_Y->dimensions[__pyx_v_axis])); /* "nipy/labs/group/onesample.pyx":177 * * # Read out magic numbers * if Magics == None: # <<<<<<<<<<<<<< * magics = fff_vector_new(1) * magics.data[0] = 0 ## Just to make sure */ __pyx_t_2 = PyObject_RichCompare(((PyObject *)__pyx_v_Magics), Py_None, Py_EQ); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 177; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 177; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (__pyx_t_4) { /* "nipy/labs/group/onesample.pyx":178 * # Read out magic numbers * if Magics == None: * magics = fff_vector_new(1) # <<<<<<<<<<<<<< * magics.data[0] = 0 ## Just to make sure * else: */ __pyx_v_magics = fff_vector_new(1); /* "nipy/labs/group/onesample.pyx":179 * if Magics == None: * magics = fff_vector_new(1) * magics.data[0] = 0 ## Just to make sure # <<<<<<<<<<<<<< * else: * magics = fff_vector_fromPyArray(Magics) */ (__pyx_v_magics->data[0]) = 0.0; goto __pyx_L3; } /*else*/ { /* "nipy/labs/group/onesample.pyx":181 * magics.data[0] = 0 ## Just to make sure * else: * magics = fff_vector_fromPyArray(Magics) # <<<<<<<<<<<<<< * * # Create output array */ __pyx_v_magics = fff_vector_fromPyArray(__pyx_v_Magics); } __pyx_L3:; /* "nipy/labs/group/onesample.pyx":184 * * # Create output array * nsimu = magics.size # <<<<<<<<<<<<<< * dims = [Y.shape[i] for i in range(Y.ndim)] * dims[axis] = nsimu */ __pyx_t_5 = __pyx_v_magics->size; __pyx_v_nsimu = __pyx_t_5; /* "nipy/labs/group/onesample.pyx":185 * # Create output array * nsimu = magics.size * dims = [Y.shape[i] for i in range(Y.ndim)] # <<<<<<<<<<<<<< * dims[axis] = nsimu * T = np.zeros(dims) */ __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_6 = __pyx_v_Y->nd; for (__pyx_t_7 = 0; __pyx_t_7 < __pyx_t_6; __pyx_t_7+=1) { __pyx_v_i = __pyx_t_7; __pyx_t_1 = __Pyx_PyInt_to_py_Py_intptr_t((__pyx_v_Y->dimensions[__pyx_v_i])); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (unlikely(__Pyx_PyList_Append(__pyx_t_2, (PyObject*)__pyx_t_1))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } __pyx_t_1 = ((PyObject *)__pyx_t_2); __Pyx_INCREF(__pyx_t_1); __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; __pyx_v_dims = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/group/onesample.pyx":186 * nsimu = magics.size * dims = [Y.shape[i] for i in range(Y.ndim)] * dims[axis] = nsimu # <<<<<<<<<<<<<< * T = np.zeros(dims) * */ __pyx_t_1 = __Pyx_PyInt_FromSize_t(__pyx_v_nsimu); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 186; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (__Pyx_SetItemInt(((PyObject *)__pyx_v_dims), __pyx_v_axis, __pyx_t_1, sizeof(int), PyInt_FromLong) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 186; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/group/onesample.pyx":187 * dims = [Y.shape[i] for i in range(Y.ndim)] * dims[axis] = nsimu * T = np.zeros(dims) # 
<<<<<<<<<<<<<< * * # Create local structure */ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 187; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__zeros); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 187; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 187; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)__pyx_v_dims)); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_v_dims)); __Pyx_GIVEREF(((PyObject *)__pyx_v_dims)); __pyx_t_8 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 187; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __pyx_v_T = __pyx_t_8; __pyx_t_8 = 0; /* "nipy/labs/group/onesample.pyx":190 * * # Create local structure * stat = fff_onesample_stat_mfx_new(n, flag_stat, base) # <<<<<<<<<<<<<< * stat.niter = niter * yp = fff_vector_new(n) */ __pyx_v_stat = fff_onesample_stat_mfx_new(__pyx_v_n, __pyx_v_flag_stat, __pyx_v_base); /* "nipy/labs/group/onesample.pyx":191 * # Create local structure * stat = fff_onesample_stat_mfx_new(n, flag_stat, base) * stat.niter = niter # <<<<<<<<<<<<<< * yp = fff_vector_new(n) * */ __pyx_v_stat->niter = __pyx_v_niter; /* "nipy/labs/group/onesample.pyx":192 * stat = fff_onesample_stat_mfx_new(n, flag_stat, base) * stat.niter = niter * yp = fff_vector_new(n) # <<<<<<<<<<<<<< * * # Multi-iterator */ __pyx_v_yp = fff_vector_new(__pyx_v_n); /* "nipy/labs/group/onesample.pyx":195 * * # Multi-iterator * multi = fffpy_multi_iterator_new(3, axis, Y, V, T) # <<<<<<<<<<<<<< * * # Vector views */ __pyx_v_multi = fffpy_multi_iterator_new(3, __pyx_v_axis, ((void *)__pyx_v_Y), ((void *)__pyx_v_V), ((void *)__pyx_v_T)); /* "nipy/labs/group/onesample.pyx":198 * * # Vector views * y = multi.vector[0] # <<<<<<<<<<<<<< * v = multi.vector[1] * t = multi.vector[2] */ __pyx_v_y = (__pyx_v_multi->vector[0]); /* "nipy/labs/group/onesample.pyx":199 * # Vector views * y = multi.vector[0] * v = multi.vector[1] # <<<<<<<<<<<<<< * t = multi.vector[2] * */ __pyx_v_v = (__pyx_v_multi->vector[1]); /* "nipy/labs/group/onesample.pyx":200 * y = multi.vector[0] * v = multi.vector[1] * t = multi.vector[2] # <<<<<<<<<<<<<< * * # Loop */ __pyx_v_t = (__pyx_v_multi->vector[2]); /* "nipy/labs/group/onesample.pyx":203 * * # Loop * for simu from 0 <= simu < nsimu: # <<<<<<<<<<<<<< * * # Set the magic number */ __pyx_t_5 = __pyx_v_nsimu; for (__pyx_v_simu = 0; __pyx_v_simu < __pyx_t_5; __pyx_v_simu++) { /* "nipy/labs/group/onesample.pyx":206 * * # Set the magic number * magic = magics.data[simu*magics.stride] # <<<<<<<<<<<<<< * * # Reset the multi-iterator */ __pyx_v_magic = (__pyx_v_magics->data[(__pyx_v_simu * __pyx_v_magics->stride)]); /* "nipy/labs/group/onesample.pyx":209 * * # Reset the multi-iterator * fffpy_multi_iterator_reset(multi) # <<<<<<<<<<<<<< * * # Perform the loop */ fffpy_multi_iterator_reset(__pyx_v_multi); /* "nipy/labs/group/onesample.pyx":212 * * # Perform the loop * idx = simu*t.stride # <<<<<<<<<<<<<< * while(multi.index < multi.size): * fff_onesample_permute_signs(yp, y, magic) */ 
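/* Inner permutation loop of `stat_mfx` (explanatory sketch): for each
 * permutation index `simu` the corresponding magic number is read from
 * `magics`, the multi-iterator is reset, and for every 1-d slice the signs of
 * the data are permuted (fff_onesample_permute_signs) before the mixed-effects
 * statistic is evaluated together with the per-observation variances `v`
 * (fff_onesample_stat_mfx_eval). Results are written to
 * `t.data[simu * t.stride]`. Per the docstring above, the Python-level call
 * has the form
 *
 *     T = stat_mfx(Y, V, id='student_mfx', base=0.0, axis=0,
 *                  magics=None, niter=5)
 *
 * where Y holds the effects and V the corresponding variances along `axis`;
 * note that the permutation argument is actually named `Magics` in this
 * wrapper's signature.
 */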
__pyx_v_idx = (__pyx_v_simu * __pyx_v_t->stride); /* "nipy/labs/group/onesample.pyx":213 * # Perform the loop * idx = simu*t.stride * while(multi.index < multi.size): # <<<<<<<<<<<<<< * fff_onesample_permute_signs(yp, y, magic) * t.data[idx] = fff_onesample_stat_mfx_eval(stat, yp, v) */ while (1) { __pyx_t_4 = (__pyx_v_multi->index < __pyx_v_multi->size); if (!__pyx_t_4) break; /* "nipy/labs/group/onesample.pyx":214 * idx = simu*t.stride * while(multi.index < multi.size): * fff_onesample_permute_signs(yp, y, magic) # <<<<<<<<<<<<<< * t.data[idx] = fff_onesample_stat_mfx_eval(stat, yp, v) * fffpy_multi_iterator_update(multi) */ fff_onesample_permute_signs(__pyx_v_yp, __pyx_v_y, __pyx_v_magic); /* "nipy/labs/group/onesample.pyx":215 * while(multi.index < multi.size): * fff_onesample_permute_signs(yp, y, magic) * t.data[idx] = fff_onesample_stat_mfx_eval(stat, yp, v) # <<<<<<<<<<<<<< * fffpy_multi_iterator_update(multi) * */ (__pyx_v_t->data[__pyx_v_idx]) = fff_onesample_stat_mfx_eval(__pyx_v_stat, __pyx_v_yp, __pyx_v_v); /* "nipy/labs/group/onesample.pyx":216 * fff_onesample_permute_signs(yp, y, magic) * t.data[idx] = fff_onesample_stat_mfx_eval(stat, yp, v) * fffpy_multi_iterator_update(multi) # <<<<<<<<<<<<<< * * */ fffpy_multi_iterator_update(__pyx_v_multi); } } /* "nipy/labs/group/onesample.pyx":220 * * # Free memory * fffpy_multi_iterator_delete(multi) # <<<<<<<<<<<<<< * fff_vector_delete(yp) * fff_vector_delete(magics) */ fffpy_multi_iterator_delete(__pyx_v_multi); /* "nipy/labs/group/onesample.pyx":221 * # Free memory * fffpy_multi_iterator_delete(multi) * fff_vector_delete(yp) # <<<<<<<<<<<<<< * fff_vector_delete(magics) * fff_onesample_stat_mfx_delete(stat) */ fff_vector_delete(__pyx_v_yp); /* "nipy/labs/group/onesample.pyx":222 * fffpy_multi_iterator_delete(multi) * fff_vector_delete(yp) * fff_vector_delete(magics) # <<<<<<<<<<<<<< * fff_onesample_stat_mfx_delete(stat) * */ fff_vector_delete(__pyx_v_magics); /* "nipy/labs/group/onesample.pyx":223 * fff_vector_delete(yp) * fff_vector_delete(magics) * fff_onesample_stat_mfx_delete(stat) # <<<<<<<<<<<<<< * * # Return */ fff_onesample_stat_mfx_delete(__pyx_v_stat); /* "nipy/labs/group/onesample.pyx":226 * * # Return * return T # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_T); __pyx_r = __pyx_v_T; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("nipy.labs.group.onesample.stat_mfx", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_dims); __Pyx_XDECREF(__pyx_v_T); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_5group_9onesample_5pdf_fit_mfx(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_4labs_5group_9onesample_4pdf_fit_mfx[] = "\n (W, Z) = pdf_fit_mfx(data=Y, vardata=V, axis=0, niter=5, constraint=False, base=0.0).\n \n Comments to follow.\n "; static PyMethodDef __pyx_mdef_4nipy_4labs_5group_9onesample_5pdf_fit_mfx = {__Pyx_NAMESTR("pdf_fit_mfx"), (PyCFunction)__pyx_pw_4nipy_4labs_5group_9onesample_5pdf_fit_mfx, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4nipy_4labs_5group_9onesample_4pdf_fit_mfx)}; static PyObject *__pyx_pw_4nipy_4labs_5group_9onesample_5pdf_fit_mfx(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_Y = 0; PyArrayObject 
*__pyx_v_V = 0; int __pyx_v_axis; int __pyx_v_niter; int __pyx_v_constraint; double __pyx_v_base; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("pdf_fit_mfx (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__Y,&__pyx_n_s__V,&__pyx_n_s__axis,&__pyx_n_s__niter,&__pyx_n_s__constraint,&__pyx_n_s__base,0}; PyObject* values[6] = {0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__Y)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__V)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("pdf_fit_mfx", 0, 2, 6, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 230; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__axis); if (value) { values[2] = value; kw_args--; } } case 3: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__niter); if (value) { values[3] = value; kw_args--; } } case 4: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__constraint); if (value) { values[4] = value; kw_args--; } } case 5: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__base); if (value) { values[5] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "pdf_fit_mfx") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 230; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_Y = ((PyArrayObject *)values[0]); __pyx_v_V = ((PyArrayObject *)values[1]); if (values[2]) { __pyx_v_axis = __Pyx_PyInt_AsInt(values[2]); if (unlikely((__pyx_v_axis == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 230; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else { __pyx_v_axis = ((int)0); } if (values[3]) { __pyx_v_niter = __Pyx_PyInt_AsInt(values[3]); if (unlikely((__pyx_v_niter == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 230; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else { __pyx_v_niter = ((int)5); } if (values[4]) { __pyx_v_constraint = __Pyx_PyInt_AsInt(values[4]); if (unlikely((__pyx_v_constraint == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 230; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else { __pyx_v_constraint = ((int)0); } if (values[5]) { __pyx_v_base = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_base == (double)-1) && PyErr_Occurred())) 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 230; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else { /* "nipy/labs/group/onesample.pyx":230 * * * def pdf_fit_mfx(ndarray Y, ndarray V, int axis=0, int niter=5, int constraint=0, double base=0.0): # <<<<<<<<<<<<<< * """ * (W, Z) = pdf_fit_mfx(data=Y, vardata=V, axis=0, niter=5, constraint=False, base=0.0). */ __pyx_v_base = ((double)0.0); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("pdf_fit_mfx", 0, 2, 6, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 230; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.labs.group.onesample.pdf_fit_mfx", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_Y), __pyx_ptype_5numpy_ndarray, 1, "Y", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 230; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_V), __pyx_ptype_5numpy_ndarray, 1, "V", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 230; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_4nipy_4labs_5group_9onesample_4pdf_fit_mfx(__pyx_self, __pyx_v_Y, __pyx_v_V, __pyx_v_axis, __pyx_v_niter, __pyx_v_constraint, __pyx_v_base); goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4nipy_4labs_5group_9onesample_4pdf_fit_mfx(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_Y, PyArrayObject *__pyx_v_V, int __pyx_v_axis, int __pyx_v_niter, int __pyx_v_constraint, double __pyx_v_base) { fff_vector *__pyx_v_y; fff_vector *__pyx_v_v; fff_vector *__pyx_v_w; fff_vector *__pyx_v_z; fff_onesample_stat_mfx *__pyx_v_stat; fffpy_multi_iterator *__pyx_v_multi; int __pyx_v_n; PyObject *__pyx_v_dims = NULL; PyObject *__pyx_v_W = NULL; PyObject *__pyx_v_Z = NULL; int __pyx_v_i; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("pdf_fit_mfx", 0); /* "nipy/labs/group/onesample.pyx":239 * cdef fff_onesample_stat_mfx* stat * cdef fffpy_multi_iterator* multi * cdef int n = Y.shape[axis] # <<<<<<<<<<<<<< * * # Create output array */ __pyx_v_n = (__pyx_v_Y->dimensions[__pyx_v_axis]); /* "nipy/labs/group/onesample.pyx":242 * * # Create output array * dims = [Y.shape[i] for i in range(Y.ndim)] # <<<<<<<<<<<<<< * W = np.zeros(dims) * Z = np.zeros(dims) */ __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 242; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __pyx_v_Y->nd; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; __pyx_t_4 = __Pyx_PyInt_to_py_Py_intptr_t((__pyx_v_Y->dimensions[__pyx_v_i])); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 242; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); if (unlikely(__Pyx_PyList_Append(__pyx_t_1, (PyObject*)__pyx_t_4))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 242; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } __pyx_t_4 = ((PyObject *)__pyx_t_1); __Pyx_INCREF(__pyx_t_4); __Pyx_DECREF(((PyObject 
*)__pyx_t_1)); __pyx_t_1 = 0; __pyx_v_dims = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; /* "nipy/labs/group/onesample.pyx":243 * # Create output array * dims = [Y.shape[i] for i in range(Y.ndim)] * W = np.zeros(dims) # <<<<<<<<<<<<<< * Z = np.zeros(dims) * */ __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 243; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_1 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__zeros); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 243; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 243; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_INCREF(((PyObject *)__pyx_v_dims)); PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_v_dims)); __Pyx_GIVEREF(((PyObject *)__pyx_v_dims)); __pyx_t_5 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 243; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; __pyx_v_W = __pyx_t_5; __pyx_t_5 = 0; /* "nipy/labs/group/onesample.pyx":244 * dims = [Y.shape[i] for i in range(Y.ndim)] * W = np.zeros(dims) * Z = np.zeros(dims) # <<<<<<<<<<<<<< * * # Create local structure */ __pyx_t_5 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 244; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__zeros); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 244; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 244; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(((PyObject *)__pyx_v_dims)); PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_v_dims)); __Pyx_GIVEREF(((PyObject *)__pyx_v_dims)); __pyx_t_1 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 244; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; __pyx_v_Z = __pyx_t_1; __pyx_t_1 = 0; /* "nipy/labs/group/onesample.pyx":247 * * # Create local structure * stat = fff_onesample_stat_mfx_new(n, FFF_ONESAMPLE_EMPIRICAL_MEAN_MFX, base) # <<<<<<<<<<<<<< * stat.niter = niter * stat.constraint = constraint */ __pyx_v_stat = fff_onesample_stat_mfx_new(__pyx_v_n, FFF_ONESAMPLE_EMPIRICAL_MEAN_MFX, __pyx_v_base); /* "nipy/labs/group/onesample.pyx":248 * # Create local structure * stat = fff_onesample_stat_mfx_new(n, FFF_ONESAMPLE_EMPIRICAL_MEAN_MFX, base) * stat.niter = niter # <<<<<<<<<<<<<< * stat.constraint = constraint * */ __pyx_v_stat->niter = __pyx_v_niter; /* "nipy/labs/group/onesample.pyx":249 * stat = fff_onesample_stat_mfx_new(n, FFF_ONESAMPLE_EMPIRICAL_MEAN_MFX, base) * stat.niter = niter * stat.constraint = constraint # <<<<<<<<<<<<<< * * # Multi-iterator */ __pyx_v_stat->constraint = __pyx_v_constraint; /* "nipy/labs/group/onesample.pyx":252 * * # 
Multi-iterator * multi = fffpy_multi_iterator_new(4, axis, Y, V, W, Z) # <<<<<<<<<<<<<< * * # Create views on nd-arrays */ __pyx_v_multi = fffpy_multi_iterator_new(4, __pyx_v_axis, ((void *)__pyx_v_Y), ((void *)__pyx_v_V), ((void *)__pyx_v_W), ((void *)__pyx_v_Z)); /* "nipy/labs/group/onesample.pyx":255 * * # Create views on nd-arrays * y = multi.vector[0] # <<<<<<<<<<<<<< * v = multi.vector[1] * w = multi.vector[2] */ __pyx_v_y = (__pyx_v_multi->vector[0]); /* "nipy/labs/group/onesample.pyx":256 * # Create views on nd-arrays * y = multi.vector[0] * v = multi.vector[1] # <<<<<<<<<<<<<< * w = multi.vector[2] * z = multi.vector[3] */ __pyx_v_v = (__pyx_v_multi->vector[1]); /* "nipy/labs/group/onesample.pyx":257 * y = multi.vector[0] * v = multi.vector[1] * w = multi.vector[2] # <<<<<<<<<<<<<< * z = multi.vector[3] * */ __pyx_v_w = (__pyx_v_multi->vector[2]); /* "nipy/labs/group/onesample.pyx":258 * v = multi.vector[1] * w = multi.vector[2] * z = multi.vector[3] # <<<<<<<<<<<<<< * * # Loop */ __pyx_v_z = (__pyx_v_multi->vector[3]); /* "nipy/labs/group/onesample.pyx":261 * * # Loop * while(multi.index < multi.size): # <<<<<<<<<<<<<< * fff_onesample_stat_mfx_pdf_fit(w, z, stat, y, v) * fffpy_multi_iterator_update(multi) */ while (1) { __pyx_t_6 = (__pyx_v_multi->index < __pyx_v_multi->size); if (!__pyx_t_6) break; /* "nipy/labs/group/onesample.pyx":262 * # Loop * while(multi.index < multi.size): * fff_onesample_stat_mfx_pdf_fit(w, z, stat, y, v) # <<<<<<<<<<<<<< * fffpy_multi_iterator_update(multi) * */ fff_onesample_stat_mfx_pdf_fit(__pyx_v_w, __pyx_v_z, __pyx_v_stat, __pyx_v_y, __pyx_v_v); /* "nipy/labs/group/onesample.pyx":263 * while(multi.index < multi.size): * fff_onesample_stat_mfx_pdf_fit(w, z, stat, y, v) * fffpy_multi_iterator_update(multi) # <<<<<<<<<<<<<< * * */ fffpy_multi_iterator_update(__pyx_v_multi); } /* "nipy/labs/group/onesample.pyx":267 * * # Delete local structures * fffpy_multi_iterator_delete(multi) # <<<<<<<<<<<<<< * fff_onesample_stat_mfx_delete(stat) * */ fffpy_multi_iterator_delete(__pyx_v_multi); /* "nipy/labs/group/onesample.pyx":268 * # Delete local structures * fffpy_multi_iterator_delete(multi) * fff_onesample_stat_mfx_delete(stat) # <<<<<<<<<<<<<< * * # Return */ fff_onesample_stat_mfx_delete(__pyx_v_stat); /* "nipy/labs/group/onesample.pyx":271 * * # Return * return W, Z # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 271; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_W); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_W); __Pyx_GIVEREF(__pyx_v_W); __Pyx_INCREF(__pyx_v_Z); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_v_Z); __Pyx_GIVEREF(__pyx_v_Z); __pyx_r = ((PyObject *)__pyx_t_1); __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("nipy.labs.group.onesample.pdf_fit_mfx", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_dims); __Pyx_XDECREF(__pyx_v_W); __Pyx_XDECREF(__pyx_v_Z); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_5group_9onesample_7pdf_fit_gmfx(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_4labs_5group_9onesample_6pdf_fit_gmfx[] = "\n (MU, S2) = pdf_fit_gmfx(data=Y, vardata=V, axis=0, 
niter=5, constraint=False, base=0.0).\n \n Comments to follow.\n "; static PyMethodDef __pyx_mdef_4nipy_4labs_5group_9onesample_7pdf_fit_gmfx = {__Pyx_NAMESTR("pdf_fit_gmfx"), (PyCFunction)__pyx_pw_4nipy_4labs_5group_9onesample_7pdf_fit_gmfx, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4nipy_4labs_5group_9onesample_6pdf_fit_gmfx)}; static PyObject *__pyx_pw_4nipy_4labs_5group_9onesample_7pdf_fit_gmfx(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_Y = 0; PyArrayObject *__pyx_v_V = 0; int __pyx_v_axis; int __pyx_v_niter; int __pyx_v_constraint; double __pyx_v_base; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("pdf_fit_gmfx (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__Y,&__pyx_n_s__V,&__pyx_n_s__axis,&__pyx_n_s__niter,&__pyx_n_s__constraint,&__pyx_n_s__base,0}; PyObject* values[6] = {0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__Y)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__V)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("pdf_fit_gmfx", 0, 2, 6, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 274; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__axis); if (value) { values[2] = value; kw_args--; } } case 3: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__niter); if (value) { values[3] = value; kw_args--; } } case 4: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__constraint); if (value) { values[4] = value; kw_args--; } } case 5: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__base); if (value) { values[5] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "pdf_fit_gmfx") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 274; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_Y = ((PyArrayObject *)values[0]); __pyx_v_V = ((PyArrayObject *)values[1]); if (values[2]) { __pyx_v_axis = __Pyx_PyInt_AsInt(values[2]); if (unlikely((__pyx_v_axis == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 274; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else { __pyx_v_axis = ((int)0); } if (values[3]) { __pyx_v_niter = __Pyx_PyInt_AsInt(values[3]); if (unlikely((__pyx_v_niter == (int)-1) && PyErr_Occurred())) {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 274; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else { __pyx_v_niter = ((int)5); } if (values[4]) { __pyx_v_constraint = __Pyx_PyInt_AsInt(values[4]); if (unlikely((__pyx_v_constraint == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 274; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else { __pyx_v_constraint = ((int)0); } if (values[5]) { __pyx_v_base = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_base == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 274; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else { /* "nipy/labs/group/onesample.pyx":274 * * * def pdf_fit_gmfx(ndarray Y, ndarray V, int axis=0, int niter=5, int constraint=0, double base=0.0): # <<<<<<<<<<<<<< * """ * (MU, S2) = pdf_fit_gmfx(data=Y, vardata=V, axis=0, niter=5, constraint=False, base=0.0). */ __pyx_v_base = ((double)0.0); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("pdf_fit_gmfx", 0, 2, 6, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 274; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.labs.group.onesample.pdf_fit_gmfx", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_Y), __pyx_ptype_5numpy_ndarray, 1, "Y", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 274; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_V), __pyx_ptype_5numpy_ndarray, 1, "V", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 274; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_4nipy_4labs_5group_9onesample_6pdf_fit_gmfx(__pyx_self, __pyx_v_Y, __pyx_v_V, __pyx_v_axis, __pyx_v_niter, __pyx_v_constraint, __pyx_v_base); goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4nipy_4labs_5group_9onesample_6pdf_fit_gmfx(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_Y, PyArrayObject *__pyx_v_V, int __pyx_v_axis, int __pyx_v_niter, int __pyx_v_constraint, double __pyx_v_base) { fff_vector *__pyx_v_y; fff_vector *__pyx_v_v; fff_vector *__pyx_v_mu; fff_vector *__pyx_v_s2; fff_onesample_stat_mfx *__pyx_v_stat; fffpy_multi_iterator *__pyx_v_multi; int __pyx_v_n; PyObject *__pyx_v_dims = NULL; PyObject *__pyx_v_MU = NULL; PyObject *__pyx_v_S2 = NULL; int __pyx_v_i; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("pdf_fit_gmfx", 0); /* "nipy/labs/group/onesample.pyx":283 * cdef fff_onesample_stat_mfx* stat * cdef fffpy_multi_iterator* multi * cdef int n = Y.shape[axis] # <<<<<<<<<<<<<< * * # Create output array */ __pyx_v_n = (__pyx_v_Y->dimensions[__pyx_v_axis]); /* "nipy/labs/group/onesample.pyx":286 * * # Create output array * dims = [Y.shape[i] for i in range(Y.ndim)] # <<<<<<<<<<<<<< * dims[axis] = 1 * MU = np.zeros(dims) */ __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 286; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __pyx_v_Y->nd; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = 
__pyx_t_3; __pyx_t_4 = __Pyx_PyInt_to_py_Py_intptr_t((__pyx_v_Y->dimensions[__pyx_v_i])); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 286; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); if (unlikely(__Pyx_PyList_Append(__pyx_t_1, (PyObject*)__pyx_t_4))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 286; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } __pyx_t_4 = ((PyObject *)__pyx_t_1); __Pyx_INCREF(__pyx_t_4); __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __pyx_v_dims = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; /* "nipy/labs/group/onesample.pyx":287 * # Create output array * dims = [Y.shape[i] for i in range(Y.ndim)] * dims[axis] = 1 # <<<<<<<<<<<<<< * MU = np.zeros(dims) * S2 = np.zeros(dims) */ if (__Pyx_SetItemInt(((PyObject *)__pyx_v_dims), __pyx_v_axis, __pyx_int_1, sizeof(int), PyInt_FromLong) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 287; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/group/onesample.pyx":288 * dims = [Y.shape[i] for i in range(Y.ndim)] * dims[axis] = 1 * MU = np.zeros(dims) # <<<<<<<<<<<<<< * S2 = np.zeros(dims) * */ __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 288; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_1 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__zeros); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 288; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 288; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_INCREF(((PyObject *)__pyx_v_dims)); PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_v_dims)); __Pyx_GIVEREF(((PyObject *)__pyx_v_dims)); __pyx_t_5 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 288; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; __pyx_v_MU = __pyx_t_5; __pyx_t_5 = 0; /* "nipy/labs/group/onesample.pyx":289 * dims[axis] = 1 * MU = np.zeros(dims) * S2 = np.zeros(dims) # <<<<<<<<<<<<<< * * # Create local structure */ __pyx_t_5 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 289; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__zeros); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 289; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 289; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(((PyObject *)__pyx_v_dims)); PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_v_dims)); __Pyx_GIVEREF(((PyObject *)__pyx_v_dims)); __pyx_t_1 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 289; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; __pyx_v_S2 = 
__pyx_t_1; __pyx_t_1 = 0; /* "nipy/labs/group/onesample.pyx":292 * * # Create local structure * stat = fff_onesample_stat_mfx_new(n, FFF_ONESAMPLE_STUDENT_MFX, base) # <<<<<<<<<<<<<< * stat.niter = niter * stat.constraint = constraint */ __pyx_v_stat = fff_onesample_stat_mfx_new(__pyx_v_n, FFF_ONESAMPLE_STUDENT_MFX, __pyx_v_base); /* "nipy/labs/group/onesample.pyx":293 * # Create local structure * stat = fff_onesample_stat_mfx_new(n, FFF_ONESAMPLE_STUDENT_MFX, base) * stat.niter = niter # <<<<<<<<<<<<<< * stat.constraint = constraint * */ __pyx_v_stat->niter = __pyx_v_niter; /* "nipy/labs/group/onesample.pyx":294 * stat = fff_onesample_stat_mfx_new(n, FFF_ONESAMPLE_STUDENT_MFX, base) * stat.niter = niter * stat.constraint = constraint # <<<<<<<<<<<<<< * * # Multi-iterator */ __pyx_v_stat->constraint = __pyx_v_constraint; /* "nipy/labs/group/onesample.pyx":297 * * # Multi-iterator * multi = fffpy_multi_iterator_new(4, axis, Y, V, MU, S2) # <<<<<<<<<<<<<< * * # Create views on nd-arrays */ __pyx_v_multi = fffpy_multi_iterator_new(4, __pyx_v_axis, ((void *)__pyx_v_Y), ((void *)__pyx_v_V), ((void *)__pyx_v_MU), ((void *)__pyx_v_S2)); /* "nipy/labs/group/onesample.pyx":300 * * # Create views on nd-arrays * y = multi.vector[0] # <<<<<<<<<<<<<< * v = multi.vector[1] * mu = multi.vector[2] */ __pyx_v_y = (__pyx_v_multi->vector[0]); /* "nipy/labs/group/onesample.pyx":301 * # Create views on nd-arrays * y = multi.vector[0] * v = multi.vector[1] # <<<<<<<<<<<<<< * mu = multi.vector[2] * s2 = multi.vector[3] */ __pyx_v_v = (__pyx_v_multi->vector[1]); /* "nipy/labs/group/onesample.pyx":302 * y = multi.vector[0] * v = multi.vector[1] * mu = multi.vector[2] # <<<<<<<<<<<<<< * s2 = multi.vector[3] * */ __pyx_v_mu = (__pyx_v_multi->vector[2]); /* "nipy/labs/group/onesample.pyx":303 * v = multi.vector[1] * mu = multi.vector[2] * s2 = multi.vector[3] # <<<<<<<<<<<<<< * * # Loop */ __pyx_v_s2 = (__pyx_v_multi->vector[3]); /* "nipy/labs/group/onesample.pyx":306 * * # Loop * while(multi.index < multi.size): # <<<<<<<<<<<<<< * fff_onesample_stat_gmfx_pdf_fit(mu.data, s2.data, stat, y, v) * fffpy_multi_iterator_update(multi) */ while (1) { __pyx_t_6 = (__pyx_v_multi->index < __pyx_v_multi->size); if (!__pyx_t_6) break; /* "nipy/labs/group/onesample.pyx":307 * # Loop * while(multi.index < multi.size): * fff_onesample_stat_gmfx_pdf_fit(mu.data, s2.data, stat, y, v) # <<<<<<<<<<<<<< * fffpy_multi_iterator_update(multi) * */ fff_onesample_stat_gmfx_pdf_fit(__pyx_v_mu->data, __pyx_v_s2->data, __pyx_v_stat, __pyx_v_y, __pyx_v_v); /* "nipy/labs/group/onesample.pyx":308 * while(multi.index < multi.size): * fff_onesample_stat_gmfx_pdf_fit(mu.data, s2.data, stat, y, v) * fffpy_multi_iterator_update(multi) # <<<<<<<<<<<<<< * * */ fffpy_multi_iterator_update(__pyx_v_multi); } /* "nipy/labs/group/onesample.pyx":312 * * # Delete local structures * fffpy_multi_iterator_delete(multi) # <<<<<<<<<<<<<< * fff_onesample_stat_mfx_delete(stat) * */ fffpy_multi_iterator_delete(__pyx_v_multi); /* "nipy/labs/group/onesample.pyx":313 * # Delete local structures * fffpy_multi_iterator_delete(multi) * fff_onesample_stat_mfx_delete(stat) # <<<<<<<<<<<<<< * * # Return */ fff_onesample_stat_mfx_delete(__pyx_v_stat); /* "nipy/labs/group/onesample.pyx":316 * * # Return * return MU, S2 # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 316; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_MU); 
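/* Return value of `pdf_fit_gmfx` (explanatory sketch): the fit uses the
 * FFF_ONESAMPLE_STUDENT_MFX flag with `niter` iterations of the underlying
 * routine and the optional `constraint`, producing one estimate per 1-d
 * slice; `MU` and `S2` (by their names, the group mean and variance of the
 * Gaussian mixed-effects model) therefore have size 1 along `axis` and are
 * returned as a tuple. Although the docstring writes the call as
 * pdf_fit_gmfx(data=Y, vardata=V, ...), the keyword names accepted by this
 * wrapper are `Y` and `V`, so a safe call is positional:
 *
 *     MU, S2 = onesample.pdf_fit_gmfx(Y, V, axis=0, niter=5,
 *                                     constraint=0, base=0.0)
 *
 * with Y and V the effect and variance arrays as used for stat_mfx above
 * (hypothetical data; module assumed importable as nipy.labs.group.onesample).
 */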
PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_MU); __Pyx_GIVEREF(__pyx_v_MU); __Pyx_INCREF(__pyx_v_S2); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_v_S2); __Pyx_GIVEREF(__pyx_v_S2); __pyx_r = ((PyObject *)__pyx_t_1); __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("nipy.labs.group.onesample.pdf_fit_gmfx", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_dims); __Pyx_XDECREF(__pyx_v_MU); __Pyx_XDECREF(__pyx_v_S2); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_pf_5numpy_7ndarray___getbuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":194 * # experimental exception made for __getbuffer__ and __releasebuffer__ * # -- the details of this may change. * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< * # This implementation of getbuffer is geared towards Cython * # requirements, and does not yet fullfill the PEP. */ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_v_copy_shape; int __pyx_v_i; int __pyx_v_ndim; int __pyx_v_endian_detector; int __pyx_v_little_endian; int __pyx_v_t; char *__pyx_v_f; PyArray_Descr *__pyx_v_descr = 0; int __pyx_v_offset; int __pyx_v_hasfields; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; char *__pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getbuffer__", 0); if (__pyx_v_info != NULL) { __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); } /* "numpy.pxd":200 * # of flags * * if info == NULL: return # <<<<<<<<<<<<<< * * cdef int copy_shape, i, ndim */ __pyx_t_1 = (__pyx_v_info == NULL); if (__pyx_t_1) { __pyx_r = 0; goto __pyx_L0; goto __pyx_L3; } __pyx_L3:; /* "numpy.pxd":203 * * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((&endian_detector)[0] != 0) * */ __pyx_v_endian_detector = 1; /* "numpy.pxd":204 * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * * ndim = PyArray_NDIM(self) */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "numpy.pxd":206 * cdef bint little_endian = ((&endian_detector)[0] != 0) * * ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<< * * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_v_ndim = PyArray_NDIM(__pyx_v_self); /* "numpy.pxd":208 * ndim = PyArray_NDIM(self) * * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * copy_shape = 1 * else: */ __pyx_t_1 = ((sizeof(npy_intp)) != (sizeof(Py_ssize_t))); if (__pyx_t_1) { /* "numpy.pxd":209 * * if sizeof(npy_intp) != sizeof(Py_ssize_t): * 
copy_shape = 1 # <<<<<<<<<<<<<< * else: * copy_shape = 0 */ __pyx_v_copy_shape = 1; goto __pyx_L4; } /*else*/ { /* "numpy.pxd":211 * copy_shape = 1 * else: * copy_shape = 0 # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) */ __pyx_v_copy_shape = 0; } __pyx_L4:; /* "numpy.pxd":213 * copy_shape = 0 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS); if (__pyx_t_1) { /* "numpy.pxd":214 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not C contiguous") * */ __pyx_t_2 = (!PyArray_CHKFLAGS(__pyx_v_self, NPY_C_CONTIGUOUS)); __pyx_t_3 = __pyx_t_2; } else { __pyx_t_3 = __pyx_t_1; } if (__pyx_t_3) { /* "numpy.pxd":215 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_2), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L5; } __pyx_L5:; /* "numpy.pxd":217 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ __pyx_t_3 = ((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS); if (__pyx_t_3) { /* "numpy.pxd":218 * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not Fortran contiguous") * */ __pyx_t_1 = (!PyArray_CHKFLAGS(__pyx_v_self, NPY_F_CONTIGUOUS)); __pyx_t_2 = __pyx_t_1; } else { __pyx_t_2 = __pyx_t_3; } if (__pyx_t_2) { /* "numpy.pxd":219 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_4), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L6; } __pyx_L6:; /* "numpy.pxd":221 * raise ValueError(u"ndarray is not Fortran contiguous") * * info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<< * info.ndim = ndim * if copy_shape: */ __pyx_v_info->buf = PyArray_DATA(__pyx_v_self); /* "numpy.pxd":222 * * info.buf = PyArray_DATA(self) * info.ndim = ndim # <<<<<<<<<<<<<< * if copy_shape: * # Allocate new buffer for strides and shape info. 
*/ __pyx_v_info->ndim = __pyx_v_ndim; /* "numpy.pxd":223 * info.buf = PyArray_DATA(self) * info.ndim = ndim * if copy_shape: # <<<<<<<<<<<<<< * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. */ if (__pyx_v_copy_shape) { /* "numpy.pxd":226 * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. * info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) # <<<<<<<<<<<<<< * info.shape = info.strides + ndim * for i in range(ndim): */ __pyx_v_info->strides = ((Py_ssize_t *)malloc((((sizeof(Py_ssize_t)) * ((size_t)__pyx_v_ndim)) * 2))); /* "numpy.pxd":227 * # This is allocated as one block, strides first. * info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) * info.shape = info.strides + ndim # <<<<<<<<<<<<<< * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] */ __pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim); /* "numpy.pxd":228 * info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) * info.shape = info.strides + ndim * for i in range(ndim): # <<<<<<<<<<<<<< * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] */ __pyx_t_5 = __pyx_v_ndim; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "numpy.pxd":229 * info.shape = info.strides + ndim * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<< * info.shape[i] = PyArray_DIMS(self)[i] * else: */ (__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]); /* "numpy.pxd":230 * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<< * else: * info.strides = PyArray_STRIDES(self) */ (__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]); } goto __pyx_L7; } /*else*/ { /* "numpy.pxd":232 * info.shape[i] = PyArray_DIMS(self)[i] * else: * info.strides = PyArray_STRIDES(self) # <<<<<<<<<<<<<< * info.shape = PyArray_DIMS(self) * info.suboffsets = NULL */ __pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self)); /* "numpy.pxd":233 * else: * info.strides = PyArray_STRIDES(self) * info.shape = PyArray_DIMS(self) # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) */ __pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(__pyx_v_self)); } __pyx_L7:; /* "numpy.pxd":234 * info.strides = PyArray_STRIDES(self) * info.shape = PyArray_DIMS(self) * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) */ __pyx_v_info->suboffsets = NULL; /* "numpy.pxd":235 * info.shape = PyArray_DIMS(self) * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<< * info.readonly = not PyArray_ISWRITEABLE(self) * */ __pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self); /* "numpy.pxd":236 * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<< * * cdef int t */ __pyx_v_info->readonly = (!PyArray_ISWRITEABLE(__pyx_v_self)); /* "numpy.pxd":239 * * cdef int t * cdef char* f = NULL # <<<<<<<<<<<<<< * cdef dtype descr = self.descr * cdef list stack */ __pyx_v_f = NULL; /* "numpy.pxd":240 * cdef int t * cdef char* f = NULL * cdef dtype descr = self.descr # <<<<<<<<<<<<<< * cdef list stack * cdef int offset */ __pyx_t_4 = ((PyObject *)__pyx_v_self->descr); __Pyx_INCREF(__pyx_t_4); __pyx_v_descr = ((PyArray_Descr *)__pyx_t_4); __pyx_t_4 
= 0; /* "numpy.pxd":244 * cdef int offset * * cdef bint hasfields = PyDataType_HASFIELDS(descr) # <<<<<<<<<<<<<< * * if not hasfields and not copy_shape: */ __pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr); /* "numpy.pxd":246 * cdef bint hasfields = PyDataType_HASFIELDS(descr) * * if not hasfields and not copy_shape: # <<<<<<<<<<<<<< * # do not call releasebuffer * info.obj = None */ __pyx_t_2 = (!__pyx_v_hasfields); if (__pyx_t_2) { __pyx_t_3 = (!__pyx_v_copy_shape); __pyx_t_1 = __pyx_t_3; } else { __pyx_t_1 = __pyx_t_2; } if (__pyx_t_1) { /* "numpy.pxd":248 * if not hasfields and not copy_shape: * # do not call releasebuffer * info.obj = None # <<<<<<<<<<<<<< * else: * # need to call releasebuffer */ __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = Py_None; goto __pyx_L10; } /*else*/ { /* "numpy.pxd":251 * else: * # need to call releasebuffer * info.obj = self # <<<<<<<<<<<<<< * * if not hasfields: */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); } __pyx_L10:; /* "numpy.pxd":253 * info.obj = self * * if not hasfields: # <<<<<<<<<<<<<< * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or */ __pyx_t_1 = (!__pyx_v_hasfields); if (__pyx_t_1) { /* "numpy.pxd":254 * * if not hasfields: * t = descr.type_num # <<<<<<<<<<<<<< * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): */ __pyx_t_5 = __pyx_v_descr->type_num; __pyx_v_t = __pyx_t_5; /* "numpy.pxd":255 * if not hasfields: * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_1 = (__pyx_v_descr->byteorder == '>'); if (__pyx_t_1) { __pyx_t_2 = __pyx_v_little_endian; } else { __pyx_t_2 = __pyx_t_1; } if (!__pyx_t_2) { /* "numpy.pxd":256 * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" */ __pyx_t_1 = (__pyx_v_descr->byteorder == '<'); if (__pyx_t_1) { __pyx_t_3 = (!__pyx_v_little_endian); __pyx_t_7 = __pyx_t_3; } else { __pyx_t_7 = __pyx_t_1; } __pyx_t_1 = __pyx_t_7; } else { __pyx_t_1 = __pyx_t_2; } if (__pyx_t_1) { /* "numpy.pxd":257 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_6), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L12; } __pyx_L12:; /* "numpy.pxd":258 * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" */ __pyx_t_1 = (__pyx_v_t == NPY_BYTE); if (__pyx_t_1) { 
__pyx_v_f = __pyx_k__b; goto __pyx_L13; } /* "numpy.pxd":259 * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" */ __pyx_t_1 = (__pyx_v_t == NPY_UBYTE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__B; goto __pyx_L13; } /* "numpy.pxd":260 * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" */ __pyx_t_1 = (__pyx_v_t == NPY_SHORT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__h; goto __pyx_L13; } /* "numpy.pxd":261 * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" */ __pyx_t_1 = (__pyx_v_t == NPY_USHORT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__H; goto __pyx_L13; } /* "numpy.pxd":262 * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" */ __pyx_t_1 = (__pyx_v_t == NPY_INT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__i; goto __pyx_L13; } /* "numpy.pxd":263 * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" */ __pyx_t_1 = (__pyx_v_t == NPY_UINT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__I; goto __pyx_L13; } /* "numpy.pxd":264 * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" */ __pyx_t_1 = (__pyx_v_t == NPY_LONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__l; goto __pyx_L13; } /* "numpy.pxd":265 * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" */ __pyx_t_1 = (__pyx_v_t == NPY_ULONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__L; goto __pyx_L13; } /* "numpy.pxd":266 * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" */ __pyx_t_1 = (__pyx_v_t == NPY_LONGLONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__q; goto __pyx_L13; } /* "numpy.pxd":267 * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" */ __pyx_t_1 = (__pyx_v_t == NPY_ULONGLONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Q; goto __pyx_L13; } /* "numpy.pxd":268 * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" */ __pyx_t_1 = (__pyx_v_t == NPY_FLOAT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__f; goto __pyx_L13; } /* "numpy.pxd":269 * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" */ __pyx_t_1 = (__pyx_v_t == NPY_DOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__d; goto __pyx_L13; } /* "numpy.pxd":270 * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" */ __pyx_t_1 = (__pyx_v_t == NPY_LONGDOUBLE); 
if (__pyx_t_1) { __pyx_v_f = __pyx_k__g; goto __pyx_L13; } /* "numpy.pxd":271 * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" */ __pyx_t_1 = (__pyx_v_t == NPY_CFLOAT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Zf; goto __pyx_L13; } /* "numpy.pxd":272 * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" */ __pyx_t_1 = (__pyx_v_t == NPY_CDOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Zd; goto __pyx_L13; } /* "numpy.pxd":273 * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f = "O" * else: */ __pyx_t_1 = (__pyx_v_t == NPY_CLONGDOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Zg; goto __pyx_L13; } /* "numpy.pxd":274 * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ __pyx_t_1 = (__pyx_v_t == NPY_OBJECT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__O; goto __pyx_L13; } /*else*/ { /* "numpy.pxd":276 * elif t == NPY_OBJECT: f = "O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * info.format = f * return */ __pyx_t_4 = PyInt_FromLong(__pyx_v_t); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_8 = PyNumber_Remainder(((PyObject *)__pyx_kp_u_7), __pyx_t_4); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_8)); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_t_8)); __Pyx_GIVEREF(((PyObject *)__pyx_t_8)); __pyx_t_8 = 0; __pyx_t_8 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_8, 0, 0, 0); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L13:; /* "numpy.pxd":277 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f # <<<<<<<<<<<<<< * return * else: */ __pyx_v_info->format = __pyx_v_f; /* "numpy.pxd":278 * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f * return # <<<<<<<<<<<<<< * else: * info.format = stdlib.malloc(_buffer_format_string_len) */ __pyx_r = 0; goto __pyx_L0; goto __pyx_L11; } /*else*/ { /* "numpy.pxd":280 * return * else: * info.format = stdlib.malloc(_buffer_format_string_len) # <<<<<<<<<<<<<< * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 */ __pyx_v_info->format = ((char *)malloc(255)); /* "numpy.pxd":281 * else: * info.format = stdlib.malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment # <<<<<<<<<<<<<< * offset = 0 * f = 
_util_dtypestring(descr, info.format + 1, */ (__pyx_v_info->format[0]) = '^'; /* "numpy.pxd":282 * info.format = stdlib.malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 # <<<<<<<<<<<<<< * f = _util_dtypestring(descr, info.format + 1, * info.format + _buffer_format_string_len, */ __pyx_v_offset = 0; /* "numpy.pxd":285 * f = _util_dtypestring(descr, info.format + 1, * info.format + _buffer_format_string_len, * &offset) # <<<<<<<<<<<<<< * f[0] = c'\0' # Terminate format string * */ __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 255), (&__pyx_v_offset)); if (unlikely(__pyx_t_9 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 283; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_f = __pyx_t_9; /* "numpy.pxd":286 * info.format + _buffer_format_string_len, * &offset) * f[0] = c'\0' # Terminate format string # <<<<<<<<<<<<<< * * def __releasebuffer__(ndarray self, Py_buffer* info): */ (__pyx_v_f[0]) = '\x00'; } __pyx_L11:; __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { __Pyx_GOTREF(Py_None); __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; } __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_descr); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__releasebuffer__ (wrapper)", 0); __pyx_pf_5numpy_7ndarray_2__releasebuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info)); __Pyx_RefNannyFinishContext(); } /* "numpy.pxd":288 * f[0] = c'\0' # Terminate format string * * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< * if PyArray_HASFIELDS(self): * stdlib.free(info.format) */ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__releasebuffer__", 0); /* "numpy.pxd":289 * * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_t_1 = PyArray_HASFIELDS(__pyx_v_self); if (__pyx_t_1) { /* "numpy.pxd":290 * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): * stdlib.free(info.format) # <<<<<<<<<<<<<< * if sizeof(npy_intp) != sizeof(Py_ssize_t): * stdlib.free(info.strides) */ free(__pyx_v_info->format); goto __pyx_L3; } __pyx_L3:; /* "numpy.pxd":291 * if PyArray_HASFIELDS(self): * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * stdlib.free(info.strides) * # info.shape was stored after info.strides in the same block */ __pyx_t_1 = ((sizeof(npy_intp)) != (sizeof(Py_ssize_t))); if (__pyx_t_1) { /* "numpy.pxd":292 * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): * 
stdlib.free(info.strides) # <<<<<<<<<<<<<< * # info.shape was stored after info.strides in the same block * */ free(__pyx_v_info->strides); goto __pyx_L4; } __pyx_L4:; __Pyx_RefNannyFinishContext(); } /* "numpy.pxd":768 * ctypedef npy_cdouble complex_t * * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(1, a) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0); /* "numpy.pxd":769 * * cdef inline object PyArray_MultiIterNew1(a): * return PyArray_MultiIterNew(1, a) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew2(a, b): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 769; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":771 * return PyArray_MultiIterNew(1, a) * * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(2, a, b) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0); /* "numpy.pxd":772 * * cdef inline object PyArray_MultiIterNew2(a, b): * return PyArray_MultiIterNew(2, a, b) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew3(a, b, c): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 772; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":774 * return PyArray_MultiIterNew(2, a, b) * * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(3, a, b, c) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0); /* "numpy.pxd":775 * * cdef inline object PyArray_MultiIterNew3(a, b, c): * return PyArray_MultiIterNew(3, a, b, c) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if 
(unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 775; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":777 * return PyArray_MultiIterNew(3, a, b, c) * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(4, a, b, c, d) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0); /* "numpy.pxd":778 * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): * return PyArray_MultiIterNew(4, a, b, c, d) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 778; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":780 * return PyArray_MultiIterNew(4, a, b, c, d) * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(5, a, b, c, d, e) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0); /* "numpy.pxd":781 * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): * return PyArray_MultiIterNew(5, a, b, c, d, e) # <<<<<<<<<<<<<< * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 781; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":783 * return PyArray_MultiIterNew(5, a, b, c, d, e) * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # 
<<<<<<<<<<<<<< * # Recursive utility function used in __getbuffer__ to get format * # string. The new location in the format string is returned. */ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) { PyArray_Descr *__pyx_v_child = 0; int __pyx_v_endian_detector; int __pyx_v_little_endian; PyObject *__pyx_v_fields = 0; PyObject *__pyx_v_childname = NULL; PyObject *__pyx_v_new_offset = NULL; PyObject *__pyx_v_t = NULL; char *__pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *(*__pyx_t_6)(PyObject *); int __pyx_t_7; int __pyx_t_8; int __pyx_t_9; int __pyx_t_10; long __pyx_t_11; char *__pyx_t_12; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_util_dtypestring", 0); /* "numpy.pxd":790 * cdef int delta_offset * cdef tuple i * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((&endian_detector)[0] != 0) * cdef tuple fields */ __pyx_v_endian_detector = 1; /* "numpy.pxd":791 * cdef tuple i * cdef int endian_detector = 1 * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * cdef tuple fields * */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "numpy.pxd":794 * cdef tuple fields * * for childname in descr.names: # <<<<<<<<<<<<<< * fields = descr.fields[childname] * child, new_offset = fields */ if (unlikely(((PyObject *)__pyx_v_descr->names) == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_1 = ((PyObject *)__pyx_v_descr->names); __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; for (;;) { if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif __Pyx_XDECREF(__pyx_v_childname); __pyx_v_childname = __pyx_t_3; __pyx_t_3 = 0; /* "numpy.pxd":795 * * for childname in descr.names: * fields = descr.fields[childname] # <<<<<<<<<<<<<< * child, new_offset = fields * */ __pyx_t_3 = PyObject_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (!__pyx_t_3) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected tuple, got %.200s", Py_TYPE(__pyx_t_3)->tp_name), 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_XDECREF(((PyObject *)__pyx_v_fields)); __pyx_v_fields = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "numpy.pxd":796 * for childname in descr.names: * fields = descr.fields[childname] * child, new_offset = fields # <<<<<<<<<<<<<< * * if (end - f) - (new_offset - offset[0]) < 15: */ if (likely(PyTuple_CheckExact(((PyObject *)__pyx_v_fields)))) { PyObject* sequence = ((PyObject *)__pyx_v_fields); #if CYTHON_COMPILING_IN_CPYTHON Py_ssize_t size = 
Py_SIZE(sequence); #else Py_ssize_t size = PySequence_Size(sequence); #endif if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); #else __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else if (1) { __Pyx_RaiseNoneNotIterableError(); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } else { Py_ssize_t index = -1; __pyx_t_5 = PyObject_GetIter(((PyObject *)__pyx_v_fields)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = Py_TYPE(__pyx_t_5)->tp_iternext; index = 0; __pyx_t_3 = __pyx_t_6(__pyx_t_5); if (unlikely(!__pyx_t_3)) goto __pyx_L5_unpacking_failed; __Pyx_GOTREF(__pyx_t_3); index = 1; __pyx_t_4 = __pyx_t_6(__pyx_t_5); if (unlikely(!__pyx_t_4)) goto __pyx_L5_unpacking_failed; __Pyx_GOTREF(__pyx_t_4); if (__Pyx_IternextUnpackEndCheck(__pyx_t_6(__pyx_t_5), 2) < 0) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_6 = NULL; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L6_unpacking_done; __pyx_L5_unpacking_failed:; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_6 = NULL; if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_L6_unpacking_done:; } if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_XDECREF(((PyObject *)__pyx_v_child)); __pyx_v_child = ((PyArray_Descr *)__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_v_new_offset); __pyx_v_new_offset = __pyx_t_4; __pyx_t_4 = 0; /* "numpy.pxd":798 * child, new_offset = fields * * if (end - f) - (new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * */ __pyx_t_4 = PyInt_FromLong((__pyx_v_end - __pyx_v_f)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyInt_FromLong((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_3); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyNumber_Subtract(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; 
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyObject_RichCompare(__pyx_t_3, __pyx_int_15, Py_LT); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { /* "numpy.pxd":799 * * if (end - f) - (new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_t_5 = PyObject_Call(__pyx_builtin_RuntimeError, ((PyObject *)__pyx_k_tuple_9), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L7; } __pyx_L7:; /* "numpy.pxd":801 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_7 = (__pyx_v_child->byteorder == '>'); if (__pyx_t_7) { __pyx_t_8 = __pyx_v_little_endian; } else { __pyx_t_8 = __pyx_t_7; } if (!__pyx_t_8) { /* "numpy.pxd":802 * * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * # One could encode it in the format string and have Cython */ __pyx_t_7 = (__pyx_v_child->byteorder == '<'); if (__pyx_t_7) { __pyx_t_9 = (!__pyx_v_little_endian); __pyx_t_10 = __pyx_t_9; } else { __pyx_t_10 = __pyx_t_7; } __pyx_t_7 = __pyx_t_10; } else { __pyx_t_7 = __pyx_t_8; } if (__pyx_t_7) { /* "numpy.pxd":803 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_10), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L8; } __pyx_L8:; /* "numpy.pxd":813 * * # Output padding bytes * while offset[0] < new_offset: # <<<<<<<<<<<<<< * f[0] = 120 # "x"; pad byte * f += 1 */ while (1) { __pyx_t_5 = PyInt_FromLong((__pyx_v_offset[0])); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_t_5, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = 
__Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (!__pyx_t_7) break; /* "numpy.pxd":814 * # Output padding bytes * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<< * f += 1 * offset[0] += 1 */ (__pyx_v_f[0]) = 120; /* "numpy.pxd":815 * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte * f += 1 # <<<<<<<<<<<<<< * offset[0] += 1 * */ __pyx_v_f = (__pyx_v_f + 1); /* "numpy.pxd":816 * f[0] = 120 # "x"; pad byte * f += 1 * offset[0] += 1 # <<<<<<<<<<<<<< * * offset[0] += child.itemsize */ __pyx_t_11 = 0; (__pyx_v_offset[__pyx_t_11]) = ((__pyx_v_offset[__pyx_t_11]) + 1); } /* "numpy.pxd":818 * offset[0] += 1 * * offset[0] += child.itemsize # <<<<<<<<<<<<<< * * if not PyDataType_HASFIELDS(child): */ __pyx_t_11 = 0; (__pyx_v_offset[__pyx_t_11]) = ((__pyx_v_offset[__pyx_t_11]) + __pyx_v_child->elsize); /* "numpy.pxd":820 * offset[0] += child.itemsize * * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< * t = child.type_num * if end - f < 5: */ __pyx_t_7 = (!PyDataType_HASFIELDS(__pyx_v_child)); if (__pyx_t_7) { /* "numpy.pxd":821 * * if not PyDataType_HASFIELDS(child): * t = child.type_num # <<<<<<<<<<<<<< * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") */ __pyx_t_3 = PyInt_FromLong(__pyx_v_child->type_num); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 821; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_XDECREF(__pyx_v_t); __pyx_v_t = __pyx_t_3; __pyx_t_3 = 0; /* "numpy.pxd":822 * if not PyDataType_HASFIELDS(child): * t = child.type_num * if end - f < 5: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short.") * */ __pyx_t_7 = ((__pyx_v_end - __pyx_v_f) < 5); if (__pyx_t_7) { /* "numpy.pxd":823 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_t_3 = PyObject_Call(__pyx_builtin_RuntimeError, ((PyObject *)__pyx_k_tuple_12), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L12; } __pyx_L12:; /* "numpy.pxd":826 * * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" */ __pyx_t_3 = PyInt_FromLong(NPY_BYTE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 98; goto __pyx_L13; } /* "numpy.pxd":827 * # Until ticket #99 is fixed, use integers to avoid warnings * if t 
== NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" */ __pyx_t_5 = PyInt_FromLong(NPY_UBYTE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 66; goto __pyx_L13; } /* "numpy.pxd":828 * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" */ __pyx_t_3 = PyInt_FromLong(NPY_SHORT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 104; goto __pyx_L13; } /* "numpy.pxd":829 * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" */ __pyx_t_5 = PyInt_FromLong(NPY_USHORT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 72; goto __pyx_L13; } /* "numpy.pxd":830 * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" */ __pyx_t_3 = PyInt_FromLong(NPY_INT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; 
__pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 105; goto __pyx_L13; } /* "numpy.pxd":831 * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" */ __pyx_t_5 = PyInt_FromLong(NPY_UINT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 73; goto __pyx_L13; } /* "numpy.pxd":832 * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" */ __pyx_t_3 = PyInt_FromLong(NPY_LONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 108; goto __pyx_L13; } /* "numpy.pxd":833 * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" */ __pyx_t_5 = PyInt_FromLong(NPY_ULONG); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 76; goto __pyx_L13; } /* "numpy.pxd":834 * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" */ __pyx_t_3 = PyInt_FromLong(NPY_LONGLONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; 
__pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 113; goto __pyx_L13; } /* "numpy.pxd":835 * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" */ __pyx_t_5 = PyInt_FromLong(NPY_ULONGLONG); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 81; goto __pyx_L13; } /* "numpy.pxd":836 * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" */ __pyx_t_3 = PyInt_FromLong(NPY_FLOAT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 102; goto __pyx_L13; } /* "numpy.pxd":837 * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf */ __pyx_t_5 = PyInt_FromLong(NPY_DOUBLE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 100; goto __pyx_L13; } /* "numpy.pxd":838 * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd */ __pyx_t_3 
= PyInt_FromLong(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 103; goto __pyx_L13; } /* "numpy.pxd":839 * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg */ __pyx_t_5 = PyInt_FromLong(NPY_CFLOAT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 102; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L13; } /* "numpy.pxd":840 * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" */ __pyx_t_3 = PyInt_FromLong(NPY_CDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 100; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L13; } /* "numpy.pxd":841 * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: */ __pyx_t_5 = PyInt_FromLong(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 
0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 103; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L13; } /* "numpy.pxd":842 * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ __pyx_t_3 = PyInt_FromLong(NPY_OBJECT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 79; goto __pyx_L13; } /*else*/ { /* "numpy.pxd":844 * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * f += 1 * else: */ __pyx_t_5 = PyNumber_Remainder(((PyObject *)__pyx_kp_u_7), __pyx_v_t); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_5)); __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_t_5)); __Pyx_GIVEREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L13:; /* "numpy.pxd":845 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * f += 1 # <<<<<<<<<<<<<< * else: * # Cython ignores struct boundary information ("T{...}"), */ __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L11; } /*else*/ { /* "numpy.pxd":849 * # Cython ignores struct boundary information ("T{...}"), * # so don't output it * f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<< * return f * */ __pyx_t_12 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_12 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 849; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_f = __pyx_t_12; } __pyx_L11:; } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "numpy.pxd":850 * # so don't output it * f = _util_dtypestring(child, f, end, offset) * return f # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_f; goto __pyx_L0; __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); 
__Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_child); __Pyx_XDECREF(__pyx_v_fields); __Pyx_XDECREF(__pyx_v_childname); __Pyx_XDECREF(__pyx_v_new_offset); __Pyx_XDECREF(__pyx_v_t); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":965 * * * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< * cdef PyObject* baseptr * if base is None: */ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) { PyObject *__pyx_v_baseptr; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("set_array_base", 0); /* "numpy.pxd":967 * cdef inline void set_array_base(ndarray arr, object base): * cdef PyObject* baseptr * if base is None: # <<<<<<<<<<<<<< * baseptr = NULL * else: */ __pyx_t_1 = (__pyx_v_base == Py_None); if (__pyx_t_1) { /* "numpy.pxd":968 * cdef PyObject* baseptr * if base is None: * baseptr = NULL # <<<<<<<<<<<<<< * else: * Py_INCREF(base) # important to do this before decref below! */ __pyx_v_baseptr = NULL; goto __pyx_L3; } /*else*/ { /* "numpy.pxd":970 * baseptr = NULL * else: * Py_INCREF(base) # important to do this before decref below! # <<<<<<<<<<<<<< * baseptr = base * Py_XDECREF(arr.base) */ Py_INCREF(__pyx_v_base); /* "numpy.pxd":971 * else: * Py_INCREF(base) # important to do this before decref below! * baseptr = base # <<<<<<<<<<<<<< * Py_XDECREF(arr.base) * arr.base = baseptr */ __pyx_v_baseptr = ((PyObject *)__pyx_v_base); } __pyx_L3:; /* "numpy.pxd":972 * Py_INCREF(base) # important to do this before decref below! * baseptr = base * Py_XDECREF(arr.base) # <<<<<<<<<<<<<< * arr.base = baseptr * */ Py_XDECREF(__pyx_v_arr->base); /* "numpy.pxd":973 * baseptr = base * Py_XDECREF(arr.base) * arr.base = baseptr # <<<<<<<<<<<<<< * * cdef inline object get_array_base(ndarray arr): */ __pyx_v_arr->base = __pyx_v_baseptr; __Pyx_RefNannyFinishContext(); } /* "numpy.pxd":975 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("get_array_base", 0); /* "numpy.pxd":976 * * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: # <<<<<<<<<<<<<< * return None * else: */ __pyx_t_1 = (__pyx_v_arr->base == NULL); if (__pyx_t_1) { /* "numpy.pxd":977 * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: * return None # <<<<<<<<<<<<<< * else: * return arr.base */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_None); __pyx_r = Py_None; goto __pyx_L0; goto __pyx_L3; } /*else*/ { /* "numpy.pxd":979 * return None * else: * return arr.base # <<<<<<<<<<<<<< */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_arr->base)); __pyx_r = ((PyObject *)__pyx_v_arr->base); goto __pyx_L0; } __pyx_L3:; __pyx_r = Py_None; __Pyx_INCREF(Py_None); __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyMethodDef __pyx_methods[] = { {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 static struct PyModuleDef __pyx_moduledef = { PyModuleDef_HEAD_INIT, __Pyx_NAMESTR("onesample"), __Pyx_DOCSTR(__pyx_k_13), /* m_doc */ -1, /* m_size */ __pyx_methods /* m_methods */, NULL, /* m_reload */ NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif static __Pyx_StringTabEntry 
__pyx_string_tab[] = { {&__pyx_kp_u_1, __pyx_k_1, sizeof(__pyx_k_1), 0, 1, 0, 0}, {&__pyx_kp_u_11, __pyx_k_11, sizeof(__pyx_k_11), 0, 1, 0, 0}, {&__pyx_kp_s_14, __pyx_k_14, sizeof(__pyx_k_14), 0, 0, 1, 0}, {&__pyx_kp_s_17, __pyx_k_17, sizeof(__pyx_k_17), 0, 0, 1, 0}, {&__pyx_n_s_18, __pyx_k_18, sizeof(__pyx_k_18), 0, 0, 1, 1}, {&__pyx_kp_u_3, __pyx_k_3, sizeof(__pyx_k_3), 0, 1, 0, 0}, {&__pyx_kp_u_5, __pyx_k_5, sizeof(__pyx_k_5), 0, 1, 0, 0}, {&__pyx_kp_u_7, __pyx_k_7, sizeof(__pyx_k_7), 0, 1, 0, 0}, {&__pyx_kp_u_8, __pyx_k_8, sizeof(__pyx_k_8), 0, 1, 0, 0}, {&__pyx_n_s__MU, __pyx_k__MU, sizeof(__pyx_k__MU), 0, 0, 1, 1}, {&__pyx_n_s__Magics, __pyx_k__Magics, sizeof(__pyx_k__Magics), 0, 0, 1, 1}, {&__pyx_n_s__RuntimeError, __pyx_k__RuntimeError, sizeof(__pyx_k__RuntimeError), 0, 0, 1, 1}, {&__pyx_n_s__S2, __pyx_k__S2, sizeof(__pyx_k__S2), 0, 0, 1, 1}, {&__pyx_n_s__T, __pyx_k__T, sizeof(__pyx_k__T), 0, 0, 1, 1}, {&__pyx_n_s__V, __pyx_k__V, sizeof(__pyx_k__V), 0, 0, 1, 1}, {&__pyx_n_s__ValueError, __pyx_k__ValueError, sizeof(__pyx_k__ValueError), 0, 0, 1, 1}, {&__pyx_n_s__W, __pyx_k__W, sizeof(__pyx_k__W), 0, 0, 1, 1}, {&__pyx_n_s__Y, __pyx_k__Y, sizeof(__pyx_k__Y), 0, 0, 1, 1}, {&__pyx_n_s__Z, __pyx_k__Z, sizeof(__pyx_k__Z), 0, 0, 1, 1}, {&__pyx_n_s____main__, __pyx_k____main__, sizeof(__pyx_k____main__), 0, 0, 1, 1}, {&__pyx_n_s____test__, __pyx_k____test__, sizeof(__pyx_k____test__), 0, 0, 1, 1}, {&__pyx_n_s____version__, __pyx_k____version__, sizeof(__pyx_k____version__), 0, 0, 1, 1}, {&__pyx_n_s__axis, __pyx_k__axis, sizeof(__pyx_k__axis), 0, 0, 1, 1}, {&__pyx_n_s__base, __pyx_k__base, sizeof(__pyx_k__base), 0, 0, 1, 1}, {&__pyx_n_s__constraint, __pyx_k__constraint, sizeof(__pyx_k__constraint), 0, 0, 1, 1}, {&__pyx_n_s__dims, __pyx_k__dims, sizeof(__pyx_k__dims), 0, 0, 1, 1}, {&__pyx_n_s__elr, __pyx_k__elr, sizeof(__pyx_k__elr), 0, 0, 1, 1}, {&__pyx_n_s__elr_mfx, __pyx_k__elr_mfx, sizeof(__pyx_k__elr_mfx), 0, 0, 1, 1}, {&__pyx_n_s__flag_stat, __pyx_k__flag_stat, sizeof(__pyx_k__flag_stat), 0, 0, 1, 1}, {&__pyx_n_s__grubb, __pyx_k__grubb, sizeof(__pyx_k__grubb), 0, 0, 1, 1}, {&__pyx_n_s__i, __pyx_k__i, sizeof(__pyx_k__i), 0, 0, 1, 1}, {&__pyx_n_s__id, __pyx_k__id, sizeof(__pyx_k__id), 0, 0, 1, 1}, {&__pyx_n_s__idx, __pyx_k__idx, sizeof(__pyx_k__idx), 0, 0, 1, 1}, {&__pyx_n_s__laplace, __pyx_k__laplace, sizeof(__pyx_k__laplace), 0, 0, 1, 1}, {&__pyx_n_s__magic, __pyx_k__magic, sizeof(__pyx_k__magic), 0, 0, 1, 1}, {&__pyx_n_s__magics, __pyx_k__magics, sizeof(__pyx_k__magics), 0, 0, 1, 1}, {&__pyx_n_s__mean, __pyx_k__mean, sizeof(__pyx_k__mean), 0, 0, 1, 1}, {&__pyx_n_s__mean_gauss_mfx, __pyx_k__mean_gauss_mfx, sizeof(__pyx_k__mean_gauss_mfx), 0, 0, 1, 1}, {&__pyx_n_s__mean_mfx, __pyx_k__mean_mfx, sizeof(__pyx_k__mean_mfx), 0, 0, 1, 1}, {&__pyx_n_s__median, __pyx_k__median, sizeof(__pyx_k__median), 0, 0, 1, 1}, {&__pyx_n_s__median_mfx, __pyx_k__median_mfx, sizeof(__pyx_k__median_mfx), 0, 0, 1, 1}, {&__pyx_n_s__mu, __pyx_k__mu, sizeof(__pyx_k__mu), 0, 0, 1, 1}, {&__pyx_n_s__multi, __pyx_k__multi, sizeof(__pyx_k__multi), 0, 0, 1, 1}, {&__pyx_n_s__n, __pyx_k__n, sizeof(__pyx_k__n), 0, 0, 1, 1}, {&__pyx_n_s__niter, __pyx_k__niter, sizeof(__pyx_k__niter), 0, 0, 1, 1}, {&__pyx_n_s__np, __pyx_k__np, sizeof(__pyx_k__np), 0, 0, 1, 1}, {&__pyx_n_s__nsimu, __pyx_k__nsimu, sizeof(__pyx_k__nsimu), 0, 0, 1, 1}, {&__pyx_n_s__nsimu_max, __pyx_k__nsimu_max, sizeof(__pyx_k__nsimu_max), 0, 0, 1, 1}, {&__pyx_n_s__numpy, __pyx_k__numpy, sizeof(__pyx_k__numpy), 0, 0, 1, 1}, {&__pyx_n_s__pdf_fit_gmfx, 
__pyx_k__pdf_fit_gmfx, sizeof(__pyx_k__pdf_fit_gmfx), 0, 0, 1, 1}, {&__pyx_n_s__pdf_fit_mfx, __pyx_k__pdf_fit_mfx, sizeof(__pyx_k__pdf_fit_mfx), 0, 0, 1, 1}, {&__pyx_n_s__range, __pyx_k__range, sizeof(__pyx_k__range), 0, 0, 1, 1}, {&__pyx_n_s__s2, __pyx_k__s2, sizeof(__pyx_k__s2), 0, 0, 1, 1}, {&__pyx_n_s__sign, __pyx_k__sign, sizeof(__pyx_k__sign), 0, 0, 1, 1}, {&__pyx_n_s__sign_mfx, __pyx_k__sign_mfx, sizeof(__pyx_k__sign_mfx), 0, 0, 1, 1}, {&__pyx_n_s__simu, __pyx_k__simu, sizeof(__pyx_k__simu), 0, 0, 1, 1}, {&__pyx_n_s__stat, __pyx_k__stat, sizeof(__pyx_k__stat), 0, 0, 1, 1}, {&__pyx_n_s__stat_mfx, __pyx_k__stat_mfx, sizeof(__pyx_k__stat_mfx), 0, 0, 1, 1}, {&__pyx_n_s__stats, __pyx_k__stats, sizeof(__pyx_k__stats), 0, 0, 1, 1}, {&__pyx_n_s__student, __pyx_k__student, sizeof(__pyx_k__student), 0, 0, 1, 1}, {&__pyx_n_s__student_mfx, __pyx_k__student_mfx, sizeof(__pyx_k__student_mfx), 0, 0, 1, 1}, {&__pyx_n_s__t, __pyx_k__t, sizeof(__pyx_k__t), 0, 0, 1, 1}, {&__pyx_n_s__tukey, __pyx_k__tukey, sizeof(__pyx_k__tukey), 0, 0, 1, 1}, {&__pyx_n_s__v, __pyx_k__v, sizeof(__pyx_k__v), 0, 0, 1, 1}, {&__pyx_n_s__w, __pyx_k__w, sizeof(__pyx_k__w), 0, 0, 1, 1}, {&__pyx_n_s__wilcoxon, __pyx_k__wilcoxon, sizeof(__pyx_k__wilcoxon), 0, 0, 1, 1}, {&__pyx_n_s__wilcoxon_mfx, __pyx_k__wilcoxon_mfx, sizeof(__pyx_k__wilcoxon_mfx), 0, 0, 1, 1}, {&__pyx_n_s__y, __pyx_k__y, sizeof(__pyx_k__y), 0, 0, 1, 1}, {&__pyx_n_s__yp, __pyx_k__yp, sizeof(__pyx_k__yp), 0, 0, 1, 1}, {&__pyx_n_s__z, __pyx_k__z, sizeof(__pyx_k__z), 0, 0, 1, 1}, {&__pyx_n_s__zeros, __pyx_k__zeros, sizeof(__pyx_k__zeros), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_range = __Pyx_GetName(__pyx_b, __pyx_n_s__range); if (!__pyx_builtin_range) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_ValueError = __Pyx_GetName(__pyx_b, __pyx_n_s__ValueError); if (!__pyx_builtin_ValueError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_RuntimeError = __Pyx_GetName(__pyx_b, __pyx_n_s__RuntimeError); if (!__pyx_builtin_RuntimeError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} return 0; __pyx_L1_error:; return -1; } static int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "numpy.pxd":215 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_k_tuple_2 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_2); __Pyx_INCREF(((PyObject *)__pyx_kp_u_1)); PyTuple_SET_ITEM(__pyx_k_tuple_2, 0, ((PyObject *)__pyx_kp_u_1)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_1)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_2)); /* "numpy.pxd":219 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_k_tuple_4 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
__Pyx_GOTREF(__pyx_k_tuple_4); __Pyx_INCREF(((PyObject *)__pyx_kp_u_3)); PyTuple_SET_ITEM(__pyx_k_tuple_4, 0, ((PyObject *)__pyx_kp_u_3)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_3)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_4)); /* "numpy.pxd":257 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_k_tuple_6 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_6); __Pyx_INCREF(((PyObject *)__pyx_kp_u_5)); PyTuple_SET_ITEM(__pyx_k_tuple_6, 0, ((PyObject *)__pyx_kp_u_5)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_5)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_6)); /* "numpy.pxd":799 * * if (end - f) - (new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_k_tuple_9 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_9)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_9); __Pyx_INCREF(((PyObject *)__pyx_kp_u_8)); PyTuple_SET_ITEM(__pyx_k_tuple_9, 0, ((PyObject *)__pyx_kp_u_8)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_8)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_9)); /* "numpy.pxd":803 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_k_tuple_10 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_10)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_10); __Pyx_INCREF(((PyObject *)__pyx_kp_u_5)); PyTuple_SET_ITEM(__pyx_k_tuple_10, 0, ((PyObject *)__pyx_kp_u_5)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_5)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_10)); /* "numpy.pxd":823 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_k_tuple_12 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_12)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_12); __Pyx_INCREF(((PyObject *)__pyx_kp_u_11)); PyTuple_SET_ITEM(__pyx_k_tuple_12, 0, ((PyObject *)__pyx_kp_u_11)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_11)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_12)); /* "nipy/labs/group/onesample.pyx":88 * * # Test stat without mixed-effect correction * def stat(ndarray Y, id='student', double base=0.0, # <<<<<<<<<<<<<< * int axis=0, ndarray Magics=None): * """ */ __pyx_k_tuple_15 = PyTuple_New(20); if (unlikely(!__pyx_k_tuple_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 88; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_15); __Pyx_INCREF(((PyObject *)__pyx_n_s__Y)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 0, ((PyObject *)__pyx_n_s__Y)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Y)); __Pyx_INCREF(((PyObject *)__pyx_n_s__id)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 1, ((PyObject *)__pyx_n_s__id)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__id)); 
__Pyx_INCREF(((PyObject *)__pyx_n_s__base)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 2, ((PyObject *)__pyx_n_s__base)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__base)); __Pyx_INCREF(((PyObject *)__pyx_n_s__axis)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 3, ((PyObject *)__pyx_n_s__axis)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__axis)); __Pyx_INCREF(((PyObject *)__pyx_n_s__Magics)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 4, ((PyObject *)__pyx_n_s__Magics)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Magics)); __Pyx_INCREF(((PyObject *)__pyx_n_s__y)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 5, ((PyObject *)__pyx_n_s__y)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__y)); __Pyx_INCREF(((PyObject *)__pyx_n_s__t)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 6, ((PyObject *)__pyx_n_s__t)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__t)); __Pyx_INCREF(((PyObject *)__pyx_n_s__magics)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 7, ((PyObject *)__pyx_n_s__magics)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__magics)); __Pyx_INCREF(((PyObject *)__pyx_n_s__yp)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 8, ((PyObject *)__pyx_n_s__yp)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__yp)); __Pyx_INCREF(((PyObject *)__pyx_n_s__stat)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 9, ((PyObject *)__pyx_n_s__stat)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__stat)); __Pyx_INCREF(((PyObject *)__pyx_n_s__flag_stat)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 10, ((PyObject *)__pyx_n_s__flag_stat)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__flag_stat)); __Pyx_INCREF(((PyObject *)__pyx_n_s__n)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 11, ((PyObject *)__pyx_n_s__n)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__n)); __Pyx_INCREF(((PyObject *)__pyx_n_s__simu)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 12, ((PyObject *)__pyx_n_s__simu)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__simu)); __Pyx_INCREF(((PyObject *)__pyx_n_s__nsimu)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 13, ((PyObject *)__pyx_n_s__nsimu)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__nsimu)); __Pyx_INCREF(((PyObject *)__pyx_n_s__idx)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 14, ((PyObject *)__pyx_n_s__idx)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__idx)); __Pyx_INCREF(((PyObject *)__pyx_n_s__magic)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 15, ((PyObject *)__pyx_n_s__magic)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__magic)); __Pyx_INCREF(((PyObject *)__pyx_n_s__multi)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 16, ((PyObject *)__pyx_n_s__multi)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__multi)); __Pyx_INCREF(((PyObject *)__pyx_n_s__dims)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 17, ((PyObject *)__pyx_n_s__dims)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__dims)); __Pyx_INCREF(((PyObject *)__pyx_n_s__T)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 18, ((PyObject *)__pyx_n_s__T)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__T)); __Pyx_INCREF(((PyObject *)__pyx_n_s__i)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 19, ((PyObject *)__pyx_n_s__i)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__i)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_15)); __pyx_k_codeobj_16 = (PyObject*)__Pyx_PyCode_New(5, 0, 20, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_15, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_17, __pyx_n_s__stat, 88, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_16)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 88; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/group/onesample.pyx":157 * * * def stat_mfx(ndarray Y, ndarray V, id='student_mfx', double base=0.0, # <<<<<<<<<<<<<< * int axis=0, ndarray Magics=None, unsigned int niter=5): * """ */ __pyx_k_tuple_19 = PyTuple_New(24); if 
(unlikely(!__pyx_k_tuple_19)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 157; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_19); __Pyx_INCREF(((PyObject *)__pyx_n_s__Y)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 0, ((PyObject *)__pyx_n_s__Y)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Y)); __Pyx_INCREF(((PyObject *)__pyx_n_s__V)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 1, ((PyObject *)__pyx_n_s__V)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__V)); __Pyx_INCREF(((PyObject *)__pyx_n_s__id)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 2, ((PyObject *)__pyx_n_s__id)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__id)); __Pyx_INCREF(((PyObject *)__pyx_n_s__base)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 3, ((PyObject *)__pyx_n_s__base)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__base)); __Pyx_INCREF(((PyObject *)__pyx_n_s__axis)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 4, ((PyObject *)__pyx_n_s__axis)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__axis)); __Pyx_INCREF(((PyObject *)__pyx_n_s__Magics)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 5, ((PyObject *)__pyx_n_s__Magics)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Magics)); __Pyx_INCREF(((PyObject *)__pyx_n_s__niter)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 6, ((PyObject *)__pyx_n_s__niter)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__niter)); __Pyx_INCREF(((PyObject *)__pyx_n_s__y)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 7, ((PyObject *)__pyx_n_s__y)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__y)); __Pyx_INCREF(((PyObject *)__pyx_n_s__v)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 8, ((PyObject *)__pyx_n_s__v)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__v)); __Pyx_INCREF(((PyObject *)__pyx_n_s__t)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 9, ((PyObject *)__pyx_n_s__t)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__t)); __Pyx_INCREF(((PyObject *)__pyx_n_s__magics)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 10, ((PyObject *)__pyx_n_s__magics)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__magics)); __Pyx_INCREF(((PyObject *)__pyx_n_s__yp)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 11, ((PyObject *)__pyx_n_s__yp)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__yp)); __Pyx_INCREF(((PyObject *)__pyx_n_s__stat)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 12, ((PyObject *)__pyx_n_s__stat)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__stat)); __Pyx_INCREF(((PyObject *)__pyx_n_s__flag_stat)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 13, ((PyObject *)__pyx_n_s__flag_stat)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__flag_stat)); __Pyx_INCREF(((PyObject *)__pyx_n_s__n)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 14, ((PyObject *)__pyx_n_s__n)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__n)); __Pyx_INCREF(((PyObject *)__pyx_n_s__nsimu_max)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 15, ((PyObject *)__pyx_n_s__nsimu_max)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__nsimu_max)); __Pyx_INCREF(((PyObject *)__pyx_n_s__simu)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 16, ((PyObject *)__pyx_n_s__simu)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__simu)); __Pyx_INCREF(((PyObject *)__pyx_n_s__idx)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 17, ((PyObject *)__pyx_n_s__idx)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__idx)); __Pyx_INCREF(((PyObject *)__pyx_n_s__magic)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 18, ((PyObject *)__pyx_n_s__magic)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__magic)); __Pyx_INCREF(((PyObject *)__pyx_n_s__multi)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 19, ((PyObject *)__pyx_n_s__multi)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__multi)); __Pyx_INCREF(((PyObject *)__pyx_n_s__nsimu)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 20, ((PyObject *)__pyx_n_s__nsimu)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__nsimu)); 
__Pyx_INCREF(((PyObject *)__pyx_n_s__dims)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 21, ((PyObject *)__pyx_n_s__dims)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__dims)); __Pyx_INCREF(((PyObject *)__pyx_n_s__T)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 22, ((PyObject *)__pyx_n_s__T)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__T)); __Pyx_INCREF(((PyObject *)__pyx_n_s__i)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 23, ((PyObject *)__pyx_n_s__i)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__i)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_19)); __pyx_k_codeobj_20 = (PyObject*)__Pyx_PyCode_New(7, 0, 24, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_19, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_17, __pyx_n_s__stat_mfx, 157, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_20)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 157; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/group/onesample.pyx":230 * * * def pdf_fit_mfx(ndarray Y, ndarray V, int axis=0, int niter=5, int constraint=0, double base=0.0): # <<<<<<<<<<<<<< * """ * (W, Z) = pdf_fit_mfx(data=Y, vardata=V, axis=0, niter=5, constraint=False, base=0.0). */ __pyx_k_tuple_21 = PyTuple_New(17); if (unlikely(!__pyx_k_tuple_21)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 230; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_21); __Pyx_INCREF(((PyObject *)__pyx_n_s__Y)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 0, ((PyObject *)__pyx_n_s__Y)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Y)); __Pyx_INCREF(((PyObject *)__pyx_n_s__V)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 1, ((PyObject *)__pyx_n_s__V)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__V)); __Pyx_INCREF(((PyObject *)__pyx_n_s__axis)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 2, ((PyObject *)__pyx_n_s__axis)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__axis)); __Pyx_INCREF(((PyObject *)__pyx_n_s__niter)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 3, ((PyObject *)__pyx_n_s__niter)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__niter)); __Pyx_INCREF(((PyObject *)__pyx_n_s__constraint)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 4, ((PyObject *)__pyx_n_s__constraint)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__constraint)); __Pyx_INCREF(((PyObject *)__pyx_n_s__base)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 5, ((PyObject *)__pyx_n_s__base)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__base)); __Pyx_INCREF(((PyObject *)__pyx_n_s__y)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 6, ((PyObject *)__pyx_n_s__y)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__y)); __Pyx_INCREF(((PyObject *)__pyx_n_s__v)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 7, ((PyObject *)__pyx_n_s__v)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__v)); __Pyx_INCREF(((PyObject *)__pyx_n_s__w)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 8, ((PyObject *)__pyx_n_s__w)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__w)); __Pyx_INCREF(((PyObject *)__pyx_n_s__z)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 9, ((PyObject *)__pyx_n_s__z)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__z)); __Pyx_INCREF(((PyObject *)__pyx_n_s__stat)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 10, ((PyObject *)__pyx_n_s__stat)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__stat)); __Pyx_INCREF(((PyObject *)__pyx_n_s__multi)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 11, ((PyObject *)__pyx_n_s__multi)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__multi)); __Pyx_INCREF(((PyObject *)__pyx_n_s__n)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 12, ((PyObject *)__pyx_n_s__n)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__n)); __Pyx_INCREF(((PyObject *)__pyx_n_s__dims)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 13, ((PyObject *)__pyx_n_s__dims)); __Pyx_GIVEREF(((PyObject 
*)__pyx_n_s__dims)); __Pyx_INCREF(((PyObject *)__pyx_n_s__W)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 14, ((PyObject *)__pyx_n_s__W)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__W)); __Pyx_INCREF(((PyObject *)__pyx_n_s__Z)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 15, ((PyObject *)__pyx_n_s__Z)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Z)); __Pyx_INCREF(((PyObject *)__pyx_n_s__i)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 16, ((PyObject *)__pyx_n_s__i)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__i)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_21)); __pyx_k_codeobj_22 = (PyObject*)__Pyx_PyCode_New(6, 0, 17, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_21, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_17, __pyx_n_s__pdf_fit_mfx, 230, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_22)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 230; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/group/onesample.pyx":274 * * * def pdf_fit_gmfx(ndarray Y, ndarray V, int axis=0, int niter=5, int constraint=0, double base=0.0): # <<<<<<<<<<<<<< * """ * (MU, S2) = pdf_fit_gmfx(data=Y, vardata=V, axis=0, niter=5, constraint=False, base=0.0). */ __pyx_k_tuple_23 = PyTuple_New(17); if (unlikely(!__pyx_k_tuple_23)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 274; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_23); __Pyx_INCREF(((PyObject *)__pyx_n_s__Y)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 0, ((PyObject *)__pyx_n_s__Y)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Y)); __Pyx_INCREF(((PyObject *)__pyx_n_s__V)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 1, ((PyObject *)__pyx_n_s__V)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__V)); __Pyx_INCREF(((PyObject *)__pyx_n_s__axis)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 2, ((PyObject *)__pyx_n_s__axis)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__axis)); __Pyx_INCREF(((PyObject *)__pyx_n_s__niter)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 3, ((PyObject *)__pyx_n_s__niter)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__niter)); __Pyx_INCREF(((PyObject *)__pyx_n_s__constraint)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 4, ((PyObject *)__pyx_n_s__constraint)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__constraint)); __Pyx_INCREF(((PyObject *)__pyx_n_s__base)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 5, ((PyObject *)__pyx_n_s__base)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__base)); __Pyx_INCREF(((PyObject *)__pyx_n_s__y)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 6, ((PyObject *)__pyx_n_s__y)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__y)); __Pyx_INCREF(((PyObject *)__pyx_n_s__v)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 7, ((PyObject *)__pyx_n_s__v)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__v)); __Pyx_INCREF(((PyObject *)__pyx_n_s__mu)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 8, ((PyObject *)__pyx_n_s__mu)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__mu)); __Pyx_INCREF(((PyObject *)__pyx_n_s__s2)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 9, ((PyObject *)__pyx_n_s__s2)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__s2)); __Pyx_INCREF(((PyObject *)__pyx_n_s__stat)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 10, ((PyObject *)__pyx_n_s__stat)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__stat)); __Pyx_INCREF(((PyObject *)__pyx_n_s__multi)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 11, ((PyObject *)__pyx_n_s__multi)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__multi)); __Pyx_INCREF(((PyObject *)__pyx_n_s__n)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 12, ((PyObject *)__pyx_n_s__n)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__n)); __Pyx_INCREF(((PyObject *)__pyx_n_s__dims)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 13, ((PyObject *)__pyx_n_s__dims)); 
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__dims)); __Pyx_INCREF(((PyObject *)__pyx_n_s__MU)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 14, ((PyObject *)__pyx_n_s__MU)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__MU)); __Pyx_INCREF(((PyObject *)__pyx_n_s__S2)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 15, ((PyObject *)__pyx_n_s__S2)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__S2)); __Pyx_INCREF(((PyObject *)__pyx_n_s__i)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 16, ((PyObject *)__pyx_n_s__i)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__i)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_23)); __pyx_k_codeobj_24 = (PyObject*)__Pyx_PyCode_New(6, 0, 17, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_23, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_17, __pyx_n_s__pdf_fit_gmfx, 274, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_24)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 274; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_InitGlobals(void) { if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __pyx_int_15 = PyInt_FromLong(15); if (unlikely(!__pyx_int_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; return 0; __pyx_L1_error:; return -1; } #if PY_MAJOR_VERSION < 3 PyMODINIT_FUNC initonesample(void); /*proto*/ PyMODINIT_FUNC initonesample(void) #else PyMODINIT_FUNC PyInit_onesample(void); /*proto*/ PyMODINIT_FUNC PyInit_onesample(void) #endif { PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannyDeclarations #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit_onesample(void)", 0); if ( __Pyx_check_binary_version() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #ifdef __Pyx_CyFunction_USED if (__Pyx_CyFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? 
*/ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4(__Pyx_NAMESTR("onesample"), __pyx_methods, __Pyx_DOCSTR(__pyx_k_13), 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (!PyDict_GetItemString(modules, "nipy.labs.group.onesample")) { if (unlikely(PyDict_SetItemString(modules, "nipy.labs.group.onesample", __pyx_m) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } } #endif __pyx_b = PyImport_AddModule(__Pyx_NAMESTR(__Pyx_BUILTIN_MODULE_NAME)); if (unlikely(!__pyx_b)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #if CYTHON_COMPILING_IN_PYPY Py_INCREF(__pyx_b); #endif if (__Pyx_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; /*--- Initialize various global constants etc. ---*/ if (unlikely(__Pyx_InitGlobals() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (__pyx_module_is_main_nipy__labs__group__onesample) { if (__Pyx_SetAttrString(__pyx_m, "__name__", __pyx_n_s____main__) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; } /*--- Builtin init code ---*/ if (unlikely(__Pyx_InitCachedBuiltins() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Constants init code ---*/ if (unlikely(__Pyx_InitCachedConstants() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Global init code ---*/ /*--- Variable export code ---*/ /*--- Function export code ---*/ /*--- Type init code ---*/ /*--- Type import code ---*/ __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "type", #if CYTHON_COMPILING_IN_PYPY sizeof(PyTypeObject), #else sizeof(PyHeapTypeObject), #endif 0); if (unlikely(!__pyx_ptype_7cpython_4type_type)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_5numpy_dtype)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 155; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 169; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 178; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); 
if (unlikely(!__pyx_ptype_5numpy_ufunc)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 861; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Variable import code ---*/ /*--- Function import code ---*/ /*--- Execution code ---*/ /* "nipy/labs/group/onesample.pyx":10 * """ * * __version__ = '0.1' # <<<<<<<<<<<<<< * * */ if (PyObject_SetAttr(__pyx_m, __pyx_n_s____version__, ((PyObject *)__pyx_kp_s_14)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 10; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/group/onesample.pyx":63 * * # Initialize numpy * fffpy_import_array() # <<<<<<<<<<<<<< * import_array() * import numpy as np */ fffpy_import_array(); /* "nipy/labs/group/onesample.pyx":64 * # Initialize numpy * fffpy_import_array() * import_array() # <<<<<<<<<<<<<< * import numpy as np * */ import_array(); /* "nipy/labs/group/onesample.pyx":65 * fffpy_import_array() * import_array() * import numpy as np # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_Import(((PyObject *)__pyx_n_s__numpy), 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__np, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/group/onesample.pyx":69 * * # Stat dictionary * stats = {'mean': FFF_ONESAMPLE_EMPIRICAL_MEAN, # <<<<<<<<<<<<<< * 'median': FFF_ONESAMPLE_EMPIRICAL_MEDIAN, * 'student': FFF_ONESAMPLE_STUDENT, */ __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_1)); __pyx_t_2 = PyInt_FromLong(FFF_ONESAMPLE_EMPIRICAL_MEAN); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__mean), __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "nipy/labs/group/onesample.pyx":70 * # Stat dictionary * stats = {'mean': FFF_ONESAMPLE_EMPIRICAL_MEAN, * 'median': FFF_ONESAMPLE_EMPIRICAL_MEDIAN, # <<<<<<<<<<<<<< * 'student': FFF_ONESAMPLE_STUDENT, * 'laplace': FFF_ONESAMPLE_LAPLACE, */ __pyx_t_2 = PyInt_FromLong(FFF_ONESAMPLE_EMPIRICAL_MEDIAN); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 70; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__median), __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "nipy/labs/group/onesample.pyx":71 * stats = {'mean': FFF_ONESAMPLE_EMPIRICAL_MEAN, * 'median': FFF_ONESAMPLE_EMPIRICAL_MEDIAN, * 'student': FFF_ONESAMPLE_STUDENT, # <<<<<<<<<<<<<< * 'laplace': FFF_ONESAMPLE_LAPLACE, * 'tukey': FFF_ONESAMPLE_TUKEY, */ __pyx_t_2 = PyInt_FromLong(FFF_ONESAMPLE_STUDENT); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 71; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__student), __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* 
"nipy/labs/group/onesample.pyx":72 * 'median': FFF_ONESAMPLE_EMPIRICAL_MEDIAN, * 'student': FFF_ONESAMPLE_STUDENT, * 'laplace': FFF_ONESAMPLE_LAPLACE, # <<<<<<<<<<<<<< * 'tukey': FFF_ONESAMPLE_TUKEY, * 'sign': FFF_ONESAMPLE_SIGN_STAT, */ __pyx_t_2 = PyInt_FromLong(FFF_ONESAMPLE_LAPLACE); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 72; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__laplace), __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "nipy/labs/group/onesample.pyx":73 * 'student': FFF_ONESAMPLE_STUDENT, * 'laplace': FFF_ONESAMPLE_LAPLACE, * 'tukey': FFF_ONESAMPLE_TUKEY, # <<<<<<<<<<<<<< * 'sign': FFF_ONESAMPLE_SIGN_STAT, * 'wilcoxon': FFF_ONESAMPLE_WILCOXON, */ __pyx_t_2 = PyInt_FromLong(FFF_ONESAMPLE_TUKEY); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__tukey), __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "nipy/labs/group/onesample.pyx":74 * 'laplace': FFF_ONESAMPLE_LAPLACE, * 'tukey': FFF_ONESAMPLE_TUKEY, * 'sign': FFF_ONESAMPLE_SIGN_STAT, # <<<<<<<<<<<<<< * 'wilcoxon': FFF_ONESAMPLE_WILCOXON, * 'elr': FFF_ONESAMPLE_ELR, */ __pyx_t_2 = PyInt_FromLong(FFF_ONESAMPLE_SIGN_STAT); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__sign), __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "nipy/labs/group/onesample.pyx":75 * 'tukey': FFF_ONESAMPLE_TUKEY, * 'sign': FFF_ONESAMPLE_SIGN_STAT, * 'wilcoxon': FFF_ONESAMPLE_WILCOXON, # <<<<<<<<<<<<<< * 'elr': FFF_ONESAMPLE_ELR, * 'grubb': FFF_ONESAMPLE_GRUBB, */ __pyx_t_2 = PyInt_FromLong(FFF_ONESAMPLE_WILCOXON); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 75; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__wilcoxon), __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "nipy/labs/group/onesample.pyx":76 * 'sign': FFF_ONESAMPLE_SIGN_STAT, * 'wilcoxon': FFF_ONESAMPLE_WILCOXON, * 'elr': FFF_ONESAMPLE_ELR, # <<<<<<<<<<<<<< * 'grubb': FFF_ONESAMPLE_GRUBB, * 'mean_mfx': FFF_ONESAMPLE_EMPIRICAL_MEAN_MFX, */ __pyx_t_2 = PyInt_FromLong(FFF_ONESAMPLE_ELR); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 76; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__elr), __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "nipy/labs/group/onesample.pyx":77 * 'wilcoxon': FFF_ONESAMPLE_WILCOXON, * 'elr': FFF_ONESAMPLE_ELR, * 'grubb': FFF_ONESAMPLE_GRUBB, # <<<<<<<<<<<<<< * 'mean_mfx': FFF_ONESAMPLE_EMPIRICAL_MEAN_MFX, * 'median_mfx': FFF_ONESAMPLE_EMPIRICAL_MEDIAN_MFX, */ __pyx_t_2 = PyInt_FromLong(FFF_ONESAMPLE_GRUBB); if 
(unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 77; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__grubb), __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "nipy/labs/group/onesample.pyx":78 * 'elr': FFF_ONESAMPLE_ELR, * 'grubb': FFF_ONESAMPLE_GRUBB, * 'mean_mfx': FFF_ONESAMPLE_EMPIRICAL_MEAN_MFX, # <<<<<<<<<<<<<< * 'median_mfx': FFF_ONESAMPLE_EMPIRICAL_MEDIAN_MFX, * 'mean_gauss_mfx': FFF_ONESAMPLE_GAUSSIAN_MEAN_MFX, */ __pyx_t_2 = PyInt_FromLong(FFF_ONESAMPLE_EMPIRICAL_MEAN_MFX); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__mean_mfx), __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "nipy/labs/group/onesample.pyx":79 * 'grubb': FFF_ONESAMPLE_GRUBB, * 'mean_mfx': FFF_ONESAMPLE_EMPIRICAL_MEAN_MFX, * 'median_mfx': FFF_ONESAMPLE_EMPIRICAL_MEDIAN_MFX, # <<<<<<<<<<<<<< * 'mean_gauss_mfx': FFF_ONESAMPLE_GAUSSIAN_MEAN_MFX, * 'student_mfx': FFF_ONESAMPLE_STUDENT_MFX, */ __pyx_t_2 = PyInt_FromLong(FFF_ONESAMPLE_EMPIRICAL_MEDIAN_MFX); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 79; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__median_mfx), __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "nipy/labs/group/onesample.pyx":80 * 'mean_mfx': FFF_ONESAMPLE_EMPIRICAL_MEAN_MFX, * 'median_mfx': FFF_ONESAMPLE_EMPIRICAL_MEDIAN_MFX, * 'mean_gauss_mfx': FFF_ONESAMPLE_GAUSSIAN_MEAN_MFX, # <<<<<<<<<<<<<< * 'student_mfx': FFF_ONESAMPLE_STUDENT_MFX, * 'sign_mfx': FFF_ONESAMPLE_SIGN_STAT_MFX, */ __pyx_t_2 = PyInt_FromLong(FFF_ONESAMPLE_GAUSSIAN_MEAN_MFX); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 80; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__mean_gauss_mfx), __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "nipy/labs/group/onesample.pyx":81 * 'median_mfx': FFF_ONESAMPLE_EMPIRICAL_MEDIAN_MFX, * 'mean_gauss_mfx': FFF_ONESAMPLE_GAUSSIAN_MEAN_MFX, * 'student_mfx': FFF_ONESAMPLE_STUDENT_MFX, # <<<<<<<<<<<<<< * 'sign_mfx': FFF_ONESAMPLE_SIGN_STAT_MFX, * 'wilcoxon_mfx': FFF_ONESAMPLE_WILCOXON_MFX, */ __pyx_t_2 = PyInt_FromLong(FFF_ONESAMPLE_STUDENT_MFX); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 81; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__student_mfx), __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "nipy/labs/group/onesample.pyx":82 * 'mean_gauss_mfx': FFF_ONESAMPLE_GAUSSIAN_MEAN_MFX, * 'student_mfx': FFF_ONESAMPLE_STUDENT_MFX, * 'sign_mfx': FFF_ONESAMPLE_SIGN_STAT_MFX, # <<<<<<<<<<<<<< * 'wilcoxon_mfx': FFF_ONESAMPLE_WILCOXON_MFX, * 'elr_mfx': FFF_ONESAMPLE_ELR_MFX} */ __pyx_t_2 = 
PyInt_FromLong(FFF_ONESAMPLE_SIGN_STAT_MFX); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 82; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__sign_mfx), __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "nipy/labs/group/onesample.pyx":83 * 'student_mfx': FFF_ONESAMPLE_STUDENT_MFX, * 'sign_mfx': FFF_ONESAMPLE_SIGN_STAT_MFX, * 'wilcoxon_mfx': FFF_ONESAMPLE_WILCOXON_MFX, # <<<<<<<<<<<<<< * 'elr_mfx': FFF_ONESAMPLE_ELR_MFX} * */ __pyx_t_2 = PyInt_FromLong(FFF_ONESAMPLE_WILCOXON_MFX); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__wilcoxon_mfx), __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "nipy/labs/group/onesample.pyx":84 * 'sign_mfx': FFF_ONESAMPLE_SIGN_STAT_MFX, * 'wilcoxon_mfx': FFF_ONESAMPLE_WILCOXON_MFX, * 'elr_mfx': FFF_ONESAMPLE_ELR_MFX} # <<<<<<<<<<<<<< * * */ __pyx_t_2 = PyInt_FromLong(FFF_ONESAMPLE_ELR_MFX); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__elr_mfx), __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (PyObject_SetAttr(__pyx_m, __pyx_n_s__stats, ((PyObject *)__pyx_t_1)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; /* "nipy/labs/group/onesample.pyx":88 * * # Test stat without mixed-effect correction * def stat(ndarray Y, id='student', double base=0.0, # <<<<<<<<<<<<<< * int axis=0, ndarray Magics=None): * """ */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_5group_9onesample_1stat, NULL, __pyx_n_s_18); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 88; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__stat, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 88; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/group/onesample.pyx":157 * * * def stat_mfx(ndarray Y, ndarray V, id='student_mfx', double base=0.0, # <<<<<<<<<<<<<< * int axis=0, ndarray Magics=None, unsigned int niter=5): * """ */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_5group_9onesample_3stat_mfx, NULL, __pyx_n_s_18); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 157; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__stat_mfx, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 157; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/group/onesample.pyx":230 * * * def pdf_fit_mfx(ndarray Y, ndarray V, int axis=0, int niter=5, int constraint=0, double base=0.0): # <<<<<<<<<<<<<< * """ * (W, Z) = pdf_fit_mfx(data=Y, vardata=V, axis=0, niter=5, constraint=False, base=0.0). 
*/ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_5group_9onesample_5pdf_fit_mfx, NULL, __pyx_n_s_18); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 230; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__pdf_fit_mfx, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 230; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/group/onesample.pyx":274 * * * def pdf_fit_gmfx(ndarray Y, ndarray V, int axis=0, int niter=5, int constraint=0, double base=0.0): # <<<<<<<<<<<<<< * """ * (MU, S2) = pdf_fit_gmfx(data=Y, vardata=V, axis=0, niter=5, constraint=False, base=0.0). */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_5group_9onesample_7pdf_fit_gmfx, NULL, __pyx_n_s_18); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 274; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__pdf_fit_gmfx, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 274; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/group/onesample.pyx":1 * # -*- Mode: Python -*- Not really, but the syntax is close enough # <<<<<<<<<<<<<< * * */ __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_1)); if (PyObject_SetAttr(__pyx_m, __pyx_n_s____test__, ((PyObject *)__pyx_t_1)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; /* "numpy.pxd":975 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); if (__pyx_m) { __Pyx_AddTraceback("init nipy.labs.group.onesample", __pyx_clineno, __pyx_lineno, __pyx_filename); Py_DECREF(__pyx_m); __pyx_m = 0; } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init nipy.labs.group.onesample"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if PY_MAJOR_VERSION < 3 return; #else return __pyx_m; #endif } /* Runtime support code */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule((char *)modname); if (!m) goto end; p = PyObject_GetAttrString(m, (char *)"RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* CYTHON_REFNANNY */ static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name) { PyObject *result; result = PyObject_GetAttr(dict, name); if (!result) { if (dict != __pyx_b) { PyErr_Clear(); result = PyObject_GetAttr(__pyx_b, name); } if (!result) { PyErr_SetObject(PyExc_NameError, name); } } return result; } static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* 
function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%s() got an unexpected keyword argument '%s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%s() takes %s %" CYTHON_FORMAT_SSIZE_T "d positional argument%s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? 
"" : "s", num_found); } static int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, const char *name, int exact) { if (!type) { PyErr_Format(PyExc_SystemError, "Missing type object"); return 0; } if (none_allowed && obj == Py_None) return 1; else if (exact) { if (Py_TYPE(obj) == type) return 1; } else { if (PyObject_TypeCheck(obj, type)) return 1; } PyErr_Format(PyExc_TypeError, "Argument '%s' has incorrect type (expected %s, got %s)", name, type->tp_name, Py_TYPE(obj)->tp_name); return 0; } static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb) { #if CYTHON_COMPILING_IN_CPYTHON PyObject *tmp_type, *tmp_value, *tmp_tb; PyThreadState *tstate = PyThreadState_GET(); tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_Restore(type, value, tb); #endif } static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb) { #if CYTHON_COMPILING_IN_CPYTHON PyThreadState *tstate = PyThreadState_GET(); *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(type, value, tb); #endif } #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } #if PY_VERSION_HEX < 0x02050000 if (PyClass_Check(type)) { #else if (PyType_Check(type)) { #endif #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; #if PY_VERSION_HEX < 0x02050000 if (PyInstance_Check(type)) { type = (PyObject*) ((PyInstanceObject*)type)->in_class; Py_INCREF(type); } else { type = 0; PyErr_SetString(PyExc_TypeError, "raise: exception must be an old-style class or instance"); goto raise_error; } #else type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } #endif } __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else /* Python 3+ */ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } 
else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyEval_CallObject(type, args); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } if (cause && cause != Py_None) { PyObject *fixed_cause; if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { PyThreadState *tstate = PyThreadState_GET(); PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } } bad: Py_XDECREF(owned_instance); return; } #endif static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); } static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more than %" CYTHON_FORMAT_SSIZE_T "d value%s to unpack", index, (index == 1) ? "" : "s"); } static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); } static CYTHON_INLINE int __Pyx_IterFinish(void) { #if CYTHON_COMPILING_IN_CPYTHON PyThreadState *tstate = PyThreadState_GET(); PyObject* exc_type = tstate->curexc_type; if (unlikely(exc_type)) { if (likely(exc_type == PyExc_StopIteration) || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration)) { PyObject *exc_value, *exc_tb; exc_value = tstate->curexc_value; exc_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; Py_DECREF(exc_type); Py_XDECREF(exc_value); Py_XDECREF(exc_tb); return 0; } else { return -1; } } return 0; #else if (unlikely(PyErr_Occurred())) { if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) { PyErr_Clear(); return 0; } else { return -1; } } return 0; #endif } static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected) { if (unlikely(retval)) { Py_DECREF(retval); __Pyx_RaiseTooManyValuesError(expected); return -1; } else { return __Pyx_IterFinish(); } return 0; } static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { if (unlikely(!type)) { PyErr_Format(PyExc_SystemError, "Missing type object"); return 0; } if (likely(PyObject_TypeCheck(obj, type))) return 1; PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", Py_TYPE(obj)->tp_name, type->tp_name); return 0; } static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level) { PyObject *py_import = 0; PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; py_import = __Pyx_GetAttrString(__pyx_b, "__import__"); if (!py_import) goto bad; if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if 
(!empty_dict) goto bad; #if PY_VERSION_HEX >= 0x02050000 { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if (strchr(__Pyx_MODULE_NAME, '.')) { /* try package relative import first */ PyObject *py_level = PyInt_FromLong(1); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; /* try absolute import on failure */ } #endif if (!module) { PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); } } #else if (level>0) { PyErr_SetString(PyExc_RuntimeError, "Relative import is not supported for Python <=2.4."); goto bad; } module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, NULL); #endif bad: Py_XDECREF(empty_list); Py_XDECREF(py_import); Py_XDECREF(empty_dict); return module; } static CYTHON_INLINE PyObject *__Pyx_PyInt_to_py_Py_intptr_t(Py_intptr_t val) { const Py_intptr_t neg_one = (Py_intptr_t)-1, const_zero = (Py_intptr_t)0; const int is_unsigned = const_zero < neg_one; if ((sizeof(Py_intptr_t) == sizeof(char)) || (sizeof(Py_intptr_t) == sizeof(short))) { return PyInt_FromLong((long)val); } else if ((sizeof(Py_intptr_t) == sizeof(int)) || (sizeof(Py_intptr_t) == sizeof(long))) { if (is_unsigned) return PyLong_FromUnsignedLong((unsigned long)val); else return PyInt_FromLong((long)val); } else if (sizeof(Py_intptr_t) == sizeof(PY_LONG_LONG)) { if (is_unsigned) return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG)val); else return PyLong_FromLongLong((PY_LONG_LONG)val); } else { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; return _PyLong_FromByteArray(bytes, sizeof(Py_intptr_t), little, !is_unsigned); } } #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return ::std::complex< float >(x, y); } #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return x + y*(__pyx_t_float_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { __pyx_t_float_complex z; z.real = x; z.imag = y; return z; } #endif #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex a, __pyx_t_float_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float denom = b.real * b.real + b.imag * b.imag; z.real = (a.real * b.real + a.imag * b.imag) / denom; z.imag = (a.imag * b.real - a.real * b.imag) / denom; return z; 
} static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrtf(z.real*z.real + z.imag*z.imag); #else return hypotf(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { float denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(a, a); case 3: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(z, a); case 4: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } r = a.real; theta = 0; } else { r = __Pyx_c_absf(a); theta = atan2f(a.imag, a.real); } lnr = logf(r); z_r = expf(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cosf(z_theta); z.imag = z_r * sinf(z_theta); return z; } #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return ::std::complex< double >(x, y); } #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return x + y*(__pyx_t_double_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { __pyx_t_double_complex z; z.real = x; z.imag = y; return z; } #endif #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex a, __pyx_t_double_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double denom = b.real * b.real + b.imag * b.imag; z.real = (a.real * b.real + a.imag * b.imag) / denom; z.imag = (a.imag * b.real - a.real * b.imag) / denom; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex 
a) { __pyx_t_double_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrt(z.real*z.real + z.imag*z.imag); #else return hypot(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { double denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(a, a); case 3: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(z, a); case 4: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } r = a.real; theta = 0; } else { r = __Pyx_c_abs(a); theta = atan2(a.imag, a.real); } lnr = log(r); z_r = exp(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cos(z_theta); z.imag = z_r * sin(z_theta); return z; } #endif #endif static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject* x) { const unsigned char neg_one = (unsigned char)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(unsigned char) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(unsigned char)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to unsigned char" : "value too large to convert to unsigned char"); } return (unsigned char)-1; } return (unsigned char)val; } return (unsigned char)__Pyx_PyInt_AsUnsignedLong(x); } static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject* x) { const unsigned short neg_one = (unsigned short)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(unsigned short) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(unsigned short)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to unsigned short" : "value too large to convert to unsigned short"); } return (unsigned short)-1; } return (unsigned short)val; } return (unsigned short)__Pyx_PyInt_AsUnsignedLong(x); } static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject* x) { const unsigned int neg_one = (unsigned int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(unsigned int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(unsigned int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to unsigned int" : "value too large to convert to unsigned int"); } return (unsigned int)-1; } return (unsigned int)val; } return (unsigned int)__Pyx_PyInt_AsUnsignedLong(x); } static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject* x) { const char neg_one = (char)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(char) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(char)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? 
"can't convert negative value to char" : "value too large to convert to char"); } return (char)-1; } return (char)val; } return (char)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject* x) { const short neg_one = (short)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(short) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(short)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to short" : "value too large to convert to short"); } return (short)-1; } return (short)val; } return (short)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject* x) { const int neg_one = (int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to int" : "value too large to convert to int"); } return (int)-1; } return (int)val; } return (int)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject* x) { const signed char neg_one = (signed char)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(signed char) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(signed char)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to signed char" : "value too large to convert to signed char"); } return (signed char)-1; } return (signed char)val; } return (signed char)__Pyx_PyInt_AsSignedLong(x); } static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject* x) { const signed short neg_one = (signed short)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(signed short) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(signed short)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to signed short" : "value too large to convert to signed short"); } return (signed short)-1; } return (signed short)val; } return (signed short)__Pyx_PyInt_AsSignedLong(x); } static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject* x) { const signed int neg_one = (signed int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(signed int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(signed int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to signed int" : "value too large to convert to signed int"); } return (signed int)-1; } return (signed int)val; } return (signed int)__Pyx_PyInt_AsSignedLong(x); } static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject* x) { const int neg_one = (int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? 
"can't convert negative value to int" : "value too large to convert to int"); } return (int)-1; } return (int)val; } return (int)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject* x) { const unsigned long neg_one = (unsigned long)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned long"); return (unsigned long)-1; } return (unsigned long)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned long"); return (unsigned long)-1; } return (unsigned long)PyLong_AsUnsignedLong(x); } else { return (unsigned long)PyLong_AsLong(x); } } else { unsigned long val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (unsigned long)-1; val = __Pyx_PyInt_AsUnsignedLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject* x) { const unsigned PY_LONG_LONG neg_one = (unsigned PY_LONG_LONG)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned PY_LONG_LONG"); return (unsigned PY_LONG_LONG)-1; } return (unsigned PY_LONG_LONG)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned PY_LONG_LONG"); return (unsigned PY_LONG_LONG)-1; } return (unsigned PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); } else { return (unsigned PY_LONG_LONG)PyLong_AsLongLong(x); } } else { unsigned PY_LONG_LONG val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (unsigned PY_LONG_LONG)-1; val = __Pyx_PyInt_AsUnsignedLongLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject* x) { const long neg_one = (long)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long)-1; } return (long)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long)-1; } return (long)PyLong_AsUnsignedLong(x); } else { return (long)PyLong_AsLong(x); } } else { long val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (long)-1; val = __Pyx_PyInt_AsLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject* x) { const PY_LONG_LONG neg_one = (PY_LONG_LONG)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to PY_LONG_LONG"); return (PY_LONG_LONG)-1; } return (PY_LONG_LONG)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { 
PyErr_SetString(PyExc_OverflowError, "can't convert negative value to PY_LONG_LONG"); return (PY_LONG_LONG)-1; } return (PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); } else { return (PY_LONG_LONG)PyLong_AsLongLong(x); } } else { PY_LONG_LONG val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (PY_LONG_LONG)-1; val = __Pyx_PyInt_AsLongLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject* x) { const signed long neg_one = (signed long)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed long"); return (signed long)-1; } return (signed long)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed long"); return (signed long)-1; } return (signed long)PyLong_AsUnsignedLong(x); } else { return (signed long)PyLong_AsLong(x); } } else { signed long val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (signed long)-1; val = __Pyx_PyInt_AsSignedLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject* x) { const signed PY_LONG_LONG neg_one = (signed PY_LONG_LONG)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed PY_LONG_LONG"); return (signed PY_LONG_LONG)-1; } return (signed PY_LONG_LONG)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed PY_LONG_LONG"); return (signed PY_LONG_LONG)-1; } return (signed PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); } else { return (signed PY_LONG_LONG)PyLong_AsLongLong(x); } } else { signed PY_LONG_LONG val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (signed PY_LONG_LONG)-1; val = __Pyx_PyInt_AsSignedLongLong(tmp); Py_DECREF(tmp); return val; } } static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); #if PY_VERSION_HEX < 0x02050000 return PyErr_Warn(NULL, message); #else return PyErr_WarnEx(NULL, message, 1); #endif } return 0; } #ifndef __PYX_HAVE_RT_ImportModule #define __PYX_HAVE_RT_ImportModule static PyObject *__Pyx_ImportModule(const char *name) { PyObject *py_name = 0; PyObject *py_module = 0; py_name = __Pyx_PyIdentifier_FromString(name); if (!py_name) goto bad; py_module = PyImport_Import(py_name); Py_DECREF(py_name); return py_module; bad: Py_XDECREF(py_name); return 0; } #endif #ifndef __PYX_HAVE_RT_ImportType #define __PYX_HAVE_RT_ImportType static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict) { PyObject *py_module = 0; PyObject *result = 0; PyObject *py_name = 0; char warning[200]; 
py_module = __Pyx_ImportModule(module_name); if (!py_module) goto bad; py_name = __Pyx_PyIdentifier_FromString(class_name); if (!py_name) goto bad; result = PyObject_GetAttr(py_module, py_name); Py_DECREF(py_name); py_name = 0; Py_DECREF(py_module); py_module = 0; if (!result) goto bad; if (!PyType_Check(result)) { PyErr_Format(PyExc_TypeError, "%s.%s is not a type object", module_name, class_name); goto bad; } if (!strict && (size_t)((PyTypeObject *)result)->tp_basicsize > size) { PyOS_snprintf(warning, sizeof(warning), "%s.%s size changed, may indicate binary incompatibility", module_name, class_name); #if PY_VERSION_HEX < 0x02050000 if (PyErr_Warn(NULL, warning) < 0) goto bad; #else if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; #endif } else if ((size_t)((PyTypeObject *)result)->tp_basicsize != size) { PyErr_Format(PyExc_ValueError, "%s.%s has the wrong size, try recompiling", module_name, class_name); goto bad; } return (PyTypeObject *)result; bad: Py_XDECREF(py_module); Py_XDECREF(result); return NULL; } #endif static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = (start + end) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, new_max*sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( 
const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, /*int argcount,*/ 0, /*int kwonlyargcount,*/ 0, /*int nlocals,*/ 0, /*int stacksize,*/ 0, /*int flags,*/ __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, /*int firstlineno,*/ __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_globals = 0; PyFrameObject *py_frame = 0; py_code = __pyx_find_code_object(c_line ? c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? c_line : py_line, py_code); } py_globals = PyModule_GetDict(__pyx_m); if (!py_globals) goto bad; py_frame = PyFrame_New( PyThreadState_GET(), /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ py_globals, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; py_frame->f_lineno = py_line; PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else /* Python 3+ has unicode identifiers */ if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; ++t; } return 0; } /* Type Conversion Functions */ static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x) { PyNumberMethods *m; const char *name = NULL; PyObject *res = NULL; #if PY_VERSION_HEX < 0x03000000 if (PyInt_Check(x) || PyLong_Check(x)) #else if (PyLong_Check(x)) #endif return Py_INCREF(x), x; m = Py_TYPE(x)->tp_as_number; #if PY_VERSION_HEX < 0x03000000 if (m && m->nb_int) { name = "int"; res = PyNumber_Int(x); } else if (m && m->nb_long) { name = "long"; res = PyNumber_Long(x); } #else if (m && m->nb_int) { name = 
"int"; res = PyNumber_Long(x); } #endif if (res) { #if PY_VERSION_HEX < 0x03000000 if (!PyInt_Check(res) && !PyLong_Check(res)) { #else if (!PyLong_Check(res)) { #endif PyErr_Format(PyExc_TypeError, "__%s__ returned non-%s (type %.200s)", name, name, Py_TYPE(res)->tp_name); Py_DECREF(res); return NULL; } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject* x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { #if PY_VERSION_HEX < 0x02050000 if (ival <= LONG_MAX) return PyInt_FromLong((long)ival); else { unsigned char *bytes = (unsigned char *) &ival; int one = 1; int little = (int)*(unsigned char*)&one; return _PyLong_FromByteArray(bytes, sizeof(size_t), little, 0); } #else return PyInt_FromSize_t(ival); #endif } static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject* x) { unsigned PY_LONG_LONG val = __Pyx_PyInt_AsUnsignedLongLong(x); if (unlikely(val == (unsigned PY_LONG_LONG)-1 && PyErr_Occurred())) { return (size_t)-1; } else if (unlikely(val != (unsigned PY_LONG_LONG)(size_t)val)) { PyErr_SetString(PyExc_OverflowError, "value too large to convert to size_t"); return (size_t)-1; } return (size_t)val; } #endif /* Py_PYTHON_H */ nipy-0.3.0/nipy/labs/group/onesample.pyx000066400000000000000000000213121210344137400202500ustar00rootroot00000000000000# -*- Mode: Python -*- Not really, but the syntax is close enough """ Routines for massively univariate random-effect and mixed-effect analysis. Author: Alexis Roche, 2008. """ __version__ = '0.1' # Includes from fff cimport * # Exports from fff_onesample_stat.h cdef extern from "fff_onesample_stat.h": ctypedef enum fff_onesample_stat_flag: FFF_ONESAMPLE_EMPIRICAL_MEAN = 0 FFF_ONESAMPLE_EMPIRICAL_MEDIAN = 1 FFF_ONESAMPLE_STUDENT = 2 FFF_ONESAMPLE_LAPLACE = 3 FFF_ONESAMPLE_TUKEY = 4 FFF_ONESAMPLE_SIGN_STAT = 5 FFF_ONESAMPLE_WILCOXON = 6 FFF_ONESAMPLE_ELR = 7 FFF_ONESAMPLE_GRUBB = 8 FFF_ONESAMPLE_EMPIRICAL_MEAN_MFX = 10, FFF_ONESAMPLE_EMPIRICAL_MEDIAN_MFX = 11, FFF_ONESAMPLE_STUDENT_MFX = 12, FFF_ONESAMPLE_SIGN_STAT_MFX = 15, FFF_ONESAMPLE_WILCOXON_MFX = 16, FFF_ONESAMPLE_ELR_MFX = 17, FFF_ONESAMPLE_GAUSSIAN_MEAN_MFX = 19 ctypedef struct fff_onesample_stat: pass ctypedef struct fff_onesample_stat_mfx: unsigned int niter unsigned int constraint fff_onesample_stat* fff_onesample_stat_new(size_t n, fff_onesample_stat_flag flag, double base) void fff_onesample_stat_delete(fff_onesample_stat* thisone) double fff_onesample_stat_eval(fff_onesample_stat* thisone, fff_vector* x) fff_onesample_stat_mfx* fff_onesample_stat_mfx_new(size_t n, fff_onesample_stat_flag flag, double base) void fff_onesample_stat_mfx_delete(fff_onesample_stat_mfx* thisone) double fff_onesample_stat_mfx_eval(fff_onesample_stat_mfx* thisone, fff_vector* x, fff_vector* vx) void fff_onesample_stat_mfx_pdf_fit(fff_vector* w, fff_vector* z, fff_onesample_stat_mfx* thisone, fff_vector* x, fff_vector* vx) void fff_onesample_stat_gmfx_pdf_fit(double* mu, double* v, fff_onesample_stat_mfx* thisone, fff_vector* x, fff_vector* vx) void fff_onesample_permute_signs(fff_vector* xx, fff_vector* x, double magic) # Initialize numpy fffpy_import_array() import_array() import numpy as np # Stat dictionary stats = {'mean': FFF_ONESAMPLE_EMPIRICAL_MEAN, 'median': FFF_ONESAMPLE_EMPIRICAL_MEDIAN, 'student': FFF_ONESAMPLE_STUDENT, 'laplace': 
FFF_ONESAMPLE_LAPLACE, 'tukey': FFF_ONESAMPLE_TUKEY, 'sign': FFF_ONESAMPLE_SIGN_STAT, 'wilcoxon': FFF_ONESAMPLE_WILCOXON, 'elr': FFF_ONESAMPLE_ELR, 'grubb': FFF_ONESAMPLE_GRUBB, 'mean_mfx': FFF_ONESAMPLE_EMPIRICAL_MEAN_MFX, 'median_mfx': FFF_ONESAMPLE_EMPIRICAL_MEDIAN_MFX, 'mean_gauss_mfx': FFF_ONESAMPLE_GAUSSIAN_MEAN_MFX, 'student_mfx': FFF_ONESAMPLE_STUDENT_MFX, 'sign_mfx': FFF_ONESAMPLE_SIGN_STAT_MFX, 'wilcoxon_mfx': FFF_ONESAMPLE_WILCOXON_MFX, 'elr_mfx': FFF_ONESAMPLE_ELR_MFX} # Test stat without mixed-effect correction def stat(ndarray Y, id='student', double base=0.0, int axis=0, ndarray Magics=None): """ T = stat(Y, id='student', base=0.0, axis=0, magics=None). Compute a one-sample test statistic over a number of deterministic or random permutations. """ cdef fff_vector *y, *t, *magics, *yp cdef fff_onesample_stat* stat cdef fff_onesample_stat_flag flag_stat = stats[id] cdef unsigned int n cdef unsigned long int simu, nsimu, idx cdef double magic cdef fffpy_multi_iterator* multi # Get number of observations n = Y.shape[axis] # Read out magic numbers if Magics == None: magics = fff_vector_new(1) magics.data[0] = 0 ## Just to make sure else: magics = fff_vector_fromPyArray(Magics) # Create output array nsimu = magics.size dims = [Y.shape[i] for i in range(Y.ndim)] dims[axis] = nsimu T = np.zeros(dims) # Create local structure stat = fff_onesample_stat_new(n, flag_stat, base) yp = fff_vector_new(n) # Multi-iterator multi = fffpy_multi_iterator_new(2, axis, Y, T) # Vector views y = multi.vector[0] t = multi.vector[1] # Loop for simu from 0 <= simu < nsimu: # Set the magic number magic = magics.data[simu*magics.stride] # Reset the multi-iterator fffpy_multi_iterator_reset(multi); # Perform the loop idx = simu*t.stride while(multi.index < multi.size): fff_onesample_permute_signs(yp, y, magic) t.data[idx] = fff_onesample_stat_eval(stat, yp) fffpy_multi_iterator_update(multi) # Free memory fffpy_multi_iterator_delete(multi) fff_vector_delete(yp) fff_vector_delete(magics) fff_onesample_stat_delete(stat) # Return return T def stat_mfx(ndarray Y, ndarray V, id='student_mfx', double base=0.0, int axis=0, ndarray Magics=None, unsigned int niter=5): """ T = stat_mfx(Y, V, id='student_mfx', base=0.0, axis=0, magics=None, niter=5). Compute a one-sample test statistic, with mixed-effect correction, over a number of deterministic or random permutations. 
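    A minimal usage sketch (array shapes are hypothetical; Y holds the
    per-subject effect estimates and V the matching first-level variances,
    with subjects along `axis`):

        import numpy as np
        from nipy.labs.group.onesample import stat_mfx
        Y = np.random.randn(12, 1000)      # 12 subjects, 1000 voxels
        V = 0.5 * np.ones((12, 1000))      # first-level variances
        T = stat_mfx(Y, V, id='student_mfx', axis=0).squeeze()  # (1000,) map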
""" cdef fff_vector *y, *v, *t, *magics, *yp cdef fff_onesample_stat_mfx* stat cdef fff_onesample_stat_flag flag_stat = stats[id] cdef int n cdef unsigned long int nsimu_max, simu, idx cdef double magic cdef fffpy_multi_iterator* multi # Get number of observations n = Y.shape[axis] # Read out magic numbers if Magics == None: magics = fff_vector_new(1) magics.data[0] = 0 ## Just to make sure else: magics = fff_vector_fromPyArray(Magics) # Create output array nsimu = magics.size dims = [Y.shape[i] for i in range(Y.ndim)] dims[axis] = nsimu T = np.zeros(dims) # Create local structure stat = fff_onesample_stat_mfx_new(n, flag_stat, base) stat.niter = niter yp = fff_vector_new(n) # Multi-iterator multi = fffpy_multi_iterator_new(3, axis, Y, V, T) # Vector views y = multi.vector[0] v = multi.vector[1] t = multi.vector[2] # Loop for simu from 0 <= simu < nsimu: # Set the magic number magic = magics.data[simu*magics.stride] # Reset the multi-iterator fffpy_multi_iterator_reset(multi) # Perform the loop idx = simu*t.stride while(multi.index < multi.size): fff_onesample_permute_signs(yp, y, magic) t.data[idx] = fff_onesample_stat_mfx_eval(stat, yp, v) fffpy_multi_iterator_update(multi) # Free memory fffpy_multi_iterator_delete(multi) fff_vector_delete(yp) fff_vector_delete(magics) fff_onesample_stat_mfx_delete(stat) # Return return T def pdf_fit_mfx(ndarray Y, ndarray V, int axis=0, int niter=5, int constraint=0, double base=0.0): """ (W, Z) = pdf_fit_mfx(data=Y, vardata=V, axis=0, niter=5, constraint=False, base=0.0). Comments to follow. """ cdef fff_vector *y, *v, *w, *z cdef fff_onesample_stat_mfx* stat cdef fffpy_multi_iterator* multi cdef int n = Y.shape[axis] # Create output array dims = [Y.shape[i] for i in range(Y.ndim)] W = np.zeros(dims) Z = np.zeros(dims) # Create local structure stat = fff_onesample_stat_mfx_new(n, FFF_ONESAMPLE_EMPIRICAL_MEAN_MFX, base) stat.niter = niter stat.constraint = constraint # Multi-iterator multi = fffpy_multi_iterator_new(4, axis, Y, V, W, Z) # Create views on nd-arrays y = multi.vector[0] v = multi.vector[1] w = multi.vector[2] z = multi.vector[3] # Loop while(multi.index < multi.size): fff_onesample_stat_mfx_pdf_fit(w, z, stat, y, v) fffpy_multi_iterator_update(multi) # Delete local structures fffpy_multi_iterator_delete(multi) fff_onesample_stat_mfx_delete(stat) # Return return W, Z def pdf_fit_gmfx(ndarray Y, ndarray V, int axis=0, int niter=5, int constraint=0, double base=0.0): """ (MU, S2) = pdf_fit_gmfx(data=Y, vardata=V, axis=0, niter=5, constraint=False, base=0.0). Comments to follow. 
""" cdef fff_vector *y, *v, *mu, *s2 cdef fff_onesample_stat_mfx* stat cdef fffpy_multi_iterator* multi cdef int n = Y.shape[axis] # Create output array dims = [Y.shape[i] for i in range(Y.ndim)] dims[axis] = 1 MU = np.zeros(dims) S2 = np.zeros(dims) # Create local structure stat = fff_onesample_stat_mfx_new(n, FFF_ONESAMPLE_STUDENT_MFX, base) stat.niter = niter stat.constraint = constraint # Multi-iterator multi = fffpy_multi_iterator_new(4, axis, Y, V, MU, S2) # Create views on nd-arrays y = multi.vector[0] v = multi.vector[1] mu = multi.vector[2] s2 = multi.vector[3] # Loop while(multi.index < multi.size): fff_onesample_stat_gmfx_pdf_fit(mu.data, s2.data, stat, y, v) fffpy_multi_iterator_update(multi) # Delete local structures fffpy_multi_iterator_delete(multi) fff_onesample_stat_mfx_delete(stat) # Return return MU, S2 nipy-0.3.0/nipy/labs/group/permutation_test.py000066400000000000000000001055621210344137400215150ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """One and two sample permutation tests. """ # Third-party imports import numpy as np import scipy.misc as sm import warnings # Our own imports from nipy.algorithms.graph import wgraph_from_3d_grid from nipy.algorithms.graph.field import Field, field_from_graph_and_data from ..utils import zscore from .onesample import stat as os_stat, stat_mfx as os_stat_mfx from .twosample import stat as ts_stat, stat_mfx as ts_stat_mfx # Default parameters DEF_NDRAWS = int(1e5) DEF_NPERMS = int(1e4) DEF_NITER = 5 DEF_STAT_ONESAMPLE = 'student' DEF_STAT_TWOSAMPLE = 'student' #=========================================== #=========================================== # Cluster extraction functions #=========================================== #=========================================== def extract_clusters_from_thresh(T,XYZ,th,k=18): """ Extract clusters from statistical map above specified threshold In: T (p) statistical map XYZ (3,p) voxels coordinates th threshold k the number of neighbours considered. (6,18 or 26) Out: labels (p) cluster labels """ labels = -np.ones(len(T),int) I = np.where(T >= th)[0] if len(I)>0: SupraThreshXYZ = XYZ[:, I] CC_label = wgraph_from_3d_grid(SupraThreshXYZ.T, k).cc() labels[I] = CC_label return labels def max_dist(XYZ,I,J): """ Maximum distance between two set of points In: XYZ (3,p) voxels coordinates I (q) index of points J (r) index of points Out: d """ if min(min(np.shape(I)), min(np.shape(J))) == 0: return 0 else: # Square distance matrix D = np.sum(np.square(XYZ[:,I].reshape(3, len(I), 1) - XYZ[:, J].reshape(3, 1, len(J))), axis=0) return np.sqrt((D).max()) def extract_clusters_from_diam(T,XYZ,th,diam,k=18): """ Extract clusters from a statistical map under diameter constraint and above given threshold In: T (p) statistical map XYZ (3,p) voxels coordinates th minimum threshold diam maximal diameter (in voxels) k the number of neighbours considered. (6,18 or 26) Out: labels (p) cluster labels Comment by alexis-roche, September 15th 2012: this function was originally developed by Merlin Keller in an attempt to generalize classical cluster-level analysis by subdividing clusters in blobs with limited diameter (at least, this is my understanding). This piece of code seems to have remained very experimental and its usefulness in real-world neuroimaging image studies is still to be demonstrated. 
""" CClabels = extract_clusters_from_thresh(T,XYZ,th,k) nCC = CClabels.max() + 1 labels = -np.ones(len(CClabels),int) # Calls _extract_clusters_from_diam, a recursive function, and # catches an exception if maximum recursion depth is reached try: labels = _extract_clusters_from_diam(labels, T, XYZ, th, diam, k, nCC, CClabels) except RuntimeError: warnings.warn('_extract_clusters_from_diam did not converge') return labels def _extract_clusters_from_diam(labels, T, XYZ, th, diam, k, nCC, CClabels): """ This recursive function modifies the `labels` input array. """ clust_label = 0 for i in xrange(nCC): #print "Searching connected component ", i, " out of ", nCC I = np.where(CClabels==i)[0] extCC = len(I) if extCC <= (diam+1)**3: diamCC = max_dist(XYZ,I,I) else: diamCC = diam+1 if diamCC <= diam: labels[I] = np.zeros(extCC,int) + clust_label #print "cluster ", clust_label, ", diam = ", diamCC #print "ext = ", len(I), ", diam = ", max_dist(XYZ,I,I) clust_label += 1 else: # build the field p = len(T[I]) F = field_from_graph_and_data( wgraph_from_3d_grid(XYZ[:, I].T, k), np.reshape(T[I],(p,1))) # compute the blobs idx, parent,label = F.threshold_bifurcations(0,th) nidx = np.size(idx) height = np.array([np.ceil(np.sum(label == i) ** (1./3)) for i in np.arange(nidx)]) #root = nidx-1 root = np.where(np.arange(nidx)==parent)[0] # Can constraint be met within current region? Imin = I[T[I]>=height[root]] extmin = len(Imin) if extmin <= (diam+1)**3: dmin = max_dist(XYZ,Imin,Imin) else: dmin = diam+1 if dmin <= diam:# If so, search for the largest cluster meeting the constraint Iclust = Imin # Smallest cluster J = I[T[I]height[root]] rest_labels = extract_clusters_from_diam(T[Irest],XYZ[:,Irest],th,diam,k) rest_labels[rest_labels>=0] += clust_label clust_label = rest_labels.max() + 1 labels[Irest] = rest_labels return labels def extract_clusters_from_graph(T, G, th): """ This returns a label vector of same size as T, defining connected components for subgraph of weighted graph G containing vertices s.t. 
T >= th """ labels = np.zeros(len(T), int) - 1 I = T >= th nlabels = I.sum() if nlabels > 0: labels[I] = G.subgraph(I).cc() return labels #====================================== #====================================== # Useful functions #====================================== #====================================== def sorted_values(a): """ Extract list of distinct sortedvalues from an array """ if len(a) == 0: return [] else: m = min(a) L = [m] L.extend( sorted_values(a[a>m]) ) return L def onesample_stat(Y, V, stat_id, base=0.0, axis=0, Magics=None, niter=DEF_NITER): """ Wrapper for os_stat and os_stat_mfx """ if stat_id.find('_mfx')<0: return os_stat(Y, stat_id, base, axis, Magics) else: return os_stat_mfx(Y, V, stat_id, base, axis, Magics, niter) def twosample_stat(Y1, V1, Y2, V2, stat_id, axis=0, Magics=None, niter=DEF_NITER): """ Wrapper for ts_stat and ts_stat_mfx """ if stat_id.find('_mfx')<0: return ts_stat(Y1, Y2, stat_id, axis, Magics) else: return ts_stat_mfx(Y1, V1, Y2, V2, stat_id, axis, Magics, niter) #================================================= #================================================= # Compute cluster and region summary statistics #================================================= #================================================= def compute_cluster_stats(Tvalues, labels, random_Tvalues, cluster_stats=["size","Fisher"]): """ size_values, Fisher_values = compute_cluster_stats(Tvalues, labels, random_Tvalues, cluster_stats=["size","Fisher"]) Compute summary statistics in each cluster In: see permutation_test_onesample class docstring Out: size_values Array of size nclust, or None if "size" not in cluster_stats Fisher_values Array of size nclust, or None if "Fisher" not in cluster_stats """ nclust = max(labels)+1 if nclust == 0: if "size" in cluster_stats: size_values = np.array([0]) else: size_values = None if "Fisher" in cluster_stats: Fisher_values = np.array([0]) else: Fisher_values = None else: if "size" in cluster_stats: size_values = np.zeros(nclust,int) else: size_values = None if "Fisher" in cluster_stats: Fisher_values = np.zeros(nclust,float) ndraws = len(random_Tvalues) pseudo_p_values = 1 - np.searchsorted(random_Tvalues,Tvalues)/float(ndraws) else: Fisher_values = None for i in xrange(nclust): I = np.where(labels==i)[0] if "size" in cluster_stats: size_values[i] = len(I) if "Fisher" in cluster_stats: Fisher_values[i] = -np.sum(np.log(pseudo_p_values[I])) return size_values, Fisher_values def compute_region_stat(Tvalues, labels, label_values, random_Tvalues): """ Fisher_values = compute_region_stat(Tvalues, labels, label_values, random_Tvalues) Compute summary statistics in each cluster In: see permutation_test_onesample class docstring Out: Fisher_values Array of size nregions """ Fisher_values = np.zeros(len(label_values),float) pseudo_p_values = 1 - np.searchsorted(random_Tvalues,Tvalues)/float(len(random_Tvalues)) for i in xrange(len(label_values)): I = np.where(labels==label_values[i])[0] Fisher_values[i] = -np.sum(np.log(pseudo_p_values[I])) return Fisher_values def peak_XYZ(XYZ, Tvalues, labels, label_values): """ Returns (3, n_labels) array of maximum T values coordinates for each label value """ C = np.zeros((3, len(label_values)), int) for i in xrange(len(label_values)): I = np.where(labels == label_values[i])[0] C[:, i] = XYZ[:, I[np.argmax(Tvalues[I])]] return C #====================================== #====================================== # Generic permutation test class #====================================== 
#====================================== class permutation_test(object): """ This generic permutation test class contains the calibration method which is common to the derived classes permutation_test_onesample and permutation_test_twosample (as well as other common methods) """ #======================================================= # Permutation test calibration of summary statistics #======================================================= def calibrate(self, nperms=DEF_NPERMS, clusters=None, cluster_stats=["size","Fisher"], regions=None, region_stats=["Fisher"], verbose=False): """ Calibrate cluster and region summary statistics using permutation test Parameters ---------- nperms : int, optional Number of random permutations generated. Exhaustive permutations are used only if nperms=None, or exceeds total number of possible permutations clusters : list [(thresh1,diam1),(thresh2,diam2),...], optional List of cluster extraction pairs: (thresh,diam). *thresh* provides T values threshold, *diam* is the maximum cluster diameter, in voxels. Using *diam*==None yields classical suprathreshold clusters. cluster_stats : list [stat1,...], optional List of cluster summary statistics id (either 'size' or 'Fisher') regions : list [Labels1,Labels2,...] List of region labels arrays, of size (p,) where p is the number of voxels region_stats : list [stat1,...], optional List of cluster summary statistics id (only 'Fisher' supported for now) verbose : boolean, optional "Chatterbox" mode switch Returns ------- voxel_results : dict A dictionary containing the following keys: ``p_values`` (p,) Uncorrected p-values.``Corr_p_values`` (p,) Corrected p-values, computed by the Tmax procedure. ``perm_maxT_values`` (nperms) values of the maximum statistic under permutation. cluster_results : list [results1,results2,...] List of permutation test results for each cluster extraction pair. These are dictionaries with the following keys "thresh", "diam", "labels", "expected_voxels_per_cluster", "expected_number_of_clusters", and "peak_XYZ" if XYZ field is nonempty and for each summary statistic id "S": "size_values", "size_p_values", "S_Corr_p_values", "perm_size_values", "perm_maxsize_values" region_results :list [results1,results2,...] List of permutation test results for each region labels arrays. These are dictionaries with the following keys: "label_values", "peak_XYZ" (if XYZ field nonempty) and for each summary statistic id "S": "size_values", "size_p_values", "perm_size_values", "perm_maxsize_values" """ # Permutation indices if self.nsamples ==1: n, p = self.data.shape[self.axis], self.data.shape[1-self.axis] max_nperms = 2**n elif self.nsamples == 2: n1,p = self.data1.shape[self.axis], self.data1.shape[1-self.axis] n2 = self.data2.shape[self.axis] max_nperms = sm.comb(n1+n2,n1,exact=1) data = np.concatenate((self.data1,self.data2), self.axis) if self.vardata1 != None: vardata = np.concatenate((self.vardata1,self.vardata2), self.axis) if nperms == None or nperms >= max_nperms: magic_numbers = np.arange(max_nperms) else: #magic_numbers = np.random.randint(max_nperms,size=nperms) # np.random.randint does not handle longint! 
# So we use the following hack instead: magic_numbers = np.random.uniform(max_nperms,size=nperms) # Initialize cluster_results cluster_results = [] if clusters != None: for (thresh,diam) in clusters: if diam == None: if self.XYZ == None: labels = extract_clusters_from_graph(self.Tvalues,self.G,thresh) else: labels = extract_clusters_from_thresh(self.Tvalues,self.XYZ,thresh) else: labels = extract_clusters_from_diam(self.Tvalues,self.XYZ,thresh,diam) results = {"thresh" : thresh, "diam" : diam, "labels" : labels} size_values, Fisher_values = compute_cluster_stats(self.Tvalues, labels, self.random_Tvalues, cluster_stats) nclust = labels.max() + 1 results["expected_voxels_per_thresh"] = 0.0 results["expected_number_of_clusters"] = 0.0 if self.XYZ != None: results["peak_XYZ"] = peak_XYZ(self.XYZ, self.Tvalues, labels, np.arange(nclust)) if "size" in cluster_stats: results["size_values"] = size_values results["perm_size_values"] = [] results["perm_maxsize_values"] = np.zeros(len(magic_numbers),int) if "Fisher" in cluster_stats: results["Fisher_values"] = Fisher_values results["perm_Fisher_values"] = [] results["perm_maxFisher_values"] = np.zeros(len(magic_numbers),float) cluster_results.append( results ) # Initialize region_results region_results = [] if regions != None: for labels in regions: label_values = sorted_values(labels) nregions = len(label_values) results = { "label_values" : label_values } if self.XYZ != None: results["peak_XYZ"] = peak_XYZ(self.XYZ, self.Tvalues, labels, label_values) if "Fisher" in region_stats: results["Fisher_values"] = compute_region_stat(self.Tvalues, labels, label_values, self.random_Tvalues) results["perm_Fisher_values"] = np.zeros((nregions,len(magic_numbers)),float) results["Fisher_p_values"] = np.zeros(nregions,float) results["Fisher_Corr_p_values"] = np.zeros(nregions,float) region_results.append( results ) # Permutation test p_values = np.zeros(p,float) Corr_p_values = np.zeros(p,float) nmagic = len(magic_numbers) perm_maxT_values = np.zeros(nmagic, float) for j in xrange(nmagic): m = magic_numbers[j] if verbose: print "Permutation", j+1, "out of", nmagic # T values under permutation if self.nsamples == 1: #perm_Tvalues = onesample_stat(self.data, self.vardata, self.stat_id, self.base, self.axis, np.array([m]), self.niter).squeeze() rand_sign = (np.random.randint(2,size=n)*2-1).reshape(n,1) rand_data = rand_sign*self.data if self.vardata == None: rand_vardata = None else: rand_vardata = rand_sign*self.vardata perm_Tvalues = onesample_stat(rand_data, rand_vardata, self.stat_id, self.base, self.axis, None, self.niter).squeeze() elif self.nsamples == 2: perm_Tvalues = twosample_stat(self.data1, self.vardata1, self.data2, self.vardata2, self.stat_id, self.axis, np.array([m]), self.niter).squeeze() rand_perm = np.random.permutation(np.arange(n1+n2)) rand_data1 = data[:n1] rand_data2 = data[n1:] if self.vardata1 == None: rand_vardata1 = None rand_vardata2 = None else: rand_vardata1 = vardata[:n1] rand_vardata2 = vardata[n1:] # update p values p_values += perm_Tvalues >= self.Tvalues Corr_p_values += max(perm_Tvalues) >= self.Tvalues perm_maxT_values[j] = max(perm_Tvalues) # Update cluster_results if clusters != None: for i in xrange(len(clusters)): thresh, diam = clusters[i] if diam == None: if self.XYZ == None: perm_labels = extract_clusters_from_graph(perm_Tvalues,self.G,thresh) else: perm_labels = extract_clusters_from_thresh(perm_Tvalues,self.XYZ,thresh) else: perm_labels = extract_clusters_from_diam(perm_Tvalues,self.XYZ,thresh,diam) perm_size_values, 
perm_Fisher_values = compute_cluster_stats(perm_Tvalues, perm_labels, self.random_Tvalues, cluster_stats) perm_nclust = labels.max() + 1 cluster_results[i]["expected_voxels_per_thresh"] += perm_size_values.sum()/float(nclust) cluster_results[i]["expected_number_of_clusters"] += nclust if "size" in cluster_stats: cluster_results[i]["perm_size_values"][:0] = perm_size_values cluster_results[i]["perm_maxsize_values"][j] = max(perm_size_values) if "Fisher" in cluster_stats: cluster_results[i]["perm_Fisher_values"][:0] = perm_Fisher_values cluster_results[i]["perm_maxFisher_values"][j] = max(perm_Fisher_values) # Update region_results if regions != None: for i in xrange(len(regions)): labels = regions[i] label_values = region_results[i]["label_values"] nregions = len(label_values) if "Fisher" in region_stats: perm_Fisher_values = compute_region_stat(perm_Tvalues, labels, label_values, self.random_Tvalues) region_results[i]["perm_Fisher_values"][:,j] = perm_Fisher_values # Compute p-values for clusters summary statistics if clusters != None: for i in xrange(len(clusters)): if "size" in cluster_stats: cluster_results[i]["perm_size_values"] = np.array(cluster_results[i]["perm_size_values"]) cluster_results[i]["perm_size_values"].sort() cluster_results[i]["perm_maxsize_values"].sort() cluster_results[i]["size_p_values"] = 1 - np.searchsorted(cluster_results[i]["perm_size_values"], cluster_results[i]["size_values"])/float(cluster_results[i]["expected_number_of_clusters"]) cluster_results[i]["size_Corr_p_values"] = 1 - np.searchsorted(cluster_results[i]["perm_maxsize_values"], cluster_results[i]["size_values"])/float(nmagic) if "Fisher" in cluster_stats: cluster_results[i]["perm_Fisher_values"] = np.array(cluster_results[i]["perm_Fisher_values"]) cluster_results[i]["perm_Fisher_values"].sort() cluster_results[i]["perm_maxFisher_values"].sort() cluster_results[i]["Fisher_p_values"] = 1 - np.searchsorted(cluster_results[i]["perm_Fisher_values"], cluster_results[i]["Fisher_values"])/float(cluster_results[i]["expected_number_of_clusters"]) cluster_results[i]["Fisher_Corr_p_values"] = 1 - np.searchsorted(cluster_results[i]["perm_maxFisher_values"], cluster_results[i]["Fisher_values"])/float(nmagic) cluster_results[i]["expected_voxels_per_thresh"] /= float(nmagic) cluster_results[i]["expected_number_of_clusters"] /= float(nmagic) # Compute p-values for regions summary statistics if regions != None: for i in xrange(len(regions)): if "Fisher" in region_stats: sorted_perm_Fisher_values = np.sort(region_results[i]["perm_Fisher_values"],axis=1) label_values = region_results[i]["label_values"] nregions = len(label_values) # Compute uncorrected p-values for j in xrange(nregions): region_results[i]["Fisher_p_values"][j] = 1 - np.searchsorted(sorted_perm_Fisher_values[j],region_results[i]["Fisher_values"][j])/float(nmagic) #Compute corrected p-values perm_Fisher_p_values = np.zeros((nregions,nmagic),float) for j in xrange(nregions): I = np.argsort(region_results[i]["perm_Fisher_values"][j]) perm_Fisher_p_values[j][I] = 1 - np.arange(1,nmagic+1)/float(nmagic) perm_min_Fisher_p_values = np.sort(perm_Fisher_p_values.min(axis=0)) region_results[i]["Fisher_Corr_p_values"] = 1 - np.searchsorted(-perm_min_Fisher_p_values,-region_results[i]["Fisher_p_values"])/float(nmagic) voxel_results = {'p_values':p_values/float(nmagic), 'Corr_p_values':Corr_p_values/float(nmagic), 'perm_maxT_values':perm_maxT_values} return voxel_results, cluster_results, region_results def height_threshold(self, pval): """ Return the uniform height 
threshold matching a given permutation-based P-value. """ tvals = self.random_Tvalues ndraws = tvals.size idx = np.ceil(ndraws*(1-pval)) if idx >= ndraws: return np.inf candidate = tvals[idx] if tvals[max(0, idx-1)]= ndraws: return np.inf return tvals[idx] def pvalue(self, Tvalues=None): """ Return uncorrected voxel-level pseudo p-values. """ if Tvalues == None: Tvalues = self.Tvalues return 1 - np.searchsorted(self.random_Tvalues, Tvalues)/float(self.ndraws) def zscore(self, Tvalues=None): """ Return z score corresponding to the uncorrected voxel-level pseudo p-value. """ if Tvalues == None: Tvalues = self.Tvalues return zscore(self.pvalue(Tvalues)) #====================================== #====================================== # One sample permutation test class #====================================== #====================================== class permutation_test_onesample(permutation_test): """ Class derived from the generic permutation_test class. Inherits the calibrate method """ def __init__(self, data, XYZ, axis=0, vardata=None, stat_id=DEF_STAT_ONESAMPLE, base=0.0, niter=DEF_NITER, ndraws=DEF_NDRAWS): """ Initialize permutation_test_onesample instance, compute statistic values in each voxel and under permutation In: data data array XYZ voxels coordinates axis Subject axis in data vardata variance (same shape as data) optional (if None, mfx statistics cannot be used) stat_id choice of test statistic (see onesample.stats for a list of possible stats) base mean signal under H0 niter number of iterations of EM algorithm ndraws Number of generated random t values Out: self.Tvalues voxelwise test statistic values self.random_Tvalues sorted statistic values in random voxels and under random sign permutation """ # Create data fields n,p = data.shape[axis], data.shape[1-axis] self.data = data self.stat_id = stat_id self.XYZ = XYZ self.axis = axis self.vardata = vardata self.niter = niter self.base = base self.ndraws = ndraws self.Tvalues = onesample_stat(data, vardata, stat_id, base, axis, Magics=None, niter=niter).squeeze() self.nsamples = 1 # Compute statistic values in random voxels and under random permutations # Use a self.verbose flag for this output? #print "Computing average null distribution of test statistic..." self.random_Tvalues = np.zeros(ndraws,float) # Random voxel selection I = np.random.randint(0,p,size=ndraws) if axis == 0: rand_data = data[:,I] if vardata == None: rand_vardata = None else: rand_vardata = vardata[:,I] else: rand_data = data[I] if vardata == None: rand_vardata = None else: rand_vardata = vardata[I] # Random sign permutation rand_sign = (np.random.binomial(1,0.5,size = n*ndraws)*2-1).reshape(n,ndraws) if axis == 1: rand_sign = rand_sign.transpose() rand_data *= rand_sign self.random_Tvalues = onesample_stat(rand_data, rand_vardata, stat_id, base, axis).squeeze() self.random_Tvalues.sort() #================================================================== #================================================================== # One sample permutation test class with arbitrary graph structure #================================================================== #================================================================== class permutation_test_onesample_graph(permutation_test): """ Class derived from the generic permutation_test class. 
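    Unlike permutation_test_onesample, spatial structure is given by an
    arbitrary weighted graph G (one vertex per voxel) instead of 3D voxel
    coordinates, so suprathreshold clusters are extracted as connected
    components of the thresholded subgraph. A rough construction sketch
    (data are hypothetical):

        import numpy as np
        from nipy.algorithms.graph import wgraph_from_3d_grid
        XYZ = np.indices((10, 10, 10)).reshape(3, 1000)
        G = wgraph_from_3d_grid(XYZ.T, 18)          # one vertex per voxel
        data = np.random.randn(12, 1000)            # 12 subjects, 1000 voxels
        P = permutation_test_onesample_graph(data, G, ndraws=10000)
        Z = P.zscore()                              # voxel-level z-scores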

#==================================================================
#==================================================================
# One sample permutation test class with arbitrary graph structure
#==================================================================
#==================================================================

class permutation_test_onesample_graph(permutation_test):
    """
    Class derived from the generic permutation_test class.
    Inherits the calibrate method
    """

    def __init__(self, data, G, axis=0, vardata=None,
                 stat_id=DEF_STAT_ONESAMPLE, base=0.0,
                 niter=DEF_NITER, ndraws=DEF_NDRAWS):
        """
        Initialize permutation_test_onesample instance, compute statistic
        values in each voxel and under permutation

        In:  data      data array
             G         weighted graph (each vertex corresponds to a voxel)
             axis      Subject axis in data
             vardata   variance (same shape as data)
                       optional (if None, mfx statistics cannot be used)
             stat_id   choice of test statistic
                       (see onesample.stats for a list of possible stats)
             base      mean signal under H0
             niter     number of iterations of EM algorithm
             ndraws    Number of generated random t values

        Out: self.Tvalues         voxelwise test statistic values
             self.random_Tvalues  sorted statistic values in random voxels
                                  and under random sign permutation
        """
        # Create data fields
        n, p = data.shape[axis], data.shape[1 - axis]
        self.data = data
        self.stat_id = stat_id
        self.XYZ = None
        self.G = G
        self.axis = axis
        self.vardata = vardata
        self.niter = niter
        self.base = base
        self.ndraws = ndraws
        self.Tvalues = onesample_stat(data, vardata, stat_id, base, axis, Magics=None, niter=niter).squeeze()
        self.nsamples = 1
        # Compute statistic values in random voxels and under random permutations
        # Use a self.verbose flag for this output?
        #print "Computing average null distribution of test statistic..."
        self.random_Tvalues = np.zeros(ndraws, float)
        # Random voxel selection
        I = np.random.randint(0, p, size=ndraws)
        if axis == 0:
            rand_data = data[:, I]
            if vardata == None:
                rand_vardata = None
            else:
                rand_vardata = vardata[:, I]
        else:
            rand_data = data[I]
            if vardata == None:
                rand_vardata = None
            else:
                rand_vardata = vardata[I]
        # Random sign permutation
        rand_sign = (np.random.binomial(1, 0.5, size=n * ndraws) * 2 - 1).reshape(n, ndraws)
        if axis == 1:
            rand_sign = rand_sign.transpose()
        rand_data *= rand_sign
        self.random_Tvalues = onesample_stat(rand_data, rand_vardata, stat_id, base, axis).squeeze()
        self.random_Tvalues.sort()


#======================================
#======================================
# Two sample permutation test class
#======================================
#======================================

class permutation_test_twosample(permutation_test):
    """
    Class derived from the generic permutation_test class.
    Inherits the calibrate method
    """

    def __init__(self, data1, data2, XYZ, axis=0, vardata1=None, vardata2=None,
                 stat_id=DEF_STAT_TWOSAMPLE, niter=DEF_NITER, ndraws=DEF_NDRAWS):
        """
        Initialize permutation_test_twosample instance, compute statistic
        values in each voxel and under permutation

        In:  data1, data2        data arrays
             XYZ                 voxels coordinates
             axis                Subject axis in data
             vardata1, vardata2  variance (same shape as data)
                                 optional (if None, mfx statistics cannot be used)
             stat_id             choice of test statistic
                                 (see onesample.stats for a list of possible stats)
             niter               number of iterations of EM algorithm
             ndraws              Number of generated random t values

        Out: self.Tvalues         voxelwise test statistic values
             self.random_Tvalues  sorted statistic values in random voxels
                                  and under random sign permutation
        """
        # Create data fields
        n1, p = data1.shape[axis], data1.shape[1 - axis]
        n2 = data2.shape[axis]
        self.data1 = data1
        self.data2 = data2
        self.stat_id = stat_id
        self.XYZ = XYZ
        self.axis = axis
        self.vardata1 = vardata1
        self.vardata2 = vardata2
        self.niter = niter
        self.ndraws = ndraws
        self.Tvalues = twosample_stat(data1, vardata1, data2, vardata2, stat_id, axis, Magics=None, niter=niter).squeeze()
        self.nsamples = 2
        # Compute statistic values in random voxels and under random permutations
        # Use a self.verbose flag for this output?
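        # The block below builds the null distribution: draw `ndraws` random
        # voxels, pool the two samples at those voxels, apply an independent
        # random permutation of the n1+n2 subjects for each draw, split the
        # permuted data back into two pseudo-samples, then compute and sort
        # the two-sample statistic.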
#print "Computing average null distribution of test statistic..." self.random_Tvalues = np.zeros(ndraws,float) # Random voxel selection I = np.random.randint(0,p,size=ndraws) if axis == 0: perm_data = np.zeros((n1+n2,ndraws),float) perm_data[:n1] = data1[:,I] perm_data[n1:] = data2[:,I] if vardata1 != None: perm_vardata = np.zeros((n1+n2,ndraws),float) perm_vardata[:n1] = vardata1[:,I] perm_vardata[n1:] = vardata2[:,I] else: perm_data = np.zeros((ndraws,n1+n2),float) perm_data[:,:n1] = data1[I] perm_data[:,n1:] = data2[I] if vardata1 != None: perm_vardata = np.zeros((ndraws, n1+n2),float) perm_vardata[:,:n1] = vardata1[I] perm_vardata[:,n1:] = vardata2[I] rand_perm = np.array([np.random.permutation(np.arange(n1+n2)) for i in xrange(ndraws)]).transpose() ravel_rand_perm = rand_perm*ndraws + np.arange(ndraws).reshape(1,ndraws) if axis == 0: perm_data = perm_data.ravel()[ravel_rand_perm.ravel()].reshape(n1+n2,ndraws) if vardata1 != None: perm_vardata = perm_vardata.ravel()[ravel_rand_perm.ravel()].reshape(n1+n2,ndraws) else: perm_data = (perm_data.transpose().ravel()[ravel_rand_perm.ravel()].reshape(n1+n2,ndraws)).transpose() if vardata1 != None: perm_vardata = (perm_vardata.transpose().ravel()[ravel_rand_perm.ravel()].reshape(n1+n2,ndraws)).transpose() perm_data1 = perm_data[:n1] perm_data2 = perm_data[n1:] if vardata1 == None: perm_vardata1 = None perm_vardata2 = None else: perm_vardata1 = perm_vardata[:n1] perm_vardata2 = perm_vardata[n1:] self.random_Tvalues = twosample_stat(perm_data1, perm_vardata1, perm_data2, perm_vardata2, stat_id, axis).squeeze() self.random_Tvalues.sort() nipy-0.3.0/nipy/labs/group/routines.c000066400000000000000000006666201210344137400175600ustar00rootroot00000000000000/* Generated by Cython 0.17.4 on Sat Jan 12 17:27:38 2013 */ #define PY_SSIZE_T_CLEAN #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02040000 #error Cython requires Python 2.4+. #else #include /* For offsetof */ #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #endif #if PY_VERSION_HEX < 0x02050000 typedef int Py_ssize_t; #define PY_SSIZE_T_MAX INT_MAX #define PY_SSIZE_T_MIN INT_MIN #define PY_FORMAT_SIZE_T "" #define CYTHON_FORMAT_SSIZE_T "" #define PyInt_FromSsize_t(z) PyInt_FromLong(z) #define PyInt_AsSsize_t(o) __Pyx_PyInt_AsInt(o) #define PyNumber_Index(o) ((PyNumber_Check(o) && !PyFloat_Check(o)) ? 
PyNumber_Int(o) : \ (PyErr_Format(PyExc_TypeError, \ "expected index value, got %.200s", Py_TYPE(o)->tp_name), \ (PyObject*)0)) #define __Pyx_PyIndex_Check(o) (PyNumber_Check(o) && !PyFloat_Check(o) && \ !PyComplex_Check(o)) #define PyIndex_Check __Pyx_PyIndex_Check #define PyErr_WarnEx(category, message, stacklevel) PyErr_Warn(category, message) #define __PYX_BUILD_PY_SSIZE_T "i" #else #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #define __Pyx_PyIndex_Check PyIndex_Check #endif #if PY_VERSION_HEX < 0x02060000 #define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt) #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type) #define Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size) #define PyVarObject_HEAD_INIT(type, size) \ PyObject_HEAD_INIT(type) size, #define PyType_Modified(t) typedef struct { void *buf; PyObject *obj; Py_ssize_t len; Py_ssize_t itemsize; int readonly; int ndim; char *format; Py_ssize_t *shape; Py_ssize_t *strides; Py_ssize_t *suboffsets; void *internal; } Py_buffer; #define PyBUF_SIMPLE 0 #define PyBUF_WRITABLE 0x0001 #define PyBUF_FORMAT 0x0004 #define PyBUF_ND 0x0008 #define PyBUF_STRIDES (0x0010 | PyBUF_ND) #define PyBUF_C_CONTIGUOUS (0x0020 | PyBUF_STRIDES) #define PyBUF_F_CONTIGUOUS (0x0040 | PyBUF_STRIDES) #define PyBUF_ANY_CONTIGUOUS (0x0080 | PyBUF_STRIDES) #define PyBUF_INDIRECT (0x0100 | PyBUF_STRIDES) #define PyBUF_RECORDS (PyBUF_STRIDES | PyBUF_FORMAT | PyBUF_WRITABLE) #define PyBUF_FULL (PyBUF_INDIRECT | PyBUF_FORMAT | PyBUF_WRITABLE) typedef int (*getbufferproc)(PyObject *, Py_buffer *, int); typedef void (*releasebufferproc)(PyObject *, Py_buffer *); #endif #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \ PyCode_New(a, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #endif #if PY_MAJOR_VERSION < 3 && PY_MINOR_VERSION < 6 #define PyUnicode_FromString(s) PyUnicode_Decode(s, strlen(s), "UTF-8", "strict") #endif #if PY_MAJOR_VERSION >= 3 #define Py_TPFLAGS_CHECKTYPES 0 #define Py_TPFLAGS_HAVE_INDEX 0 #endif #if (PY_VERSION_HEX < 0x02060000) || (PY_MAJOR_VERSION >= 3) #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ? 
\ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #else #define CYTHON_PEP393_ENABLED 0 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_READ(k, d, i) ((k=k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #endif #if PY_VERSION_HEX < 0x02060000 #define PyBytesObject PyStringObject #define PyBytes_Type PyString_Type #define PyBytes_Check PyString_Check #define PyBytes_CheckExact PyString_CheckExact #define PyBytes_FromString PyString_FromString #define PyBytes_FromStringAndSize PyString_FromStringAndSize #define PyBytes_FromFormat PyString_FromFormat #define PyBytes_DecodeEscape PyString_DecodeEscape #define PyBytes_AsString PyString_AsString #define PyBytes_AsStringAndSize PyString_AsStringAndSize #define PyBytes_Size PyString_Size #define PyBytes_AS_STRING PyString_AS_STRING #define PyBytes_GET_SIZE PyString_GET_SIZE #define PyBytes_Repr PyString_Repr #define PyBytes_Concat PyString_Concat #define PyBytes_ConcatAndDel PyString_ConcatAndDel #endif #if PY_VERSION_HEX < 0x02060000 #define PySet_Check(obj) PyObject_TypeCheck(obj, &PySet_Type) #define PyFrozenSet_Check(obj) PyObject_TypeCheck(obj, &PyFrozenSet_Type) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_VERSION_HEX < 0x03020000 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if (PY_MAJOR_VERSION < 3) || (PY_VERSION_HEX >= 0x03010300) #define __Pyx_PySequence_GetSlice(obj, a, b) PySequence_GetSlice(obj, a, b) #define __Pyx_PySequence_SetSlice(obj, a, b, value) PySequence_SetSlice(obj, a, b, value) #define __Pyx_PySequence_DelSlice(obj, a, b) PySequence_DelSlice(obj, a, b) #else #define __Pyx_PySequence_GetSlice(obj, a, b) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), (PyObject*)0) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_GetSlice(obj, a, b)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object is unsliceable", (obj)->ob_type->tp_name), (PyObject*)0))) #define __Pyx_PySequence_SetSlice(obj, a, b, value) (unlikely(!(obj)) ? 
\ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_SetSlice(obj, a, b, value)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice assignment", (obj)->ob_type->tp_name), -1))) #define __Pyx_PySequence_DelSlice(obj, a, b) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_DelSlice(obj, a, b)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice deletion", (obj)->ob_type->tp_name), -1))) #endif #if PY_MAJOR_VERSION >= 3 #define PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func)) #endif #if PY_VERSION_HEX < 0x02050000 #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),((char *)(n))) #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),((char *)(n)),(a)) #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),((char *)(n))) #else #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),(n)) #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),(n),(a)) #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),(n)) #endif #if PY_VERSION_HEX < 0x02050000 #define __Pyx_NAMESTR(n) ((char *)(n)) #define __Pyx_DOCSTR(n) ((char *)(n)) #else #define __Pyx_NAMESTR(n) (n) #define __Pyx_DOCSTR(n) (n) #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include #define __PYX_HAVE__nipy__labs__group__routines #define __PYX_HAVE_API__nipy__labs__group__routines #include "stdio.h" #include "stdlib.h" #include "numpy/arrayobject.h" #include "numpy/ufuncobject.h" #ifdef _OPENMP #include #endif /* _OPENMP */ #ifdef PYREX_WITHOUT_ASSERTIONS #define CYTHON_WITHOUT_ASSERTIONS #endif /* inline attribute */ #ifndef CYTHON_INLINE #if defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif /* unused attribute */ #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif typedef struct {PyObject **p; char *s; const long n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; /*proto*/ /* Type Conversion Predeclarations */ #define __Pyx_PyBytes_FromUString(s) PyBytes_FromString((char*)s) #define __Pyx_PyBytes_AsUString(s) ((unsigned char*) PyBytes_AsString(s)) #define __Pyx_Owned_Py_None(b) (Py_INCREF(Py_None), Py_None) #define __Pyx_PyBool_FromLong(b) ((b) ? 
(Py_INCREF(Py_True), Py_True) : (Py_INCREF(Py_False), Py_False)) static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x); static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject*); #if CYTHON_COMPILING_IN_CPYTHON #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #ifdef __GNUC__ /* Test for GCC > 2.95 */ #if __GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* __GNUC__ > 2 ... */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ > 2 ... */ #else /* __GNUC__ */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static PyObject *__pyx_m; static PyObject *__pyx_b; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= __FILE__; static const char *__pyx_filename; #if !defined(CYTHON_CCOMPLEX) #if defined(__cplusplus) #define CYTHON_CCOMPLEX 1 #elif defined(_Complex_I) #define CYTHON_CCOMPLEX 1 #else #define CYTHON_CCOMPLEX 0 #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus #include #else #include #endif #endif #if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__) #undef _Complex_I #define _Complex_I 1.0fj #endif static const char *__pyx_f[] = { "routines.pyx", "numpy.pxd", "type.pxd", }; #define IS_UNSIGNED(type) (((type) -1) > 0) struct __Pyx_StructField_; #define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0) typedef struct { const char* name; /* for error messages only */ struct __Pyx_StructField_* fields; size_t size; /* sizeof(type) */ size_t arraysize[8]; /* length of array in each dimension */ int ndim; char typegroup; /* _R_eal, _C_omplex, Signed _I_nt, _U_nsigned int, _S_truct, _P_ointer, _O_bject, c_H_ar */ char is_unsigned; int flags; } __Pyx_TypeInfo; typedef struct __Pyx_StructField_ { __Pyx_TypeInfo* type; const char* name; size_t offset; } __Pyx_StructField; typedef struct { __Pyx_StructField* field; size_t parent_offset; } __Pyx_BufFmt_StackElem; typedef struct { __Pyx_StructField root; __Pyx_BufFmt_StackElem* head; size_t fmt_offset; size_t new_count, enc_count; size_t struct_alignment; int is_complex; char enc_type; char new_packmode; char enc_packmode; char is_valid_array; } __Pyx_BufFmt_Context; /* "numpy.pxd":723 * # in Cython to enable them only on the right systems. 
* * ctypedef npy_int8 int8_t # <<<<<<<<<<<<<< * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t */ typedef npy_int8 __pyx_t_5numpy_int8_t; /* "numpy.pxd":724 * * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t # <<<<<<<<<<<<<< * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t */ typedef npy_int16 __pyx_t_5numpy_int16_t; /* "numpy.pxd":725 * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t # <<<<<<<<<<<<<< * ctypedef npy_int64 int64_t * #ctypedef npy_int96 int96_t */ typedef npy_int32 __pyx_t_5numpy_int32_t; /* "numpy.pxd":726 * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t # <<<<<<<<<<<<<< * #ctypedef npy_int96 int96_t * #ctypedef npy_int128 int128_t */ typedef npy_int64 __pyx_t_5numpy_int64_t; /* "numpy.pxd":730 * #ctypedef npy_int128 int128_t * * ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<< * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t */ typedef npy_uint8 __pyx_t_5numpy_uint8_t; /* "numpy.pxd":731 * * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<< * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t */ typedef npy_uint16 __pyx_t_5numpy_uint16_t; /* "numpy.pxd":732 * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<< * ctypedef npy_uint64 uint64_t * #ctypedef npy_uint96 uint96_t */ typedef npy_uint32 __pyx_t_5numpy_uint32_t; /* "numpy.pxd":733 * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<< * #ctypedef npy_uint96 uint96_t * #ctypedef npy_uint128 uint128_t */ typedef npy_uint64 __pyx_t_5numpy_uint64_t; /* "numpy.pxd":737 * #ctypedef npy_uint128 uint128_t * * ctypedef npy_float32 float32_t # <<<<<<<<<<<<<< * ctypedef npy_float64 float64_t * #ctypedef npy_float80 float80_t */ typedef npy_float32 __pyx_t_5numpy_float32_t; /* "numpy.pxd":738 * * ctypedef npy_float32 float32_t * ctypedef npy_float64 float64_t # <<<<<<<<<<<<<< * #ctypedef npy_float80 float80_t * #ctypedef npy_float128 float128_t */ typedef npy_float64 __pyx_t_5numpy_float64_t; /* "numpy.pxd":747 * # The int types are mapped a bit surprising -- * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t # <<<<<<<<<<<<<< * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t */ typedef npy_long __pyx_t_5numpy_int_t; /* "numpy.pxd":748 * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t * ctypedef npy_longlong long_t # <<<<<<<<<<<<<< * ctypedef npy_longlong longlong_t * */ typedef npy_longlong __pyx_t_5numpy_long_t; /* "numpy.pxd":749 * ctypedef npy_long int_t * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<< * * ctypedef npy_ulong uint_t */ typedef npy_longlong __pyx_t_5numpy_longlong_t; /* "numpy.pxd":751 * ctypedef npy_longlong longlong_t * * ctypedef npy_ulong uint_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t */ typedef npy_ulong __pyx_t_5numpy_uint_t; /* "numpy.pxd":752 * * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulonglong_t * */ typedef npy_ulonglong __pyx_t_5numpy_ulong_t; /* "numpy.pxd":753 * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<< * * ctypedef npy_intp intp_t */ typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t; /* "numpy.pxd":755 * ctypedef npy_ulonglong ulonglong_t * * ctypedef npy_intp intp_t # 
<<<<<<<<<<<<<< * ctypedef npy_uintp uintp_t * */ typedef npy_intp __pyx_t_5numpy_intp_t; /* "numpy.pxd":756 * * ctypedef npy_intp intp_t * ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<< * * ctypedef npy_double float_t */ typedef npy_uintp __pyx_t_5numpy_uintp_t; /* "numpy.pxd":758 * ctypedef npy_uintp uintp_t * * ctypedef npy_double float_t # <<<<<<<<<<<<<< * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t */ typedef npy_double __pyx_t_5numpy_float_t; /* "numpy.pxd":759 * * ctypedef npy_double float_t * ctypedef npy_double double_t # <<<<<<<<<<<<<< * ctypedef npy_longdouble longdouble_t * */ typedef npy_double __pyx_t_5numpy_double_t; /* "numpy.pxd":760 * ctypedef npy_double float_t * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cfloat cfloat_t */ typedef npy_longdouble __pyx_t_5numpy_longdouble_t; #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< float > __pyx_t_float_complex; #else typedef float _Complex __pyx_t_float_complex; #endif #else typedef struct { float real, imag; } __pyx_t_float_complex; #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< double > __pyx_t_double_complex; #else typedef double _Complex __pyx_t_double_complex; #endif #else typedef struct { double real, imag; } __pyx_t_double_complex; #endif /*--- Type declarations ---*/ /* "numpy.pxd":762 * ctypedef npy_longdouble longdouble_t * * ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<< * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t */ typedef npy_cfloat __pyx_t_5numpy_cfloat_t; /* "numpy.pxd":763 * * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<< * ctypedef npy_clongdouble clongdouble_t * */ typedef npy_cdouble __pyx_t_5numpy_cdouble_t; /* "numpy.pxd":764 * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cdouble complex_t */ typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t; /* "numpy.pxd":766 * ctypedef npy_clongdouble clongdouble_t * * ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew1(a): */ typedef npy_cdouble __pyx_t_5numpy_complex_t; #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); /*proto*/ #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define __Pyx_RefNannySetupContext(name, acquire_gil) \ if (acquire_gil) { \ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \ PyGILState_Release(__pyx_gilstate_save); \ } else { \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil) \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext() \ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) 
__Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif /* CYTHON_REFNANNY */ #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name); /*proto*/ static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /*proto*/ static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /*proto*/ static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[], \ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, \ const char* function_name); /*proto*/ static int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, const char *name, int exact); /*proto*/ static CYTHON_INLINE int __Pyx_GetBufferAndValidate(Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack); static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info); #define __Pyx_BufPtrStrided1d(type, buf, i0, s0) (type)((char*)buf + i0 * s0) #define __Pyx_BufPtrStrided2d(type, buf, i0, s0, i1, s1) (type)((char*)buf + i0 * s0 + i1 * s1) static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb); /*proto*/ static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb); /*proto*/ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /*proto*/ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); static CYTHON_INLINE int __Pyx_IterFinish(void); /*proto*/ static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected); /*proto*/ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /*proto*/ typedef struct { Py_ssize_t shape, strides, suboffsets; } __Pyx_Buf_DimInfo; typedef struct { size_t refcount; Py_buffer pybuffer; } __Pyx_Buffer; typedef struct { __Pyx_Buffer *rcbuffer; char *data; __Pyx_Buf_DimInfo diminfo[8]; } __Pyx_LocalBuf_ND; #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); static void __Pyx_ReleaseBuffer(Py_buffer *view); #else #define __Pyx_GetBuffer PyObject_GetBuffer #define __Pyx_ReleaseBuffer PyBuffer_Release #endif static Py_ssize_t __Pyx_zeros[] = {0, 0, 0, 0, 0, 0, 0, 0}; static Py_ssize_t 
__Pyx_minusones[] = {-1, -1, -1, -1, -1, -1, -1, -1}; static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level); /*proto*/ #if CYTHON_CCOMPLEX #ifdef __cplusplus #define __Pyx_CREAL(z) ((z).real()) #define __Pyx_CIMAG(z) ((z).imag()) #else #define __Pyx_CREAL(z) (__real__(z)) #define __Pyx_CIMAG(z) (__imag__(z)) #endif #else #define __Pyx_CREAL(z) ((z).real) #define __Pyx_CIMAG(z) ((z).imag) #endif #if defined(_WIN32) && defined(__cplusplus) && CYTHON_CCOMPLEX #define __Pyx_SET_CREAL(z,x) ((z).real(x)) #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) #else #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) #endif static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float); #if CYTHON_CCOMPLEX #define __Pyx_c_eqf(a, b) ((a)==(b)) #define __Pyx_c_sumf(a, b) ((a)+(b)) #define __Pyx_c_difff(a, b) ((a)-(b)) #define __Pyx_c_prodf(a, b) ((a)*(b)) #define __Pyx_c_quotf(a, b) ((a)/(b)) #define __Pyx_c_negf(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zerof(z) ((z)==(float)0) #define __Pyx_c_conjf(z) (::std::conj(z)) #if 1 #define __Pyx_c_absf(z) (::std::abs(z)) #define __Pyx_c_powf(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zerof(z) ((z)==0) #define __Pyx_c_conjf(z) (conjf(z)) #if 1 #define __Pyx_c_absf(z) (cabsf(z)) #define __Pyx_c_powf(a, b) (cpowf(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex); static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex); #if 1 static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex, __pyx_t_float_complex); #endif #endif static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double); #if CYTHON_CCOMPLEX #define __Pyx_c_eq(a, b) ((a)==(b)) #define __Pyx_c_sum(a, b) ((a)+(b)) #define __Pyx_c_diff(a, b) ((a)-(b)) #define __Pyx_c_prod(a, b) ((a)*(b)) #define __Pyx_c_quot(a, b) ((a)/(b)) #define __Pyx_c_neg(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zero(z) ((z)==(double)0) #define __Pyx_c_conj(z) (::std::conj(z)) #if 1 #define __Pyx_c_abs(z) (::std::abs(z)) #define __Pyx_c_pow(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zero(z) ((z)==0) #define __Pyx_c_conj(z) (conj(z)) #if 1 #define __Pyx_c_abs(z) (cabs(z)) #define __Pyx_c_pow(a, b) (cpow(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE 
__pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex); static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex); #if 1 static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex, __pyx_t_double_complex); #endif #endif static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject *); static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject *); static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject *); static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject *); static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject *); static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject *); static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject *); static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject *); static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject *); static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject *); static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject *); static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject *); static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject *); static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject *); static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject *); static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject *); static int __Pyx_check_binary_version(void); #if !defined(__Pyx_PyIdentifier_FromString) #if PY_MAJOR_VERSION < 3 #define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s) #else #define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s) #endif #endif static PyObject *__Pyx_ImportModule(const char *name); /*proto*/ static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict); /*proto*/ typedef struct { int code_line; PyCodeObject* code_object; } __Pyx_CodeObjectCacheEntry; struct __Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); /*proto*/ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/ /* Module declarations from 'cpython.buffer' */ /* Module declarations from 'cpython.ref' */ /* Module declarations from 'libc.stdio' */ /* Module declarations from 'cpython.object' */ /* Module declarations from '__builtin__' */ /* Module declarations from 'cpython.type' */ static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0; /* Module declarations from 'libc.stdlib' */ /* Module declarations from 'numpy' */ /* Module declarations from 'numpy' */ static PyTypeObject *__pyx_ptype_5numpy_dtype = 0; static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0; static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0; static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0; static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0; static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/ /* Module declarations from 'cython' */ /* Module declarations from 
'nipy.labs.group.routines' */ static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_5numpy_float_t = { "float_t", NULL, sizeof(__pyx_t_5numpy_float_t), { 0 }, 0, 'R', 0, 0 }; static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_5numpy_int_t = { "int_t", NULL, sizeof(__pyx_t_5numpy_int_t), { 0 }, 0, IS_UNSIGNED(__pyx_t_5numpy_int_t) ? 'U' : 'I', IS_UNSIGNED(__pyx_t_5numpy_int_t), 0 }; #define __Pyx_MODULE_NAME "nipy.labs.group.routines" int __pyx_module_is_main_nipy__labs__group__routines = 0; /* Implementation of 'nipy.labs.group.routines' */ static PyObject *__pyx_builtin_range; static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_RuntimeError; static PyObject *__pyx_pf_4nipy_4labs_5group_8routines_add_lines(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_A, PyArrayObject *__pyx_v_B, PyArrayObject *__pyx_v_I); /* proto */ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */ static char __pyx_k_1[] = "ndarray is not C contiguous"; static char __pyx_k_3[] = "ndarray is not Fortran contiguous"; static char __pyx_k_5[] = "Non-native byte order not supported"; static char __pyx_k_7[] = "unknown dtype code in numpy.pxd (%d)"; static char __pyx_k_8[] = "Format string allocated too short, see comment in numpy.pxd"; static char __pyx_k_11[] = "Format string allocated too short."; static char __pyx_k_13[] = "\nBasic ndarray routines for faster computations.\n\nAuthor: Alexis Roche, 2008.\n"; static char __pyx_k_14[] = "0.1"; static char __pyx_k_17[] = "/Users/mb312/dev_trees/nipy/nipy/labs/group/routines.pyx"; static char __pyx_k_18[] = "nipy.labs.group.routines"; static char __pyx_k__A[] = "A"; static char __pyx_k__B[] = "B"; static char __pyx_k__H[] = "H"; static char __pyx_k__I[] = "I"; static char __pyx_k__L[] = "L"; static char __pyx_k__O[] = "O"; static char __pyx_k__Q[] = "Q"; static char __pyx_k__b[] = "b"; static char __pyx_k__d[] = "d"; static char __pyx_k__f[] = "f"; static char __pyx_k__g[] = "g"; static char __pyx_k__h[] = "h"; static char __pyx_k__i[] = "i"; static char __pyx_k__j[] = "j"; static char __pyx_k__l[] = "l"; static char __pyx_k__q[] = "q"; static char __pyx_k__Zd[] = "Zd"; static char __pyx_k__Zf[] = "Zf"; static char __pyx_k__Zg[] = "Zg"; static char __pyx_k__np[] = "np"; static char __pyx_k__i_max[] = "i_max"; static char __pyx_k__index[] = "index"; static char __pyx_k__j_max[] = "j_max"; static char __pyx_k__numpy[] = "numpy"; static char __pyx_k__range[] = "range"; static char __pyx_k____main__[] = "__main__"; static char __pyx_k____test__[] = "__test__"; static char __pyx_k__add_lines[] = "add_lines"; static char __pyx_k__ValueError[] = "ValueError"; static char __pyx_k____version__[] = "__version__"; static char __pyx_k__RuntimeError[] = "RuntimeError"; static PyObject *__pyx_kp_u_1; static PyObject *__pyx_kp_u_11; static PyObject *__pyx_kp_s_14; static PyObject *__pyx_kp_s_17; static PyObject *__pyx_n_s_18; static PyObject *__pyx_kp_u_3; static PyObject *__pyx_kp_u_5; static PyObject *__pyx_kp_u_7; static PyObject *__pyx_kp_u_8; static PyObject *__pyx_n_s__A; static PyObject *__pyx_n_s__B; static PyObject *__pyx_n_s__I; static PyObject *__pyx_n_s__RuntimeError; static PyObject *__pyx_n_s__ValueError; static PyObject *__pyx_n_s____main__; static PyObject *__pyx_n_s____test__; static PyObject *__pyx_n_s____version__; static PyObject 
*__pyx_n_s__add_lines; static PyObject *__pyx_n_s__i; static PyObject *__pyx_n_s__i_max; static PyObject *__pyx_n_s__index; static PyObject *__pyx_n_s__j; static PyObject *__pyx_n_s__j_max; static PyObject *__pyx_n_s__np; static PyObject *__pyx_n_s__numpy; static PyObject *__pyx_n_s__range; static PyObject *__pyx_int_15; static PyObject *__pyx_k_tuple_2; static PyObject *__pyx_k_tuple_4; static PyObject *__pyx_k_tuple_6; static PyObject *__pyx_k_tuple_9; static PyObject *__pyx_k_tuple_10; static PyObject *__pyx_k_tuple_12; static PyObject *__pyx_k_tuple_15; static PyObject *__pyx_k_codeobj_16; /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_5group_8routines_1add_lines(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_4labs_5group_8routines_add_lines[] = "\n add(A, B, I)\n\n Add each line of A to a line of B indexed by I, where\n A and B are two-dimensional arrays and I is a\n one-dimensional array of indices.\n\n This is equivalent to: \n\n for i in xrange(len(I)):\n B[I[i]] += A[i]\n\n "; static PyMethodDef __pyx_mdef_4nipy_4labs_5group_8routines_1add_lines = {__Pyx_NAMESTR("add_lines"), (PyCFunction)__pyx_pw_4nipy_4labs_5group_8routines_1add_lines, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4nipy_4labs_5group_8routines_add_lines)}; static PyObject *__pyx_pw_4nipy_4labs_5group_8routines_1add_lines(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_A = 0; PyArrayObject *__pyx_v_B = 0; PyArrayObject *__pyx_v_I = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("add_lines (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__A,&__pyx_n_s__B,&__pyx_n_s__I,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__A)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__B)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("add_lines", 1, 3, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 16; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__I)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("add_lines", 1, 3, 3, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 16; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "add_lines") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 16; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v_A = ((PyArrayObject *)values[0]); __pyx_v_B = ((PyArrayObject *)values[1]); __pyx_v_I = ((PyArrayObject *)values[2]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("add_lines", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename 
= __pyx_f[0]; __pyx_lineno = 16; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.labs.group.routines.add_lines", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_A), __pyx_ptype_5numpy_ndarray, 1, "A", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 16; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_B), __pyx_ptype_5numpy_ndarray, 1, "B", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 17; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_I), __pyx_ptype_5numpy_ndarray, 1, "I", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 18; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_4nipy_4labs_5group_8routines_add_lines(__pyx_self, __pyx_v_A, __pyx_v_B, __pyx_v_I); goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/labs/group/routines.pyx":16 * * @cython.boundscheck(False) * def add_lines(np.ndarray[np.float_t, ndim=2] A, # <<<<<<<<<<<<<< * np.ndarray[np.float_t, ndim=2] B, * np.ndarray[np.int_t, ndim=1] I): */ static PyObject *__pyx_pf_4nipy_4labs_5group_8routines_add_lines(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_A, PyArrayObject *__pyx_v_B, PyArrayObject *__pyx_v_I) { int __pyx_v_i; int __pyx_v_j; int __pyx_v_index; int __pyx_v_i_max; int __pyx_v_j_max; __Pyx_LocalBuf_ND __pyx_pybuffernd_A; __Pyx_Buffer __pyx_pybuffer_A; __Pyx_LocalBuf_ND __pyx_pybuffernd_B; __Pyx_Buffer __pyx_pybuffer_B; __Pyx_LocalBuf_ND __pyx_pybuffernd_I; __Pyx_Buffer __pyx_pybuffer_I; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_t_5; int __pyx_t_6; int __pyx_t_7; int __pyx_t_8; int __pyx_t_9; int __pyx_t_10; int __pyx_t_11; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("add_lines", 0); __pyx_pybuffer_A.pybuffer.buf = NULL; __pyx_pybuffer_A.refcount = 0; __pyx_pybuffernd_A.data = NULL; __pyx_pybuffernd_A.rcbuffer = &__pyx_pybuffer_A; __pyx_pybuffer_B.pybuffer.buf = NULL; __pyx_pybuffer_B.refcount = 0; __pyx_pybuffernd_B.data = NULL; __pyx_pybuffernd_B.rcbuffer = &__pyx_pybuffer_B; __pyx_pybuffer_I.pybuffer.buf = NULL; __pyx_pybuffer_I.refcount = 0; __pyx_pybuffernd_I.data = NULL; __pyx_pybuffernd_I.rcbuffer = &__pyx_pybuffer_I; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_A.rcbuffer->pybuffer, (PyObject*)__pyx_v_A, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 16; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_A.diminfo[0].strides = __pyx_pybuffernd_A.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_A.diminfo[0].shape = __pyx_pybuffernd_A.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_A.diminfo[1].strides = __pyx_pybuffernd_A.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_A.diminfo[1].shape = __pyx_pybuffernd_A.rcbuffer->pybuffer.shape[1]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_B.rcbuffer->pybuffer, (PyObject*)__pyx_v_B, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 16; 
__pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_B.diminfo[0].strides = __pyx_pybuffernd_B.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_B.diminfo[0].shape = __pyx_pybuffernd_B.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_B.diminfo[1].strides = __pyx_pybuffernd_B.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_B.diminfo[1].shape = __pyx_pybuffernd_B.rcbuffer->pybuffer.shape[1]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_I.rcbuffer->pybuffer, (PyObject*)__pyx_v_I, &__Pyx_TypeInfo_nn___pyx_t_5numpy_int_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 16; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_I.diminfo[0].strides = __pyx_pybuffernd_I.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_I.diminfo[0].shape = __pyx_pybuffernd_I.rcbuffer->pybuffer.shape[0]; /* "nipy/labs/group/routines.pyx":33 * """ * cdef int i, j, index * cdef int i_max = I.shape[0] # <<<<<<<<<<<<<< * cdef int j_max = B.shape[1] * */ __pyx_v_i_max = (__pyx_v_I->dimensions[0]); /* "nipy/labs/group/routines.pyx":34 * cdef int i, j, index * cdef int i_max = I.shape[0] * cdef int j_max = B.shape[1] # <<<<<<<<<<<<<< * * for i in range(i_max): */ __pyx_v_j_max = (__pyx_v_B->dimensions[1]); /* "nipy/labs/group/routines.pyx":36 * cdef int j_max = B.shape[1] * * for i in range(i_max): # <<<<<<<<<<<<<< * index = I[i] * for j in range(j_max): */ __pyx_t_1 = __pyx_v_i_max; for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) { __pyx_v_i = __pyx_t_2; /* "nipy/labs/group/routines.pyx":37 * * for i in range(i_max): * index = I[i] # <<<<<<<<<<<<<< * for j in range(j_max): * B[index, j] = B[index, j] + A[i, j] */ __pyx_t_3 = __pyx_v_i; if (__pyx_t_3 < 0) __pyx_t_3 += __pyx_pybuffernd_I.diminfo[0].shape; __pyx_v_index = (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_int_t *, __pyx_pybuffernd_I.rcbuffer->pybuffer.buf, __pyx_t_3, __pyx_pybuffernd_I.diminfo[0].strides)); /* "nipy/labs/group/routines.pyx":38 * for i in range(i_max): * index = I[i] * for j in range(j_max): # <<<<<<<<<<<<<< * B[index, j] = B[index, j] + A[i, j] * */ __pyx_t_4 = __pyx_v_j_max; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { __pyx_v_j = __pyx_t_5; /* "nipy/labs/group/routines.pyx":39 * index = I[i] * for j in range(j_max): * B[index, j] = B[index, j] + A[i, j] # <<<<<<<<<<<<<< * */ __pyx_t_6 = __pyx_v_index; __pyx_t_7 = __pyx_v_j; if (__pyx_t_6 < 0) __pyx_t_6 += __pyx_pybuffernd_B.diminfo[0].shape; if (__pyx_t_7 < 0) __pyx_t_7 += __pyx_pybuffernd_B.diminfo[1].shape; __pyx_t_8 = __pyx_v_i; __pyx_t_9 = __pyx_v_j; if (__pyx_t_8 < 0) __pyx_t_8 += __pyx_pybuffernd_A.diminfo[0].shape; if (__pyx_t_9 < 0) __pyx_t_9 += __pyx_pybuffernd_A.diminfo[1].shape; __pyx_t_10 = __pyx_v_index; __pyx_t_11 = __pyx_v_j; if (__pyx_t_10 < 0) __pyx_t_10 += __pyx_pybuffernd_B.diminfo[0].shape; if (__pyx_t_11 < 0) __pyx_t_11 += __pyx_pybuffernd_B.diminfo[1].shape; *__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_B.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_B.diminfo[0].strides, __pyx_t_11, __pyx_pybuffernd_B.diminfo[1].strides) = ((*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_B.rcbuffer->pybuffer.buf, __pyx_t_6, __pyx_pybuffernd_B.diminfo[0].strides, __pyx_t_7, __pyx_pybuffernd_B.diminfo[1].strides)) + (*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float_t *, __pyx_pybuffernd_A.rcbuffer->pybuffer.buf, __pyx_t_8, __pyx_pybuffernd_A.diminfo[0].strides, __pyx_t_9, 
__pyx_pybuffernd_A.diminfo[1].strides))); } } __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_A.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_B.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_I.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("nipy.labs.group.routines.add_lines", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_A.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_B.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_I.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_pf_5numpy_7ndarray___getbuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":194 * # experimental exception made for __getbuffer__ and __releasebuffer__ * # -- the details of this may change. * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< * # This implementation of getbuffer is geared towards Cython * # requirements, and does not yet fullfill the PEP. 
*/ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_v_copy_shape; int __pyx_v_i; int __pyx_v_ndim; int __pyx_v_endian_detector; int __pyx_v_little_endian; int __pyx_v_t; char *__pyx_v_f; PyArray_Descr *__pyx_v_descr = 0; int __pyx_v_offset; int __pyx_v_hasfields; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; char *__pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getbuffer__", 0); if (__pyx_v_info != NULL) { __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); } /* "numpy.pxd":200 * # of flags * * if info == NULL: return # <<<<<<<<<<<<<< * * cdef int copy_shape, i, ndim */ __pyx_t_1 = (__pyx_v_info == NULL); if (__pyx_t_1) { __pyx_r = 0; goto __pyx_L0; goto __pyx_L3; } __pyx_L3:; /* "numpy.pxd":203 * * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((&endian_detector)[0] != 0) * */ __pyx_v_endian_detector = 1; /* "numpy.pxd":204 * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * * ndim = PyArray_NDIM(self) */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "numpy.pxd":206 * cdef bint little_endian = ((&endian_detector)[0] != 0) * * ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<< * * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_v_ndim = PyArray_NDIM(__pyx_v_self); /* "numpy.pxd":208 * ndim = PyArray_NDIM(self) * * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * copy_shape = 1 * else: */ __pyx_t_1 = ((sizeof(npy_intp)) != (sizeof(Py_ssize_t))); if (__pyx_t_1) { /* "numpy.pxd":209 * * if sizeof(npy_intp) != sizeof(Py_ssize_t): * copy_shape = 1 # <<<<<<<<<<<<<< * else: * copy_shape = 0 */ __pyx_v_copy_shape = 1; goto __pyx_L4; } /*else*/ { /* "numpy.pxd":211 * copy_shape = 1 * else: * copy_shape = 0 # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) */ __pyx_v_copy_shape = 0; } __pyx_L4:; /* "numpy.pxd":213 * copy_shape = 0 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS); if (__pyx_t_1) { /* "numpy.pxd":214 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not C contiguous") * */ __pyx_t_2 = (!PyArray_CHKFLAGS(__pyx_v_self, NPY_C_CONTIGUOUS)); __pyx_t_3 = __pyx_t_2; } else { __pyx_t_3 = __pyx_t_1; } if (__pyx_t_3) { /* "numpy.pxd":215 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_2), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; 
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L5; } __pyx_L5:; /* "numpy.pxd":217 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ __pyx_t_3 = ((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS); if (__pyx_t_3) { /* "numpy.pxd":218 * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not Fortran contiguous") * */ __pyx_t_1 = (!PyArray_CHKFLAGS(__pyx_v_self, NPY_F_CONTIGUOUS)); __pyx_t_2 = __pyx_t_1; } else { __pyx_t_2 = __pyx_t_3; } if (__pyx_t_2) { /* "numpy.pxd":219 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_4), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L6; } __pyx_L6:; /* "numpy.pxd":221 * raise ValueError(u"ndarray is not Fortran contiguous") * * info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<< * info.ndim = ndim * if copy_shape: */ __pyx_v_info->buf = PyArray_DATA(__pyx_v_self); /* "numpy.pxd":222 * * info.buf = PyArray_DATA(self) * info.ndim = ndim # <<<<<<<<<<<<<< * if copy_shape: * # Allocate new buffer for strides and shape info. */ __pyx_v_info->ndim = __pyx_v_ndim; /* "numpy.pxd":223 * info.buf = PyArray_DATA(self) * info.ndim = ndim * if copy_shape: # <<<<<<<<<<<<<< * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. */ if (__pyx_v_copy_shape) { /* "numpy.pxd":226 * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. * info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) # <<<<<<<<<<<<<< * info.shape = info.strides + ndim * for i in range(ndim): */ __pyx_v_info->strides = ((Py_ssize_t *)malloc((((sizeof(Py_ssize_t)) * ((size_t)__pyx_v_ndim)) * 2))); /* "numpy.pxd":227 * # This is allocated as one block, strides first. 
* info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) * info.shape = info.strides + ndim # <<<<<<<<<<<<<< * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] */ __pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim); /* "numpy.pxd":228 * info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) * info.shape = info.strides + ndim * for i in range(ndim): # <<<<<<<<<<<<<< * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] */ __pyx_t_5 = __pyx_v_ndim; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "numpy.pxd":229 * info.shape = info.strides + ndim * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<< * info.shape[i] = PyArray_DIMS(self)[i] * else: */ (__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]); /* "numpy.pxd":230 * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<< * else: * info.strides = PyArray_STRIDES(self) */ (__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]); } goto __pyx_L7; } /*else*/ { /* "numpy.pxd":232 * info.shape[i] = PyArray_DIMS(self)[i] * else: * info.strides = PyArray_STRIDES(self) # <<<<<<<<<<<<<< * info.shape = PyArray_DIMS(self) * info.suboffsets = NULL */ __pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self)); /* "numpy.pxd":233 * else: * info.strides = PyArray_STRIDES(self) * info.shape = PyArray_DIMS(self) # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) */ __pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(__pyx_v_self)); } __pyx_L7:; /* "numpy.pxd":234 * info.strides = PyArray_STRIDES(self) * info.shape = PyArray_DIMS(self) * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) */ __pyx_v_info->suboffsets = NULL; /* "numpy.pxd":235 * info.shape = PyArray_DIMS(self) * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<< * info.readonly = not PyArray_ISWRITEABLE(self) * */ __pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self); /* "numpy.pxd":236 * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<< * * cdef int t */ __pyx_v_info->readonly = (!PyArray_ISWRITEABLE(__pyx_v_self)); /* "numpy.pxd":239 * * cdef int t * cdef char* f = NULL # <<<<<<<<<<<<<< * cdef dtype descr = self.descr * cdef list stack */ __pyx_v_f = NULL; /* "numpy.pxd":240 * cdef int t * cdef char* f = NULL * cdef dtype descr = self.descr # <<<<<<<<<<<<<< * cdef list stack * cdef int offset */ __pyx_t_4 = ((PyObject *)__pyx_v_self->descr); __Pyx_INCREF(__pyx_t_4); __pyx_v_descr = ((PyArray_Descr *)__pyx_t_4); __pyx_t_4 = 0; /* "numpy.pxd":244 * cdef int offset * * cdef bint hasfields = PyDataType_HASFIELDS(descr) # <<<<<<<<<<<<<< * * if not hasfields and not copy_shape: */ __pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr); /* "numpy.pxd":246 * cdef bint hasfields = PyDataType_HASFIELDS(descr) * * if not hasfields and not copy_shape: # <<<<<<<<<<<<<< * # do not call releasebuffer * info.obj = None */ __pyx_t_2 = (!__pyx_v_hasfields); if (__pyx_t_2) { __pyx_t_3 = (!__pyx_v_copy_shape); __pyx_t_1 = __pyx_t_3; } else { __pyx_t_1 = __pyx_t_2; } if (__pyx_t_1) { /* "numpy.pxd":248 * if not hasfields and not copy_shape: * # do not call releasebuffer * info.obj = None # <<<<<<<<<<<<<< * else: * # need to call 
releasebuffer */ __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = Py_None; goto __pyx_L10; } /*else*/ { /* "numpy.pxd":251 * else: * # need to call releasebuffer * info.obj = self # <<<<<<<<<<<<<< * * if not hasfields: */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); } __pyx_L10:; /* "numpy.pxd":253 * info.obj = self * * if not hasfields: # <<<<<<<<<<<<<< * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or */ __pyx_t_1 = (!__pyx_v_hasfields); if (__pyx_t_1) { /* "numpy.pxd":254 * * if not hasfields: * t = descr.type_num # <<<<<<<<<<<<<< * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): */ __pyx_t_5 = __pyx_v_descr->type_num; __pyx_v_t = __pyx_t_5; /* "numpy.pxd":255 * if not hasfields: * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_1 = (__pyx_v_descr->byteorder == '>'); if (__pyx_t_1) { __pyx_t_2 = __pyx_v_little_endian; } else { __pyx_t_2 = __pyx_t_1; } if (!__pyx_t_2) { /* "numpy.pxd":256 * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" */ __pyx_t_1 = (__pyx_v_descr->byteorder == '<'); if (__pyx_t_1) { __pyx_t_3 = (!__pyx_v_little_endian); __pyx_t_7 = __pyx_t_3; } else { __pyx_t_7 = __pyx_t_1; } __pyx_t_1 = __pyx_t_7; } else { __pyx_t_1 = __pyx_t_2; } if (__pyx_t_1) { /* "numpy.pxd":257 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_6), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L12; } __pyx_L12:; /* "numpy.pxd":258 * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" */ __pyx_t_1 = (__pyx_v_t == NPY_BYTE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__b; goto __pyx_L13; } /* "numpy.pxd":259 * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" */ __pyx_t_1 = (__pyx_v_t == NPY_UBYTE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__B; goto __pyx_L13; } /* "numpy.pxd":260 * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" */ __pyx_t_1 = (__pyx_v_t == NPY_SHORT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__h; goto __pyx_L13; } /* "numpy.pxd":261 * elif t == NPY_UBYTE: f = "B" * elif t == 
NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" */ __pyx_t_1 = (__pyx_v_t == NPY_USHORT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__H; goto __pyx_L13; } /* "numpy.pxd":262 * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" */ __pyx_t_1 = (__pyx_v_t == NPY_INT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__i; goto __pyx_L13; } /* "numpy.pxd":263 * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" */ __pyx_t_1 = (__pyx_v_t == NPY_UINT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__I; goto __pyx_L13; } /* "numpy.pxd":264 * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" */ __pyx_t_1 = (__pyx_v_t == NPY_LONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__l; goto __pyx_L13; } /* "numpy.pxd":265 * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" */ __pyx_t_1 = (__pyx_v_t == NPY_ULONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__L; goto __pyx_L13; } /* "numpy.pxd":266 * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" */ __pyx_t_1 = (__pyx_v_t == NPY_LONGLONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__q; goto __pyx_L13; } /* "numpy.pxd":267 * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" */ __pyx_t_1 = (__pyx_v_t == NPY_ULONGLONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Q; goto __pyx_L13; } /* "numpy.pxd":268 * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" */ __pyx_t_1 = (__pyx_v_t == NPY_FLOAT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__f; goto __pyx_L13; } /* "numpy.pxd":269 * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" */ __pyx_t_1 = (__pyx_v_t == NPY_DOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__d; goto __pyx_L13; } /* "numpy.pxd":270 * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" */ __pyx_t_1 = (__pyx_v_t == NPY_LONGDOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__g; goto __pyx_L13; } /* "numpy.pxd":271 * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" */ __pyx_t_1 = (__pyx_v_t == NPY_CFLOAT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Zf; goto __pyx_L13; } /* "numpy.pxd":272 * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" */ __pyx_t_1 = (__pyx_v_t == NPY_CDOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Zd; goto __pyx_L13; } /* "numpy.pxd":273 * elif t 
== NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f = "O" * else: */ __pyx_t_1 = (__pyx_v_t == NPY_CLONGDOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Zg; goto __pyx_L13; } /* "numpy.pxd":274 * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ __pyx_t_1 = (__pyx_v_t == NPY_OBJECT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__O; goto __pyx_L13; } /*else*/ { /* "numpy.pxd":276 * elif t == NPY_OBJECT: f = "O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * info.format = f * return */ __pyx_t_4 = PyInt_FromLong(__pyx_v_t); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_8 = PyNumber_Remainder(((PyObject *)__pyx_kp_u_7), __pyx_t_4); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_8)); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_t_8)); __Pyx_GIVEREF(((PyObject *)__pyx_t_8)); __pyx_t_8 = 0; __pyx_t_8 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_8, 0, 0, 0); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L13:; /* "numpy.pxd":277 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f # <<<<<<<<<<<<<< * return * else: */ __pyx_v_info->format = __pyx_v_f; /* "numpy.pxd":278 * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f * return # <<<<<<<<<<<<<< * else: * info.format = stdlib.malloc(_buffer_format_string_len) */ __pyx_r = 0; goto __pyx_L0; goto __pyx_L11; } /*else*/ { /* "numpy.pxd":280 * return * else: * info.format = stdlib.malloc(_buffer_format_string_len) # <<<<<<<<<<<<<< * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 */ __pyx_v_info->format = ((char *)malloc(255)); /* "numpy.pxd":281 * else: * info.format = stdlib.malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment # <<<<<<<<<<<<<< * offset = 0 * f = _util_dtypestring(descr, info.format + 1, */ (__pyx_v_info->format[0]) = '^'; /* "numpy.pxd":282 * info.format = stdlib.malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 # <<<<<<<<<<<<<< * f = _util_dtypestring(descr, info.format + 1, * info.format + _buffer_format_string_len, */ __pyx_v_offset = 0; /* "numpy.pxd":285 * f = _util_dtypestring(descr, info.format + 1, * info.format + _buffer_format_string_len, * &offset) # <<<<<<<<<<<<<< * f[0] = c'\0' # Terminate format string * */ __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 255), (&__pyx_v_offset)); if (unlikely(__pyx_t_9 == NULL)) 
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 283; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_f = __pyx_t_9; /* "numpy.pxd":286 * info.format + _buffer_format_string_len, * &offset) * f[0] = c'\0' # Terminate format string # <<<<<<<<<<<<<< * * def __releasebuffer__(ndarray self, Py_buffer* info): */ (__pyx_v_f[0]) = '\x00'; } __pyx_L11:; __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { __Pyx_GOTREF(Py_None); __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; } __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_descr); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__releasebuffer__ (wrapper)", 0); __pyx_pf_5numpy_7ndarray_2__releasebuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info)); __Pyx_RefNannyFinishContext(); } /* "numpy.pxd":288 * f[0] = c'\0' # Terminate format string * * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< * if PyArray_HASFIELDS(self): * stdlib.free(info.format) */ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__releasebuffer__", 0); /* "numpy.pxd":289 * * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_t_1 = PyArray_HASFIELDS(__pyx_v_self); if (__pyx_t_1) { /* "numpy.pxd":290 * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): * stdlib.free(info.format) # <<<<<<<<<<<<<< * if sizeof(npy_intp) != sizeof(Py_ssize_t): * stdlib.free(info.strides) */ free(__pyx_v_info->format); goto __pyx_L3; } __pyx_L3:; /* "numpy.pxd":291 * if PyArray_HASFIELDS(self): * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * stdlib.free(info.strides) * # info.shape was stored after info.strides in the same block */ __pyx_t_1 = ((sizeof(npy_intp)) != (sizeof(Py_ssize_t))); if (__pyx_t_1) { /* "numpy.pxd":292 * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): * stdlib.free(info.strides) # <<<<<<<<<<<<<< * # info.shape was stored after info.strides in the same block * */ free(__pyx_v_info->strides); goto __pyx_L4; } __pyx_L4:; __Pyx_RefNannyFinishContext(); } /* "numpy.pxd":768 * ctypedef npy_cdouble complex_t * * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(1, a) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0); /* "numpy.pxd":769 * * cdef inline object PyArray_MultiIterNew1(a): * 
return PyArray_MultiIterNew(1, a) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew2(a, b): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 769; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":771 * return PyArray_MultiIterNew(1, a) * * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(2, a, b) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0); /* "numpy.pxd":772 * * cdef inline object PyArray_MultiIterNew2(a, b): * return PyArray_MultiIterNew(2, a, b) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew3(a, b, c): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 772; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":774 * return PyArray_MultiIterNew(2, a, b) * * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(3, a, b, c) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0); /* "numpy.pxd":775 * * cdef inline object PyArray_MultiIterNew3(a, b, c): * return PyArray_MultiIterNew(3, a, b, c) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 775; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":777 * return PyArray_MultiIterNew(3, a, b, c) * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(4, a, b, c, d) * */ static CYTHON_INLINE PyObject 
*__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0); /* "numpy.pxd":778 * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): * return PyArray_MultiIterNew(4, a, b, c, d) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 778; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":780 * return PyArray_MultiIterNew(4, a, b, c, d) * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(5, a, b, c, d, e) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0); /* "numpy.pxd":781 * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): * return PyArray_MultiIterNew(5, a, b, c, d, e) # <<<<<<<<<<<<<< * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 781; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":783 * return PyArray_MultiIterNew(5, a, b, c, d, e) * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< * # Recursive utility function used in __getbuffer__ to get format * # string. The new location in the format string is returned. 
*/ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) { PyArray_Descr *__pyx_v_child = 0; int __pyx_v_endian_detector; int __pyx_v_little_endian; PyObject *__pyx_v_fields = 0; PyObject *__pyx_v_childname = NULL; PyObject *__pyx_v_new_offset = NULL; PyObject *__pyx_v_t = NULL; char *__pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *(*__pyx_t_6)(PyObject *); int __pyx_t_7; int __pyx_t_8; int __pyx_t_9; int __pyx_t_10; long __pyx_t_11; char *__pyx_t_12; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_util_dtypestring", 0); /* "numpy.pxd":790 * cdef int delta_offset * cdef tuple i * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((&endian_detector)[0] != 0) * cdef tuple fields */ __pyx_v_endian_detector = 1; /* "numpy.pxd":791 * cdef tuple i * cdef int endian_detector = 1 * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * cdef tuple fields * */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "numpy.pxd":794 * cdef tuple fields * * for childname in descr.names: # <<<<<<<<<<<<<< * fields = descr.fields[childname] * child, new_offset = fields */ if (unlikely(((PyObject *)__pyx_v_descr->names) == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_1 = ((PyObject *)__pyx_v_descr->names); __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; for (;;) { if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif __Pyx_XDECREF(__pyx_v_childname); __pyx_v_childname = __pyx_t_3; __pyx_t_3 = 0; /* "numpy.pxd":795 * * for childname in descr.names: * fields = descr.fields[childname] # <<<<<<<<<<<<<< * child, new_offset = fields * */ __pyx_t_3 = PyObject_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (!__pyx_t_3) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected tuple, got %.200s", Py_TYPE(__pyx_t_3)->tp_name), 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_XDECREF(((PyObject *)__pyx_v_fields)); __pyx_v_fields = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "numpy.pxd":796 * for childname in descr.names: * fields = descr.fields[childname] * child, new_offset = fields # <<<<<<<<<<<<<< * * if (end - f) - (new_offset - offset[0]) < 15: */ if (likely(PyTuple_CheckExact(((PyObject *)__pyx_v_fields)))) { PyObject* sequence = ((PyObject *)__pyx_v_fields); #if CYTHON_COMPILING_IN_CPYTHON Py_ssize_t size = Py_SIZE(sequence); #else Py_ssize_t size = PySequence_Size(sequence); #endif if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else 
if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); #else __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else if (1) { __Pyx_RaiseNoneNotIterableError(); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } else { Py_ssize_t index = -1; __pyx_t_5 = PyObject_GetIter(((PyObject *)__pyx_v_fields)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = Py_TYPE(__pyx_t_5)->tp_iternext; index = 0; __pyx_t_3 = __pyx_t_6(__pyx_t_5); if (unlikely(!__pyx_t_3)) goto __pyx_L5_unpacking_failed; __Pyx_GOTREF(__pyx_t_3); index = 1; __pyx_t_4 = __pyx_t_6(__pyx_t_5); if (unlikely(!__pyx_t_4)) goto __pyx_L5_unpacking_failed; __Pyx_GOTREF(__pyx_t_4); if (__Pyx_IternextUnpackEndCheck(__pyx_t_6(__pyx_t_5), 2) < 0) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_6 = NULL; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L6_unpacking_done; __pyx_L5_unpacking_failed:; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_6 = NULL; if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_L6_unpacking_done:; } if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_XDECREF(((PyObject *)__pyx_v_child)); __pyx_v_child = ((PyArray_Descr *)__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_v_new_offset); __pyx_v_new_offset = __pyx_t_4; __pyx_t_4 = 0; /* "numpy.pxd":798 * child, new_offset = fields * * if (end - f) - (new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * */ __pyx_t_4 = PyInt_FromLong((__pyx_v_end - __pyx_v_f)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyInt_FromLong((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_3); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyNumber_Subtract(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyObject_RichCompare(__pyx_t_3, __pyx_int_15, Py_LT); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = 
__pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { /* "numpy.pxd":799 * * if (end - f) - (new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_t_5 = PyObject_Call(__pyx_builtin_RuntimeError, ((PyObject *)__pyx_k_tuple_9), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L7; } __pyx_L7:; /* "numpy.pxd":801 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_7 = (__pyx_v_child->byteorder == '>'); if (__pyx_t_7) { __pyx_t_8 = __pyx_v_little_endian; } else { __pyx_t_8 = __pyx_t_7; } if (!__pyx_t_8) { /* "numpy.pxd":802 * * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * # One could encode it in the format string and have Cython */ __pyx_t_7 = (__pyx_v_child->byteorder == '<'); if (__pyx_t_7) { __pyx_t_9 = (!__pyx_v_little_endian); __pyx_t_10 = __pyx_t_9; } else { __pyx_t_10 = __pyx_t_7; } __pyx_t_7 = __pyx_t_10; } else { __pyx_t_7 = __pyx_t_8; } if (__pyx_t_7) { /* "numpy.pxd":803 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_10), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L8; } __pyx_L8:; /* "numpy.pxd":813 * * # Output padding bytes * while offset[0] < new_offset: # <<<<<<<<<<<<<< * f[0] = 120 # "x"; pad byte * f += 1 */ while (1) { __pyx_t_5 = PyInt_FromLong((__pyx_v_offset[0])); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_t_5, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if 
(!__pyx_t_7) break; /* "numpy.pxd":814 * # Output padding bytes * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<< * f += 1 * offset[0] += 1 */ (__pyx_v_f[0]) = 120; /* "numpy.pxd":815 * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte * f += 1 # <<<<<<<<<<<<<< * offset[0] += 1 * */ __pyx_v_f = (__pyx_v_f + 1); /* "numpy.pxd":816 * f[0] = 120 # "x"; pad byte * f += 1 * offset[0] += 1 # <<<<<<<<<<<<<< * * offset[0] += child.itemsize */ __pyx_t_11 = 0; (__pyx_v_offset[__pyx_t_11]) = ((__pyx_v_offset[__pyx_t_11]) + 1); } /* "numpy.pxd":818 * offset[0] += 1 * * offset[0] += child.itemsize # <<<<<<<<<<<<<< * * if not PyDataType_HASFIELDS(child): */ __pyx_t_11 = 0; (__pyx_v_offset[__pyx_t_11]) = ((__pyx_v_offset[__pyx_t_11]) + __pyx_v_child->elsize); /* "numpy.pxd":820 * offset[0] += child.itemsize * * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< * t = child.type_num * if end - f < 5: */ __pyx_t_7 = (!PyDataType_HASFIELDS(__pyx_v_child)); if (__pyx_t_7) { /* "numpy.pxd":821 * * if not PyDataType_HASFIELDS(child): * t = child.type_num # <<<<<<<<<<<<<< * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") */ __pyx_t_3 = PyInt_FromLong(__pyx_v_child->type_num); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 821; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_XDECREF(__pyx_v_t); __pyx_v_t = __pyx_t_3; __pyx_t_3 = 0; /* "numpy.pxd":822 * if not PyDataType_HASFIELDS(child): * t = child.type_num * if end - f < 5: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short.") * */ __pyx_t_7 = ((__pyx_v_end - __pyx_v_f) < 5); if (__pyx_t_7) { /* "numpy.pxd":823 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_t_3 = PyObject_Call(__pyx_builtin_RuntimeError, ((PyObject *)__pyx_k_tuple_12), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L12; } __pyx_L12:; /* "numpy.pxd":826 * * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" */ __pyx_t_3 = PyInt_FromLong(NPY_BYTE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 98; goto __pyx_L13; } /* "numpy.pxd":827 * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" */ __pyx_t_5 = PyInt_FromLong(NPY_UBYTE); 
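/* Each branch in this chain stores the struct-module / PEP 3118 format
 * character for one numpy scalar kind as its integer ASCII code
 * (98 == 'b', 66 == 'B', 104 == 'h', 72 == 'H', ...), following the
 * "ticket #99" note in the embedded numpy.pxd source above; the padding
 * loop earlier emits 120 == 'x' for any gap between fields of a
 * structured dtype. */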
if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 66; goto __pyx_L13; } /* "numpy.pxd":828 * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" */ __pyx_t_3 = PyInt_FromLong(NPY_SHORT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 104; goto __pyx_L13; } /* "numpy.pxd":829 * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" */ __pyx_t_5 = PyInt_FromLong(NPY_USHORT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 72; goto __pyx_L13; } /* "numpy.pxd":830 * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" */ __pyx_t_3 = PyInt_FromLong(NPY_INT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 105; goto __pyx_L13; } /* "numpy.pxd":831 * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t 
== NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" */ __pyx_t_5 = PyInt_FromLong(NPY_UINT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 73; goto __pyx_L13; } /* "numpy.pxd":832 * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" */ __pyx_t_3 = PyInt_FromLong(NPY_LONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 108; goto __pyx_L13; } /* "numpy.pxd":833 * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" */ __pyx_t_5 = PyInt_FromLong(NPY_ULONG); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 76; goto __pyx_L13; } /* "numpy.pxd":834 * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" */ __pyx_t_3 = PyInt_FromLong(NPY_LONGLONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; 
__pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 113; goto __pyx_L13; } /* "numpy.pxd":835 * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" */ __pyx_t_5 = PyInt_FromLong(NPY_ULONGLONG); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 81; goto __pyx_L13; } /* "numpy.pxd":836 * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" */ __pyx_t_3 = PyInt_FromLong(NPY_FLOAT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 102; goto __pyx_L13; } /* "numpy.pxd":837 * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf */ __pyx_t_5 = PyInt_FromLong(NPY_DOUBLE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 100; goto __pyx_L13; } /* "numpy.pxd":838 * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd */ __pyx_t_3 = PyInt_FromLong(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = 
PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 103; goto __pyx_L13; } /* "numpy.pxd":839 * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg */ __pyx_t_5 = PyInt_FromLong(NPY_CFLOAT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 102; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L13; } /* "numpy.pxd":840 * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" */ __pyx_t_3 = PyInt_FromLong(NPY_CDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 100; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L13; } /* "numpy.pxd":841 * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: */ __pyx_t_5 = PyInt_FromLong(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
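/* The complex kinds emit two characters: 90 == 'Z' followed by the code
 * of the component type (102 'f', 100 'd', 103 'g').  They advance f by
 * one extra byte here so that the shared "f += 1" after the chain steps
 * past both characters. */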
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 103; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L13; } /* "numpy.pxd":842 * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ __pyx_t_3 = PyInt_FromLong(NPY_OBJECT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 79; goto __pyx_L13; } /*else*/ { /* "numpy.pxd":844 * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * f += 1 * else: */ __pyx_t_5 = PyNumber_Remainder(((PyObject *)__pyx_kp_u_7), __pyx_v_t); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_5)); __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_t_5)); __Pyx_GIVEREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L13:; /* "numpy.pxd":845 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * f += 1 # <<<<<<<<<<<<<< * else: * # Cython ignores struct boundary information ("T{...}"), */ __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L11; } /*else*/ { /* "numpy.pxd":849 * # Cython ignores struct boundary information ("T{...}"), * # so don't output it * f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<< * return f * */ __pyx_t_12 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_12 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 849; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_f = __pyx_t_12; } __pyx_L11:; } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "numpy.pxd":850 * # so don't output it * f = _util_dtypestring(child, f, end, offset) * return f # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_f; goto __pyx_L0; __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_child); __Pyx_XDECREF(__pyx_v_fields); 
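/* Taken together, _util_dtypestring walks descr.names/descr.fields
 * recursively, emitting 'x' pad bytes for inter-field gaps and one
 * format code per scalar field, and returns the new write position in
 * the caller's buffer.  For example, a packed two-field dtype of int32
 * followed by float64 would typically be exported as "^id", the leading
 * '^' having been written by __getbuffer__ before the first call. */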
__Pyx_XDECREF(__pyx_v_childname); __Pyx_XDECREF(__pyx_v_new_offset); __Pyx_XDECREF(__pyx_v_t); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":965 * * * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< * cdef PyObject* baseptr * if base is None: */ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) { PyObject *__pyx_v_baseptr; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("set_array_base", 0); /* "numpy.pxd":967 * cdef inline void set_array_base(ndarray arr, object base): * cdef PyObject* baseptr * if base is None: # <<<<<<<<<<<<<< * baseptr = NULL * else: */ __pyx_t_1 = (__pyx_v_base == Py_None); if (__pyx_t_1) { /* "numpy.pxd":968 * cdef PyObject* baseptr * if base is None: * baseptr = NULL # <<<<<<<<<<<<<< * else: * Py_INCREF(base) # important to do this before decref below! */ __pyx_v_baseptr = NULL; goto __pyx_L3; } /*else*/ { /* "numpy.pxd":970 * baseptr = NULL * else: * Py_INCREF(base) # important to do this before decref below! # <<<<<<<<<<<<<< * baseptr = base * Py_XDECREF(arr.base) */ Py_INCREF(__pyx_v_base); /* "numpy.pxd":971 * else: * Py_INCREF(base) # important to do this before decref below! * baseptr = base # <<<<<<<<<<<<<< * Py_XDECREF(arr.base) * arr.base = baseptr */ __pyx_v_baseptr = ((PyObject *)__pyx_v_base); } __pyx_L3:; /* "numpy.pxd":972 * Py_INCREF(base) # important to do this before decref below! * baseptr = base * Py_XDECREF(arr.base) # <<<<<<<<<<<<<< * arr.base = baseptr * */ Py_XDECREF(__pyx_v_arr->base); /* "numpy.pxd":973 * baseptr = base * Py_XDECREF(arr.base) * arr.base = baseptr # <<<<<<<<<<<<<< * * cdef inline object get_array_base(ndarray arr): */ __pyx_v_arr->base = __pyx_v_baseptr; __Pyx_RefNannyFinishContext(); } /* "numpy.pxd":975 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("get_array_base", 0); /* "numpy.pxd":976 * * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: # <<<<<<<<<<<<<< * return None * else: */ __pyx_t_1 = (__pyx_v_arr->base == NULL); if (__pyx_t_1) { /* "numpy.pxd":977 * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: * return None # <<<<<<<<<<<<<< * else: * return arr.base */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_None); __pyx_r = Py_None; goto __pyx_L0; goto __pyx_L3; } /*else*/ { /* "numpy.pxd":979 * return None * else: * return arr.base # <<<<<<<<<<<<<< */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_arr->base)); __pyx_r = ((PyObject *)__pyx_v_arr->base); goto __pyx_L0; } __pyx_L3:; __pyx_r = Py_None; __Pyx_INCREF(Py_None); __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyMethodDef __pyx_methods[] = { {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 static struct PyModuleDef __pyx_moduledef = { PyModuleDef_HEAD_INIT, __Pyx_NAMESTR("routines"), __Pyx_DOCSTR(__pyx_k_13), /* m_doc */ -1, /* m_size */ __pyx_methods /* m_methods */, NULL, /* m_reload */ NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_kp_u_1, __pyx_k_1, sizeof(__pyx_k_1), 0, 1, 0, 0}, {&__pyx_kp_u_11, __pyx_k_11, sizeof(__pyx_k_11), 0, 1, 0, 0}, {&__pyx_kp_s_14, __pyx_k_14, sizeof(__pyx_k_14), 0, 0, 1, 0}, 
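/* (These string-table entries intern the module's constant strings,
 * including error messages, imported names such as "numpy" and "range",
 * and the argument and local names of add_lines, as Python string
 * objects when the module is initialised.) */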
{&__pyx_kp_s_17, __pyx_k_17, sizeof(__pyx_k_17), 0, 0, 1, 0}, {&__pyx_n_s_18, __pyx_k_18, sizeof(__pyx_k_18), 0, 0, 1, 1}, {&__pyx_kp_u_3, __pyx_k_3, sizeof(__pyx_k_3), 0, 1, 0, 0}, {&__pyx_kp_u_5, __pyx_k_5, sizeof(__pyx_k_5), 0, 1, 0, 0}, {&__pyx_kp_u_7, __pyx_k_7, sizeof(__pyx_k_7), 0, 1, 0, 0}, {&__pyx_kp_u_8, __pyx_k_8, sizeof(__pyx_k_8), 0, 1, 0, 0}, {&__pyx_n_s__A, __pyx_k__A, sizeof(__pyx_k__A), 0, 0, 1, 1}, {&__pyx_n_s__B, __pyx_k__B, sizeof(__pyx_k__B), 0, 0, 1, 1}, {&__pyx_n_s__I, __pyx_k__I, sizeof(__pyx_k__I), 0, 0, 1, 1}, {&__pyx_n_s__RuntimeError, __pyx_k__RuntimeError, sizeof(__pyx_k__RuntimeError), 0, 0, 1, 1}, {&__pyx_n_s__ValueError, __pyx_k__ValueError, sizeof(__pyx_k__ValueError), 0, 0, 1, 1}, {&__pyx_n_s____main__, __pyx_k____main__, sizeof(__pyx_k____main__), 0, 0, 1, 1}, {&__pyx_n_s____test__, __pyx_k____test__, sizeof(__pyx_k____test__), 0, 0, 1, 1}, {&__pyx_n_s____version__, __pyx_k____version__, sizeof(__pyx_k____version__), 0, 0, 1, 1}, {&__pyx_n_s__add_lines, __pyx_k__add_lines, sizeof(__pyx_k__add_lines), 0, 0, 1, 1}, {&__pyx_n_s__i, __pyx_k__i, sizeof(__pyx_k__i), 0, 0, 1, 1}, {&__pyx_n_s__i_max, __pyx_k__i_max, sizeof(__pyx_k__i_max), 0, 0, 1, 1}, {&__pyx_n_s__index, __pyx_k__index, sizeof(__pyx_k__index), 0, 0, 1, 1}, {&__pyx_n_s__j, __pyx_k__j, sizeof(__pyx_k__j), 0, 0, 1, 1}, {&__pyx_n_s__j_max, __pyx_k__j_max, sizeof(__pyx_k__j_max), 0, 0, 1, 1}, {&__pyx_n_s__np, __pyx_k__np, sizeof(__pyx_k__np), 0, 0, 1, 1}, {&__pyx_n_s__numpy, __pyx_k__numpy, sizeof(__pyx_k__numpy), 0, 0, 1, 1}, {&__pyx_n_s__range, __pyx_k__range, sizeof(__pyx_k__range), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_range = __Pyx_GetName(__pyx_b, __pyx_n_s__range); if (!__pyx_builtin_range) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_ValueError = __Pyx_GetName(__pyx_b, __pyx_n_s__ValueError); if (!__pyx_builtin_ValueError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_RuntimeError = __Pyx_GetName(__pyx_b, __pyx_n_s__RuntimeError); if (!__pyx_builtin_RuntimeError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} return 0; __pyx_L1_error:; return -1; } static int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "numpy.pxd":215 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_k_tuple_2 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_2); __Pyx_INCREF(((PyObject *)__pyx_kp_u_1)); PyTuple_SET_ITEM(__pyx_k_tuple_2, 0, ((PyObject *)__pyx_kp_u_1)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_1)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_2)); /* "numpy.pxd":219 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_k_tuple_4 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
__Pyx_GOTREF(__pyx_k_tuple_4); __Pyx_INCREF(((PyObject *)__pyx_kp_u_3)); PyTuple_SET_ITEM(__pyx_k_tuple_4, 0, ((PyObject *)__pyx_kp_u_3)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_3)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_4)); /* "numpy.pxd":257 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_k_tuple_6 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_6); __Pyx_INCREF(((PyObject *)__pyx_kp_u_5)); PyTuple_SET_ITEM(__pyx_k_tuple_6, 0, ((PyObject *)__pyx_kp_u_5)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_5)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_6)); /* "numpy.pxd":799 * * if (end - f) - (new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_k_tuple_9 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_9)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_9); __Pyx_INCREF(((PyObject *)__pyx_kp_u_8)); PyTuple_SET_ITEM(__pyx_k_tuple_9, 0, ((PyObject *)__pyx_kp_u_8)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_8)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_9)); /* "numpy.pxd":803 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_k_tuple_10 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_10)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_10); __Pyx_INCREF(((PyObject *)__pyx_kp_u_5)); PyTuple_SET_ITEM(__pyx_k_tuple_10, 0, ((PyObject *)__pyx_kp_u_5)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_5)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_10)); /* "numpy.pxd":823 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_k_tuple_12 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_12)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_12); __Pyx_INCREF(((PyObject *)__pyx_kp_u_11)); PyTuple_SET_ITEM(__pyx_k_tuple_12, 0, ((PyObject *)__pyx_kp_u_11)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_11)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_12)); /* "nipy/labs/group/routines.pyx":16 * * @cython.boundscheck(False) * def add_lines(np.ndarray[np.float_t, ndim=2] A, # <<<<<<<<<<<<<< * np.ndarray[np.float_t, ndim=2] B, * np.ndarray[np.int_t, ndim=1] I): */ __pyx_k_tuple_15 = PyTuple_New(8); if (unlikely(!__pyx_k_tuple_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 16; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_15); __Pyx_INCREF(((PyObject *)__pyx_n_s__A)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 0, ((PyObject *)__pyx_n_s__A)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__A)); __Pyx_INCREF(((PyObject *)__pyx_n_s__B)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 1, ((PyObject *)__pyx_n_s__B)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__B)); 
__Pyx_INCREF(((PyObject *)__pyx_n_s__I)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 2, ((PyObject *)__pyx_n_s__I)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__I)); __Pyx_INCREF(((PyObject *)__pyx_n_s__i)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 3, ((PyObject *)__pyx_n_s__i)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__i)); __Pyx_INCREF(((PyObject *)__pyx_n_s__j)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 4, ((PyObject *)__pyx_n_s__j)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__j)); __Pyx_INCREF(((PyObject *)__pyx_n_s__index)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 5, ((PyObject *)__pyx_n_s__index)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__index)); __Pyx_INCREF(((PyObject *)__pyx_n_s__i_max)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 6, ((PyObject *)__pyx_n_s__i_max)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__i_max)); __Pyx_INCREF(((PyObject *)__pyx_n_s__j_max)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 7, ((PyObject *)__pyx_n_s__j_max)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__j_max)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_15)); __pyx_k_codeobj_16 = (PyObject*)__Pyx_PyCode_New(3, 0, 8, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_15, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_17, __pyx_n_s__add_lines, 16, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_16)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 16; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_InitGlobals(void) { if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __pyx_int_15 = PyInt_FromLong(15); if (unlikely(!__pyx_int_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; return 0; __pyx_L1_error:; return -1; } #if PY_MAJOR_VERSION < 3 PyMODINIT_FUNC initroutines(void); /*proto*/ PyMODINIT_FUNC initroutines(void) #else PyMODINIT_FUNC PyInit_routines(void); /*proto*/ PyMODINIT_FUNC PyInit_routines(void) #endif { PyObject *__pyx_t_1 = NULL; __Pyx_RefNannyDeclarations #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit_routines(void)", 0); if ( __Pyx_check_binary_version() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #ifdef __Pyx_CyFunction_USED if (__Pyx_CyFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && 
__PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? */ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4(__Pyx_NAMESTR("routines"), __pyx_methods, __Pyx_DOCSTR(__pyx_k_13), 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (!PyDict_GetItemString(modules, "nipy.labs.group.routines")) { if (unlikely(PyDict_SetItemString(modules, "nipy.labs.group.routines", __pyx_m) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } } #endif __pyx_b = PyImport_AddModule(__Pyx_NAMESTR(__Pyx_BUILTIN_MODULE_NAME)); if (unlikely(!__pyx_b)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #if CYTHON_COMPILING_IN_PYPY Py_INCREF(__pyx_b); #endif if (__Pyx_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; /*--- Initialize various global constants etc. ---*/ if (unlikely(__Pyx_InitGlobals() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (__pyx_module_is_main_nipy__labs__group__routines) { if (__Pyx_SetAttrString(__pyx_m, "__name__", __pyx_n_s____main__) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; } /*--- Builtin init code ---*/ if (unlikely(__Pyx_InitCachedBuiltins() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Constants init code ---*/ if (unlikely(__Pyx_InitCachedConstants() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Global init code ---*/ /*--- Variable export code ---*/ /*--- Function export code ---*/ /*--- Type init code ---*/ /*--- Type import code ---*/ __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "type", #if CYTHON_COMPILING_IN_PYPY sizeof(PyTypeObject), #else sizeof(PyHeapTypeObject), #endif 0); if (unlikely(!__pyx_ptype_7cpython_4type_type)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_5numpy_dtype)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 155; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 169; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 178; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
__pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 861; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Variable import code ---*/ /*--- Function import code ---*/ /*--- Execution code ---*/ /* "nipy/labs/group/routines.pyx":8 * """ * * __version__ = '0.1' # <<<<<<<<<<<<<< * * import numpy as np */ if (PyObject_SetAttr(__pyx_m, __pyx_n_s____version__, ((PyObject *)__pyx_kp_s_14)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 8; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/group/routines.pyx":10 * __version__ = '0.1' * * import numpy as np # <<<<<<<<<<<<<< * cimport numpy as np * */ __pyx_t_1 = __Pyx_Import(((PyObject *)__pyx_n_s__numpy), 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 10; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__np, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 10; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/group/routines.pyx":16 * * @cython.boundscheck(False) * def add_lines(np.ndarray[np.float_t, ndim=2] A, # <<<<<<<<<<<<<< * np.ndarray[np.float_t, ndim=2] B, * np.ndarray[np.int_t, ndim=1] I): */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_5group_8routines_1add_lines, NULL, __pyx_n_s_18); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 16; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__add_lines, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 16; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/group/routines.pyx":1 * # -*- Mode: Python -*- Not really, but the syntax is close enough # <<<<<<<<<<<<<< * """ * Basic ndarray routines for faster computations. 
*/ __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_1)); if (PyObject_SetAttr(__pyx_m, __pyx_n_s____test__, ((PyObject *)__pyx_t_1)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; /* "numpy.pxd":975 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); if (__pyx_m) { __Pyx_AddTraceback("init nipy.labs.group.routines", __pyx_clineno, __pyx_lineno, __pyx_filename); Py_DECREF(__pyx_m); __pyx_m = 0; } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init nipy.labs.group.routines"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if PY_MAJOR_VERSION < 3 return; #else return __pyx_m; #endif } /* Runtime support code */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule((char *)modname); if (!m) goto end; p = PyObject_GetAttrString(m, (char *)"RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* CYTHON_REFNANNY */ static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name) { PyObject *result; result = PyObject_GetAttr(dict, name); if (!result) { if (dict != __pyx_b) { PyErr_Clear(); result = PyObject_GetAttr(__pyx_b, name); } if (!result) { PyErr_SetObject(PyExc_NameError, name); } } return result; } static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%s() takes %s %" CYTHON_FORMAT_SSIZE_T "d positional argument%s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? 
"" : "s", num_found); } static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 
1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%s() got an unexpected keyword argument '%s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } static int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, const char *name, int exact) { if (!type) { PyErr_Format(PyExc_SystemError, "Missing type object"); return 0; } if (none_allowed && obj == Py_None) return 1; else if (exact) { if (Py_TYPE(obj) == type) return 1; } else { if (PyObject_TypeCheck(obj, type)) return 1; } PyErr_Format(PyExc_TypeError, "Argument '%s' has incorrect type (expected %s, got %s)", name, type->tp_name, Py_TYPE(obj)->tp_name); return 0; } static CYTHON_INLINE int __Pyx_IsLittleEndian(void) { unsigned int n = 1; return *(unsigned char*)(&n) != 0; } static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type) { stack[0].field = &ctx->root; stack[0].parent_offset = 0; ctx->root.type = type; ctx->root.name = "buffer dtype"; ctx->root.offset = 0; ctx->head = stack; ctx->head->field = &ctx->root; ctx->fmt_offset = 0; ctx->head->parent_offset = 0; ctx->new_packmode = '@'; ctx->enc_packmode = '@'; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->is_complex = 0; ctx->is_valid_array = 0; ctx->struct_alignment = 0; while (type->typegroup == 'S') { ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = 0; type = type->fields->type; } } static int __Pyx_BufFmt_ParseNumber(const char** ts) { int count; const char* t = *ts; if (*t < '0' || *t > '9') { return -1; } else { count = *t++ - '0'; while (*t >= '0' && *t < '9') { count *= 10; count += *t++ - '0'; } } *ts = t; return count; } static int __Pyx_BufFmt_ExpectNumber(const char **ts) { int number = __Pyx_BufFmt_ParseNumber(ts); if (number == -1) /* First char was not a digit */ PyErr_Format(PyExc_ValueError,\ "Does not understand character buffer dtype format string ('%c')", **ts); return number; } static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { PyErr_Format(PyExc_ValueError, "Unexpected format string character: '%c'", ch); } static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { switch (ch) { case 'c': return "'char'"; case 'b': return "'signed char'"; case 'B': return "'unsigned char'"; case 'h': return "'short'"; case 'H': return "'unsigned short'"; case 'i': return "'int'"; case 'I': return "'unsigned int'"; case 'l': return "'long'"; case 'L': return "'unsigned long'"; case 'q': return "'long long'"; case 'Q': return "'unsigned long long'"; case 'f': return (is_complex ? "'complex float'" : "'float'"); case 'd': return (is_complex ? "'complex double'" : "'double'"); case 'g': return (is_complex ? 
"'complex long double'" : "'long double'"); case 'T': return "a struct"; case 'O': return "Python object"; case 'P': return "a pointer"; case 's': case 'p': return "a string"; case 0: return "end"; default: return "unparseable format string"; } } static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return 2; case 'i': case 'I': case 'l': case 'L': return 4; case 'q': case 'Q': return 8; case 'f': return (is_complex ? 8 : 4); case 'd': return (is_complex ? 16 : 8); case 'g': { PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); return 0; } case 'O': case 'P': return sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { switch (ch) { case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(short); case 'i': case 'I': return sizeof(int); case 'l': case 'L': return sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(float) * (is_complex ? 2 : 1); case 'd': return sizeof(double) * (is_complex ? 2 : 1); case 'g': return sizeof(long double) * (is_complex ? 2 : 1); case 'O': case 'P': return sizeof(void*); default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } typedef struct { char c; short x; } __Pyx_st_short; typedef struct { char c; int x; } __Pyx_st_int; typedef struct { char c; long x; } __Pyx_st_long; typedef struct { char c; float x; } __Pyx_st_float; typedef struct { char c; double x; } __Pyx_st_double; typedef struct { char c; long double x; } __Pyx_st_longdouble; typedef struct { char c; void *x; } __Pyx_st_void_p; #ifdef HAVE_LONG_LONG typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_st_float) - sizeof(float); case 'd': return sizeof(__Pyx_st_double) - sizeof(double); case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } /* These are for computing the padding at the end of the struct to align on the first member of the struct. This will probably the same as above, but we don't have any guarantees. 
*/ typedef struct { short x; char c; } __Pyx_pad_short; typedef struct { int x; char c; } __Pyx_pad_int; typedef struct { long x; char c; } __Pyx_pad_long; typedef struct { float x; char c; } __Pyx_pad_float; typedef struct { double x; char c; } __Pyx_pad_double; typedef struct { long double x; char c; } __Pyx_pad_longdouble; typedef struct { void *x; char c; } __Pyx_pad_void_p; #ifdef HAVE_LONG_LONG typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { switch (ch) { case 'c': return 'H'; case 'b': case 'h': case 'i': case 'l': case 'q': case 's': case 'p': return 'I'; case 'B': case 'H': case 'I': case 'L': case 'Q': return 'U'; case 'f': case 'd': case 'g': return (is_complex ? 'C' : 'R'); case 'O': return 'O'; case 'P': return 'P'; default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { if (ctx->head == NULL || ctx->head->field == &ctx->root) { const char* expected; const char* quote; if (ctx->head == NULL) { expected = "end"; quote = ""; } else { expected = ctx->head->field->type->name; quote = "'"; } PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected %s%s%s but got %s", quote, expected, quote, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); } else { __Pyx_StructField* field = ctx->head->field; __Pyx_StructField* parent = (ctx->head - 1)->field; PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), parent->type->name, field->name); } } static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { char group; size_t size, offset, arraysize = 1; if (ctx->enc_type == 0) return 0; if (ctx->head->field->type->arraysize[0]) { int i, ndim = 0; if (ctx->enc_type == 's' || ctx->enc_type == 'p') { ctx->is_valid_array = ctx->head->field->type->ndim == 1; ndim = 1; if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %zu", ctx->head->field->type->arraysize[0], ctx->enc_count); return -1; } } if (!ctx->is_valid_array) { PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", ctx->head->field->type->ndim, ndim); return -1; } for (i = 0; i < ctx->head->field->type->ndim; i++) { arraysize *= ctx->head->field->type->arraysize[i]; } ctx->is_valid_array = 0; ctx->enc_count = 1; } group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); do { __Pyx_StructField* field = ctx->head->field; __Pyx_TypeInfo* type = field->type; if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { size = 
__Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); } else { size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); } if (ctx->enc_packmode == '@') { size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); size_t align_mod_offset; if (align_at == 0) return -1; align_mod_offset = ctx->fmt_offset % align_at; if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; if (ctx->struct_alignment == 0) ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, ctx->is_complex); } if (type->size != size || type->typegroup != group) { if (type->typegroup == 'C' && type->fields != NULL) { size_t parent_offset = ctx->head->parent_offset + field->offset; ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = parent_offset; continue; } if ((type->typegroup == 'H' || group == 'H') && type->size == size) { } else { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } } offset = ctx->head->parent_offset + field->offset; if (ctx->fmt_offset != offset) { PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); return -1; } ctx->fmt_offset += size; if (arraysize) ctx->fmt_offset += (arraysize - 1) * size; --ctx->enc_count; /* Consume from buffer string */ while (1) { if (field == &ctx->root) { ctx->head = NULL; if (ctx->enc_count != 0) { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } break; /* breaks both loops as ctx->enc_count == 0 */ } ctx->head->field = ++field; if (field->type == NULL) { --ctx->head; field = ctx->head->field; continue; } else if (field->type->typegroup == 'S') { size_t parent_offset = ctx->head->parent_offset + field->offset; if (field->type->fields->type == NULL) continue; /* empty struct */ field = field->type->fields; ++ctx->head; ctx->head->field = field; ctx->head->parent_offset = parent_offset; break; } else { break; } } } while (ctx->enc_count); ctx->enc_type = 0; ctx->is_complex = 0; return 0; } static CYTHON_INLINE PyObject * __pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) { const char *ts = *tsp; int i = 0, number; int ndim = ctx->head->field->type->ndim; ; ++ts; if (ctx->new_count != 1) { PyErr_SetString(PyExc_ValueError, "Cannot handle repeated arrays in format string"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; while (*ts && *ts != ')') { if (isspace(*ts)) continue; number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i]) return PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %d", ctx->head->field->type->arraysize[i], number); if (*ts != ',' && *ts != ')') return PyErr_Format(PyExc_ValueError, "Expected a comma in format string, got '%c'", *ts); if (*ts == ',') ts++; i++; } if (i != ndim) return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d", ctx->head->field->type->ndim, i); if (!*ts) { PyErr_SetString(PyExc_ValueError, "Unexpected end of format string, expected ')'"); return NULL; } ctx->is_valid_array = 1; ctx->new_count = 1; *tsp = ++ts; return Py_None; } static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { int got_Z = 0; while (1) { switch(*ts) { case 0: if (ctx->enc_type != 0 && ctx->head == NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; if 
(ctx->head != NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } return ts; case ' ': case 10: case 13: ++ts; break; case '<': if (!__Pyx_IsLittleEndian()) { PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '>': case '!': if (__Pyx_IsLittleEndian()) { PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '=': case '@': case '^': ctx->new_packmode = *ts++; break; case 'T': /* substruct */ { const char* ts_after_sub; size_t i, struct_count = ctx->new_count; size_t struct_alignment = ctx->struct_alignment; ctx->new_count = 1; ++ts; if (*ts != '{') { PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; /* Erase processed last struct element */ ctx->enc_count = 0; ctx->struct_alignment = 0; ++ts; ts_after_sub = ts; for (i = 0; i != struct_count; ++i) { ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); if (!ts_after_sub) return NULL; } ts = ts_after_sub; if (struct_alignment) ctx->struct_alignment = struct_alignment; } break; case '}': /* end of substruct; either repeat or move on */ { size_t alignment = ctx->struct_alignment; ++ts; if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; /* Erase processed last struct element */ if (alignment && ctx->fmt_offset % alignment) { ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); } } return ts; case 'x': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->fmt_offset += ctx->new_count; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->enc_packmode = ctx->new_packmode; ++ts; break; case 'Z': got_Z = 1; ++ts; if (*ts != 'f' && *ts != 'd' && *ts != 'g') { __Pyx_BufFmt_RaiseUnexpectedChar('Z'); return NULL; } /* fall through */ case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': case 'l': case 'L': case 'q': case 'Q': case 'f': case 'd': case 'g': case 'O': case 's': case 'p': if (ctx->enc_type == *ts && got_Z == ctx->is_complex && ctx->enc_packmode == ctx->new_packmode) { ctx->enc_count += ctx->new_count; } else { if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_count = ctx->new_count; ctx->enc_packmode = ctx->new_packmode; ctx->enc_type = *ts; ctx->is_complex = got_Z; } ++ts; ctx->new_count = 1; got_Z = 0; break; case ':': ++ts; while(*ts != ':') ++ts; ++ts; break; case '(': if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; break; default: { int number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; ctx->new_count = (size_t)number; } } } } static CYTHON_INLINE void __Pyx_ZeroBuffer(Py_buffer* buf) { buf->buf = NULL; buf->obj = NULL; buf->strides = __Pyx_zeros; buf->shape = __Pyx_zeros; buf->suboffsets = __Pyx_minusones; } static CYTHON_INLINE int __Pyx_GetBufferAndValidate( Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack) { if (obj == Py_None || obj == NULL) { __Pyx_ZeroBuffer(buf); return 0; } buf->buf = NULL; if (__Pyx_GetBuffer(obj, buf, flags) == -1) goto fail; if (buf->ndim != nd) { PyErr_Format(PyExc_ValueError, "Buffer has wrong number of dimensions (expected %d, got %d)", nd, buf->ndim); goto fail; } if (!cast) { __Pyx_BufFmt_Context ctx; __Pyx_BufFmt_Init(&ctx, stack, dtype); if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; } if 
((unsigned)buf->itemsize != dtype->size) { PyErr_Format(PyExc_ValueError, "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "d byte%s) does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "d byte%s)", buf->itemsize, (buf->itemsize > 1) ? "s" : "", dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? "s" : ""); goto fail; } if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones; return 0; fail:; __Pyx_ZeroBuffer(buf); return -1; } static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) { if (info->buf == NULL) return; if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL; __Pyx_ReleaseBuffer(info); } static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb) { #if CYTHON_COMPILING_IN_CPYTHON PyObject *tmp_type, *tmp_value, *tmp_tb; PyThreadState *tstate = PyThreadState_GET(); tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_Restore(type, value, tb); #endif } static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb) { #if CYTHON_COMPILING_IN_CPYTHON PyThreadState *tstate = PyThreadState_GET(); *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(type, value, tb); #endif } #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } #if PY_VERSION_HEX < 0x02050000 if (PyClass_Check(type)) { #else if (PyType_Check(type)) { #endif #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; #if PY_VERSION_HEX < 0x02050000 if (PyInstance_Check(type)) { type = (PyObject*) ((PyInstanceObject*)type)->in_class; Py_INCREF(type); } else { type = 0; PyErr_SetString(PyExc_TypeError, "raise: exception must be an old-style class or instance"); goto raise_error; } #else type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } #endif } __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else /* Python 3+ */ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { 
PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyEval_CallObject(type, args); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } if (cause && cause != Py_None) { PyObject *fixed_cause; if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { PyThreadState *tstate = PyThreadState_GET(); PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } } bad: Py_XDECREF(owned_instance); return; } #endif static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); } static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more than %" CYTHON_FORMAT_SSIZE_T "d value%s to unpack", index, (index == 1) ? "" : "s"); } static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); } static CYTHON_INLINE int __Pyx_IterFinish(void) { #if CYTHON_COMPILING_IN_CPYTHON PyThreadState *tstate = PyThreadState_GET(); PyObject* exc_type = tstate->curexc_type; if (unlikely(exc_type)) { if (likely(exc_type == PyExc_StopIteration) || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration)) { PyObject *exc_value, *exc_tb; exc_value = tstate->curexc_value; exc_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; Py_DECREF(exc_type); Py_XDECREF(exc_value); Py_XDECREF(exc_tb); return 0; } else { return -1; } } return 0; #else if (unlikely(PyErr_Occurred())) { if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) { PyErr_Clear(); return 0; } else { return -1; } } return 0; #endif } static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected) { if (unlikely(retval)) { Py_DECREF(retval); __Pyx_RaiseTooManyValuesError(expected); return -1; } else { return __Pyx_IterFinish(); } return 0; } static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { if (unlikely(!type)) { PyErr_Format(PyExc_SystemError, "Missing type object"); return 0; } if (likely(PyObject_TypeCheck(obj, type))) return 1; PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", Py_TYPE(obj)->tp_name, type->tp_name); return 0; } #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { CYTHON_UNUSED PyObject *getbuffer_cobj; #if PY_VERSION_HEX >= 0x02060000 if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); #endif if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) return __pyx_pw_5numpy_7ndarray_1__getbuffer__(obj, view, flags); #if PY_VERSION_HEX < 0x02060000 if 
(obj->ob_type->tp_dict && (getbuffer_cobj = PyMapping_GetItemString(obj->ob_type->tp_dict, "__pyx_getbuffer"))) { getbufferproc func; #if PY_VERSION_HEX >= 0x02070000 && !(PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION == 0) func = (getbufferproc) PyCapsule_GetPointer(getbuffer_cobj, "getbuffer(obj, view, flags)"); #else func = (getbufferproc) PyCObject_AsVoidPtr(getbuffer_cobj); #endif Py_DECREF(getbuffer_cobj); if (!func) goto fail; return func(obj, view, flags); } else { PyErr_Clear(); } #endif PyErr_Format(PyExc_TypeError, "'%100s' does not have the buffer interface", Py_TYPE(obj)->tp_name); #if PY_VERSION_HEX < 0x02060000 fail: #endif return -1; } static void __Pyx_ReleaseBuffer(Py_buffer *view) { PyObject *obj = view->obj; CYTHON_UNUSED PyObject *releasebuffer_cobj; if (!obj) return; #if PY_VERSION_HEX >= 0x02060000 if (PyObject_CheckBuffer(obj)) { PyBuffer_Release(view); return; } #endif if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) { __pyx_pw_5numpy_7ndarray_3__releasebuffer__(obj, view); return; } #if PY_VERSION_HEX < 0x02060000 if (obj->ob_type->tp_dict && (releasebuffer_cobj = PyMapping_GetItemString(obj->ob_type->tp_dict, "__pyx_releasebuffer"))) { releasebufferproc func; #if PY_VERSION_HEX >= 0x02070000 && !(PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION == 0) func = (releasebufferproc) PyCapsule_GetPointer(releasebuffer_cobj, "releasebuffer(obj, view)"); #else func = (releasebufferproc) PyCObject_AsVoidPtr(releasebuffer_cobj); #endif Py_DECREF(releasebuffer_cobj); if (!func) goto fail; func(obj, view); return; } else { PyErr_Clear(); } #endif goto nofail; #if PY_VERSION_HEX < 0x02060000 fail: #endif PyErr_WriteUnraisable(obj); nofail: Py_DECREF(obj); view->obj = NULL; } #endif /* PY_MAJOR_VERSION < 3 */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level) { PyObject *py_import = 0; PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; py_import = __Pyx_GetAttrString(__pyx_b, "__import__"); if (!py_import) goto bad; if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; #if PY_VERSION_HEX >= 0x02050000 { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if (strchr(__Pyx_MODULE_NAME, '.')) { /* try package relative import first */ PyObject *py_level = PyInt_FromLong(1); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; /* try absolute import on failure */ } #endif if (!module) { PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); } } #else if (level>0) { PyErr_SetString(PyExc_RuntimeError, "Relative import is not supported for Python <=2.4."); goto bad; } module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, NULL); #endif bad: Py_XDECREF(empty_list); Py_XDECREF(py_import); Py_XDECREF(empty_dict); return module; } #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return ::std::complex< float >(x, y); } #else static CYTHON_INLINE __pyx_t_float_complex 
__pyx_t_float_complex_from_parts(float x, float y) { return x + y*(__pyx_t_float_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { __pyx_t_float_complex z; z.real = x; z.imag = y; return z; } #endif #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex a, __pyx_t_float_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float denom = b.real * b.real + b.imag * b.imag; z.real = (a.real * b.real + a.imag * b.imag) / denom; z.imag = (a.imag * b.real - a.real * b.imag) / denom; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrtf(z.real*z.real + z.imag*z.imag); #else return hypotf(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { float denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(a, a); case 3: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(z, a); case 4: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } r = a.real; theta = 0; } else { r = __Pyx_c_absf(a); theta = atan2f(a.imag, a.real); } lnr = logf(r); z_r = expf(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cosf(z_theta); z.imag = z_r * sinf(z_theta); return z; } #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return ::std::complex< double >(x, y); } #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return x + y*(__pyx_t_double_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { __pyx_t_double_complex z; z.real = x; z.imag = y; return z; } #endif #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int 
__Pyx_c_eq(__pyx_t_double_complex a, __pyx_t_double_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double denom = b.real * b.real + b.imag * b.imag; z.real = (a.real * b.real + a.imag * b.imag) / denom; z.imag = (a.imag * b.real - a.real * b.imag) / denom; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrt(z.real*z.real + z.imag*z.imag); #else return hypot(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { double denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(a, a); case 3: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(z, a); case 4: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } r = a.real; theta = 0; } else { r = __Pyx_c_abs(a); theta = atan2(a.imag, a.real); } lnr = log(r); z_r = exp(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cos(z_theta); z.imag = z_r * sin(z_theta); return z; } #endif #endif static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject* x) { const unsigned char neg_one = (unsigned char)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(unsigned char) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(unsigned char)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? 
"can't convert negative value to unsigned char" : "value too large to convert to unsigned char"); } return (unsigned char)-1; } return (unsigned char)val; } return (unsigned char)__Pyx_PyInt_AsUnsignedLong(x); } static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject* x) { const unsigned short neg_one = (unsigned short)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(unsigned short) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(unsigned short)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to unsigned short" : "value too large to convert to unsigned short"); } return (unsigned short)-1; } return (unsigned short)val; } return (unsigned short)__Pyx_PyInt_AsUnsignedLong(x); } static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject* x) { const unsigned int neg_one = (unsigned int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(unsigned int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(unsigned int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to unsigned int" : "value too large to convert to unsigned int"); } return (unsigned int)-1; } return (unsigned int)val; } return (unsigned int)__Pyx_PyInt_AsUnsignedLong(x); } static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject* x) { const char neg_one = (char)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(char) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(char)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to char" : "value too large to convert to char"); } return (char)-1; } return (char)val; } return (char)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject* x) { const short neg_one = (short)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(short) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(short)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to short" : "value too large to convert to short"); } return (short)-1; } return (short)val; } return (short)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject* x) { const int neg_one = (int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? 
"can't convert negative value to int" : "value too large to convert to int"); } return (int)-1; } return (int)val; } return (int)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject* x) { const signed char neg_one = (signed char)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(signed char) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(signed char)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to signed char" : "value too large to convert to signed char"); } return (signed char)-1; } return (signed char)val; } return (signed char)__Pyx_PyInt_AsSignedLong(x); } static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject* x) { const signed short neg_one = (signed short)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(signed short) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(signed short)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to signed short" : "value too large to convert to signed short"); } return (signed short)-1; } return (signed short)val; } return (signed short)__Pyx_PyInt_AsSignedLong(x); } static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject* x) { const signed int neg_one = (signed int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(signed int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(signed int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to signed int" : "value too large to convert to signed int"); } return (signed int)-1; } return (signed int)val; } return (signed int)__Pyx_PyInt_AsSignedLong(x); } static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject* x) { const int neg_one = (int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? 
"can't convert negative value to int" : "value too large to convert to int"); } return (int)-1; } return (int)val; } return (int)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject* x) { const unsigned long neg_one = (unsigned long)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned long"); return (unsigned long)-1; } return (unsigned long)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned long"); return (unsigned long)-1; } return (unsigned long)PyLong_AsUnsignedLong(x); } else { return (unsigned long)PyLong_AsLong(x); } } else { unsigned long val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (unsigned long)-1; val = __Pyx_PyInt_AsUnsignedLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject* x) { const unsigned PY_LONG_LONG neg_one = (unsigned PY_LONG_LONG)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned PY_LONG_LONG"); return (unsigned PY_LONG_LONG)-1; } return (unsigned PY_LONG_LONG)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned PY_LONG_LONG"); return (unsigned PY_LONG_LONG)-1; } return (unsigned PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); } else { return (unsigned PY_LONG_LONG)PyLong_AsLongLong(x); } } else { unsigned PY_LONG_LONG val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (unsigned PY_LONG_LONG)-1; val = __Pyx_PyInt_AsUnsignedLongLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject* x) { const long neg_one = (long)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long)-1; } return (long)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long)-1; } return (long)PyLong_AsUnsignedLong(x); } else { return (long)PyLong_AsLong(x); } } else { long val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (long)-1; val = __Pyx_PyInt_AsLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject* x) { const PY_LONG_LONG neg_one = (PY_LONG_LONG)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to PY_LONG_LONG"); return (PY_LONG_LONG)-1; } return (PY_LONG_LONG)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { 
PyErr_SetString(PyExc_OverflowError, "can't convert negative value to PY_LONG_LONG"); return (PY_LONG_LONG)-1; } return (PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); } else { return (PY_LONG_LONG)PyLong_AsLongLong(x); } } else { PY_LONG_LONG val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (PY_LONG_LONG)-1; val = __Pyx_PyInt_AsLongLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject* x) { const signed long neg_one = (signed long)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed long"); return (signed long)-1; } return (signed long)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed long"); return (signed long)-1; } return (signed long)PyLong_AsUnsignedLong(x); } else { return (signed long)PyLong_AsLong(x); } } else { signed long val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (signed long)-1; val = __Pyx_PyInt_AsSignedLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject* x) { const signed PY_LONG_LONG neg_one = (signed PY_LONG_LONG)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed PY_LONG_LONG"); return (signed PY_LONG_LONG)-1; } return (signed PY_LONG_LONG)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed PY_LONG_LONG"); return (signed PY_LONG_LONG)-1; } return (signed PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); } else { return (signed PY_LONG_LONG)PyLong_AsLongLong(x); } } else { signed PY_LONG_LONG val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (signed PY_LONG_LONG)-1; val = __Pyx_PyInt_AsSignedLongLong(tmp); Py_DECREF(tmp); return val; } } static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); #if PY_VERSION_HEX < 0x02050000 return PyErr_Warn(NULL, message); #else return PyErr_WarnEx(NULL, message, 1); #endif } return 0; } #ifndef __PYX_HAVE_RT_ImportModule #define __PYX_HAVE_RT_ImportModule static PyObject *__Pyx_ImportModule(const char *name) { PyObject *py_name = 0; PyObject *py_module = 0; py_name = __Pyx_PyIdentifier_FromString(name); if (!py_name) goto bad; py_module = PyImport_Import(py_name); Py_DECREF(py_name); return py_module; bad: Py_XDECREF(py_name); return 0; } #endif #ifndef __PYX_HAVE_RT_ImportType #define __PYX_HAVE_RT_ImportType static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict) { PyObject *py_module = 0; PyObject *result = 0; PyObject *py_name = 0; char warning[200]; 
py_module = __Pyx_ImportModule(module_name); if (!py_module) goto bad; py_name = __Pyx_PyIdentifier_FromString(class_name); if (!py_name) goto bad; result = PyObject_GetAttr(py_module, py_name); Py_DECREF(py_name); py_name = 0; Py_DECREF(py_module); py_module = 0; if (!result) goto bad; if (!PyType_Check(result)) { PyErr_Format(PyExc_TypeError, "%s.%s is not a type object", module_name, class_name); goto bad; } if (!strict && (size_t)((PyTypeObject *)result)->tp_basicsize > size) { PyOS_snprintf(warning, sizeof(warning), "%s.%s size changed, may indicate binary incompatibility", module_name, class_name); #if PY_VERSION_HEX < 0x02050000 if (PyErr_Warn(NULL, warning) < 0) goto bad; #else if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; #endif } else if ((size_t)((PyTypeObject *)result)->tp_basicsize != size) { PyErr_Format(PyExc_ValueError, "%s.%s has the wrong size, try recompiling", module_name, class_name); goto bad; } return (PyTypeObject *)result; bad: Py_XDECREF(py_module); Py_XDECREF(result); return NULL; } #endif static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = (start + end) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, new_max*sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( 
const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, /*int argcount,*/ 0, /*int kwonlyargcount,*/ 0, /*int nlocals,*/ 0, /*int stacksize,*/ 0, /*int flags,*/ __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, /*int firstlineno,*/ __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_globals = 0; PyFrameObject *py_frame = 0; py_code = __pyx_find_code_object(c_line ? c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? c_line : py_line, py_code); } py_globals = PyModule_GetDict(__pyx_m); if (!py_globals) goto bad; py_frame = PyFrame_New( PyThreadState_GET(), /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ py_globals, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; py_frame->f_lineno = py_line; PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else /* Python 3+ has unicode identifiers */ if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; ++t; } return 0; } /* Type Conversion Functions */ static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x) { PyNumberMethods *m; const char *name = NULL; PyObject *res = NULL; #if PY_VERSION_HEX < 0x03000000 if (PyInt_Check(x) || PyLong_Check(x)) #else if (PyLong_Check(x)) #endif return Py_INCREF(x), x; m = Py_TYPE(x)->tp_as_number; #if PY_VERSION_HEX < 0x03000000 if (m && m->nb_int) { name = "int"; res = PyNumber_Int(x); } else if (m && m->nb_long) { name = "long"; res = PyNumber_Long(x); } #else if (m && m->nb_int) { name = 
"int"; res = PyNumber_Long(x); } #endif if (res) { #if PY_VERSION_HEX < 0x03000000 if (!PyInt_Check(res) && !PyLong_Check(res)) { #else if (!PyLong_Check(res)) { #endif PyErr_Format(PyExc_TypeError, "__%s__ returned non-%s (type %.200s)", name, name, Py_TYPE(res)->tp_name); Py_DECREF(res); return NULL; } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject* x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { #if PY_VERSION_HEX < 0x02050000 if (ival <= LONG_MAX) return PyInt_FromLong((long)ival); else { unsigned char *bytes = (unsigned char *) &ival; int one = 1; int little = (int)*(unsigned char*)&one; return _PyLong_FromByteArray(bytes, sizeof(size_t), little, 0); } #else return PyInt_FromSize_t(ival); #endif } static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject* x) { unsigned PY_LONG_LONG val = __Pyx_PyInt_AsUnsignedLongLong(x); if (unlikely(val == (unsigned PY_LONG_LONG)-1 && PyErr_Occurred())) { return (size_t)-1; } else if (unlikely(val != (unsigned PY_LONG_LONG)(size_t)val)) { PyErr_SetString(PyExc_OverflowError, "value too large to convert to size_t"); return (size_t)-1; } return (size_t)val; } #endif /* Py_PYTHON_H */ nipy-0.3.0/nipy/labs/group/routines.pyx000066400000000000000000000015651210344137400201450ustar00rootroot00000000000000# -*- Mode: Python -*- Not really, but the syntax is close enough """ Basic ndarray routines for faster computations. Author: Alexis Roche, 2008. """ __version__ = '0.1' import numpy as np cimport numpy as np import cython @cython.boundscheck(False) def add_lines(np.ndarray[np.float_t, ndim=2] A, np.ndarray[np.float_t, ndim=2] B, np.ndarray[np.int_t, ndim=1] I): """ add(A, B, I) Add each line of A to a line of B indexed by I, where A and B are two-dimensional arrays and I is a one-dimensional array of indices. This is equivalent to: for i in xrange(len(I)): B[I[i]] += A[i] """ cdef int i, j, index cdef int i_max = I.shape[0] cdef int j_max = B.shape[1] for i in range(i_max): index = I[i] for j in range(j_max): B[index, j] = B[index, j] + A[i, j] nipy-0.3.0/nipy/labs/group/setup.py000066400000000000000000000027251210344137400172440ustar00rootroot00000000000000 def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration # We need this because libcstat.a is linked to lapack, which can # be a fortran library, and the linker needs this information. from numpy.distutils.system_info import get_info # First, try 'lapack_info', as that seems to provide more details on Linux # (both 32 and 64 bits): lapack_info = get_info('lapack_opt', 0) if 'libraries' not in lapack_info: # But on OSX that may not give us what we need, so try with 'lapack' # instead. NOTE: scipy.linalg uses lapack_opt, not 'lapack'... 
lapack_info = get_info('lapack',0) config = Configuration('group', parent_package, top_path) config.add_subpackage('tests') config.add_extension( 'onesample', sources=['onesample.pyx'], libraries=['cstat'], extra_info=lapack_info, ) config.add_extension( 'twosample', sources=['twosample.pyx'], libraries=['cstat'], extra_info=lapack_info, ) config.add_extension( 'routines', sources=['routines.pyx'], libraries=['cstat'], extra_info=lapack_info, ) config.add_extension( 'glm_twolevel', sources=['glm_twolevel.pyx'], libraries=['cstat'], extra_info=lapack_info, ) return config if __name__ == '__main__': print('This is the wrong setup.py file to run') nipy-0.3.0/nipy/labs/group/spatial_relaxation_onesample.py000066400000000000000000001353231210344137400240330ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: ##################################################################################### # BAYESIAN MODEL SELECTION FOR ACTIVATION DETECTION ON FMRI GROUP DATA # Merlin Keller, 2009 import numpy as np import scipy.special as sp from .routines import add_lines from .displacement_field import displacement_field ##################################################################################### # some useful functions def log_gammainv_pdf(x, a, b): """ log density of the inverse gamma distribution with shape a and scale b, at point x, using Stirling's approximation for a > 100 """ return a * np.log(b) - sp.gammaln(a) - (a + 1) * np.log(x) - b / x def log_gaussian_pdf(x, m, v): """ log density of the gaussian distribution with mean m and variance v at point x """ return -0.5 * (np.log(2 * np.pi * v) + (x - m)**2 / v) ##################################################################################### # spatial relaxation multivariate statistic class class multivariate_stat(object): def __init__(self, data, vardata=None, XYZ=None, std=None, sigma=None, labels=None, network=None, v_shape=3, v_scale=20, std_shape=3, std_scale=20, m_mean_rate=1e-3, m_var_shape=3, m_var_scale=20, disp_mask=None, labels_prior=None, label_values=None, labels_prior_mask=None): """ Multivariate modeling of fMRI group data accounting for spatial uncertainty In: data (n,p) estimated effects vardata (n,p) variances of estimated effects XYZ (3,p) voxel coordinates std Initial guess for standard deviate of spatial displacements sigma regularity of displacement field labels (p,) labels defining regions of interest network (N,) binary region labels (1 for active, 0 for inactive) v_shape intensity variance prior shape v_scale intensity variance prior scale std_shape spatial standard error prior shape std_scale spatial standard error prior scale m_mean_rate mean effect prior rate m_var_shape effect variance prior shape m_var_scale effect variance prior scale disp_mask (q,) mask of the brain, to limit displacements labels_prior (M,r) prior on voxelwise region membership labels_prior_values (M,r) voxelwise label values where prior is defined labels_prior_mask (r,) Mask of voxels where a label prior is defined """ self.data = data if vardata != None and vardata.max() == 0: self.vardata = None else: self.vardata = vardata self.std = std self.sigma = sigma self.labels = labels self.network = network self.v_shape = v_shape self.v_scale = v_scale self.std_shape = std_shape self.std_scale = std_scale n, p = data.shape if labels == None: self.labels = np.zeros(p, int) M = self.labels.max() + 1 if network == None: self.network = np.ones(M, int) if 
np.isscalar(m_mean_rate): self.m_mean_rate = np.zeros(M, float) + m_mean_rate else: self.m_mean_rate = m_mean_rate if np.isscalar(m_var_shape): self.m_var_shape = np.zeros(M, float) + m_var_shape else: self.m_var_shape = m_var_shape if np.isscalar(m_var_scale): self.m_var_scale = np.zeros(M, float) + m_var_scale else: self.m_var_scale = m_var_scale if std != None: self.D = displacement_field(XYZ, sigma, data.shape[0], disp_mask) self.labels_prior = labels_prior self.label_values = label_values self.labels_prior_mask = labels_prior_mask def init_hidden_variables(self, mode='saem', init_spatial=True): n, p = self.data.shape self.X = self.data.copy() self.m = self.X.mean(axis=0) #self.v = np.square(self.X - self.m).mean() N = len(self.network) self.m_mean = np.zeros(N, float) self.m_var = np.zeros(N, float) self.v = np.zeros(N, float) #self.s0 = np.zeros(N, float) #self.S0 = np.zeros(N, float) self.s1 = np.zeros(N, float) self.S1 = np.zeros(N, float) self.s2 = np.zeros(N, float) self.S2 = np.zeros(N, float) self.s3 = np.zeros(N, float) self.S3 = np.zeros(N, float) self.s6 = np.zeros(N, float) for j in xrange(N): self.s6[j] = (self.labels == j).sum() self.S6 = self.s6.copy() self.m_var_post_scale = np.zeros(N, float) if init_spatial and self.std != None: B = len(self.D.block) if B == 0: self.std = None else: self.R = np.zeros((n, B), int) self.N = np.ones(p, float) * n self.s4 = 0.0 self.S4 = 0.0 self.s5 = np.zeros(N, float) self.S5 = np.zeros(N, float) std = self.std self.update_summary_statistics(init_spatial) if mode == 'saem': self.update_parameters_saem(init_spatial) else: self.update_parameters_mcmc(init_spatial) self.std = std def update_summary_statistics(self, w=1.0, update_spatial=True, mode='saem'): n, p = self.data.shape if self.std == None: m = self.m else: m = self.m[self.D.I] if update_spatial: self.s4 = np.square(self.D.U).sum() if mode == 'saem': self.S4 += w * (self.s4 - self.S4) if self.vardata == None: SS = np.square(self.data - m) #/ self.v + np.log(2 * np.pi * self.v) else: SS = np.square(self.X - m) #/ self.vardata + np.log(2 * np.pi * self.vardata) if self.std == None: SS_sum = SS.sum(axis=0) else: SS_sum = np.zeros(p, float) for i in xrange(n): Ii = self.D.I[i] SSi = SS[i].reshape(p, 1) add_lines(SSi, SS_sum.reshape(p, 1), Ii) for j in xrange(len(self.network)): L = np.where(self.labels == j)[0] self.s1[j] = SS_sum[L].sum() if self.labels_prior != None: self.s6[j] = len(L) self.s2[j] = np.square(self.m[L]).sum() if self.network[j] == 1: self.s3[j] = self.m[L].sum() if update_spatial and self.std != None: self.s5[j] = self.N[L].sum() if mode == 'saem': self.S5 += w * (self.s5 - self.S5) if mode == 'saem': self.S1 += w * (self.s1 - self.S1) self.S2 += w * (self.s2 - self.S2) self.S3 += w * (self.s3 - self.S3) if self.labels_prior != None: self.S6 += w * (self.s6 - self.S6) size = self.S6 sum_sq = self.S2 sum = self.S3 else: size = self.S6 sum_sq = self.s2 sum = self.s3 # Update m_var post scale # used to update parameters, # and compute conditional posterior rate = self.m_mean_rate shape = self.m_var_shape scale = self.m_var_scale J = self.network == 1 N1 = J.sum() if N1 > 0: post_rate = rate[J] + size[J] self.m_var_post_scale[J] = scale[J] + 0.5 * (sum_sq[J] - np.square(sum[J]) / post_rate) if N1 < len(self.network): self.m_var_post_scale[J==0] = scale[J==0] + 0.5 * sum_sq[J==0] def update_parameters_saem(self, update_spatial=True): n, p = self.data.shape #self.v = (self.S1 + 2 * self.v_scale) / (n * p + 2 * (1 + self.v_shape)) size = self.S6 rate = self.m_mean_rate 
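        # The updates below form the M-step of the SAEM scheme: each parameter
        # is set, up to the exact prior bookkeeping, to the mode of its
        # conditional posterior given the stochastic approximations S1-S6
        # (inverse-gamma for the variance terms, Gaussian for the group means).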
shape = self.m_var_shape scale = self.m_var_scale if self.std == None: N = n * size else: N = self.S5 if update_spatial: #B = len(self.D.block) self.std = np.sqrt( (self.S4 + 2 * self.std_scale) / (self.D.U.size + 2 * self.std_shape + 2)) self.v = (self.S1 + 2 * self.v_scale) / (N + 2 * self.v_shape + 2) J = self.network == 1 N1 = J.sum() if N1 > 0: self.m_mean[J] = self.S3[J] / (rate[J] + size[J]) self.m_var[J] = 2 * self.m_var_post_scale[J] / (size[J] + 2 * shape[J] + 3) if N1 < len(self.network): self.m_var[J==0] = 2 * self.m_var_post_scale[J==0] / (size[J==0] + 2 * shape[J==0] + 2) def update_parameters_mcmc(self, update_spatial=True): n, p = self.data.shape #self.v = (self.s1 + 2 * self.v_scale) / np.random.chisquare(df = n * p + 2 * self.v_shape) size = self.s6 rate = self.m_mean_rate shape = self.m_var_shape scale = self.m_var_scale if self.std == None: N = n * size else: N = self.s5 if update_spatial: #B = len(self.D.block) self.std = np.sqrt( (self.s4 + 2*self.std_scale) / np.random.chisquare(df=self.D.U.size + 2*self.std_shape)) J = self.network == 1 if J.sum() > 0: post_rate = rate[J] + size[J] self.m_mean[J] = self.s3[J] / post_rate + np.random.randn(J.sum()) * np.sqrt(self.m_var[J] / post_rate) for j in xrange(len(self.network)): self.v[j] = (self.s1[j] + 2 * self.v_scale) / np.random.chisquare(df = N[j] + 2 * self.v_shape) self.m_var[j] = 2 * self.m_var_post_scale[j] / np.random.chisquare(df = size[j] + 2 * shape[j]) def update_displacements(self): n, p = self.data.shape B = len(self.D.block) if self.proposal == 'prior': for i in xrange(n): for b in np.random.permutation(range(B)): block = self.D.block[b] A = self.update_block(i, b, 'prior', self.std) elif self.proposal == 'rand_walk': if np.isscalar(self.proposal_std): for i in xrange(n): for b in np.random.permutation(range(B)): block = self.D.block[b] A = self.update_block(i, b, 'rand_walk', self.proposal_std) else: for i in xrange(n): for b in np.random.permutation(range(B)): block = self.D.block[b] A = self.update_block(i, b, 'rand_walk', self.proposal_std[:, i, b]) else: for i in xrange(n): for b in np.random.permutation(range(B)): block = self.D.block[b] A = self.update_block(i, b, 'fixed', self.proposal_std[:, i, b], self.proposal_mean[:, i, b]) self.N *= 0 ones = np.ones((p, 1), float) for i in xrange(n): Ii = self.D.I[i] add_lines(ones, self.N.reshape(p, 1), Ii) if self.verbose: print "mean rejected displacements :", self.R.mean(axis=0) def update_block(self, i, b, proposal='prior', proposal_std=None, proposal_mean=None, verbose=False, reject_override=False): block = self.D.block[b] if verbose: print 'sampling field', i, 'block', b # Propose new displacement U, V, L, W, I = self.D.sample(i, b, proposal, proposal_std, proposal_mean) Uc = self.D.U[:, i, b] Ic = self.D.I[i, L] # log acceptance rate mc = self.m[Ic] m = self.m[I] vc = self.v[self.labels[Ic]] v = self.v[self.labels[I]] #A = ((mc - m) * (mc + m - 2 * self.X[i, L])).sum() / self.v A = (np.log(v) - np.log(vc) + (self.X[i, L] - mc)**2 / vc - (self.X[i, L] - m)**2 / v).sum() if not proposal == 'prior': A += (Uc**2 - U**2).sum() / self.std**2 if proposal == 'fixed': if proposal_std.max() == 0: A = np.inf else: A += ((U - Uc) * (U + Uc - 2 * proposal_mean) / proposal_std**2).sum() self.R[i, b] = np.random.uniform() > np.exp(0.5 * A) if self.R[i, b] == 0 and not reject_override: self.D.U[:, i, b] = U self.D.V[:, i, block] = V if len(L)> 0: self.D.W[:, i, L] = W self.D.I[i, L] = I return A def update_effects(self, T=1.0): """ T is a temperature used to compute log 
posterior density by simulated annealing """ n, p = self.data.shape if self.std == None: m = self.m v = self.v[self.labels] else: m = self.m[self.D.I] v = self.v[self.labels[self.D.I]] #tot_var = self.v + self.vardata #cond_mean = (self.v * self.data + self.vardata * m) / tot_var #cond_var = self.v * self.vardata / tot_var tot_var = v + self.vardata cond_mean = (v * self.data + self.vardata * m) / tot_var cond_var = T * v * self.vardata / tot_var self.X = cond_mean + np.random.randn(n, p) * np.sqrt(cond_var) def update_mean_effect(self, T=1.0): """ T is a temperature used to compute log posterior density by simulated annealing """ n, p = self.data.shape X_sum = np.zeros(p, float) if self.std == None: X_sum = self.X.sum(axis=0) else: #self.N *= 0 #ones = np.ones((p, 1), float) for i in xrange(n): Ii = self.D.I[i] XI = self.X[i].reshape(p, 1) add_lines(XI, X_sum.reshape(p, 1), Ii) #add_lines(ones, self.N.reshape(p, 1), Ii) for j in xrange(len(self.network)): L = np.where(self.labels == j)[0] m_var = self.m_var[j] * T v = self.v[j] * T if self.std == None: #tot_var = self.v + m_var * n tot_var = v + m_var * n else: #tot_var = self.v + m_var * self.N[L] tot_var = v + m_var * self.N[L] #cond_mean = (X_sum[L] * m_var + self.v * self.m_mean[j]) / tot_var #cond_std = np.sqrt(self.v * m_var / tot_var) cond_mean = (X_sum[L] * m_var + v * self.m_mean[j]) / tot_var cond_std = np.sqrt(v * m_var / tot_var) self.m[L] = cond_mean + np.random.randn(len(L)) * cond_std def update_labels(self): N, r = self.labels_prior.shape I = self.labels_prior_mask m_mean = self.m_mean[self.label_values] m_var = self.m_var[self.label_values] L = (self.m[I].reshape(1, r) - m_mean)**2 / m_var P = self.labels_prior * np.exp(-0.5 * L) / np.sqrt(m_var) P_cumsum = P.cumsum(axis=0) X = np.random.rand(r) * P_cumsum[-1] labels = (X > P_cumsum).sum(axis=0) self.labels[I] = self.label_values[labels, xrange(r)] def evaluate(self, nsimu=1e3, burnin=100, J=None, verbose=False, proposal='prior', proposal_std=None, proposal_mean=None, compute_post_mean=False, mode='saem', update_spatial=True): """ Sample posterior distribution of model parameters, or compute their MAP estimator In: nsimu Number of samples drawn from posterior mean distribution burnin Number of discarded burn-in samples J (N,) voxel indices where successive mean values are stored verbose Print some infos during the sampling process proposal 'prior', 'rand_walk' or 'fixed' proposal_mean Used for fixed proposal only proposal_std Used for random walk or fixed proposal mode if mode='saem', compute MAP estimates of model parameters. 
if mode='mcmc', sample their posterior distribution update_spatial when False, enables sampling conditional on spatial parameters Out: self.m_values (N, nsimu+burnin) successive mean values (if J is not empty) if self.labels_prior is not empty: self.labels_post (M,r) posterior distribution of region labels if self.std is not empty: self.std_values (nsimu+burnin,) successive spatial standard deviate values if compute_post_mean is True: self.mean_m (p,) posterior average of mean effect self.var_m (p,) posterior variance of mean effect if self.std is not empty and compute_post_mean is True: self.r (n, nblocks) mean rejection rate for each displacement field self.mean_U (3, n, nblocks) posterior average of displacement weights self.var_U (3, n, nblocks) posterior marginal variances of displacement weights """ #self.init_hidden_variables() n, p = self.data.shape self.nsimu = nsimu self.burnin = burnin self.J = J self.verbose = verbose self.proposal = proposal self.proposal_mean = proposal_mean self.proposal_std = proposal_std self.compute_post_mean = compute_post_mean #self.v_values = np.zeros(nsimu + burnin, float) if J != None: self.m_values = np.zeros((len(J), nsimu + burnin), float) if self.std != None: B = len(self.D.block) if update_spatial: self.std_values = np.zeros(nsimu + burnin, float) if proposal == 'rand_walk': self.proposal_std_values = np.zeros(nsimu + burnin, float) if self.labels_prior != None: self.labels_post = np.zeros(self.labels_prior.shape, float) #Il = np.array(np.where(self.labels_prior > 0)) #r = len(self.labels_prior_mask) if compute_post_mean: sum_m = np.zeros(p, float) sum_m_sq = np.zeros(p, float) if mode == 'mcmc': N = len(self.network) self.P = np.zeros(N, float) self.mean_m_mean = np.zeros(N, float) self.mean_m_var = np.zeros(N, float) self.mean_v = np.zeros(N, float) if update_spatial and self.std != None: self.r = np.zeros((n, B), float) sum_U = np.zeros((3, n, B), float) sum_U_sq = np.zeros((3, n, B), float) niter = np.array([int(burnin), int(nsimu)]) for j in np.arange(2)[niter>0]: if j == 0: w = 1 if self.verbose: print "Burn-in" else: if mode == 'saem': if self.verbose: print "Maximizing likelihood" else: if self.verbose: print "Sampling posterior distribution" for i in xrange(niter[j]): if self.verbose: if mode == 'saem': print "SAEM", else: print "Gibbs", print "iteration", i+1, "out of", niter[j] # Gibbs iteration #i += 1 if update_spatial and self.std != None: self.update_displacements() if j == 0 and self.proposal == 'rand_walk': self.proposal_std = np.clip(self.proposal_std * (1 + 0.9) / (1 + self.R.mean()), 0.01, 10.0) if self.vardata != None: self.update_effects() self.update_mean_effect() if self.labels_prior != None: self.update_labels() if j == 1: w = 1.0 / (i + 1) self.update_summary_statistics(w, update_spatial, mode) if mode == 'saem': self.update_parameters_saem(update_spatial) else: self.update_parameters_mcmc(update_spatial) if self.verbose: print "population effect min variance value :", self.m_var.min() # Update results #self.v_values[i + self.burnin * j] = self.v if update_spatial and self.std != None: self.std_values[i + self.burnin * j] = self.std if proposal == 'rand_walk': self.proposal_std_values[i + self.burnin * j] = self.proposal_std if self.J != None: self.m_values[:, i + self.burnin * j] = self.m[self.J] if j == 1 and self.labels_prior != None: self.labels_post += \ self.label_values == self.labels[self.labels_prior_mask] #self.labels_post[Il[0], Il[1]] += \ #self.label_values[Il[0], Il[1]] == self.labels[Il[0]] if j == 1 and 
compute_post_mean: sum_m += self.m sum_m_sq += self.m**2 if mode == 'mcmc': self.P += (self.m_mean > 0) self.mean_m_mean += self.m_mean self.mean_m_var += self.m_var self.mean_v += self.v if update_spatial and self.std != None: self.r += self.R sum_U += self.D.U sum_U_sq += self.D.U**2 if j== 1 and self.labels_prior != None: self.labels_post /= nsimu if j == 1 and compute_post_mean: self.mean_m = sum_m / float(self.nsimu) self.var_m = sum_m_sq / float(self.nsimu) - self.mean_m**2 if mode == 'mcmc': self.P /= float(self.nsimu) self.mean_m_mean /= float(self.nsimu) self.mean_m_var /= float(self.nsimu) self.mean_v /= float(self.nsimu) if update_spatial and self.std != None: self.r /= float(self.nsimu) self.mean_U = sum_U / float(self.nsimu) self.var_U = sum_U_sq / float(self.nsimu) - self.mean_U**2 ##################################################################################### # MAP estimation of displacement fields def estimate_displacements_SA(self, nsimu=100, c=0.99, proposal_std=None, verbose=False): """ MAP estimate of elementary displacements conditional on model parameters """ if proposal_std==None: proposal_std = self.proposal_std LL, self.Z, self.tot_var, self.SS1, self.SS2, self.SS3, self.SS4 =\ self.compute_log_voxel_likelihood(return_SS=True) self.log_voxel_likelihood = LL for i in xrange(nsimu): if verbose: print "SA iteration", i+1, "out of", nsimu self.update_displacements_SA(c**i, proposal_std, verbose) self.update_summary_statistics(w=1.0, update_spatial=True) def update_displacements_SA(self, T=1.0, proposal_std=None, verbose=False): n = self.data.shape[0] B = len(self.D.block) for i in xrange(n): for b in np.random.permutation(range(B)): #block = self.D.block[b] A = self.update_block_SA(i, b, T, proposal_std, verbose) if self.verbose: print "mean rejected displacements :", self.R.mean(axis=0) def compute_log_conditional_displacements_posterior(self, U=None, nsimu=100, burnin=100, proposal_std=None, verbose=False, change_U=False): """ Compute posterior log density of elementary displacements at point U, conditional on model parameters """ n = self.data.shape[0] B = len(self.D.block) if U == None: U = self.D.U.copy() if proposal_std == None: proposal_std = self.proposal_std LL, self.Z, self.tot_var, self.SS1, self.SS2, self.SS3, self.SS4 =\ self.compute_log_voxel_likelihood(return_SS=True) self.log_voxel_likelihood = LL if not change_U: Uc = self.D.U.copy() proposal_c = self.proposal proposal_mean_c = self.proposal_mean proposal_std_c = self.proposal_std.copy() self.proposal = 'fixed' self.proposal_mean = U self.proposal_std = U * 0 self.update_displacements() #Restore displacement parameters self.proposal = proposal_c self.proposal_mean = proposal_mean_c self.proposal_std = proposal_std_c self.update_summary_statistics(update_spatial=True, mode='mcmc') L = 0.0 i,b = n-1, B-1 n_ib = n * B - i * B - b # Note integer division nsimu_ib = nsimu // n_ib burnin_ib = burnin // n_ib A_values = np.zeros(nsimu_ib, float) A2_values = np.zeros(nsimu_ib, float) SS_values = np.zeros(nsimu_ib, float) if verbose: print 'Compute mean acceptance rate for block', i, b print 'Burn-in' if verbose: print 'Sample acceptance rate values' for s in xrange(nsimu_ib): if verbose: print "SA iteration", s, "out of", nsimu_ib A_values[s] = self.update_block_SA(\ i, b, 1.0, proposal_std, verbose=False, reject_override=True) mean_acceptance = np.exp(A_values).clip(0,1).mean() L -= np.log(mean_acceptance) for i in range(n)[::-1]: for b in range(B)[::-1]: n_ib = n * B - i * B - b # Note integer division 
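                # n_ib is the number of (subject, block) pairs from (i, b)
                # through the last one; the simulation and burn-in budgets are
                # divided by it, presumably so that each conditional term gets
                # only a share of the total sampling effort.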
nsimu_ib = nsimu // n_ib burnin_ib = burnin // n_ib A_values = np.zeros(nsimu_ib, float) A2_values = np.zeros(nsimu_ib, float) SS_values = np.zeros(nsimu_ib, float) if verbose: print 'Compute log conditional posterior for block', i, b print 'Burn-in' for s in xrange(burnin_ib): if verbose: print "SA iteration", s, "out of", burnin_ib for bb in xrange(b, B): A = self.update_block_SA(\ i, bb, 1.0, proposal_std, verbose=False) for ii in xrange(i+1, n): for bb in xrange(B): A = self.update_block_SA(\ ii, bb, 1.0, proposal_std, verbose=False) if verbose: print 'Sample kernel and acceptance rate values' for s in xrange(nsimu_ib): if verbose: print "SA iteration", s, "out of", nsimu_ib for bb in xrange(b, B): A = self.update_block_SA(\ i, bb, 1.0, proposal_std, verbose=False) for ii in xrange(i+1, n): for bb in xrange(B): A = self.update_block_SA(\ ii, bb, 1.0, proposal_std, verbose=False) A_values[s] = self.update_block_SA(\ i, b, 1.0, proposal_std*0, verbose=False, reject_override=True, proposal='fixed', proposal_mean=U[:, i, b]) SS_values[s] = np.square(U[:, i, b] - self.D.U[:, i, b]).sum() if b > 0: A2_values[s] = self.update_block_SA(\ i, b-1, 1.0, proposal_std, verbose=False, reject_override=True) elif i > 0: A2_values[s] = self.update_block_SA(\ i-1, B-1, 1.0, proposal_std, verbose=False, reject_override=True) mean_acceptance = np.exp(A2_values).clip(0,1).mean() mean_kernel = \ (np.exp(A_values).clip(0,1) * \ np.exp( -0.5 * SS_values / proposal_std**2) \ / (np.sqrt(2 * np.pi) * proposal_std)**3).mean() L += np.log(mean_kernel) - np.log(mean_acceptance)*(i>0 or b>0) if not change_U: # Restore initial displacement value self.proposal = 'fixed' self.proposal_mean = Uc self.proposal_std = Uc * 0 self.update_displacements() self.proposal = proposal_c self.proposal_mean = proposal_mean_c self.proposal_std = proposal_std_c self.update_summary_statistics(update_spatial=True, mode='mcmc') return L def update_block_SA(self, i, b, T=1.0, proposal_std=None, verbose=False, reject_override=False, proposal='rand_walk', proposal_mean=None): """ Update displacement block using simulated annealing scheme with random-walk kernel """ if proposal_std==None: proposal_std=self.std block = self.D.block[b] if verbose: print 'sampling field', i, 'block', b # Propose new displacement U, V, L, W, I = self.D.sample(i, b, proposal, proposal_std * T, proposal_mean=proposal_mean) Uc = self.D.U[:, i, b].copy() #Vc = self.D.V[:, i, block].copy() p = self.data.shape[1] pL = len(L) if pL > 0: #Wc = self.D.W[:, i, L].copy() Ic = self.D.I[i, L].copy() J = np.unique(np.concatenate((I, Ic))) q = len(J) IJ = np.searchsorted(J, I) IJc = np.searchsorted(J, Ic) N = self.N[J].copy() Zc = self.Z[i,L].copy() tot_varc = self.tot_var[i,L].copy() SS1 = self.SS1[J].copy() SS2 = self.SS2[J].copy() SS3 = self.SS3[J].copy() SS4 = self.SS4[J].copy() # log acceptance rate #self.D.U[:, i, b] = U #self.D.V[:, i, block] = V #if pL > 0: #self.D.W[:, i, L] = W #self.D.I[i, L] = I ones = np.ones((len(L), 1), float) add_lines(-ones, N.reshape(q, 1), IJc) add_lines(ones, N.reshape(q, 1), IJ) Z = self.data[i,L] - self.m_mean[self.labels[I]] if self.vardata == None: tot_var = self.v[self.labels[I]] + np.zeros(len(L), float) else: tot_var = self.v[self.labels[I]] + self.vardata[i,L] add_lines(\ -(1.0 / tot_varc).reshape(pL, 1), SS1.reshape(q, 1), IJc) add_lines(\ (1.0 / tot_var).reshape(pL, 1), SS1.reshape(q, 1), IJ) add_lines(\ -np.log(tot_varc).reshape(pL, 1), SS2.reshape(q, 1), IJc) add_lines(\ np.log(tot_var).reshape(pL, 1), SS2.reshape(q, 1), IJ) 
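            # (Same incremental pattern for every sufficient statistic SS1-SS4:
            # subtract the current block's contribution at the old voxel indices
            # IJc, then add the proposed contribution at the new indices IJ.)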
add_lines(\ -(Zc**2 / tot_varc).reshape(pL, 1), SS3.reshape(q, 1), IJc) add_lines(\ (Z**2 / tot_var).reshape(pL, 1), SS3.reshape(q, 1), IJ) add_lines(\ -(Zc / tot_varc).reshape(pL, 1), SS4.reshape(q, 1), IJc) add_lines(\ (Z / tot_var).reshape(pL, 1), SS4.reshape(q, 1), IJ) fc = self.log_voxel_likelihood[J] f = - 0.5 * (\ N * np.log(2 * np.pi) + \ np.log(1 + self.m_var[self.labels[J]] * SS1) \ + SS2 + SS3 - SS4**2 / \ (1 / self.m_var[self.labels[J]] + SS1)) else: f = np.zeros(1) fc = np.zeros(1) A = (f - fc).sum() + 0.5 * (Uc**2 - U**2).sum() / self.std**2 self.R[i, b] = np.random.uniform() > np.exp(A / T) if self.R[i, b] == 0 and not reject_override: self.D.U[:, i, b] = U self.D.V[:, i, block] = V if len(L) > 0: self.D.W[:, i, L] = W self.D.I[i, L] = I self.N[J] = N self.Z[i,L] = Z self.tot_var[i,L] = tot_var self.SS1[J] = SS1 self.SS2[J] = SS2 self.SS3[J] = SS3 self.SS4[J] = SS4 self.log_voxel_likelihood[J] = f return A ##################################################################################### # Marginal likelihood computation for model selection def compute_log_region_likelihood_slow(self, v=None, m_mean=None, m_var=None, verbose=False, J=None): """ Essentially maintained for debug purposes """ if v == None: v = self.v if m_mean == None: m_mean = self.m_mean if m_var == None: m_var = self.m_var n, p = self.data.shape nregions = len(self.network) log_region_likelihood = np.zeros(nregions, float) if J == None: J = xrange(nregions) if self.std == None: nk = n else: I = self.D.I argsort_I = np.argsort(I.ravel()) data_I = self.data.ravel()[argsort_I] if self.vardata != None: var_I = (self.vardata + v[self.labels[I]]).ravel()[argsort_I] cumsum = np.zeros(p + 1, int) cumsum[1:] = self.N.cumsum().astype(int) for i in xrange(len(J)): j = J[i] if verbose: print "computing log likelihood for region", i + 1, "out of", len(J) m_var_j = self.m_var[j] m_mean_j = self.m_mean[j] v_j = self.v[j] L = np.where(self.labels == j)[0] for k in L: if self.std == None: datak = np.matrix(self.data[:, k].reshape(n, 1) - m_mean_j) if self.vardata != None: vark = self.vardata[:, k] + v_j else: nk = int(self.N[k]) datak = np.matrix(data_I[cumsum[k] : cumsum[k + 1]].reshape(nk, 1) - m_mean_j) if self.vardata != None: vark = var_I[cumsum[k] : cumsum[k + 1]] Vk = np.matrix(np.zeros((nk, nk), float) + m_var_j) if self.vardata == None: Vk[xrange(nk), xrange(nk)] = v_j + m_var_j else: Vk[xrange(nk), xrange(nk)] = vark + m_var_j log_region_likelihood[j] += np.log(np.linalg.det(Vk)) + datak.transpose() * np.linalg.inv(Vk) * datak if self.std == None: nj = n * len(L) else: nj = self.N[L].sum() log_region_likelihood[j] += nj * np.log(2 * np.pi) return log_region_likelihood def compute_log_region_likelihood(self, v=None, m_mean=None, m_var=None): log_voxel_likelihood = self.compute_log_voxel_likelihood(v, m_mean, m_var) N = len(self.network) log_region_likelihood = np.zeros(N, float) for j in xrange(N): log_region_likelihood[j] = log_voxel_likelihood[self.labels==j].sum() return log_region_likelihood def compute_log_voxel_likelihood(self, v=None, m_mean=None, m_var=None, return_SS=False): if v == None: v = self.v if m_mean == None: m_mean = self.m_mean if m_var == None: m_var = self.m_var n, p = self.data.shape if self.std == None: N = n v_labels = v[self.labels] Z = self.data - m_mean[self.labels] else: N = self.N I = self.D.I v_labels = v[self.labels[I]] Z = self.data - m_mean[self.labels[I]] if self.vardata == None: tot_var = v_labels + np.zeros(self.data.shape, float) else: tot_var = v_labels + self.vardata if 
self.std == None: SS1 = (1 / tot_var).sum(axis=0) SS2 = np.log(tot_var).sum(axis=0) SS3 = (Z**2 / tot_var).sum(axis=0) SS4 = (Z / tot_var).sum(axis=0) else: SS1 = np.zeros(p, float) SS2 = np.zeros(p, float) SS3 = np.zeros(p, float) SS4 = np.zeros(p, float) for i in xrange(n): Ii = self.D.I[i] add_lines((1 / tot_var[i]).reshape(p, 1), SS1.reshape(p, 1), Ii) add_lines(np.log(tot_var[i]).reshape(p, 1), SS2.reshape(p, 1), Ii) add_lines((Z[i]**2 / tot_var[i]).reshape(p, 1), SS3.reshape(p, 1), Ii) add_lines((Z[i] / tot_var[i]).reshape(p, 1), SS4.reshape(p, 1), Ii) LL = - 0.5 * (N * np.log(2 * np.pi) + np.log(1 + m_var[self.labels] * SS1) \ + SS2 + SS3 - SS4**2 / (1 / m_var[self.labels] + SS1)) if return_SS: return LL, Z, tot_var, SS1, SS2, SS3, SS4 else: return LL def compute_log_prior(self, v=None, m_mean=None, m_var=None, std=None): """ compute log prior density of model parameters, spatial uncertainty excepted, assuming hidden variables have been initialized """ if v == None: v = self.v if m_mean == None: m_mean = self.m_mean if m_var == None: m_var = self.m_var if std == None: std = self.std N = len(self.network) log_prior_values = np.zeros(N + 1, float) log_prior_values[:-1] = log_gammainv_pdf(v, self.v_shape, self.v_scale) log_prior_values[:-1] += log_gammainv_pdf(m_var, self.m_var_shape, self.m_var_scale) J = self.network == 1 if J.sum() > 0: log_prior_values[J] += log_gaussian_pdf(m_mean[J], 0, m_var[J] / self.m_mean_rate[J]) if self.std != None: log_prior_values[-1] = log_gammainv_pdf(std**2, self.std_shape, self.std_scale) return log_prior_values def compute_log_conditional_posterior(self, v=None, m_mean=None, m_var=None, std=None): """ compute log posterior density of model parameters, conditional on hidden parameters. This function is used in compute_log_region_posterior. It should only be used within the Gibbs sampler, and not the SAEM algorithm. """ n,p = self.data.shape if v == None: v = self.v if m_mean == None: m_mean = self.m_mean if m_var == None: m_var = self.m_var if std == None: std = self.std log_conditional_posterior = np.zeros(len(self.network) + 1, float) size = self.s6 if self.std == None: N = n * size else: N = self.s5 log_conditional_posterior[:-1] = log_gammainv_pdf(v, self.v_shape + 0.5 * N, self.v_scale + 0.5 * self.s1) log_conditional_posterior[:-1] += log_gammainv_pdf(m_var, self.m_var_shape + 0.5 * size, self.m_var_post_scale) J = self.network == 1 if J.sum() > 0: post_rate = self.m_mean_rate[J] + size[J] log_conditional_posterior[J] += log_gaussian_pdf(m_mean[J], self.s3[J] / post_rate, m_var[J] / post_rate) if std != None: #B = len(self.D.block) log_conditional_posterior[-1] = \ log_gammainv_pdf(std**2, self.std_shape + 0.5 * self.D.U.size, self.std_scale + 0.5 * self.s4) return log_conditional_posterior def sample_log_conditional_posterior(self, v=None, m_mean=None, m_var=None, std=None, nsimu=100, burnin=100, stabilize=False, verbose=False, update_spatial=False): """ sample log conditional posterior density of region parameters using a Gibbs sampler (assuming all hidden variables have been initialized). Computes posterior mean. if stabilize is True, sampling is conditioned on the parameters, reducing the variance of the estimate, but introducing a positive bias. 
""" if v == None: v = self.v.copy() if m_mean == None: m_mean = self.m_mean.copy() if m_var == None: m_var = self.m_var.copy() if std == None and self.std != None: if np.isscalar(self.std): std = self.std else: std = self.std.copy() if update_spatial: U = self.D.U.copy() proposal = self.proposal proposal_mean = self.proposal_mean proposal_std = self.proposal_std N = len(self.network) log_conditional_posterior_values = np.zeros((nsimu, N+1), float) #self.init_hidden_variables() n, p = self.data.shape posterior_mean = np.zeros(p, float) self.nsimu = nsimu self.burnin = burnin #self.J = J self.verbose = verbose niter = np.array([int(burnin), int(nsimu)]) for k in np.arange(2)[niter>0]: if self.verbose: if k == 0: print "Burn-in" else: print "Sampling posterior distribution" for i in xrange(niter[k]): if self.verbose: print "Iteration", i+1, "out of", niter[k] # Gibbs iteration #i += 1 if update_spatial and self.std != None: self.update_displacements() if self.vardata != None: self.update_effects() self.update_mean_effect() posterior_mean += self.m if not stabilize: self.update_summary_statistics(update_spatial, mode='mcmc') self.update_parameters_mcmc(update_spatial) if self.verbose: print "population effect min variance value :", self.m_var.min() if k == 1: if stabilize: self.update_summary_statistics(update_spatial, mode='mcmc') log_conditional_posterior_values[i] = \ self.compute_log_conditional_posterior(v, m_mean, m_var, std)#[:-1] posterior_mean /= nsimu if not stabilize: # Restore initial parameter values self.v[:], self.m_mean[:], self.m_var[:], self.std = v, m_mean, m_var, std if update_spatial: # Restore initial displacement values self.proposal = 'fixed' self.proposal_mean = U self.proposal_std = U * 0 self.update_displacements() self.proposal = proposal self.proposal_mean = proposal_mean self.proposal_std = proposal_std self.update_summary_statistics(update_spatial, mode='mcmc') return log_conditional_posterior_values, posterior_mean def compute_log_posterior(self, v=None, m_mean=None, m_var=None, std=None, nsimu=100, burnin=100, stabilize=False, verbose=False, update_spatial=False): """ compute log posterior density of region parameters by Rao-Blackwell method, or a stabilized upper bound if stabilize is True. 
""" log_conditional_posterior_values \ = self.sample_log_conditional_posterior(v, m_mean, m_var, std, nsimu, burnin, stabilize, verbose, update_spatial)[0] max_log_conditional = log_conditional_posterior_values.max(axis=0) ll_ratio = log_conditional_posterior_values - max_log_conditional if stabilize: return max_log_conditional + ll_ratio.mean(axis=0) elif not update_spatial: return max_log_conditional \ + np.log(np.exp(ll_ratio).sum(axis=0)) \ - np.log(nsimu) else: return max_log_conditional.sum() \ + np.log(np.exp(ll_ratio.sum(axis=1)).sum()) \ - np.log(nsimu) def compute_marginal_likelihood(self, v=None, m_mean=None, m_var=None, std=None, nsimu=100, burnin=100, stabilize=False, verbose=False, update_spatial=False, U=None, proposal_std=None): log_likelihood = self.compute_log_region_likelihood(v, m_mean, m_var) log_prior = self.compute_log_prior(v, m_mean, m_var, std) log_posterior = self.compute_log_posterior(v, m_mean, m_var, std, nsimu, burnin, stabilize, verbose, update_spatial) if update_spatial and self.std != None: n, B = self.data.shape[0], len(self.D.block) if std == None: std = self.std if U == None: U = self.D.U log_displacements_prior = \ - 0.5 * np.square(U).sum() / std**2 \ - self.D.U.size * np.log(std) log_displacements_posterior = \ self.compute_log_conditional_displacements_posterior(\ U, nsimu*n*B, burnin*n*B, proposal_std, verbose) return log_likelihood.sum() + \ log_prior.sum() + \ log_displacements_prior - \ log_posterior - \ log_displacements_posterior else: return log_likelihood + log_prior[:-1] - log_posterior[:-1] def compute_conditional_posterior_mean(self, v=None, m_mean=None, m_var=None): """ Compute posterior mean of mean effect map, conditional on parameters and displacements """ if v == None: v = self.v.copy() if m_mean == None: m_mean = self.m_mean.copy() if m_var == None: m_var = self.m_var.copy() LL, Z, tot_var, SS1, SS2, SS3, SS4 = \ self.compute_log_voxel_likelihood(v, m_mean, m_var, return_SS=True) #if self.std == None: #I = range(self.m.size)*np.ones(self.data.shape,int) #else: #I = self.D.I m_labels = m_mean[self.labels] v_labels = m_var[self.labels] return (SS4 + m_labels * SS1 + m_labels / v_labels)\ / (SS1 + 1.0 / v_labels) nipy-0.3.0/nipy/labs/group/tests/000077500000000000000000000000001210344137400166665ustar00rootroot00000000000000nipy-0.3.0/nipy/labs/group/tests/__init__.py000066400000000000000000000000001210344137400207650ustar00rootroot00000000000000nipy-0.3.0/nipy/labs/group/tests/test_displacement_field.py000066400000000000000000000121021210344137400241060ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import unittest import numpy as np from ..displacement_field import displacement_field, gaussian_random_field def make_data(n=10, dim=20, r=5, mdim=15, maskdim=20, amplitude=10, noise=1, jitter=None, activation=False): XYZvol = np.zeros((dim,dim,dim),int) XYZ = np.array(np.where(XYZvol==0)) p = XYZ.shape[1] #mask = np.arange(p) XYZvol[XYZ[0],XYZ[1],XYZ[2]] = np.arange(p) o = np.array([dim/2,dim/2,dim/2]) I = XYZvol[(dim-mdim)/2:(dim+mdim)/2,(dim-mdim)/2:(dim+mdim)/2,(dim-mdim)/2:(dim+mdim)/2].ravel() mask = XYZvol[ (dim-maskdim)/2 : (dim+maskdim)/2, (dim-maskdim)/2 : (dim+maskdim)/2, (dim-maskdim)/2 : (dim+maskdim)/2 ].ravel() q = len(mask) maskvol = np.zeros((dim,dim,dim),int) maskvol[XYZ[0,mask],XYZ[1,mask],XYZ[2,mask]] = np.arange(q) Isignal = maskvol[dim/2-r:dim/2+r,dim/2-r:dim/2+r,dim/2-r:dim/2+r].ravel() signal = np.zeros(q,float) 
signal[Isignal] += amplitude X = np.zeros((n,p),float) + np.nan data = np.zeros((n,p),float) + np.nan vardata = np.zeros((n,p),float) + np.nan for i in xrange(n): X[i,I] = np.random.randn(len(I)) if activation: o = np.array([dim/2,dim/2,dim/2]) if jitter!=None: # numpy 2 casting rules don't allow in-place addition of float # and int. o = o + np.round(np.random.randn(3)*jitter).clip(r-mdim/2,mdim/2-r) #print o Ii = XYZvol[o[0]-r:o[0]+r,o[1]-r:o[1]+r,o[2]-r:o[2]+r].ravel() X[i,Ii] += amplitude vardata[i,I] = np.square(np.random.randn(len(I)))*noise**2 data[i,I] = X[i,I] + np.random.randn(len(I))*np.sqrt(vardata[i,I]) return data, XYZ, mask, XYZvol, vardata, signal class test_displacement_field(unittest.TestCase): def test_sample_prior(self, verbose=False): data, XYZ, mask, XYZvol, vardata, signal = make_data(n=20, dim=20, r=3, mdim=15, maskdim=15, amplitude=5, noise=1, jitter=1, activation=True) D = displacement_field(XYZ, sigma=2.5, n=data.shape[0], mask=mask) B = len(D.block) for b in np.random.permutation(range(B)): for i in xrange(data.shape[0]): if verbose: print 'sampling field', i, 'block', b U, V, L, W, I = D.sample(i, b, 'prior', 1) block = D.block[b] D.U[:, i, b] = U D.V[:, i, block] = V D.W[:, i, L] = W D.I[i, L] = I def test_sample_rand_walk(self, verbose=False): data, XYZ, mask, XYZvol, vardata, signal = make_data(n=20, dim=20, r=3, mdim=15, maskdim=15, amplitude=5, noise=1, jitter=1, activation=True) D = displacement_field(XYZ, sigma=2.5*np.ones(3), n=data.shape[0], mask=mask) B = len(D.block) for b in np.random.permutation(range(B)): for i in xrange(data.shape[0]): if verbose: print 'sampling field', i, 'block', b U, V, L, W, I = D.sample(i, b, 'rand_walk', 1e-2) block = D.block[b] D.U[:, i, b] = U D.V[:, i, block] = V D.W[:, i, L] = W D.I[i, L] = I def test_sample_prior(self, verbose=False): data, XYZ, mask, XYZvol, vardata, signal = make_data(n=20, dim=20, r=3, mdim=15, maskdim=15, amplitude=5, noise=1, jitter=1, activation=True) D = displacement_field(XYZ, sigma=2.5, n=data.shape[0], mask=mask) B = len(D.block) for b in np.random.permutation(range(B)): for i in xrange(data.shape[0]): if verbose: print 'sampling field', i, 'block', b U, V, L, W, I = D.sample(i, b, 'prior', 1) block = D.block[b] D.U[:, i, b] = U D.V[:, i, block] = V D.W[:, i, L] = W D.I[i, L] = I def test_sample_all_blocks(self, verbose=False): data, XYZ, mask, XYZvol, vardata, signal = make_data(n=20, dim=20, r=3, mdim=15, maskdim=15, amplitude=5, noise=1, jitter=1, activation=True) D = displacement_field(XYZ, sigma=2.5, n=data.shape[0], mask=mask) for i in xrange(data.shape[0]): if verbose: print 'sampling field', i U, V, W, I = D.sample_all_blocks(1e-2) D.U[:, i] = U D.V[:, i] = V D.W[:, i] = W D.I[i] = I class test_gaussian_random_field(unittest.TestCase): def test_sample(self, verbose=False): data, XYZ, mask, XYZvol, vardata, signal = make_data(n=20, dim=20, r=3, mdim=15, maskdim=15, amplitude=5, noise=1, jitter=1, activation=True) n=data.shape[0] D = gaussian_random_field(XYZ, 2.5, n) for i in xrange(n): if verbose: print 'sampling field', i+1, 'out of', n U, V, L, W, I = D.sample(i, 1) D.U[:, i], D.V[:, i], D.W[:, i, L], D.I[i, L] = U, V, W, I if __name__ == "__main__": unittest.main() nipy-0.3.0/nipy/labs/group/tests/test_mixed_effects.py000066400000000000000000000010451210344137400231040ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from ..mixed_effects import em import numpy as np from numpy.testing 
import assert_array_almost_equal def test_simple_model(): size = 100 X = np.zeros((size, 2)) X[:,0] = 1. X[:,1] = range(size) err = .1 sy = err*np.random.rand(size) e1 = sy*np.random.normal(size=size) e2 = .1*np.random.normal(size=size) y = X[:,0] + e1 + e2 vy = sy**2 b, s2 = em(y, vy, X, niter=10) print b, s2 nipy-0.3.0/nipy/labs/group/tests/test_permutation_test.py000066400000000000000000000056401210344137400237120ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import unittest import numpy as np from .. import permutation_test as pt from nipy.algorithms.graph import wgraph_from_3d_grid nperms = 2 ndraws = 10 def make_data(n=10,mask_shape=(10,10,10),axis=0): mask = np.zeros(mask_shape,int) XYZ = np.array(np.where(mask==0)) p = XYZ.shape[1] data = np.random.randn(n,p) vardata = np.random.randn(n,p)**2 if axis==1: data = data.T vardata = vardata.T return data, vardata, XYZ class test_permutation_test(unittest.TestCase): def test_onesample(self): data, vardata, XYZ = make_data() # rfx calibration P = pt.permutation_test_onesample(data, XYZ, ndraws=ndraws) c = [(P.random_Tvalues[P.ndraws*(0.95)],None), ( P.random_Tvalues[P.ndraws*(0.5)], 18.)] r = np.ones(data.shape[1],int) r[data.shape[1]/2:] *= 10 #p_values, cluster_results, region_results = P.calibrate(nperms=100, clusters=c, regions=[r]) # mfx calibration P = pt.permutation_test_onesample( data, XYZ, vardata=vardata, stat_id="student_mfx", ndraws=ndraws) p_values, cluster_results, region_results = P.calibrate( nperms=nperms, clusters=c, regions=[r]) def test_onesample_graph(self): data, vardata, XYZ = make_data() G = wgraph_from_3d_grid(XYZ.T) # rfx calibration P = pt.permutation_test_onesample_graph(data, G, ndraws=ndraws) c = [(P.random_Tvalues[P.ndraws*(0.95)],None)] r = np.ones(data.shape[1],int) r[data.shape[1]/2:] *= 10 #p_values, cluster_results, region_results = P.calibrate(nperms=100, clusters=c, regions=[r]) # mfx calibration P = pt.permutation_test_onesample_graph( data, G, vardata=vardata, stat_id="student_mfx", ndraws=ndraws) p_values, cluster_results, region_results = P.calibrate( nperms=nperms, clusters=c, regions=[r]) def test_twosample(self): data, vardata, XYZ = make_data(n=20) data1, vardata1, data2, vardata2 = ( data[:10], vardata[:10], data[10:],vardata[10:]) # rfx calibration P = pt.permutation_test_twosample(data1, data2, XYZ, ndraws=ndraws) c = [(P.random_Tvalues[P.ndraws*(0.95)],None),(P.random_Tvalues[P.ndraws*(0.5)],10)] r = [np.zeros(data.shape[1])] # Assuming our data.shape[1] is divisible by 2 r[data.shape[1]//2:] *= 10 #p_values, cluster_results, region_results=P.calibrate(nperms=100, clusters=c, regions=r) # mfx calibration P = pt.permutation_test_twosample(data1, data2, XYZ, vardata1=vardata1, vardata2=vardata2, stat_id="student_mfx", ndraws=ndraws) p_values, cluster_results, region_results = P.calibrate(nperms=nperms, clusters=c, regions=r) if __name__ == "__main__": unittest.main() nipy-0.3.0/nipy/labs/group/tests/test_routines.py000066400000000000000000000034201210344137400221460ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from numpy.testing import assert_equal, assert_almost_equal import numpy as np from .. 
import routines, onesample, twosample def slow_add_lines(A, B, I): for i in xrange(len(I)): B[I[i]] += A[i] def test_add_lines(): nlines = 100000 ncols = 1 A = np.random.rand(nlines, ncols) B = np.random.rand(nlines, ncols) I = np.asarray(np.random.randint(nlines, size=nlines) ).astype(np.int) C1 = B.copy() C2 = B.copy() routines.add_lines(A, C1, I) slow_add_lines(A, C2, I) assert_almost_equal(C1, C2) def test_add_lines2(): nlines = 1e5 ncols = 1 A = np.random.rand(nlines, ncols) B = np.random.rand(nlines, ncols) I = np.asarray(np.random.randint(nlines, size=nlines) ).astype(np.int) C1 = B.copy() C2 = B.copy() routines.add_lines(A, C1, I) slow_add_lines(A, C2, I) assert_almost_equal(C1, C2) def test_onesample_stat(): dx, dy, dz = 3, 4, 2 nvox = dx*dy*dz nsub = 12 # Make surrogate data aux = np.arange(nvox) x = np.reshape(aux.repeat(nsub), [dx, dy, dz, nsub]) # Gold standard y_target = np.inf * np.ones(nvox) y_target[0] = 0.0 # Test: input C-contiguous, data owner, axis=3 y = onesample.stat(x, axis=3).reshape(nvox) assert_equal(y, y_target) # Test: input F-contiguous, not owner, axis=0 y = onesample.stat(x.T, axis=0).reshape(nvox) assert_equal(y, y_target) # Test: input C-contiguous, data owner, axis=0 xT = x.T.copy() y = onesample.stat(xT, axis=0).reshape(nvox) assert_equal(y, y_target) if __name__ == "__main__": import nose nose.run(argv=['', __file__]) nipy-0.3.0/nipy/labs/group/tests/test_spatial_relaxation_onesample.py000066400000000000000000000160511210344137400262300ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import numpy as np from ..spatial_relaxation_onesample import multivariate_stat from ....testing import (dec, assert_true, assert_equal, assert_almost_equal) verbose = False def make_data(n=10, dim=np.array([20,20,20]), r=5, amplitude=10, noise=1, jitter=None, prng=np.random): if np.isscalar(dim): dim = np.array([dim,dim,dim]) XYZvol = np.zeros((dim),int) XYZ = np.array(np.where(XYZvol==0)) p = XYZ.shape[1] #mask = np.arange(p) XYZvol[XYZ[0],XYZ[1],XYZ[2]] = np.arange(p) o = np.array(dim/2) maskvol = np.zeros(dim, int) maskvol[list(XYZ)] = np.arange(p) Isignal = maskvol[dim[0]/2-r:dim[0]/2+r, dim[1]/2-r:dim[1]/2+r, dim[2]/2-r:dim[2]/2+r].ravel() signal = np.zeros(p, float) signal[Isignal] += amplitude X = np.zeros((n, p), float) + np.nan data = np.zeros((n, p), float) + np.nan vardata = np.zeros((n, p), float) + np.nan for i in xrange(n): X[i] = prng.randn(p) o = np.array(dim/2) if jitter!=None: # Not in place to avoid stricter numpy 2 casting o = o + np.round(prng.randn(3)*jitter).clip(r-dim/2,dim/2-r) Ii = XYZvol[o[0]-r:o[0]+r, o[1]-r:o[1]+r, o[2]-r:o[2]+r].ravel() X[i,Ii] += amplitude vardata[i] = np.square(prng.randn(p))*noise**2 data[i] = X[i] + prng.randn(p)*np.sqrt(vardata[i]) return data, XYZ, XYZvol, vardata, signal def test_evaluate_exact(): # without mfx nor spatial relaxation prng = np.random.RandomState(10) data, XYZ, XYZvol, vardata, signal = make_data(n=20, dim=np.array([20,20,20]), r=3, amplitude=5, noise=0, jitter=0, prng=prng) p = len(signal) XYZvol *= 0 XYZvol[list(XYZ)] = np.arange(p) P = multivariate_stat(data) P.init_hidden_variables() P.evaluate(nsimu=100, burnin=100, J=[XYZvol[5, 5, 5]], compute_post_mean=True, verbose=verbose) P.log_likelihood_values = P.compute_log_region_likelihood() # Verify code consistency Q = multivariate_stat(data, vardata*0, XYZ, std=0, sigma=5) Q.init_hidden_variables() Q.evaluate(nsimu=100, burnin=100, J = [XYZvol[5,5,5]], 
compute_post_mean=True, update_spatial=False, verbose=verbose) Q.log_likelihood_values = Q.compute_log_region_likelihood() assert_almost_equal(P.mean_m.mean(), Q.mean_m.mean(), int(np.log10(P.nsimu))-1) assert_almost_equal(Q.log_likelihood_values.sum(), P.log_likelihood_values.sum(), 0) def test_model_selection_exact(): prng = np.random.RandomState(10) data, XYZ, XYZvol, vardata, signal = make_data(n=30, dim=20, r=3, amplitude=1, noise=0, jitter=0, prng=prng) labels = (signal > 0).astype(int) P1 = multivariate_stat(data, labels=labels) P1.init_hidden_variables() P1.evaluate(nsimu=100, burnin=10, verbose=verbose) L1 = P1.compute_log_region_likelihood() Prior1 = P1.compute_log_prior() #v, m_mean, m_var = P1.v.copy(), P1.m_mean.copy(), P1.m_var.copy() Post1 = P1.compute_log_posterior(nsimu=1e2, burnin=1e2, verbose=verbose) M1 = L1 + Prior1[:-1] - Post1[:-1] assert_almost_equal(M1.mean(), P1.compute_marginal_likelihood().mean(), 0) P0 = multivariate_stat(data, labels=labels) P0.network *= 0 P0.init_hidden_variables() P0.evaluate(nsimu=100, burnin=100, verbose=verbose) L0 = P0.compute_log_region_likelihood() Prior0 = P0.compute_log_prior() Post0 = P0.compute_log_posterior(nsimu=1e2, burnin=1e2, verbose=verbose) M0 = L0 + Prior0[:-1] - Post0[:-1] assert_almost_equal(M0.mean(), P0.compute_marginal_likelihood().mean(), 0) assert_true(M1[1] > M0[1]) assert_true(M1[0] < M0[0]) @dec.slow # test takes around 7 minutes on my (MB) Mac laptop def test_model_selection_mfx_spatial_rand_walk(): prng = np.random.RandomState(10) data, XYZ, XYZvol, vardata, signal = make_data( n=20, dim=np.array([1,20,20]), r=3, amplitude=3, noise=1, jitter=0.5, prng=prng) labels = (signal > 0).astype(int) P = multivariate_stat(data, vardata, XYZ, std=0.5, sigma=5, labels=labels) P.network[:] = 0 P.init_hidden_variables() P.evaluate(nsimu=100, burnin=100, verbose=verbose, proposal='rand_walk', proposal_std=0.5) L00 = P.compute_log_region_likelihood() # Test simulated annealing procedure P.estimate_displacements_SA(nsimu=100, c=0.99, proposal_std=P.proposal_std, verbose=verbose) L0 = P.compute_log_region_likelihood() assert_true(L0.sum() > L00.sum()) #Prior0 = P.compute_log_prior() #Post0 = P.compute_log_posterior(nsimu=1e2, burnin=1e2, verbose=verbose) #M0 = L0 + Prior0[:-1] - Post0[:-1] M0 = P.compute_marginal_likelihood(update_spatial=True) #yield assert_almost_equal(M0.sum(), P.compute_marginal_likelihood(verbose=verbose).sum(), 0) P.network[1] = 1 #P.init_hidden_variables(init_spatial=False) P.init_hidden_variables(init_spatial=False) P.evaluate(nsimu=100, burnin=100, verbose=verbose, update_spatial=False, proposal_std=P.proposal_std) #L1 = P.compute_log_region_likelihood() #Prior1 = P.compute_log_prior() #Post1 = P.compute_log_posterior(nsimu=1e2, burnin=1e2, verbose=verbose) #M1 = L1 + Prior1[:-1] - Post1[:-1] M1 = P.compute_marginal_likelihood(update_spatial=True) #yield assert_almost_equal(0.1*M1.sum(), 0.1*P.compute_marginal_likelihood(verbose=verbose).sum(), 0) assert_true(M1 > M0) def test_evaluate_mfx_spatial(): prng = np.random.RandomState(10) data, XYZ, XYZvol, vardata, signal = make_data( n=20, dim=10, r=3, amplitude=5, noise=1, jitter=1, prng=prng) P = multivariate_stat(data, vardata, XYZ, std=1, sigma=3) P.init_hidden_variables() P.evaluate(nsimu=5, burnin=5, J=[P.D.XYZ_vol[10, 10, 10]], verbose=verbose, mode='mcmc') # Test log_likelihood computation v = P.v.copy() m_var = P.m_var.copy() m_mean = P.m_mean.copy() L1 = P.compute_log_region_likelihood_slow(v, m_mean, m_var) L2 = P.compute_log_region_likelihood(v, 
m_mean, m_var) assert_almost_equal(-L1.sum(), L2.sum()*2, 2) # Test posterior density computation #Prior = P.compute_log_prior(v, m_mean, m_var) #Post = P.compute_log_posterior(v, m_mean, m_var, nsimu=10, #burnin=10, verbose=verbose) def test_update_labels(): prng = np.random.RandomState(10) data, XYZ, XYZvol, vardata, signal = make_data( n=20, dim=20, r=3, amplitude=5, noise=1, jitter=1, prng=prng) P = multivariate_stat(data, vardata, XYZ) P.init_hidden_variables() p = P.data.shape[1] P.labels_prior = np.ones((1, p), float) P.label_values = np.zeros((1, p), int) P.labels_prior_mask = np.arange(p) P.update_labels() assert_equal(max(abs(P.labels - np.zeros(p, int))), 0) nipy-0.3.0/nipy/labs/group/twosample.c000066400000000000000000010035601210344137400177100ustar00rootroot00000000000000/* Generated by Cython 0.17.4 on Sat Jan 12 17:27:39 2013 */ #define PY_SSIZE_T_CLEAN #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02040000 #error Cython requires Python 2.4+. #else #include /* For offsetof */ #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #endif #if PY_VERSION_HEX < 0x02050000 typedef int Py_ssize_t; #define PY_SSIZE_T_MAX INT_MAX #define PY_SSIZE_T_MIN INT_MIN #define PY_FORMAT_SIZE_T "" #define CYTHON_FORMAT_SSIZE_T "" #define PyInt_FromSsize_t(z) PyInt_FromLong(z) #define PyInt_AsSsize_t(o) __Pyx_PyInt_AsInt(o) #define PyNumber_Index(o) ((PyNumber_Check(o) && !PyFloat_Check(o)) ? 
PyNumber_Int(o) : \ (PyErr_Format(PyExc_TypeError, \ "expected index value, got %.200s", Py_TYPE(o)->tp_name), \ (PyObject*)0)) #define __Pyx_PyIndex_Check(o) (PyNumber_Check(o) && !PyFloat_Check(o) && \ !PyComplex_Check(o)) #define PyIndex_Check __Pyx_PyIndex_Check #define PyErr_WarnEx(category, message, stacklevel) PyErr_Warn(category, message) #define __PYX_BUILD_PY_SSIZE_T "i" #else #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #define __Pyx_PyIndex_Check PyIndex_Check #endif #if PY_VERSION_HEX < 0x02060000 #define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt) #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type) #define Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size) #define PyVarObject_HEAD_INIT(type, size) \ PyObject_HEAD_INIT(type) size, #define PyType_Modified(t) typedef struct { void *buf; PyObject *obj; Py_ssize_t len; Py_ssize_t itemsize; int readonly; int ndim; char *format; Py_ssize_t *shape; Py_ssize_t *strides; Py_ssize_t *suboffsets; void *internal; } Py_buffer; #define PyBUF_SIMPLE 0 #define PyBUF_WRITABLE 0x0001 #define PyBUF_FORMAT 0x0004 #define PyBUF_ND 0x0008 #define PyBUF_STRIDES (0x0010 | PyBUF_ND) #define PyBUF_C_CONTIGUOUS (0x0020 | PyBUF_STRIDES) #define PyBUF_F_CONTIGUOUS (0x0040 | PyBUF_STRIDES) #define PyBUF_ANY_CONTIGUOUS (0x0080 | PyBUF_STRIDES) #define PyBUF_INDIRECT (0x0100 | PyBUF_STRIDES) #define PyBUF_RECORDS (PyBUF_STRIDES | PyBUF_FORMAT | PyBUF_WRITABLE) #define PyBUF_FULL (PyBUF_INDIRECT | PyBUF_FORMAT | PyBUF_WRITABLE) typedef int (*getbufferproc)(PyObject *, Py_buffer *, int); typedef void (*releasebufferproc)(PyObject *, Py_buffer *); #endif #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \ PyCode_New(a, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #endif #if PY_MAJOR_VERSION < 3 && PY_MINOR_VERSION < 6 #define PyUnicode_FromString(s) PyUnicode_Decode(s, strlen(s), "UTF-8", "strict") #endif #if PY_MAJOR_VERSION >= 3 #define Py_TPFLAGS_CHECKTYPES 0 #define Py_TPFLAGS_HAVE_INDEX 0 #endif #if (PY_VERSION_HEX < 0x02060000) || (PY_MAJOR_VERSION >= 3) #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ? 
\ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #else #define CYTHON_PEP393_ENABLED 0 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_READ(k, d, i) ((k=k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #endif #if PY_VERSION_HEX < 0x02060000 #define PyBytesObject PyStringObject #define PyBytes_Type PyString_Type #define PyBytes_Check PyString_Check #define PyBytes_CheckExact PyString_CheckExact #define PyBytes_FromString PyString_FromString #define PyBytes_FromStringAndSize PyString_FromStringAndSize #define PyBytes_FromFormat PyString_FromFormat #define PyBytes_DecodeEscape PyString_DecodeEscape #define PyBytes_AsString PyString_AsString #define PyBytes_AsStringAndSize PyString_AsStringAndSize #define PyBytes_Size PyString_Size #define PyBytes_AS_STRING PyString_AS_STRING #define PyBytes_GET_SIZE PyString_GET_SIZE #define PyBytes_Repr PyString_Repr #define PyBytes_Concat PyString_Concat #define PyBytes_ConcatAndDel PyString_ConcatAndDel #endif #if PY_VERSION_HEX < 0x02060000 #define PySet_Check(obj) PyObject_TypeCheck(obj, &PySet_Type) #define PyFrozenSet_Check(obj) PyObject_TypeCheck(obj, &PyFrozenSet_Type) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_VERSION_HEX < 0x03020000 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if (PY_MAJOR_VERSION < 3) || (PY_VERSION_HEX >= 0x03010300) #define __Pyx_PySequence_GetSlice(obj, a, b) PySequence_GetSlice(obj, a, b) #define __Pyx_PySequence_SetSlice(obj, a, b, value) PySequence_SetSlice(obj, a, b, value) #define __Pyx_PySequence_DelSlice(obj, a, b) PySequence_DelSlice(obj, a, b) #else #define __Pyx_PySequence_GetSlice(obj, a, b) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), (PyObject*)0) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_GetSlice(obj, a, b)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object is unsliceable", (obj)->ob_type->tp_name), (PyObject*)0))) #define __Pyx_PySequence_SetSlice(obj, a, b, value) (unlikely(!(obj)) ? 
\ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_SetSlice(obj, a, b, value)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice assignment", (obj)->ob_type->tp_name), -1))) #define __Pyx_PySequence_DelSlice(obj, a, b) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_DelSlice(obj, a, b)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice deletion", (obj)->ob_type->tp_name), -1))) #endif #if PY_MAJOR_VERSION >= 3 #define PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func)) #endif #if PY_VERSION_HEX < 0x02050000 #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),((char *)(n))) #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),((char *)(n)),(a)) #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),((char *)(n))) #else #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),(n)) #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),(n),(a)) #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),(n)) #endif #if PY_VERSION_HEX < 0x02050000 #define __Pyx_NAMESTR(n) ((char *)(n)) #define __Pyx_DOCSTR(n) ((char *)(n)) #else #define __Pyx_NAMESTR(n) (n) #define __Pyx_DOCSTR(n) (n) #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include #define __PYX_HAVE__nipy__labs__group__twosample #define __PYX_HAVE_API__nipy__labs__group__twosample #include "stdio.h" #include "stdlib.h" #include "numpy/arrayobject.h" #include "numpy/ufuncobject.h" #include "fff_base.h" #include "fff_vector.h" #include "fff_matrix.h" #include "fff_array.h" #include "fffpy.h" #include "fff_twosample_stat.h" #ifdef _OPENMP #include #endif /* _OPENMP */ #ifdef PYREX_WITHOUT_ASSERTIONS #define CYTHON_WITHOUT_ASSERTIONS #endif /* inline attribute */ #ifndef CYTHON_INLINE #if defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif /* unused attribute */ #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif typedef struct {PyObject **p; char *s; const long n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; /*proto*/ /* Type Conversion Predeclarations */ #define __Pyx_PyBytes_FromUString(s) PyBytes_FromString((char*)s) #define __Pyx_PyBytes_AsUString(s) ((unsigned char*) PyBytes_AsString(s)) #define __Pyx_Owned_Py_None(b) (Py_INCREF(Py_None), Py_None) #define __Pyx_PyBool_FromLong(b) ((b) 
? (Py_INCREF(Py_True), Py_True) : (Py_INCREF(Py_False), Py_False)) static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x); static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject*); #if CYTHON_COMPILING_IN_CPYTHON #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #ifdef __GNUC__ /* Test for GCC > 2.95 */ #if __GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* __GNUC__ > 2 ... */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ > 2 ... */ #else /* __GNUC__ */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static PyObject *__pyx_m; static PyObject *__pyx_b; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= __FILE__; static const char *__pyx_filename; #if !defined(CYTHON_CCOMPLEX) #if defined(__cplusplus) #define CYTHON_CCOMPLEX 1 #elif defined(_Complex_I) #define CYTHON_CCOMPLEX 1 #else #define CYTHON_CCOMPLEX 0 #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus #include #else #include #endif #endif #if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__) #undef _Complex_I #define _Complex_I 1.0fj #endif static const char *__pyx_f[] = { "twosample.pyx", "numpy.pxd", "type.pxd", }; /* "numpy.pxd":723 * # in Cython to enable them only on the right systems. 
* * ctypedef npy_int8 int8_t # <<<<<<<<<<<<<< * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t */ typedef npy_int8 __pyx_t_5numpy_int8_t; /* "numpy.pxd":724 * * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t # <<<<<<<<<<<<<< * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t */ typedef npy_int16 __pyx_t_5numpy_int16_t; /* "numpy.pxd":725 * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t # <<<<<<<<<<<<<< * ctypedef npy_int64 int64_t * #ctypedef npy_int96 int96_t */ typedef npy_int32 __pyx_t_5numpy_int32_t; /* "numpy.pxd":726 * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t # <<<<<<<<<<<<<< * #ctypedef npy_int96 int96_t * #ctypedef npy_int128 int128_t */ typedef npy_int64 __pyx_t_5numpy_int64_t; /* "numpy.pxd":730 * #ctypedef npy_int128 int128_t * * ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<< * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t */ typedef npy_uint8 __pyx_t_5numpy_uint8_t; /* "numpy.pxd":731 * * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<< * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t */ typedef npy_uint16 __pyx_t_5numpy_uint16_t; /* "numpy.pxd":732 * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<< * ctypedef npy_uint64 uint64_t * #ctypedef npy_uint96 uint96_t */ typedef npy_uint32 __pyx_t_5numpy_uint32_t; /* "numpy.pxd":733 * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<< * #ctypedef npy_uint96 uint96_t * #ctypedef npy_uint128 uint128_t */ typedef npy_uint64 __pyx_t_5numpy_uint64_t; /* "numpy.pxd":737 * #ctypedef npy_uint128 uint128_t * * ctypedef npy_float32 float32_t # <<<<<<<<<<<<<< * ctypedef npy_float64 float64_t * #ctypedef npy_float80 float80_t */ typedef npy_float32 __pyx_t_5numpy_float32_t; /* "numpy.pxd":738 * * ctypedef npy_float32 float32_t * ctypedef npy_float64 float64_t # <<<<<<<<<<<<<< * #ctypedef npy_float80 float80_t * #ctypedef npy_float128 float128_t */ typedef npy_float64 __pyx_t_5numpy_float64_t; /* "numpy.pxd":747 * # The int types are mapped a bit surprising -- * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t # <<<<<<<<<<<<<< * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t */ typedef npy_long __pyx_t_5numpy_int_t; /* "numpy.pxd":748 * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t * ctypedef npy_longlong long_t # <<<<<<<<<<<<<< * ctypedef npy_longlong longlong_t * */ typedef npy_longlong __pyx_t_5numpy_long_t; /* "numpy.pxd":749 * ctypedef npy_long int_t * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<< * * ctypedef npy_ulong uint_t */ typedef npy_longlong __pyx_t_5numpy_longlong_t; /* "numpy.pxd":751 * ctypedef npy_longlong longlong_t * * ctypedef npy_ulong uint_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t */ typedef npy_ulong __pyx_t_5numpy_uint_t; /* "numpy.pxd":752 * * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulonglong_t * */ typedef npy_ulonglong __pyx_t_5numpy_ulong_t; /* "numpy.pxd":753 * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<< * * ctypedef npy_intp intp_t */ typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t; /* "numpy.pxd":755 * ctypedef npy_ulonglong ulonglong_t * * ctypedef npy_intp intp_t # 
<<<<<<<<<<<<<< * ctypedef npy_uintp uintp_t * */ typedef npy_intp __pyx_t_5numpy_intp_t; /* "numpy.pxd":756 * * ctypedef npy_intp intp_t * ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<< * * ctypedef npy_double float_t */ typedef npy_uintp __pyx_t_5numpy_uintp_t; /* "numpy.pxd":758 * ctypedef npy_uintp uintp_t * * ctypedef npy_double float_t # <<<<<<<<<<<<<< * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t */ typedef npy_double __pyx_t_5numpy_float_t; /* "numpy.pxd":759 * * ctypedef npy_double float_t * ctypedef npy_double double_t # <<<<<<<<<<<<<< * ctypedef npy_longdouble longdouble_t * */ typedef npy_double __pyx_t_5numpy_double_t; /* "numpy.pxd":760 * ctypedef npy_double float_t * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cfloat cfloat_t */ typedef npy_longdouble __pyx_t_5numpy_longdouble_t; /* "fff.pxd":9 * * # Redefine size_t * ctypedef unsigned long int size_t # <<<<<<<<<<<<<< * * */ typedef unsigned long __pyx_t_3fff_size_t; #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< float > __pyx_t_float_complex; #else typedef float _Complex __pyx_t_float_complex; #endif #else typedef struct { float real, imag; } __pyx_t_float_complex; #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< double > __pyx_t_double_complex; #else typedef double _Complex __pyx_t_double_complex; #endif #else typedef struct { double real, imag; } __pyx_t_double_complex; #endif /*--- Type declarations ---*/ /* "numpy.pxd":762 * ctypedef npy_longdouble longdouble_t * * ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<< * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t */ typedef npy_cfloat __pyx_t_5numpy_cfloat_t; /* "numpy.pxd":763 * * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<< * ctypedef npy_clongdouble clongdouble_t * */ typedef npy_cdouble __pyx_t_5numpy_cdouble_t; /* "numpy.pxd":764 * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cdouble complex_t */ typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t; /* "numpy.pxd":766 * ctypedef npy_clongdouble clongdouble_t * * ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew1(a): */ typedef npy_cdouble __pyx_t_5numpy_complex_t; #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); /*proto*/ #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define __Pyx_RefNannySetupContext(name, acquire_gil) \ if (acquire_gil) { \ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \ PyGILState_Release(__pyx_gilstate_save); \ } else { \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil) \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext() \ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define 
__Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif /* CYTHON_REFNANNY */ #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name); /*proto*/ static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /*proto*/ static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /*proto*/ static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[], \ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, \ const char* function_name); /*proto*/ static int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, const char *name, int exact); /*proto*/ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); if (likely(L->allocated > len)) { Py_INCREF(x); PyList_SET_ITEM(list, len, x); Py_SIZE(list) = len+1; return 0; } return PyList_Append(list, x); } #else #define __Pyx_PyList_Append(L,x) PyList_Append(L,x) #endif #define __Pyx_SetItemInt(o, i, v, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? \ __Pyx_SetItemInt_Fast(o, i, v) : \ __Pyx_SetItemInt_Generic(o, to_py_func(i), v)) static CYTHON_INLINE int __Pyx_SetItemInt_Generic(PyObject *o, PyObject *j, PyObject *v) { int r; if (!j) return -1; r = PyObject_SetItem(o, j, v); Py_DECREF(j); return r; } static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObject *v) { #if CYTHON_COMPILING_IN_CPYTHON if (PyList_CheckExact(o)) { Py_ssize_t n = (likely(i >= 0)) ? 
i : i + PyList_GET_SIZE(o); if (likely((n >= 0) & (n < PyList_GET_SIZE(o)))) { PyObject* old = PyList_GET_ITEM(o, n); Py_INCREF(v); PyList_SET_ITEM(o, n, v); Py_DECREF(old); return 1; } } else { /* inlined PySequence_SetItem() */ PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; if (likely(m && m->sq_ass_item)) { if (unlikely(i < 0) && likely(m->sq_length)) { Py_ssize_t l = m->sq_length(o); if (unlikely(l < 0)) return -1; i += l; } return m->sq_ass_item(o, i, v); } } #else #if CYTHON_COMPILING_IN_PYPY if (PySequence_Check(o) && !PyDict_Check(o)) { #else if (PySequence_Check(o)) { #endif return PySequence_SetItem(o, i, v); } #endif return __Pyx_SetItemInt_Generic(o, PyInt_FromSsize_t(i), v); } static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb); /*proto*/ static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb); /*proto*/ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /*proto*/ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); static CYTHON_INLINE int __Pyx_IterFinish(void); /*proto*/ static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected); /*proto*/ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /*proto*/ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level); /*proto*/ static CYTHON_INLINE PyObject *__Pyx_PyInt_to_py_Py_intptr_t(Py_intptr_t); #if CYTHON_CCOMPLEX #ifdef __cplusplus #define __Pyx_CREAL(z) ((z).real()) #define __Pyx_CIMAG(z) ((z).imag()) #else #define __Pyx_CREAL(z) (__real__(z)) #define __Pyx_CIMAG(z) (__imag__(z)) #endif #else #define __Pyx_CREAL(z) ((z).real) #define __Pyx_CIMAG(z) ((z).imag) #endif #if defined(_WIN32) && defined(__cplusplus) && CYTHON_CCOMPLEX #define __Pyx_SET_CREAL(z,x) ((z).real(x)) #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) #else #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) #endif static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float); #if CYTHON_CCOMPLEX #define __Pyx_c_eqf(a, b) ((a)==(b)) #define __Pyx_c_sumf(a, b) ((a)+(b)) #define __Pyx_c_difff(a, b) ((a)-(b)) #define __Pyx_c_prodf(a, b) ((a)*(b)) #define __Pyx_c_quotf(a, b) ((a)/(b)) #define __Pyx_c_negf(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zerof(z) ((z)==(float)0) #define __Pyx_c_conjf(z) (::std::conj(z)) #if 1 #define __Pyx_c_absf(z) (::std::abs(z)) #define __Pyx_c_powf(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zerof(z) ((z)==0) #define __Pyx_c_conjf(z) (conjf(z)) #if 1 #define __Pyx_c_absf(z) (cabsf(z)) #define __Pyx_c_powf(a, b) (cpowf(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex); static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex); static CYTHON_INLINE 
__pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex); #if 1 static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex, __pyx_t_float_complex); #endif #endif static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double); #if CYTHON_CCOMPLEX #define __Pyx_c_eq(a, b) ((a)==(b)) #define __Pyx_c_sum(a, b) ((a)+(b)) #define __Pyx_c_diff(a, b) ((a)-(b)) #define __Pyx_c_prod(a, b) ((a)*(b)) #define __Pyx_c_quot(a, b) ((a)/(b)) #define __Pyx_c_neg(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zero(z) ((z)==(double)0) #define __Pyx_c_conj(z) (::std::conj(z)) #if 1 #define __Pyx_c_abs(z) (::std::abs(z)) #define __Pyx_c_pow(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zero(z) ((z)==0) #define __Pyx_c_conj(z) (conj(z)) #if 1 #define __Pyx_c_abs(z) (cabs(z)) #define __Pyx_c_pow(a, b) (cpow(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex); static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex); #if 1 static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex, __pyx_t_double_complex); #endif #endif static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject *); static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject *); static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject *); static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject *); static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject *); static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject *); static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject *); static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject *); static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject *); static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject *); static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject *); static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject *); static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject *); static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject *); static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject *); static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject *); static int __Pyx_check_binary_version(void); #if !defined(__Pyx_PyIdentifier_FromString) #if PY_MAJOR_VERSION < 3 #define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s) #else #define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s) #endif #endif static PyObject *__Pyx_ImportModule(const char *name); /*proto*/ static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict); /*proto*/ typedef struct { int code_line; PyCodeObject* code_object; } __Pyx_CodeObjectCacheEntry; struct 
__Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); /*proto*/ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/ /* Module declarations from 'cpython.buffer' */ /* Module declarations from 'cpython.ref' */ /* Module declarations from 'libc.stdio' */ /* Module declarations from 'cpython.object' */ /* Module declarations from '__builtin__' */ /* Module declarations from 'cpython.type' */ static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0; /* Module declarations from 'libc.stdlib' */ /* Module declarations from 'numpy' */ /* Module declarations from 'numpy' */ static PyTypeObject *__pyx_ptype_5numpy_dtype = 0; static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0; static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0; static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0; static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0; static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/ /* Module declarations from 'fff' */ /* Module declarations from 'nipy.labs.group.twosample' */ #define __Pyx_MODULE_NAME "nipy.labs.group.twosample" int __pyx_module_is_main_nipy__labs__group__twosample = 0; /* Implementation of 'nipy.labs.group.twosample' */ static PyObject *__pyx_builtin_range; static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_RuntimeError; static PyObject *__pyx_pf_4nipy_4labs_5group_9twosample_count_permutations(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_n1, unsigned int __pyx_v_n2); /* proto */ static PyObject *__pyx_pf_4nipy_4labs_5group_9twosample_2stat(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_Y1, PyArrayObject *__pyx_v_Y2, PyObject *__pyx_v_id, int __pyx_v_axis, PyArrayObject *__pyx_v_Magics); /* proto */ static PyObject *__pyx_pf_4nipy_4labs_5group_9twosample_4stat_mfx(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_Y1, PyArrayObject *__pyx_v_V1, PyArrayObject *__pyx_v_Y2, PyArrayObject *__pyx_v_V2, PyObject *__pyx_v_id, int __pyx_v_axis, PyArrayObject *__pyx_v_Magics, unsigned int __pyx_v_niter); /* proto */ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */ static char __pyx_k_1[] = "ndarray is not C contiguous"; static char __pyx_k_3[] = "ndarray is not Fortran contiguous"; static char __pyx_k_5[] = "Non-native byte order not supported"; static char __pyx_k_7[] = "unknown dtype code in numpy.pxd (%d)"; static char __pyx_k_8[] = "Format string allocated too short, see comment in numpy.pxd"; static char __pyx_k_11[] = "Format string allocated too short."; static char __pyx_k_13[] = "\nRoutines for massively univariate random-effect and mixed-effect\nanalysis. 
Two-sample case.\n\nAuthor: Alexis Roche, 2008.\n"; static char __pyx_k_14[] = "0.1"; static char __pyx_k_17[] = "/Users/mb312/dev_trees/nipy/nipy/labs/group/twosample.pyx"; static char __pyx_k_18[] = "nipy.labs.group.twosample"; static char __pyx_k__B[] = "B"; static char __pyx_k__H[] = "H"; static char __pyx_k__I[] = "I"; static char __pyx_k__L[] = "L"; static char __pyx_k__O[] = "O"; static char __pyx_k__Q[] = "Q"; static char __pyx_k__T[] = "T"; static char __pyx_k__b[] = "b"; static char __pyx_k__d[] = "d"; static char __pyx_k__f[] = "f"; static char __pyx_k__g[] = "g"; static char __pyx_k__h[] = "h"; static char __pyx_k__i[] = "i"; static char __pyx_k__l[] = "l"; static char __pyx_k__n[] = "n"; static char __pyx_k__q[] = "q"; static char __pyx_k__t[] = "t"; static char __pyx_k__V1[] = "V1"; static char __pyx_k__V2[] = "V2"; static char __pyx_k__Y1[] = "Y1"; static char __pyx_k__Y2[] = "Y2"; static char __pyx_k__Zd[] = "Zd"; static char __pyx_k__Zf[] = "Zf"; static char __pyx_k__Zg[] = "Zg"; static char __pyx_k__id[] = "id"; static char __pyx_k__n1[] = "n1"; static char __pyx_k__n2[] = "n2"; static char __pyx_k__np[] = "np"; static char __pyx_k__v1[] = "v1"; static char __pyx_k__v2[] = "v2"; static char __pyx_k__vp[] = "vp"; static char __pyx_k__y1[] = "y1"; static char __pyx_k__y2[] = "y2"; static char __pyx_k__yp[] = "yp"; static char __pyx_k__idx[] = "idx"; static char __pyx_k__nex[] = "nex"; static char __pyx_k__axis[] = "axis"; static char __pyx_k__dims[] = "dims"; static char __pyx_k__idx1[] = "idx1"; static char __pyx_k__idx2[] = "idx2"; static char __pyx_k__simu[] = "simu"; static char __pyx_k__stat[] = "stat"; static char __pyx_k__magic[] = "magic"; static char __pyx_k__multi[] = "multi"; static char __pyx_k__niter[] = "niter"; static char __pyx_k__nsimu[] = "nsimu"; static char __pyx_k__numpy[] = "numpy"; static char __pyx_k__range[] = "range"; static char __pyx_k__stats[] = "stats"; static char __pyx_k__zeros[] = "zeros"; static char __pyx_k__Magics[] = "Magics"; static char __pyx_k__magics[] = "magics"; static char __pyx_k__student[] = "student"; static char __pyx_k____main__[] = "__main__"; static char __pyx_k____test__[] = "__test__"; static char __pyx_k__stat_mfx[] = "stat_mfx"; static char __pyx_k__wilcoxon[] = "wilcoxon"; static char __pyx_k__flag_stat[] = "flag_stat"; static char __pyx_k__ValueError[] = "ValueError"; static char __pyx_k____version__[] = "__version__"; static char __pyx_k__student_mfx[] = "student_mfx"; static char __pyx_k__RuntimeError[] = "RuntimeError"; static char __pyx_k__count_permutations[] = "count_permutations"; static PyObject *__pyx_kp_u_1; static PyObject *__pyx_kp_u_11; static PyObject *__pyx_kp_s_14; static PyObject *__pyx_kp_s_17; static PyObject *__pyx_n_s_18; static PyObject *__pyx_kp_u_3; static PyObject *__pyx_kp_u_5; static PyObject *__pyx_kp_u_7; static PyObject *__pyx_kp_u_8; static PyObject *__pyx_n_s__Magics; static PyObject *__pyx_n_s__RuntimeError; static PyObject *__pyx_n_s__T; static PyObject *__pyx_n_s__V1; static PyObject *__pyx_n_s__V2; static PyObject *__pyx_n_s__ValueError; static PyObject *__pyx_n_s__Y1; static PyObject *__pyx_n_s__Y2; static PyObject *__pyx_n_s____main__; static PyObject *__pyx_n_s____test__; static PyObject *__pyx_n_s____version__; static PyObject *__pyx_n_s__axis; static PyObject *__pyx_n_s__count_permutations; static PyObject *__pyx_n_s__dims; static PyObject *__pyx_n_s__flag_stat; static PyObject *__pyx_n_s__i; static PyObject *__pyx_n_s__id; static PyObject *__pyx_n_s__idx; static PyObject 
*__pyx_n_s__idx1; static PyObject *__pyx_n_s__idx2; static PyObject *__pyx_n_s__magic; static PyObject *__pyx_n_s__magics; static PyObject *__pyx_n_s__multi; static PyObject *__pyx_n_s__n; static PyObject *__pyx_n_s__n1; static PyObject *__pyx_n_s__n2; static PyObject *__pyx_n_s__nex; static PyObject *__pyx_n_s__niter; static PyObject *__pyx_n_s__np; static PyObject *__pyx_n_s__nsimu; static PyObject *__pyx_n_s__numpy; static PyObject *__pyx_n_s__range; static PyObject *__pyx_n_s__simu; static PyObject *__pyx_n_s__stat; static PyObject *__pyx_n_s__stat_mfx; static PyObject *__pyx_n_s__stats; static PyObject *__pyx_n_s__student; static PyObject *__pyx_n_s__student_mfx; static PyObject *__pyx_n_s__t; static PyObject *__pyx_n_s__v1; static PyObject *__pyx_n_s__v2; static PyObject *__pyx_n_s__vp; static PyObject *__pyx_n_s__wilcoxon; static PyObject *__pyx_n_s__y1; static PyObject *__pyx_n_s__y2; static PyObject *__pyx_n_s__yp; static PyObject *__pyx_n_s__zeros; static PyObject *__pyx_int_15; static PyObject *__pyx_k_tuple_2; static PyObject *__pyx_k_tuple_4; static PyObject *__pyx_k_tuple_6; static PyObject *__pyx_k_tuple_9; static PyObject *__pyx_k_tuple_10; static PyObject *__pyx_k_tuple_12; static PyObject *__pyx_k_tuple_15; static PyObject *__pyx_k_tuple_19; static PyObject *__pyx_k_tuple_21; static PyObject *__pyx_k_codeobj_16; static PyObject *__pyx_k_codeobj_20; static PyObject *__pyx_k_codeobj_22; /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_5group_9twosample_1count_permutations(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_4nipy_4labs_5group_9twosample_1count_permutations = {__Pyx_NAMESTR("count_permutations"), (PyCFunction)__pyx_pw_4nipy_4labs_5group_9twosample_1count_permutations, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(0)}; static PyObject *__pyx_pw_4nipy_4labs_5group_9twosample_1count_permutations(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { unsigned int __pyx_v_n1; unsigned int __pyx_v_n2; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("count_permutations (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__n1,&__pyx_n_s__n2,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__n1)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__n2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("count_permutations", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 60; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "count_permutations") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 60; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_n1 = __Pyx_PyInt_AsUnsignedInt(values[0]); if (unlikely((__pyx_v_n1 == (unsigned int)-1) && PyErr_Occurred())) {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 60; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_n2 = __Pyx_PyInt_AsUnsignedInt(values[1]); if (unlikely((__pyx_v_n2 == (unsigned int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 60; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("count_permutations", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 60; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.labs.group.twosample.count_permutations", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4nipy_4labs_5group_9twosample_count_permutations(__pyx_self, __pyx_v_n1, __pyx_v_n2); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/labs/group/twosample.pyx":60 * * * def count_permutations(unsigned int n1, unsigned int n2): # <<<<<<<<<<<<<< * cdef double n * fff_twosample_permutation(NULL, NULL, n1, n2, &n) */ static PyObject *__pyx_pf_4nipy_4labs_5group_9twosample_count_permutations(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_n1, unsigned int __pyx_v_n2) { double __pyx_v_n; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("count_permutations", 0); /* "nipy/labs/group/twosample.pyx":62 * def count_permutations(unsigned int n1, unsigned int n2): * cdef double n * fff_twosample_permutation(NULL, NULL, n1, n2, &n) # <<<<<<<<<<<<<< * return int(n) * */ fff_twosample_permutation(NULL, NULL, __pyx_v_n1, __pyx_v_n2, (&__pyx_v_n)); /* "nipy/labs/group/twosample.pyx":63 * cdef double n * fff_twosample_permutation(NULL, NULL, n1, n2, &n) * return int(n) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyFloat_FromDouble(__pyx_v_n); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 63; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 63; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyObject_Call(((PyObject *)((PyObject*)(&PyInt_Type))), ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 63; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("nipy.labs.group.twosample.count_permutations", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_5group_9twosample_3stat(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_4labs_5group_9twosample_2stat[] = "\n T = stat(Y1, Y2, id='student', axis=0, magics=None).\n \n Compute a two-sample test statistic (Y1>Y2) over a number of\n deterministic or random permutations.\n "; static PyMethodDef 
__pyx_mdef_4nipy_4labs_5group_9twosample_3stat = {__Pyx_NAMESTR("stat"), (PyCFunction)__pyx_pw_4nipy_4labs_5group_9twosample_3stat, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4nipy_4labs_5group_9twosample_2stat)}; static PyObject *__pyx_pw_4nipy_4labs_5group_9twosample_3stat(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_Y1 = 0; PyArrayObject *__pyx_v_Y2 = 0; PyObject *__pyx_v_id = 0; int __pyx_v_axis; PyArrayObject *__pyx_v_Magics = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("stat (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__Y1,&__pyx_n_s__Y2,&__pyx_n_s__id,&__pyx_n_s__axis,&__pyx_n_s__Magics,0}; PyObject* values[5] = {0,0,0,0,0}; values[2] = ((PyObject *)__pyx_n_s__student); /* "nipy/labs/group/twosample.pyx":66 * * * def stat(ndarray Y1, ndarray Y2, id='student', int axis=0, ndarray Magics=None): # <<<<<<<<<<<<<< * """ * T = stat(Y1, Y2, id='student', axis=0, magics=None). */ values[4] = (PyObject *)((PyArrayObject *)Py_None); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__Y1)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__Y2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("stat", 0, 2, 5, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__id); if (value) { values[2] = value; kw_args--; } } case 3: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__axis); if (value) { values[3] = value; kw_args--; } } case 4: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__Magics); if (value) { values[4] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "stat") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_Y1 = ((PyArrayObject *)values[0]); __pyx_v_Y2 = ((PyArrayObject *)values[1]); __pyx_v_id = values[2]; if (values[3]) { __pyx_v_axis = __Pyx_PyInt_AsInt(values[3]); if (unlikely((__pyx_v_axis == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else { __pyx_v_axis = ((int)0); } __pyx_v_Magics = ((PyArrayObject *)values[4]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("stat", 0, 2, 5, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; 
__pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.labs.group.twosample.stat", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_Y1), __pyx_ptype_5numpy_ndarray, 1, "Y1", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_Y2), __pyx_ptype_5numpy_ndarray, 1, "Y2", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_Magics), __pyx_ptype_5numpy_ndarray, 1, "Magics", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_4nipy_4labs_5group_9twosample_2stat(__pyx_self, __pyx_v_Y1, __pyx_v_Y2, __pyx_v_id, __pyx_v_axis, __pyx_v_Magics); goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4nipy_4labs_5group_9twosample_2stat(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_Y1, PyArrayObject *__pyx_v_Y2, PyObject *__pyx_v_id, int __pyx_v_axis, PyArrayObject *__pyx_v_Magics) { fff_vector *__pyx_v_y1; fff_vector *__pyx_v_y2; fff_vector *__pyx_v_t; fff_vector *__pyx_v_yp; fff_vector *__pyx_v_magics; fff_array *__pyx_v_idx1; fff_array *__pyx_v_idx2; unsigned int __pyx_v_n; unsigned int __pyx_v_n1; unsigned int __pyx_v_n2; unsigned int __pyx_v_nex; unsigned long __pyx_v_simu; unsigned long __pyx_v_nsimu; unsigned long __pyx_v_idx; fff_twosample_stat *__pyx_v_stat; fff_twosample_stat_flag __pyx_v_flag_stat; double __pyx_v_magic; fffpy_multi_iterator *__pyx_v_multi; PyObject *__pyx_v_dims = NULL; PyObject *__pyx_v_T = NULL; int __pyx_v_i; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; fff_twosample_stat_flag __pyx_t_3; int __pyx_t_4; size_t __pyx_t_5; int __pyx_t_6; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; unsigned long __pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("stat", 0); /* "nipy/labs/group/twosample.pyx":78 * cdef unsigned long int simu, nsimu, idx * cdef fff_twosample_stat* stat * cdef fff_twosample_stat_flag flag_stat = stats[id] # <<<<<<<<<<<<<< * cdef double magic * cdef fffpy_multi_iterator* multi */ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__stats); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_GetItem(__pyx_t_1, __pyx_v_id); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_3 = ((fff_twosample_stat_flag)PyInt_AsLong(__pyx_t_2)); if (unlikely(PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v_flag_stat = __pyx_t_3; /* "nipy/labs/group/twosample.pyx":83 * * # Get number of observations * n1 = Y1.shape[axis] # <<<<<<<<<<<<<< * n2 = Y2.shape[axis] * n = n1 + n2 */ __pyx_v_n1 = ((unsigned int)(__pyx_v_Y1->dimensions[__pyx_v_axis])); /* "nipy/labs/group/twosample.pyx":84 * # Get number of observations * n1 = Y1.shape[axis] * n2 = Y2.shape[axis] # <<<<<<<<<<<<<< * n = n1 + n2 
* */ __pyx_v_n2 = ((unsigned int)(__pyx_v_Y2->dimensions[__pyx_v_axis])); /* "nipy/labs/group/twosample.pyx":85 * n1 = Y1.shape[axis] * n2 = Y2.shape[axis] * n = n1 + n2 # <<<<<<<<<<<<<< * * # Read out magic numbers */ __pyx_v_n = (__pyx_v_n1 + __pyx_v_n2); /* "nipy/labs/group/twosample.pyx":88 * * # Read out magic numbers * if Magics == None: # <<<<<<<<<<<<<< * magics = fff_vector_new(1) * magics.data[0] = 0 ## Just to make sure */ __pyx_t_2 = PyObject_RichCompare(((PyObject *)__pyx_v_Magics), Py_None, Py_EQ); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 88; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 88; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (__pyx_t_4) { /* "nipy/labs/group/twosample.pyx":89 * # Read out magic numbers * if Magics == None: * magics = fff_vector_new(1) # <<<<<<<<<<<<<< * magics.data[0] = 0 ## Just to make sure * else: */ __pyx_v_magics = fff_vector_new(1); /* "nipy/labs/group/twosample.pyx":90 * if Magics == None: * magics = fff_vector_new(1) * magics.data[0] = 0 ## Just to make sure # <<<<<<<<<<<<<< * else: * magics = fff_vector_fromPyArray(Magics) */ (__pyx_v_magics->data[0]) = 0.0; goto __pyx_L3; } /*else*/ { /* "nipy/labs/group/twosample.pyx":92 * magics.data[0] = 0 ## Just to make sure * else: * magics = fff_vector_fromPyArray(Magics) # <<<<<<<<<<<<<< * * # Create output array */ __pyx_v_magics = fff_vector_fromPyArray(__pyx_v_Magics); } __pyx_L3:; /* "nipy/labs/group/twosample.pyx":95 * * # Create output array * nsimu = magics.size # <<<<<<<<<<<<<< * dims = [Y1.shape[i] for i in range(Y1.ndim)] * dims[axis] = nsimu */ __pyx_t_5 = __pyx_v_magics->size; __pyx_v_nsimu = __pyx_t_5; /* "nipy/labs/group/twosample.pyx":96 * # Create output array * nsimu = magics.size * dims = [Y1.shape[i] for i in range(Y1.ndim)] # <<<<<<<<<<<<<< * dims[axis] = nsimu * T = np.zeros(dims) */ __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 96; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_6 = __pyx_v_Y1->nd; for (__pyx_t_7 = 0; __pyx_t_7 < __pyx_t_6; __pyx_t_7+=1) { __pyx_v_i = __pyx_t_7; __pyx_t_1 = __Pyx_PyInt_to_py_Py_intptr_t((__pyx_v_Y1->dimensions[__pyx_v_i])); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 96; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (unlikely(__Pyx_PyList_Append(__pyx_t_2, (PyObject*)__pyx_t_1))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 96; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } __pyx_t_1 = ((PyObject *)__pyx_t_2); __Pyx_INCREF(__pyx_t_1); __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; __pyx_v_dims = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/group/twosample.pyx":97 * nsimu = magics.size * dims = [Y1.shape[i] for i in range(Y1.ndim)] * dims[axis] = nsimu # <<<<<<<<<<<<<< * T = np.zeros(dims) * */ __pyx_t_1 = PyLong_FromUnsignedLong(__pyx_v_nsimu); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 97; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (__Pyx_SetItemInt(((PyObject *)__pyx_v_dims), __pyx_v_axis, __pyx_t_1, sizeof(int), PyInt_FromLong) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 97; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 
0; /* "nipy/labs/group/twosample.pyx":98 * dims = [Y1.shape[i] for i in range(Y1.ndim)] * dims[axis] = nsimu * T = np.zeros(dims) # <<<<<<<<<<<<<< * * # Create local structure */ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 98; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__zeros); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 98; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 98; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)__pyx_v_dims)); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_v_dims)); __Pyx_GIVEREF(((PyObject *)__pyx_v_dims)); __pyx_t_8 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 98; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __pyx_v_T = __pyx_t_8; __pyx_t_8 = 0; /* "nipy/labs/group/twosample.pyx":101 * * # Create local structure * yp = fff_vector_new(n) # <<<<<<<<<<<<<< * idx1 = fff_array_new1d(FFF_UINT, n1) * idx2 = fff_array_new1d(FFF_UINT, n2) */ __pyx_v_yp = fff_vector_new(__pyx_v_n); /* "nipy/labs/group/twosample.pyx":102 * # Create local structure * yp = fff_vector_new(n) * idx1 = fff_array_new1d(FFF_UINT, n1) # <<<<<<<<<<<<<< * idx2 = fff_array_new1d(FFF_UINT, n2) * stat = fff_twosample_stat_new(n1, n2, flag_stat) */ __pyx_v_idx1 = fff_array_new1d(FFF_UINT, __pyx_v_n1); /* "nipy/labs/group/twosample.pyx":103 * yp = fff_vector_new(n) * idx1 = fff_array_new1d(FFF_UINT, n1) * idx2 = fff_array_new1d(FFF_UINT, n2) # <<<<<<<<<<<<<< * stat = fff_twosample_stat_new(n1, n2, flag_stat) * */ __pyx_v_idx2 = fff_array_new1d(FFF_UINT, __pyx_v_n2); /* "nipy/labs/group/twosample.pyx":104 * idx1 = fff_array_new1d(FFF_UINT, n1) * idx2 = fff_array_new1d(FFF_UINT, n2) * stat = fff_twosample_stat_new(n1, n2, flag_stat) # <<<<<<<<<<<<<< * * # Multi-iterator */ __pyx_v_stat = fff_twosample_stat_new(__pyx_v_n1, __pyx_v_n2, __pyx_v_flag_stat); /* "nipy/labs/group/twosample.pyx":107 * * # Multi-iterator * multi = fffpy_multi_iterator_new(3, axis, Y1, Y2, T) # <<<<<<<<<<<<<< * * # Vector views */ __pyx_v_multi = fffpy_multi_iterator_new(3, __pyx_v_axis, ((void *)__pyx_v_Y1), ((void *)__pyx_v_Y2), ((void *)__pyx_v_T)); /* "nipy/labs/group/twosample.pyx":110 * * # Vector views * y1 = multi.vector[0] # <<<<<<<<<<<<<< * y2 = multi.vector[1] * t = multi.vector[2] */ __pyx_v_y1 = (__pyx_v_multi->vector[0]); /* "nipy/labs/group/twosample.pyx":111 * # Vector views * y1 = multi.vector[0] * y2 = multi.vector[1] # <<<<<<<<<<<<<< * t = multi.vector[2] * */ __pyx_v_y2 = (__pyx_v_multi->vector[1]); /* "nipy/labs/group/twosample.pyx":112 * y1 = multi.vector[0] * y2 = multi.vector[1] * t = multi.vector[2] # <<<<<<<<<<<<<< * * # Loop */ __pyx_v_t = (__pyx_v_multi->vector[2]); /* "nipy/labs/group/twosample.pyx":115 * * # Loop * for simu from 0 <= simu < nsimu: # <<<<<<<<<<<<<< * * # Set the magic number */ __pyx_t_9 = __pyx_v_nsimu; for (__pyx_v_simu = 0; __pyx_v_simu < __pyx_t_9; __pyx_v_simu++) { /* "nipy/labs/group/twosample.pyx":118 * * # Set the magic number * magic = magics.data[simu*magics.stride] # 
<<<<<<<<<<<<<< * * # Generate permutation */ __pyx_v_magic = (__pyx_v_magics->data[(__pyx_v_simu * __pyx_v_magics->stride)]); /* "nipy/labs/group/twosample.pyx":123 * nex = fff_twosample_permutation(idx1.data, * idx2.data, * n1, n2, &magic) # <<<<<<<<<<<<<< * * # Reset the multi-iterator */ __pyx_v_nex = fff_twosample_permutation(((unsigned int *)__pyx_v_idx1->data), ((unsigned int *)__pyx_v_idx2->data), __pyx_v_n1, __pyx_v_n2, (&__pyx_v_magic)); /* "nipy/labs/group/twosample.pyx":126 * * # Reset the multi-iterator * fffpy_multi_iterator_reset(multi) # <<<<<<<<<<<<<< * * # Perform the loop */ fffpy_multi_iterator_reset(__pyx_v_multi); /* "nipy/labs/group/twosample.pyx":129 * * # Perform the loop * idx = simu*t.stride # <<<<<<<<<<<<<< * while(multi.index < multi.size): * fff_twosample_apply_permutation(yp, NULL, y1, NULL, y2, NULL, nex, */ __pyx_v_idx = (__pyx_v_simu * __pyx_v_t->stride); /* "nipy/labs/group/twosample.pyx":130 * # Perform the loop * idx = simu*t.stride * while(multi.index < multi.size): # <<<<<<<<<<<<<< * fff_twosample_apply_permutation(yp, NULL, y1, NULL, y2, NULL, nex, * idx1.data, */ while (1) { __pyx_t_4 = (__pyx_v_multi->index < __pyx_v_multi->size); if (!__pyx_t_4) break; /* "nipy/labs/group/twosample.pyx":133 * fff_twosample_apply_permutation(yp, NULL, y1, NULL, y2, NULL, nex, * idx1.data, * idx2.data) # <<<<<<<<<<<<<< * t.data[idx] = fff_twosample_stat_eval(stat, yp) * fffpy_multi_iterator_update(multi) */ fff_twosample_apply_permutation(__pyx_v_yp, NULL, __pyx_v_y1, NULL, __pyx_v_y2, NULL, __pyx_v_nex, ((unsigned int *)__pyx_v_idx1->data), ((unsigned int *)__pyx_v_idx2->data)); /* "nipy/labs/group/twosample.pyx":134 * idx1.data, * idx2.data) * t.data[idx] = fff_twosample_stat_eval(stat, yp) # <<<<<<<<<<<<<< * fffpy_multi_iterator_update(multi) * */ (__pyx_v_t->data[__pyx_v_idx]) = fff_twosample_stat_eval(__pyx_v_stat, __pyx_v_yp); /* "nipy/labs/group/twosample.pyx":135 * idx2.data) * t.data[idx] = fff_twosample_stat_eval(stat, yp) * fffpy_multi_iterator_update(multi) # <<<<<<<<<<<<<< * * # Delete local structures */ fffpy_multi_iterator_update(__pyx_v_multi); } } /* "nipy/labs/group/twosample.pyx":138 * * # Delete local structures * fffpy_multi_iterator_delete(multi) # <<<<<<<<<<<<<< * fff_vector_delete(magics) * fff_vector_delete(yp) */ fffpy_multi_iterator_delete(__pyx_v_multi); /* "nipy/labs/group/twosample.pyx":139 * # Delete local structures * fffpy_multi_iterator_delete(multi) * fff_vector_delete(magics) # <<<<<<<<<<<<<< * fff_vector_delete(yp) * fff_array_delete(idx1) */ fff_vector_delete(__pyx_v_magics); /* "nipy/labs/group/twosample.pyx":140 * fffpy_multi_iterator_delete(multi) * fff_vector_delete(magics) * fff_vector_delete(yp) # <<<<<<<<<<<<<< * fff_array_delete(idx1) * fff_array_delete(idx2) */ fff_vector_delete(__pyx_v_yp); /* "nipy/labs/group/twosample.pyx":141 * fff_vector_delete(magics) * fff_vector_delete(yp) * fff_array_delete(idx1) # <<<<<<<<<<<<<< * fff_array_delete(idx2) * fff_twosample_stat_delete(stat) */ fff_array_delete(__pyx_v_idx1); /* "nipy/labs/group/twosample.pyx":142 * fff_vector_delete(yp) * fff_array_delete(idx1) * fff_array_delete(idx2) # <<<<<<<<<<<<<< * fff_twosample_stat_delete(stat) * */ fff_array_delete(__pyx_v_idx2); /* "nipy/labs/group/twosample.pyx":143 * fff_array_delete(idx1) * fff_array_delete(idx2) * fff_twosample_stat_delete(stat) # <<<<<<<<<<<<<< * * # Return */ fff_twosample_stat_delete(__pyx_v_stat); /* "nipy/labs/group/twosample.pyx":146 * * # Return * return T # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); 
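/*
 * Illustrative Python-level usage of the wrappers generated above. This is a
 * minimal sketch only: the array shapes, the choice of magic numbers and the
 * float dtype of `magics` below are assumptions for illustration, not part of
 * this module.
 *
 *     import numpy as np
 *     from nipy.labs.group import twosample
 *
 *     # Two groups of observations, subjects along axis 0 (the default axis).
 *     Y1 = np.random.randn(10, 50)
 *     Y2 = np.random.randn(12, 50)
 *
 *     # Number of distinct two-sample permutations for these group sizes.
 *     nperm = twosample.count_permutations(10, 12)
 *
 *     # Test statistic with the default single magic number (Magics=None).
 *     T0 = twosample.stat(Y1, Y2, id='student', axis=0)
 *
 *     # Statistics for several permutations, selected by "magic" indices.
 *     magics = np.arange(10, dtype=np.double)
 *     T = twosample.stat(Y1, Y2, id='student', axis=0, Magics=magics)
 */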
__Pyx_INCREF(__pyx_v_T); __pyx_r = __pyx_v_T; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("nipy.labs.group.twosample.stat", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_dims); __Pyx_XDECREF(__pyx_v_T); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_5group_9twosample_5stat_mfx(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_4labs_5group_9twosample_4stat_mfx[] = "\n T = stat(Y1, V1, Y2, V2, id='student', axis=0, magics=None, niter=5).\n \n Compute a two-sample test statistic (Y1>Y2) over a number of\n deterministic or random permutations.\n "; static PyMethodDef __pyx_mdef_4nipy_4labs_5group_9twosample_5stat_mfx = {__Pyx_NAMESTR("stat_mfx"), (PyCFunction)__pyx_pw_4nipy_4labs_5group_9twosample_5stat_mfx, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4nipy_4labs_5group_9twosample_4stat_mfx)}; static PyObject *__pyx_pw_4nipy_4labs_5group_9twosample_5stat_mfx(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_Y1 = 0; PyArrayObject *__pyx_v_V1 = 0; PyArrayObject *__pyx_v_Y2 = 0; PyArrayObject *__pyx_v_V2 = 0; PyObject *__pyx_v_id = 0; int __pyx_v_axis; PyArrayObject *__pyx_v_Magics = 0; unsigned int __pyx_v_niter; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("stat_mfx (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__Y1,&__pyx_n_s__V1,&__pyx_n_s__Y2,&__pyx_n_s__V2,&__pyx_n_s__id,&__pyx_n_s__axis,&__pyx_n_s__Magics,&__pyx_n_s__niter,0}; PyObject* values[8] = {0,0,0,0,0,0,0,0}; values[4] = ((PyObject *)__pyx_n_s__student_mfx); /* "nipy/labs/group/twosample.pyx":150 * * def stat_mfx(ndarray Y1, ndarray V1, ndarray Y2, ndarray V2, * id='student_mfx', int axis=0, ndarray Magics=None, # <<<<<<<<<<<<<< * unsigned int niter=5): * """ */ values[6] = (PyObject *)((PyArrayObject *)Py_None); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__Y1)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__V1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("stat_mfx", 0, 4, 8, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 149; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__Y2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("stat_mfx", 0, 4, 8, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 149; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__V2)) != 0)) kw_args--; else { 
__Pyx_RaiseArgtupleInvalid("stat_mfx", 0, 4, 8, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 149; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__id); if (value) { values[4] = value; kw_args--; } } case 5: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__axis); if (value) { values[5] = value; kw_args--; } } case 6: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__Magics); if (value) { values[6] = value; kw_args--; } } case 7: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__niter); if (value) { values[7] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "stat_mfx") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 149; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_Y1 = ((PyArrayObject *)values[0]); __pyx_v_V1 = ((PyArrayObject *)values[1]); __pyx_v_Y2 = ((PyArrayObject *)values[2]); __pyx_v_V2 = ((PyArrayObject *)values[3]); __pyx_v_id = values[4]; if (values[5]) { __pyx_v_axis = __Pyx_PyInt_AsInt(values[5]); if (unlikely((__pyx_v_axis == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 150; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else { __pyx_v_axis = ((int)0); } __pyx_v_Magics = ((PyArrayObject *)values[6]); if (values[7]) { __pyx_v_niter = __Pyx_PyInt_AsUnsignedInt(values[7]); if (unlikely((__pyx_v_niter == (unsigned int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 151; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else { __pyx_v_niter = ((unsigned int)5); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("stat_mfx", 0, 4, 8, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 149; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.labs.group.twosample.stat_mfx", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_Y1), __pyx_ptype_5numpy_ndarray, 1, "Y1", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 149; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_V1), __pyx_ptype_5numpy_ndarray, 1, "V1", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 149; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_Y2), __pyx_ptype_5numpy_ndarray, 1, "Y2", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 149; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_V2), __pyx_ptype_5numpy_ndarray, 1, "V2", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 149; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_Magics), __pyx_ptype_5numpy_ndarray, 1, 
"Magics", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 150; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_4nipy_4labs_5group_9twosample_4stat_mfx(__pyx_self, __pyx_v_Y1, __pyx_v_V1, __pyx_v_Y2, __pyx_v_V2, __pyx_v_id, __pyx_v_axis, __pyx_v_Magics, __pyx_v_niter); goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/labs/group/twosample.pyx":149 * * * def stat_mfx(ndarray Y1, ndarray V1, ndarray Y2, ndarray V2, # <<<<<<<<<<<<<< * id='student_mfx', int axis=0, ndarray Magics=None, * unsigned int niter=5): */ static PyObject *__pyx_pf_4nipy_4labs_5group_9twosample_4stat_mfx(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_Y1, PyArrayObject *__pyx_v_V1, PyArrayObject *__pyx_v_Y2, PyArrayObject *__pyx_v_V2, PyObject *__pyx_v_id, int __pyx_v_axis, PyArrayObject *__pyx_v_Magics, unsigned int __pyx_v_niter) { fff_vector *__pyx_v_y1; fff_vector *__pyx_v_y2; fff_vector *__pyx_v_v1; fff_vector *__pyx_v_v2; fff_vector *__pyx_v_t; fff_vector *__pyx_v_yp; fff_vector *__pyx_v_vp; fff_vector *__pyx_v_magics; fff_array *__pyx_v_idx1; fff_array *__pyx_v_idx2; unsigned int __pyx_v_n; unsigned int __pyx_v_n1; unsigned int __pyx_v_n2; unsigned int __pyx_v_nex; unsigned long __pyx_v_simu; unsigned long __pyx_v_nsimu; unsigned long __pyx_v_idx; fff_twosample_stat_mfx *__pyx_v_stat; fff_twosample_stat_flag __pyx_v_flag_stat; double __pyx_v_magic; fffpy_multi_iterator *__pyx_v_multi; PyObject *__pyx_v_dims = NULL; PyObject *__pyx_v_T = NULL; int __pyx_v_i; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; fff_twosample_stat_flag __pyx_t_3; int __pyx_t_4; size_t __pyx_t_5; int __pyx_t_6; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; unsigned long __pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("stat_mfx", 0); /* "nipy/labs/group/twosample.pyx":163 * cdef unsigned long int simu, nsimu, idx * cdef fff_twosample_stat_mfx* stat * cdef fff_twosample_stat_flag flag_stat = stats[id] # <<<<<<<<<<<<<< * cdef double magic * cdef fffpy_multi_iterator* multi */ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__stats); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 163; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_GetItem(__pyx_t_1, __pyx_v_id); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 163; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_3 = ((fff_twosample_stat_flag)PyInt_AsLong(__pyx_t_2)); if (unlikely(PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 163; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v_flag_stat = __pyx_t_3; /* "nipy/labs/group/twosample.pyx":168 * * # Get number of observations * n1 = Y1.shape[axis] # <<<<<<<<<<<<<< * n2 = Y2.shape[axis] * n = n1 + n2 */ __pyx_v_n1 = ((unsigned int)(__pyx_v_Y1->dimensions[__pyx_v_axis])); /* "nipy/labs/group/twosample.pyx":169 * # Get number of observations * n1 = Y1.shape[axis] * n2 = Y2.shape[axis] # <<<<<<<<<<<<<< * n = n1 + n2 * */ __pyx_v_n2 = ((unsigned int)(__pyx_v_Y2->dimensions[__pyx_v_axis])); /* "nipy/labs/group/twosample.pyx":170 * n1 = Y1.shape[axis] * n2 = Y2.shape[axis] * n = n1 + n2 # <<<<<<<<<<<<<< * * # Read out magic numbers */ __pyx_v_n = (__pyx_v_n1 + __pyx_v_n2); /* "nipy/labs/group/twosample.pyx":173 * * # 
Read out magic numbers * if Magics == None: # <<<<<<<<<<<<<< * magics = fff_vector_new(1) * magics.data[0] = 0 ## Just to make sure */ __pyx_t_2 = PyObject_RichCompare(((PyObject *)__pyx_v_Magics), Py_None, Py_EQ); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (__pyx_t_4) { /* "nipy/labs/group/twosample.pyx":174 * # Read out magic numbers * if Magics == None: * magics = fff_vector_new(1) # <<<<<<<<<<<<<< * magics.data[0] = 0 ## Just to make sure * else: */ __pyx_v_magics = fff_vector_new(1); /* "nipy/labs/group/twosample.pyx":175 * if Magics == None: * magics = fff_vector_new(1) * magics.data[0] = 0 ## Just to make sure # <<<<<<<<<<<<<< * else: * magics = fff_vector_fromPyArray(Magics) */ (__pyx_v_magics->data[0]) = 0.0; goto __pyx_L3; } /*else*/ { /* "nipy/labs/group/twosample.pyx":177 * magics.data[0] = 0 ## Just to make sure * else: * magics = fff_vector_fromPyArray(Magics) # <<<<<<<<<<<<<< * * # Create output array */ __pyx_v_magics = fff_vector_fromPyArray(__pyx_v_Magics); } __pyx_L3:; /* "nipy/labs/group/twosample.pyx":180 * * # Create output array * nsimu = magics.size # <<<<<<<<<<<<<< * dims = [Y1.shape[i] for i in range(Y1.ndim)] * dims[axis] = nsimu */ __pyx_t_5 = __pyx_v_magics->size; __pyx_v_nsimu = __pyx_t_5; /* "nipy/labs/group/twosample.pyx":181 * # Create output array * nsimu = magics.size * dims = [Y1.shape[i] for i in range(Y1.ndim)] # <<<<<<<<<<<<<< * dims[axis] = nsimu * T = np.zeros(dims) */ __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 181; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_6 = __pyx_v_Y1->nd; for (__pyx_t_7 = 0; __pyx_t_7 < __pyx_t_6; __pyx_t_7+=1) { __pyx_v_i = __pyx_t_7; __pyx_t_1 = __Pyx_PyInt_to_py_Py_intptr_t((__pyx_v_Y1->dimensions[__pyx_v_i])); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 181; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (unlikely(__Pyx_PyList_Append(__pyx_t_2, (PyObject*)__pyx_t_1))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 181; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } __pyx_t_1 = ((PyObject *)__pyx_t_2); __Pyx_INCREF(__pyx_t_1); __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; __pyx_v_dims = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/group/twosample.pyx":182 * nsimu = magics.size * dims = [Y1.shape[i] for i in range(Y1.ndim)] * dims[axis] = nsimu # <<<<<<<<<<<<<< * T = np.zeros(dims) * */ __pyx_t_1 = PyLong_FromUnsignedLong(__pyx_v_nsimu); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 182; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (__Pyx_SetItemInt(((PyObject *)__pyx_v_dims), __pyx_v_axis, __pyx_t_1, sizeof(int), PyInt_FromLong) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 182; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/group/twosample.pyx":183 * dims = [Y1.shape[i] for i in range(Y1.ndim)] * dims[axis] = nsimu * T = np.zeros(dims) # <<<<<<<<<<<<<< * * # Create local structure */ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; 
__pyx_lineno = 183; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__zeros); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 183; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 183; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)__pyx_v_dims)); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_v_dims)); __Pyx_GIVEREF(((PyObject *)__pyx_v_dims)); __pyx_t_8 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 183; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __pyx_v_T = __pyx_t_8; __pyx_t_8 = 0; /* "nipy/labs/group/twosample.pyx":186 * * # Create local structure * yp = fff_vector_new(n) # <<<<<<<<<<<<<< * vp = fff_vector_new(n) * idx1 = fff_array_new1d(FFF_UINT, n1) */ __pyx_v_yp = fff_vector_new(__pyx_v_n); /* "nipy/labs/group/twosample.pyx":187 * # Create local structure * yp = fff_vector_new(n) * vp = fff_vector_new(n) # <<<<<<<<<<<<<< * idx1 = fff_array_new1d(FFF_UINT, n1) * idx2 = fff_array_new1d(FFF_UINT, n2) */ __pyx_v_vp = fff_vector_new(__pyx_v_n); /* "nipy/labs/group/twosample.pyx":188 * yp = fff_vector_new(n) * vp = fff_vector_new(n) * idx1 = fff_array_new1d(FFF_UINT, n1) # <<<<<<<<<<<<<< * idx2 = fff_array_new1d(FFF_UINT, n2) * stat = fff_twosample_stat_mfx_new(n1, n2, flag_stat) */ __pyx_v_idx1 = fff_array_new1d(FFF_UINT, __pyx_v_n1); /* "nipy/labs/group/twosample.pyx":189 * vp = fff_vector_new(n) * idx1 = fff_array_new1d(FFF_UINT, n1) * idx2 = fff_array_new1d(FFF_UINT, n2) # <<<<<<<<<<<<<< * stat = fff_twosample_stat_mfx_new(n1, n2, flag_stat) * stat.niter = niter */ __pyx_v_idx2 = fff_array_new1d(FFF_UINT, __pyx_v_n2); /* "nipy/labs/group/twosample.pyx":190 * idx1 = fff_array_new1d(FFF_UINT, n1) * idx2 = fff_array_new1d(FFF_UINT, n2) * stat = fff_twosample_stat_mfx_new(n1, n2, flag_stat) # <<<<<<<<<<<<<< * stat.niter = niter * */ __pyx_v_stat = fff_twosample_stat_mfx_new(__pyx_v_n1, __pyx_v_n2, __pyx_v_flag_stat); /* "nipy/labs/group/twosample.pyx":191 * idx2 = fff_array_new1d(FFF_UINT, n2) * stat = fff_twosample_stat_mfx_new(n1, n2, flag_stat) * stat.niter = niter # <<<<<<<<<<<<<< * * # Multi-iterator */ __pyx_v_stat->niter = __pyx_v_niter; /* "nipy/labs/group/twosample.pyx":197 * Y1, V1, * Y2, V2, * T) # <<<<<<<<<<<<<< * * # Vector views */ __pyx_v_multi = fffpy_multi_iterator_new(5, __pyx_v_axis, ((void *)__pyx_v_Y1), ((void *)__pyx_v_V1), ((void *)__pyx_v_Y2), ((void *)__pyx_v_V2), ((void *)__pyx_v_T)); /* "nipy/labs/group/twosample.pyx":200 * * # Vector views * y1 = multi.vector[0] # <<<<<<<<<<<<<< * v1 = multi.vector[1] * y2 = multi.vector[2] */ __pyx_v_y1 = (__pyx_v_multi->vector[0]); /* "nipy/labs/group/twosample.pyx":201 * # Vector views * y1 = multi.vector[0] * v1 = multi.vector[1] # <<<<<<<<<<<<<< * y2 = multi.vector[2] * v2 = multi.vector[3] */ __pyx_v_v1 = (__pyx_v_multi->vector[1]); /* "nipy/labs/group/twosample.pyx":202 * y1 = multi.vector[0] * v1 = multi.vector[1] * y2 = multi.vector[2] # <<<<<<<<<<<<<< * v2 = multi.vector[3] * t = multi.vector[4] */ __pyx_v_y2 = (__pyx_v_multi->vector[2]); /* "nipy/labs/group/twosample.pyx":203 * v1 = multi.vector[1] * y2 
= multi.vector[2] * v2 = multi.vector[3] # <<<<<<<<<<<<<< * t = multi.vector[4] * */ __pyx_v_v2 = (__pyx_v_multi->vector[3]); /* "nipy/labs/group/twosample.pyx":204 * y2 = multi.vector[2] * v2 = multi.vector[3] * t = multi.vector[4] # <<<<<<<<<<<<<< * * # Loop */ __pyx_v_t = (__pyx_v_multi->vector[4]); /* "nipy/labs/group/twosample.pyx":207 * * # Loop * for simu from 0 <= simu < nsimu: # <<<<<<<<<<<<<< * * # Set the magic number */ __pyx_t_9 = __pyx_v_nsimu; for (__pyx_v_simu = 0; __pyx_v_simu < __pyx_t_9; __pyx_v_simu++) { /* "nipy/labs/group/twosample.pyx":210 * * # Set the magic number * magic = magics.data[simu*magics.stride] # <<<<<<<<<<<<<< * * # Generate permutation */ __pyx_v_magic = (__pyx_v_magics->data[(__pyx_v_simu * __pyx_v_magics->stride)]); /* "nipy/labs/group/twosample.pyx":215 * nex = fff_twosample_permutation(idx1.data, * idx2.data, * n1, n2, &magic) # <<<<<<<<<<<<<< * * # Reset the multi-iterator */ __pyx_v_nex = fff_twosample_permutation(((unsigned int *)__pyx_v_idx1->data), ((unsigned int *)__pyx_v_idx2->data), __pyx_v_n1, __pyx_v_n2, (&__pyx_v_magic)); /* "nipy/labs/group/twosample.pyx":218 * * # Reset the multi-iterator * fffpy_multi_iterator_reset(multi) # <<<<<<<<<<<<<< * * # Perform the loop */ fffpy_multi_iterator_reset(__pyx_v_multi); /* "nipy/labs/group/twosample.pyx":221 * * # Perform the loop * idx = simu*t.stride # <<<<<<<<<<<<<< * while(multi.index < multi.size): * fff_twosample_apply_permutation(yp, vp, y1, v1, y2, v2, nex, */ __pyx_v_idx = (__pyx_v_simu * __pyx_v_t->stride); /* "nipy/labs/group/twosample.pyx":222 * # Perform the loop * idx = simu*t.stride * while(multi.index < multi.size): # <<<<<<<<<<<<<< * fff_twosample_apply_permutation(yp, vp, y1, v1, y2, v2, nex, * idx1.data, */ while (1) { __pyx_t_4 = (__pyx_v_multi->index < __pyx_v_multi->size); if (!__pyx_t_4) break; /* "nipy/labs/group/twosample.pyx":225 * fff_twosample_apply_permutation(yp, vp, y1, v1, y2, v2, nex, * idx1.data, * idx2.data) # <<<<<<<<<<<<<< * t.data[idx] = fff_twosample_stat_mfx_eval(stat, yp, vp) * fffpy_multi_iterator_update(multi) */ fff_twosample_apply_permutation(__pyx_v_yp, __pyx_v_vp, __pyx_v_y1, __pyx_v_v1, __pyx_v_y2, __pyx_v_v2, __pyx_v_nex, ((unsigned int *)__pyx_v_idx1->data), ((unsigned int *)__pyx_v_idx2->data)); /* "nipy/labs/group/twosample.pyx":226 * idx1.data, * idx2.data) * t.data[idx] = fff_twosample_stat_mfx_eval(stat, yp, vp) # <<<<<<<<<<<<<< * fffpy_multi_iterator_update(multi) * */ (__pyx_v_t->data[__pyx_v_idx]) = fff_twosample_stat_mfx_eval(__pyx_v_stat, __pyx_v_yp, __pyx_v_vp); /* "nipy/labs/group/twosample.pyx":227 * idx2.data) * t.data[idx] = fff_twosample_stat_mfx_eval(stat, yp, vp) * fffpy_multi_iterator_update(multi) # <<<<<<<<<<<<<< * * # Delete local structures */ fffpy_multi_iterator_update(__pyx_v_multi); } } /* "nipy/labs/group/twosample.pyx":230 * * # Delete local structures * fffpy_multi_iterator_delete(multi) # <<<<<<<<<<<<<< * fff_vector_delete(magics) * fff_vector_delete(yp) */ fffpy_multi_iterator_delete(__pyx_v_multi); /* "nipy/labs/group/twosample.pyx":231 * # Delete local structures * fffpy_multi_iterator_delete(multi) * fff_vector_delete(magics) # <<<<<<<<<<<<<< * fff_vector_delete(yp) * fff_vector_delete(vp) */ fff_vector_delete(__pyx_v_magics); /* "nipy/labs/group/twosample.pyx":232 * fffpy_multi_iterator_delete(multi) * fff_vector_delete(magics) * fff_vector_delete(yp) # <<<<<<<<<<<<<< * fff_vector_delete(vp) * fff_array_delete(idx1) */ fff_vector_delete(__pyx_v_yp); /* "nipy/labs/group/twosample.pyx":233 * 
fff_vector_delete(magics) * fff_vector_delete(yp) * fff_vector_delete(vp) # <<<<<<<<<<<<<< * fff_array_delete(idx1) * fff_array_delete(idx2) */ fff_vector_delete(__pyx_v_vp); /* "nipy/labs/group/twosample.pyx":234 * fff_vector_delete(yp) * fff_vector_delete(vp) * fff_array_delete(idx1) # <<<<<<<<<<<<<< * fff_array_delete(idx2) * fff_twosample_stat_mfx_delete(stat) */ fff_array_delete(__pyx_v_idx1); /* "nipy/labs/group/twosample.pyx":235 * fff_vector_delete(vp) * fff_array_delete(idx1) * fff_array_delete(idx2) # <<<<<<<<<<<<<< * fff_twosample_stat_mfx_delete(stat) * */ fff_array_delete(__pyx_v_idx2); /* "nipy/labs/group/twosample.pyx":236 * fff_array_delete(idx1) * fff_array_delete(idx2) * fff_twosample_stat_mfx_delete(stat) # <<<<<<<<<<<<<< * * # Return */ fff_twosample_stat_mfx_delete(__pyx_v_stat); /* "nipy/labs/group/twosample.pyx":239 * * # Return * return T # <<<<<<<<<<<<<< */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_T); __pyx_r = __pyx_v_T; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("nipy.labs.group.twosample.stat_mfx", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_dims); __Pyx_XDECREF(__pyx_v_T); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_pf_5numpy_7ndarray___getbuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":194 * # experimental exception made for __getbuffer__ and __releasebuffer__ * # -- the details of this may change. * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< * # This implementation of getbuffer is geared towards Cython * # requirements, and does not yet fullfill the PEP. 
*/ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_v_copy_shape; int __pyx_v_i; int __pyx_v_ndim; int __pyx_v_endian_detector; int __pyx_v_little_endian; int __pyx_v_t; char *__pyx_v_f; PyArray_Descr *__pyx_v_descr = 0; int __pyx_v_offset; int __pyx_v_hasfields; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; char *__pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getbuffer__", 0); if (__pyx_v_info != NULL) { __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); } /* "numpy.pxd":200 * # of flags * * if info == NULL: return # <<<<<<<<<<<<<< * * cdef int copy_shape, i, ndim */ __pyx_t_1 = (__pyx_v_info == NULL); if (__pyx_t_1) { __pyx_r = 0; goto __pyx_L0; goto __pyx_L3; } __pyx_L3:; /* "numpy.pxd":203 * * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((&endian_detector)[0] != 0) * */ __pyx_v_endian_detector = 1; /* "numpy.pxd":204 * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * * ndim = PyArray_NDIM(self) */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "numpy.pxd":206 * cdef bint little_endian = ((&endian_detector)[0] != 0) * * ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<< * * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_v_ndim = PyArray_NDIM(__pyx_v_self); /* "numpy.pxd":208 * ndim = PyArray_NDIM(self) * * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * copy_shape = 1 * else: */ __pyx_t_1 = ((sizeof(npy_intp)) != (sizeof(Py_ssize_t))); if (__pyx_t_1) { /* "numpy.pxd":209 * * if sizeof(npy_intp) != sizeof(Py_ssize_t): * copy_shape = 1 # <<<<<<<<<<<<<< * else: * copy_shape = 0 */ __pyx_v_copy_shape = 1; goto __pyx_L4; } /*else*/ { /* "numpy.pxd":211 * copy_shape = 1 * else: * copy_shape = 0 # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) */ __pyx_v_copy_shape = 0; } __pyx_L4:; /* "numpy.pxd":213 * copy_shape = 0 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS); if (__pyx_t_1) { /* "numpy.pxd":214 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not C contiguous") * */ __pyx_t_2 = (!PyArray_CHKFLAGS(__pyx_v_self, NPY_C_CONTIGUOUS)); __pyx_t_3 = __pyx_t_2; } else { __pyx_t_3 = __pyx_t_1; } if (__pyx_t_3) { /* "numpy.pxd":215 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_2), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; 
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L5; } __pyx_L5:; /* "numpy.pxd":217 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ __pyx_t_3 = ((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS); if (__pyx_t_3) { /* "numpy.pxd":218 * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not Fortran contiguous") * */ __pyx_t_1 = (!PyArray_CHKFLAGS(__pyx_v_self, NPY_F_CONTIGUOUS)); __pyx_t_2 = __pyx_t_1; } else { __pyx_t_2 = __pyx_t_3; } if (__pyx_t_2) { /* "numpy.pxd":219 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_4), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L6; } __pyx_L6:; /* "numpy.pxd":221 * raise ValueError(u"ndarray is not Fortran contiguous") * * info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<< * info.ndim = ndim * if copy_shape: */ __pyx_v_info->buf = PyArray_DATA(__pyx_v_self); /* "numpy.pxd":222 * * info.buf = PyArray_DATA(self) * info.ndim = ndim # <<<<<<<<<<<<<< * if copy_shape: * # Allocate new buffer for strides and shape info. */ __pyx_v_info->ndim = __pyx_v_ndim; /* "numpy.pxd":223 * info.buf = PyArray_DATA(self) * info.ndim = ndim * if copy_shape: # <<<<<<<<<<<<<< * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. */ if (__pyx_v_copy_shape) { /* "numpy.pxd":226 * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. * info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) # <<<<<<<<<<<<<< * info.shape = info.strides + ndim * for i in range(ndim): */ __pyx_v_info->strides = ((Py_ssize_t *)malloc((((sizeof(Py_ssize_t)) * ((size_t)__pyx_v_ndim)) * 2))); /* "numpy.pxd":227 * # This is allocated as one block, strides first. 
* info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) * info.shape = info.strides + ndim # <<<<<<<<<<<<<< * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] */ __pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim); /* "numpy.pxd":228 * info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) * info.shape = info.strides + ndim * for i in range(ndim): # <<<<<<<<<<<<<< * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] */ __pyx_t_5 = __pyx_v_ndim; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "numpy.pxd":229 * info.shape = info.strides + ndim * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<< * info.shape[i] = PyArray_DIMS(self)[i] * else: */ (__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]); /* "numpy.pxd":230 * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<< * else: * info.strides = PyArray_STRIDES(self) */ (__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]); } goto __pyx_L7; } /*else*/ { /* "numpy.pxd":232 * info.shape[i] = PyArray_DIMS(self)[i] * else: * info.strides = PyArray_STRIDES(self) # <<<<<<<<<<<<<< * info.shape = PyArray_DIMS(self) * info.suboffsets = NULL */ __pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self)); /* "numpy.pxd":233 * else: * info.strides = PyArray_STRIDES(self) * info.shape = PyArray_DIMS(self) # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) */ __pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(__pyx_v_self)); } __pyx_L7:; /* "numpy.pxd":234 * info.strides = PyArray_STRIDES(self) * info.shape = PyArray_DIMS(self) * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) */ __pyx_v_info->suboffsets = NULL; /* "numpy.pxd":235 * info.shape = PyArray_DIMS(self) * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<< * info.readonly = not PyArray_ISWRITEABLE(self) * */ __pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self); /* "numpy.pxd":236 * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<< * * cdef int t */ __pyx_v_info->readonly = (!PyArray_ISWRITEABLE(__pyx_v_self)); /* "numpy.pxd":239 * * cdef int t * cdef char* f = NULL # <<<<<<<<<<<<<< * cdef dtype descr = self.descr * cdef list stack */ __pyx_v_f = NULL; /* "numpy.pxd":240 * cdef int t * cdef char* f = NULL * cdef dtype descr = self.descr # <<<<<<<<<<<<<< * cdef list stack * cdef int offset */ __pyx_t_4 = ((PyObject *)__pyx_v_self->descr); __Pyx_INCREF(__pyx_t_4); __pyx_v_descr = ((PyArray_Descr *)__pyx_t_4); __pyx_t_4 = 0; /* "numpy.pxd":244 * cdef int offset * * cdef bint hasfields = PyDataType_HASFIELDS(descr) # <<<<<<<<<<<<<< * * if not hasfields and not copy_shape: */ __pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr); /* "numpy.pxd":246 * cdef bint hasfields = PyDataType_HASFIELDS(descr) * * if not hasfields and not copy_shape: # <<<<<<<<<<<<<< * # do not call releasebuffer * info.obj = None */ __pyx_t_2 = (!__pyx_v_hasfields); if (__pyx_t_2) { __pyx_t_3 = (!__pyx_v_copy_shape); __pyx_t_1 = __pyx_t_3; } else { __pyx_t_1 = __pyx_t_2; } if (__pyx_t_1) { /* "numpy.pxd":248 * if not hasfields and not copy_shape: * # do not call releasebuffer * info.obj = None # <<<<<<<<<<<<<< * else: * # need to call 
releasebuffer */ __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = Py_None; goto __pyx_L10; } /*else*/ { /* "numpy.pxd":251 * else: * # need to call releasebuffer * info.obj = self # <<<<<<<<<<<<<< * * if not hasfields: */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); } __pyx_L10:; /* "numpy.pxd":253 * info.obj = self * * if not hasfields: # <<<<<<<<<<<<<< * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or */ __pyx_t_1 = (!__pyx_v_hasfields); if (__pyx_t_1) { /* "numpy.pxd":254 * * if not hasfields: * t = descr.type_num # <<<<<<<<<<<<<< * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): */ __pyx_t_5 = __pyx_v_descr->type_num; __pyx_v_t = __pyx_t_5; /* "numpy.pxd":255 * if not hasfields: * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_1 = (__pyx_v_descr->byteorder == '>'); if (__pyx_t_1) { __pyx_t_2 = __pyx_v_little_endian; } else { __pyx_t_2 = __pyx_t_1; } if (!__pyx_t_2) { /* "numpy.pxd":256 * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" */ __pyx_t_1 = (__pyx_v_descr->byteorder == '<'); if (__pyx_t_1) { __pyx_t_3 = (!__pyx_v_little_endian); __pyx_t_7 = __pyx_t_3; } else { __pyx_t_7 = __pyx_t_1; } __pyx_t_1 = __pyx_t_7; } else { __pyx_t_1 = __pyx_t_2; } if (__pyx_t_1) { /* "numpy.pxd":257 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_6), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L12; } __pyx_L12:; /* "numpy.pxd":258 * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" */ __pyx_t_1 = (__pyx_v_t == NPY_BYTE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__b; goto __pyx_L13; } /* "numpy.pxd":259 * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" */ __pyx_t_1 = (__pyx_v_t == NPY_UBYTE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__B; goto __pyx_L13; } /* "numpy.pxd":260 * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" */ __pyx_t_1 = (__pyx_v_t == NPY_SHORT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__h; goto __pyx_L13; } /* "numpy.pxd":261 * elif t == NPY_UBYTE: f = "B" * elif t == 
NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" */ __pyx_t_1 = (__pyx_v_t == NPY_USHORT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__H; goto __pyx_L13; } /* "numpy.pxd":262 * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" */ __pyx_t_1 = (__pyx_v_t == NPY_INT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__i; goto __pyx_L13; } /* "numpy.pxd":263 * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" */ __pyx_t_1 = (__pyx_v_t == NPY_UINT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__I; goto __pyx_L13; } /* "numpy.pxd":264 * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" */ __pyx_t_1 = (__pyx_v_t == NPY_LONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__l; goto __pyx_L13; } /* "numpy.pxd":265 * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" */ __pyx_t_1 = (__pyx_v_t == NPY_ULONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__L; goto __pyx_L13; } /* "numpy.pxd":266 * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" */ __pyx_t_1 = (__pyx_v_t == NPY_LONGLONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__q; goto __pyx_L13; } /* "numpy.pxd":267 * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" */ __pyx_t_1 = (__pyx_v_t == NPY_ULONGLONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Q; goto __pyx_L13; } /* "numpy.pxd":268 * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" */ __pyx_t_1 = (__pyx_v_t == NPY_FLOAT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__f; goto __pyx_L13; } /* "numpy.pxd":269 * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" */ __pyx_t_1 = (__pyx_v_t == NPY_DOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__d; goto __pyx_L13; } /* "numpy.pxd":270 * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" */ __pyx_t_1 = (__pyx_v_t == NPY_LONGDOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__g; goto __pyx_L13; } /* "numpy.pxd":271 * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" */ __pyx_t_1 = (__pyx_v_t == NPY_CFLOAT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Zf; goto __pyx_L13; } /* "numpy.pxd":272 * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" */ __pyx_t_1 = (__pyx_v_t == NPY_CDOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Zd; goto __pyx_L13; } /* "numpy.pxd":273 * elif t 
== NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f = "O" * else: */ __pyx_t_1 = (__pyx_v_t == NPY_CLONGDOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Zg; goto __pyx_L13; } /* "numpy.pxd":274 * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ __pyx_t_1 = (__pyx_v_t == NPY_OBJECT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__O; goto __pyx_L13; } /*else*/ { /* "numpy.pxd":276 * elif t == NPY_OBJECT: f = "O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * info.format = f * return */ __pyx_t_4 = PyInt_FromLong(__pyx_v_t); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_8 = PyNumber_Remainder(((PyObject *)__pyx_kp_u_7), __pyx_t_4); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_8)); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_t_8)); __Pyx_GIVEREF(((PyObject *)__pyx_t_8)); __pyx_t_8 = 0; __pyx_t_8 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_8, 0, 0, 0); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L13:; /* "numpy.pxd":277 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f # <<<<<<<<<<<<<< * return * else: */ __pyx_v_info->format = __pyx_v_f; /* "numpy.pxd":278 * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f * return # <<<<<<<<<<<<<< * else: * info.format = stdlib.malloc(_buffer_format_string_len) */ __pyx_r = 0; goto __pyx_L0; goto __pyx_L11; } /*else*/ { /* "numpy.pxd":280 * return * else: * info.format = stdlib.malloc(_buffer_format_string_len) # <<<<<<<<<<<<<< * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 */ __pyx_v_info->format = ((char *)malloc(255)); /* "numpy.pxd":281 * else: * info.format = stdlib.malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment # <<<<<<<<<<<<<< * offset = 0 * f = _util_dtypestring(descr, info.format + 1, */ (__pyx_v_info->format[0]) = '^'; /* "numpy.pxd":282 * info.format = stdlib.malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 # <<<<<<<<<<<<<< * f = _util_dtypestring(descr, info.format + 1, * info.format + _buffer_format_string_len, */ __pyx_v_offset = 0; /* "numpy.pxd":285 * f = _util_dtypestring(descr, info.format + 1, * info.format + _buffer_format_string_len, * &offset) # <<<<<<<<<<<<<< * f[0] = c'\0' # Terminate format string * */ __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 255), (&__pyx_v_offset)); if (unlikely(__pyx_t_9 == NULL)) 
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 283; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_f = __pyx_t_9; /* "numpy.pxd":286 * info.format + _buffer_format_string_len, * &offset) * f[0] = c'\0' # Terminate format string # <<<<<<<<<<<<<< * * def __releasebuffer__(ndarray self, Py_buffer* info): */ (__pyx_v_f[0]) = '\x00'; } __pyx_L11:; __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { __Pyx_GOTREF(Py_None); __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; } __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_descr); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__releasebuffer__ (wrapper)", 0); __pyx_pf_5numpy_7ndarray_2__releasebuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info)); __Pyx_RefNannyFinishContext(); } /* "numpy.pxd":288 * f[0] = c'\0' # Terminate format string * * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< * if PyArray_HASFIELDS(self): * stdlib.free(info.format) */ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__releasebuffer__", 0); /* "numpy.pxd":289 * * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_t_1 = PyArray_HASFIELDS(__pyx_v_self); if (__pyx_t_1) { /* "numpy.pxd":290 * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): * stdlib.free(info.format) # <<<<<<<<<<<<<< * if sizeof(npy_intp) != sizeof(Py_ssize_t): * stdlib.free(info.strides) */ free(__pyx_v_info->format); goto __pyx_L3; } __pyx_L3:; /* "numpy.pxd":291 * if PyArray_HASFIELDS(self): * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * stdlib.free(info.strides) * # info.shape was stored after info.strides in the same block */ __pyx_t_1 = ((sizeof(npy_intp)) != (sizeof(Py_ssize_t))); if (__pyx_t_1) { /* "numpy.pxd":292 * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): * stdlib.free(info.strides) # <<<<<<<<<<<<<< * # info.shape was stored after info.strides in the same block * */ free(__pyx_v_info->strides); goto __pyx_L4; } __pyx_L4:; __Pyx_RefNannyFinishContext(); } /* "numpy.pxd":768 * ctypedef npy_cdouble complex_t * * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(1, a) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0); /* "numpy.pxd":769 * * cdef inline object PyArray_MultiIterNew1(a): * 
return PyArray_MultiIterNew(1, a) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew2(a, b): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 769; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":771 * return PyArray_MultiIterNew(1, a) * * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(2, a, b) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0); /* "numpy.pxd":772 * * cdef inline object PyArray_MultiIterNew2(a, b): * return PyArray_MultiIterNew(2, a, b) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew3(a, b, c): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 772; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":774 * return PyArray_MultiIterNew(2, a, b) * * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(3, a, b, c) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0); /* "numpy.pxd":775 * * cdef inline object PyArray_MultiIterNew3(a, b, c): * return PyArray_MultiIterNew(3, a, b, c) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 775; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":777 * return PyArray_MultiIterNew(3, a, b, c) * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(4, a, b, c, d) * */ static CYTHON_INLINE PyObject 
*__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0); /* "numpy.pxd":778 * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): * return PyArray_MultiIterNew(4, a, b, c, d) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 778; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":780 * return PyArray_MultiIterNew(4, a, b, c, d) * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(5, a, b, c, d, e) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0); /* "numpy.pxd":781 * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): * return PyArray_MultiIterNew(5, a, b, c, d, e) # <<<<<<<<<<<<<< * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 781; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":783 * return PyArray_MultiIterNew(5, a, b, c, d, e) * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< * # Recursive utility function used in __getbuffer__ to get format * # string. The new location in the format string is returned. 
*/ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) { PyArray_Descr *__pyx_v_child = 0; int __pyx_v_endian_detector; int __pyx_v_little_endian; PyObject *__pyx_v_fields = 0; PyObject *__pyx_v_childname = NULL; PyObject *__pyx_v_new_offset = NULL; PyObject *__pyx_v_t = NULL; char *__pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *(*__pyx_t_6)(PyObject *); int __pyx_t_7; int __pyx_t_8; int __pyx_t_9; int __pyx_t_10; long __pyx_t_11; char *__pyx_t_12; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_util_dtypestring", 0); /* "numpy.pxd":790 * cdef int delta_offset * cdef tuple i * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((&endian_detector)[0] != 0) * cdef tuple fields */ __pyx_v_endian_detector = 1; /* "numpy.pxd":791 * cdef tuple i * cdef int endian_detector = 1 * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * cdef tuple fields * */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "numpy.pxd":794 * cdef tuple fields * * for childname in descr.names: # <<<<<<<<<<<<<< * fields = descr.fields[childname] * child, new_offset = fields */ if (unlikely(((PyObject *)__pyx_v_descr->names) == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_1 = ((PyObject *)__pyx_v_descr->names); __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; for (;;) { if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif __Pyx_XDECREF(__pyx_v_childname); __pyx_v_childname = __pyx_t_3; __pyx_t_3 = 0; /* "numpy.pxd":795 * * for childname in descr.names: * fields = descr.fields[childname] # <<<<<<<<<<<<<< * child, new_offset = fields * */ __pyx_t_3 = PyObject_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (!__pyx_t_3) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected tuple, got %.200s", Py_TYPE(__pyx_t_3)->tp_name), 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_XDECREF(((PyObject *)__pyx_v_fields)); __pyx_v_fields = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "numpy.pxd":796 * for childname in descr.names: * fields = descr.fields[childname] * child, new_offset = fields # <<<<<<<<<<<<<< * * if (end - f) - (new_offset - offset[0]) < 15: */ if (likely(PyTuple_CheckExact(((PyObject *)__pyx_v_fields)))) { PyObject* sequence = ((PyObject *)__pyx_v_fields); #if CYTHON_COMPILING_IN_CPYTHON Py_ssize_t size = Py_SIZE(sequence); #else Py_ssize_t size = PySequence_Size(sequence); #endif if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else 
if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); #else __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else if (1) { __Pyx_RaiseNoneNotIterableError(); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } else { Py_ssize_t index = -1; __pyx_t_5 = PyObject_GetIter(((PyObject *)__pyx_v_fields)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = Py_TYPE(__pyx_t_5)->tp_iternext; index = 0; __pyx_t_3 = __pyx_t_6(__pyx_t_5); if (unlikely(!__pyx_t_3)) goto __pyx_L5_unpacking_failed; __Pyx_GOTREF(__pyx_t_3); index = 1; __pyx_t_4 = __pyx_t_6(__pyx_t_5); if (unlikely(!__pyx_t_4)) goto __pyx_L5_unpacking_failed; __Pyx_GOTREF(__pyx_t_4); if (__Pyx_IternextUnpackEndCheck(__pyx_t_6(__pyx_t_5), 2) < 0) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_6 = NULL; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L6_unpacking_done; __pyx_L5_unpacking_failed:; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_6 = NULL; if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_L6_unpacking_done:; } if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_XDECREF(((PyObject *)__pyx_v_child)); __pyx_v_child = ((PyArray_Descr *)__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_v_new_offset); __pyx_v_new_offset = __pyx_t_4; __pyx_t_4 = 0; /* "numpy.pxd":798 * child, new_offset = fields * * if (end - f) - (new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * */ __pyx_t_4 = PyInt_FromLong((__pyx_v_end - __pyx_v_f)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyInt_FromLong((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_3); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyNumber_Subtract(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyObject_RichCompare(__pyx_t_3, __pyx_int_15, Py_LT); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = 
__pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { /* "numpy.pxd":799 * * if (end - f) - (new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_t_5 = PyObject_Call(__pyx_builtin_RuntimeError, ((PyObject *)__pyx_k_tuple_9), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L7; } __pyx_L7:; /* "numpy.pxd":801 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_7 = (__pyx_v_child->byteorder == '>'); if (__pyx_t_7) { __pyx_t_8 = __pyx_v_little_endian; } else { __pyx_t_8 = __pyx_t_7; } if (!__pyx_t_8) { /* "numpy.pxd":802 * * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * # One could encode it in the format string and have Cython */ __pyx_t_7 = (__pyx_v_child->byteorder == '<'); if (__pyx_t_7) { __pyx_t_9 = (!__pyx_v_little_endian); __pyx_t_10 = __pyx_t_9; } else { __pyx_t_10 = __pyx_t_7; } __pyx_t_7 = __pyx_t_10; } else { __pyx_t_7 = __pyx_t_8; } if (__pyx_t_7) { /* "numpy.pxd":803 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_10), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L8; } __pyx_L8:; /* "numpy.pxd":813 * * # Output padding bytes * while offset[0] < new_offset: # <<<<<<<<<<<<<< * f[0] = 120 # "x"; pad byte * f += 1 */ while (1) { __pyx_t_5 = PyInt_FromLong((__pyx_v_offset[0])); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_t_5, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if 
(!__pyx_t_7) break; /* "numpy.pxd":814 * # Output padding bytes * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<< * f += 1 * offset[0] += 1 */ (__pyx_v_f[0]) = 120; /* "numpy.pxd":815 * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte * f += 1 # <<<<<<<<<<<<<< * offset[0] += 1 * */ __pyx_v_f = (__pyx_v_f + 1); /* "numpy.pxd":816 * f[0] = 120 # "x"; pad byte * f += 1 * offset[0] += 1 # <<<<<<<<<<<<<< * * offset[0] += child.itemsize */ __pyx_t_11 = 0; (__pyx_v_offset[__pyx_t_11]) = ((__pyx_v_offset[__pyx_t_11]) + 1); } /* "numpy.pxd":818 * offset[0] += 1 * * offset[0] += child.itemsize # <<<<<<<<<<<<<< * * if not PyDataType_HASFIELDS(child): */ __pyx_t_11 = 0; (__pyx_v_offset[__pyx_t_11]) = ((__pyx_v_offset[__pyx_t_11]) + __pyx_v_child->elsize); /* "numpy.pxd":820 * offset[0] += child.itemsize * * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< * t = child.type_num * if end - f < 5: */ __pyx_t_7 = (!PyDataType_HASFIELDS(__pyx_v_child)); if (__pyx_t_7) { /* "numpy.pxd":821 * * if not PyDataType_HASFIELDS(child): * t = child.type_num # <<<<<<<<<<<<<< * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") */ __pyx_t_3 = PyInt_FromLong(__pyx_v_child->type_num); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 821; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_XDECREF(__pyx_v_t); __pyx_v_t = __pyx_t_3; __pyx_t_3 = 0; /* "numpy.pxd":822 * if not PyDataType_HASFIELDS(child): * t = child.type_num * if end - f < 5: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short.") * */ __pyx_t_7 = ((__pyx_v_end - __pyx_v_f) < 5); if (__pyx_t_7) { /* "numpy.pxd":823 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_t_3 = PyObject_Call(__pyx_builtin_RuntimeError, ((PyObject *)__pyx_k_tuple_12), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L12; } __pyx_L12:; /* "numpy.pxd":826 * * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" */ __pyx_t_3 = PyInt_FromLong(NPY_BYTE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 98; goto __pyx_L13; } /* "numpy.pxd":827 * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" */ __pyx_t_5 = PyInt_FromLong(NPY_UBYTE); 
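/* Descriptive note on the surrounding cascade: each branch boxes one NPY_* type-number
   constant with PyInt_FromLong, compares it to t via PyObject_RichCompare(..., Py_EQ),
   and on a match writes the corresponding one-byte struct-format code into f[0]
   (98 == 'b', 66 == 'B', 104 == 'h', ...) before jumping to __pyx_L13; the integer
   literals mirror the "ticket #99" workaround noted in the embedded numpy.pxd source. */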
if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 66; goto __pyx_L13; } /* "numpy.pxd":828 * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" */ __pyx_t_3 = PyInt_FromLong(NPY_SHORT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 104; goto __pyx_L13; } /* "numpy.pxd":829 * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" */ __pyx_t_5 = PyInt_FromLong(NPY_USHORT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 72; goto __pyx_L13; } /* "numpy.pxd":830 * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" */ __pyx_t_3 = PyInt_FromLong(NPY_INT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 105; goto __pyx_L13; } /* "numpy.pxd":831 * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t 
== NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" */ __pyx_t_5 = PyInt_FromLong(NPY_UINT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 73; goto __pyx_L13; } /* "numpy.pxd":832 * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" */ __pyx_t_3 = PyInt_FromLong(NPY_LONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 108; goto __pyx_L13; } /* "numpy.pxd":833 * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" */ __pyx_t_5 = PyInt_FromLong(NPY_ULONG); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 76; goto __pyx_L13; } /* "numpy.pxd":834 * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" */ __pyx_t_3 = PyInt_FromLong(NPY_LONGLONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; 
__pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 113; goto __pyx_L13; } /* "numpy.pxd":835 * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" */ __pyx_t_5 = PyInt_FromLong(NPY_ULONGLONG); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 81; goto __pyx_L13; } /* "numpy.pxd":836 * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" */ __pyx_t_3 = PyInt_FromLong(NPY_FLOAT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 102; goto __pyx_L13; } /* "numpy.pxd":837 * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf */ __pyx_t_5 = PyInt_FromLong(NPY_DOUBLE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 100; goto __pyx_L13; } /* "numpy.pxd":838 * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd */ __pyx_t_3 = PyInt_FromLong(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = 
PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 103; goto __pyx_L13; } /* "numpy.pxd":839 * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg */ __pyx_t_5 = PyInt_FromLong(NPY_CFLOAT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 102; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L13; } /* "numpy.pxd":840 * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" */ __pyx_t_3 = PyInt_FromLong(NPY_CDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 100; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L13; } /* "numpy.pxd":841 * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: */ __pyx_t_5 = PyInt_FromLong(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 103; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L13; } /* "numpy.pxd":842 * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ __pyx_t_3 = PyInt_FromLong(NPY_OBJECT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 79; goto __pyx_L13; } /*else*/ { /* "numpy.pxd":844 * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * f += 1 * else: */ __pyx_t_5 = PyNumber_Remainder(((PyObject *)__pyx_kp_u_7), __pyx_v_t); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_5)); __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_t_5)); __Pyx_GIVEREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L13:; /* "numpy.pxd":845 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * f += 1 # <<<<<<<<<<<<<< * else: * # Cython ignores struct boundary information ("T{...}"), */ __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L11; } /*else*/ { /* "numpy.pxd":849 * # Cython ignores struct boundary information ("T{...}"), * # so don't output it * f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<< * return f * */ __pyx_t_12 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_12 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 849; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_f = __pyx_t_12; } __pyx_L11:; } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "numpy.pxd":850 * # so don't output it * f = _util_dtypestring(child, f, end, offset) * return f # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_f; goto __pyx_L0; __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_child); __Pyx_XDECREF(__pyx_v_fields); 
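/* Net effect of _util_dtypestring: descr.fields is walked in field order, 'x' pad bytes
   are emitted to cover alignment gaps, nested record dtypes are flattened (struct
   boundary markers "T{...}" are deliberately not emitted, per the source comment above),
   and each scalar field contributes its format code. As a hedged illustration not taken
   from this file: combined with the leading '^' written by __getbuffer__, an aligned
   record of an int8 field followed by a float64 field would be rendered roughly as
   "^bxxxxxxxd". */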
__Pyx_XDECREF(__pyx_v_childname); __Pyx_XDECREF(__pyx_v_new_offset); __Pyx_XDECREF(__pyx_v_t); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":965 * * * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< * cdef PyObject* baseptr * if base is None: */ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) { PyObject *__pyx_v_baseptr; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("set_array_base", 0); /* "numpy.pxd":967 * cdef inline void set_array_base(ndarray arr, object base): * cdef PyObject* baseptr * if base is None: # <<<<<<<<<<<<<< * baseptr = NULL * else: */ __pyx_t_1 = (__pyx_v_base == Py_None); if (__pyx_t_1) { /* "numpy.pxd":968 * cdef PyObject* baseptr * if base is None: * baseptr = NULL # <<<<<<<<<<<<<< * else: * Py_INCREF(base) # important to do this before decref below! */ __pyx_v_baseptr = NULL; goto __pyx_L3; } /*else*/ { /* "numpy.pxd":970 * baseptr = NULL * else: * Py_INCREF(base) # important to do this before decref below! # <<<<<<<<<<<<<< * baseptr = base * Py_XDECREF(arr.base) */ Py_INCREF(__pyx_v_base); /* "numpy.pxd":971 * else: * Py_INCREF(base) # important to do this before decref below! * baseptr = base # <<<<<<<<<<<<<< * Py_XDECREF(arr.base) * arr.base = baseptr */ __pyx_v_baseptr = ((PyObject *)__pyx_v_base); } __pyx_L3:; /* "numpy.pxd":972 * Py_INCREF(base) # important to do this before decref below! * baseptr = base * Py_XDECREF(arr.base) # <<<<<<<<<<<<<< * arr.base = baseptr * */ Py_XDECREF(__pyx_v_arr->base); /* "numpy.pxd":973 * baseptr = base * Py_XDECREF(arr.base) * arr.base = baseptr # <<<<<<<<<<<<<< * * cdef inline object get_array_base(ndarray arr): */ __pyx_v_arr->base = __pyx_v_baseptr; __Pyx_RefNannyFinishContext(); } /* "numpy.pxd":975 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("get_array_base", 0); /* "numpy.pxd":976 * * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: # <<<<<<<<<<<<<< * return None * else: */ __pyx_t_1 = (__pyx_v_arr->base == NULL); if (__pyx_t_1) { /* "numpy.pxd":977 * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: * return None # <<<<<<<<<<<<<< * else: * return arr.base */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_None); __pyx_r = Py_None; goto __pyx_L0; goto __pyx_L3; } /*else*/ { /* "numpy.pxd":979 * return None * else: * return arr.base # <<<<<<<<<<<<<< */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_arr->base)); __pyx_r = ((PyObject *)__pyx_v_arr->base); goto __pyx_L0; } __pyx_L3:; __pyx_r = Py_None; __Pyx_INCREF(Py_None); __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyMethodDef __pyx_methods[] = { {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 static struct PyModuleDef __pyx_moduledef = { PyModuleDef_HEAD_INIT, __Pyx_NAMESTR("twosample"), __Pyx_DOCSTR(__pyx_k_13), /* m_doc */ -1, /* m_size */ __pyx_methods /* m_methods */, NULL, /* m_reload */ NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_kp_u_1, __pyx_k_1, sizeof(__pyx_k_1), 0, 1, 0, 0}, {&__pyx_kp_u_11, __pyx_k_11, sizeof(__pyx_k_11), 0, 1, 0, 0}, {&__pyx_kp_s_14, __pyx_k_14, sizeof(__pyx_k_14), 0, 0, 1, 0}, 
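/* __pyx_string_tab lists every interned string constant this module needs at import
   time: error-message fragments from numpy.pxd, numpy helper names, and the identifiers
   of the Python-level API of this extension (count_permutations, stat, stat_mfx,
   student, student_mfx, wilcoxon, ...). Each entry records the C literal, its length,
   and encoding/intern flags consumed by __Pyx_InitStrings. */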
{&__pyx_kp_s_17, __pyx_k_17, sizeof(__pyx_k_17), 0, 0, 1, 0}, {&__pyx_n_s_18, __pyx_k_18, sizeof(__pyx_k_18), 0, 0, 1, 1}, {&__pyx_kp_u_3, __pyx_k_3, sizeof(__pyx_k_3), 0, 1, 0, 0}, {&__pyx_kp_u_5, __pyx_k_5, sizeof(__pyx_k_5), 0, 1, 0, 0}, {&__pyx_kp_u_7, __pyx_k_7, sizeof(__pyx_k_7), 0, 1, 0, 0}, {&__pyx_kp_u_8, __pyx_k_8, sizeof(__pyx_k_8), 0, 1, 0, 0}, {&__pyx_n_s__Magics, __pyx_k__Magics, sizeof(__pyx_k__Magics), 0, 0, 1, 1}, {&__pyx_n_s__RuntimeError, __pyx_k__RuntimeError, sizeof(__pyx_k__RuntimeError), 0, 0, 1, 1}, {&__pyx_n_s__T, __pyx_k__T, sizeof(__pyx_k__T), 0, 0, 1, 1}, {&__pyx_n_s__V1, __pyx_k__V1, sizeof(__pyx_k__V1), 0, 0, 1, 1}, {&__pyx_n_s__V2, __pyx_k__V2, sizeof(__pyx_k__V2), 0, 0, 1, 1}, {&__pyx_n_s__ValueError, __pyx_k__ValueError, sizeof(__pyx_k__ValueError), 0, 0, 1, 1}, {&__pyx_n_s__Y1, __pyx_k__Y1, sizeof(__pyx_k__Y1), 0, 0, 1, 1}, {&__pyx_n_s__Y2, __pyx_k__Y2, sizeof(__pyx_k__Y2), 0, 0, 1, 1}, {&__pyx_n_s____main__, __pyx_k____main__, sizeof(__pyx_k____main__), 0, 0, 1, 1}, {&__pyx_n_s____test__, __pyx_k____test__, sizeof(__pyx_k____test__), 0, 0, 1, 1}, {&__pyx_n_s____version__, __pyx_k____version__, sizeof(__pyx_k____version__), 0, 0, 1, 1}, {&__pyx_n_s__axis, __pyx_k__axis, sizeof(__pyx_k__axis), 0, 0, 1, 1}, {&__pyx_n_s__count_permutations, __pyx_k__count_permutations, sizeof(__pyx_k__count_permutations), 0, 0, 1, 1}, {&__pyx_n_s__dims, __pyx_k__dims, sizeof(__pyx_k__dims), 0, 0, 1, 1}, {&__pyx_n_s__flag_stat, __pyx_k__flag_stat, sizeof(__pyx_k__flag_stat), 0, 0, 1, 1}, {&__pyx_n_s__i, __pyx_k__i, sizeof(__pyx_k__i), 0, 0, 1, 1}, {&__pyx_n_s__id, __pyx_k__id, sizeof(__pyx_k__id), 0, 0, 1, 1}, {&__pyx_n_s__idx, __pyx_k__idx, sizeof(__pyx_k__idx), 0, 0, 1, 1}, {&__pyx_n_s__idx1, __pyx_k__idx1, sizeof(__pyx_k__idx1), 0, 0, 1, 1}, {&__pyx_n_s__idx2, __pyx_k__idx2, sizeof(__pyx_k__idx2), 0, 0, 1, 1}, {&__pyx_n_s__magic, __pyx_k__magic, sizeof(__pyx_k__magic), 0, 0, 1, 1}, {&__pyx_n_s__magics, __pyx_k__magics, sizeof(__pyx_k__magics), 0, 0, 1, 1}, {&__pyx_n_s__multi, __pyx_k__multi, sizeof(__pyx_k__multi), 0, 0, 1, 1}, {&__pyx_n_s__n, __pyx_k__n, sizeof(__pyx_k__n), 0, 0, 1, 1}, {&__pyx_n_s__n1, __pyx_k__n1, sizeof(__pyx_k__n1), 0, 0, 1, 1}, {&__pyx_n_s__n2, __pyx_k__n2, sizeof(__pyx_k__n2), 0, 0, 1, 1}, {&__pyx_n_s__nex, __pyx_k__nex, sizeof(__pyx_k__nex), 0, 0, 1, 1}, {&__pyx_n_s__niter, __pyx_k__niter, sizeof(__pyx_k__niter), 0, 0, 1, 1}, {&__pyx_n_s__np, __pyx_k__np, sizeof(__pyx_k__np), 0, 0, 1, 1}, {&__pyx_n_s__nsimu, __pyx_k__nsimu, sizeof(__pyx_k__nsimu), 0, 0, 1, 1}, {&__pyx_n_s__numpy, __pyx_k__numpy, sizeof(__pyx_k__numpy), 0, 0, 1, 1}, {&__pyx_n_s__range, __pyx_k__range, sizeof(__pyx_k__range), 0, 0, 1, 1}, {&__pyx_n_s__simu, __pyx_k__simu, sizeof(__pyx_k__simu), 0, 0, 1, 1}, {&__pyx_n_s__stat, __pyx_k__stat, sizeof(__pyx_k__stat), 0, 0, 1, 1}, {&__pyx_n_s__stat_mfx, __pyx_k__stat_mfx, sizeof(__pyx_k__stat_mfx), 0, 0, 1, 1}, {&__pyx_n_s__stats, __pyx_k__stats, sizeof(__pyx_k__stats), 0, 0, 1, 1}, {&__pyx_n_s__student, __pyx_k__student, sizeof(__pyx_k__student), 0, 0, 1, 1}, {&__pyx_n_s__student_mfx, __pyx_k__student_mfx, sizeof(__pyx_k__student_mfx), 0, 0, 1, 1}, {&__pyx_n_s__t, __pyx_k__t, sizeof(__pyx_k__t), 0, 0, 1, 1}, {&__pyx_n_s__v1, __pyx_k__v1, sizeof(__pyx_k__v1), 0, 0, 1, 1}, {&__pyx_n_s__v2, __pyx_k__v2, sizeof(__pyx_k__v2), 0, 0, 1, 1}, {&__pyx_n_s__vp, __pyx_k__vp, sizeof(__pyx_k__vp), 0, 0, 1, 1}, {&__pyx_n_s__wilcoxon, __pyx_k__wilcoxon, sizeof(__pyx_k__wilcoxon), 0, 0, 1, 1}, {&__pyx_n_s__y1, __pyx_k__y1, sizeof(__pyx_k__y1), 0, 0, 1, 
1}, {&__pyx_n_s__y2, __pyx_k__y2, sizeof(__pyx_k__y2), 0, 0, 1, 1}, {&__pyx_n_s__yp, __pyx_k__yp, sizeof(__pyx_k__yp), 0, 0, 1, 1}, {&__pyx_n_s__zeros, __pyx_k__zeros, sizeof(__pyx_k__zeros), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_range = __Pyx_GetName(__pyx_b, __pyx_n_s__range); if (!__pyx_builtin_range) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 96; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_ValueError = __Pyx_GetName(__pyx_b, __pyx_n_s__ValueError); if (!__pyx_builtin_ValueError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_RuntimeError = __Pyx_GetName(__pyx_b, __pyx_n_s__RuntimeError); if (!__pyx_builtin_RuntimeError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} return 0; __pyx_L1_error:; return -1; } static int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "numpy.pxd":215 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_k_tuple_2 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_2); __Pyx_INCREF(((PyObject *)__pyx_kp_u_1)); PyTuple_SET_ITEM(__pyx_k_tuple_2, 0, ((PyObject *)__pyx_kp_u_1)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_1)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_2)); /* "numpy.pxd":219 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_k_tuple_4 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_4); __Pyx_INCREF(((PyObject *)__pyx_kp_u_3)); PyTuple_SET_ITEM(__pyx_k_tuple_4, 0, ((PyObject *)__pyx_kp_u_3)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_3)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_4)); /* "numpy.pxd":257 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_k_tuple_6 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_6); __Pyx_INCREF(((PyObject *)__pyx_kp_u_5)); PyTuple_SET_ITEM(__pyx_k_tuple_6, 0, ((PyObject *)__pyx_kp_u_5)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_5)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_6)); /* "numpy.pxd":799 * * if (end - f) - (new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_k_tuple_9 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_9)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_9); __Pyx_INCREF(((PyObject *)__pyx_kp_u_8)); PyTuple_SET_ITEM(__pyx_k_tuple_9, 0, ((PyObject *)__pyx_kp_u_8)); 
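/* __Pyx_InitCachedConstants pre-builds the single-element argument tuples for the
   ValueError/RuntimeError raises in the buffer-protocol code (so they are not
   reconstructed on every raise) and, further down, the variable-name tuples and code
   objects attached to the module's def functions for introspection and tracebacks. */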
__Pyx_GIVEREF(((PyObject *)__pyx_kp_u_8)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_9)); /* "numpy.pxd":803 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_k_tuple_10 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_10)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_10); __Pyx_INCREF(((PyObject *)__pyx_kp_u_5)); PyTuple_SET_ITEM(__pyx_k_tuple_10, 0, ((PyObject *)__pyx_kp_u_5)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_5)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_10)); /* "numpy.pxd":823 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_k_tuple_12 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_12)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_12); __Pyx_INCREF(((PyObject *)__pyx_kp_u_11)); PyTuple_SET_ITEM(__pyx_k_tuple_12, 0, ((PyObject *)__pyx_kp_u_11)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_11)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_12)); /* "nipy/labs/group/twosample.pyx":60 * * * def count_permutations(unsigned int n1, unsigned int n2): # <<<<<<<<<<<<<< * cdef double n * fff_twosample_permutation(NULL, NULL, n1, n2, &n) */ __pyx_k_tuple_15 = PyTuple_New(3); if (unlikely(!__pyx_k_tuple_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 60; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_15); __Pyx_INCREF(((PyObject *)__pyx_n_s__n1)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 0, ((PyObject *)__pyx_n_s__n1)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__n1)); __Pyx_INCREF(((PyObject *)__pyx_n_s__n2)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 1, ((PyObject *)__pyx_n_s__n2)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__n2)); __Pyx_INCREF(((PyObject *)__pyx_n_s__n)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 2, ((PyObject *)__pyx_n_s__n)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__n)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_15)); __pyx_k_codeobj_16 = (PyObject*)__Pyx_PyCode_New(2, 0, 3, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_15, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_17, __pyx_n_s__count_permutations, 60, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_16)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 60; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/group/twosample.pyx":66 * * * def stat(ndarray Y1, ndarray Y2, id='student', int axis=0, ndarray Magics=None): # <<<<<<<<<<<<<< * """ * T = stat(Y1, Y2, id='student', axis=0, magics=None). 
*/ __pyx_k_tuple_19 = PyTuple_New(26); if (unlikely(!__pyx_k_tuple_19)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_19); __Pyx_INCREF(((PyObject *)__pyx_n_s__Y1)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 0, ((PyObject *)__pyx_n_s__Y1)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Y1)); __Pyx_INCREF(((PyObject *)__pyx_n_s__Y2)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 1, ((PyObject *)__pyx_n_s__Y2)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Y2)); __Pyx_INCREF(((PyObject *)__pyx_n_s__id)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 2, ((PyObject *)__pyx_n_s__id)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__id)); __Pyx_INCREF(((PyObject *)__pyx_n_s__axis)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 3, ((PyObject *)__pyx_n_s__axis)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__axis)); __Pyx_INCREF(((PyObject *)__pyx_n_s__Magics)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 4, ((PyObject *)__pyx_n_s__Magics)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Magics)); __Pyx_INCREF(((PyObject *)__pyx_n_s__y1)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 5, ((PyObject *)__pyx_n_s__y1)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__y1)); __Pyx_INCREF(((PyObject *)__pyx_n_s__y2)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 6, ((PyObject *)__pyx_n_s__y2)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__y2)); __Pyx_INCREF(((PyObject *)__pyx_n_s__t)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 7, ((PyObject *)__pyx_n_s__t)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__t)); __Pyx_INCREF(((PyObject *)__pyx_n_s__yp)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 8, ((PyObject *)__pyx_n_s__yp)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__yp)); __Pyx_INCREF(((PyObject *)__pyx_n_s__magics)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 9, ((PyObject *)__pyx_n_s__magics)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__magics)); __Pyx_INCREF(((PyObject *)__pyx_n_s__idx1)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 10, ((PyObject *)__pyx_n_s__idx1)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__idx1)); __Pyx_INCREF(((PyObject *)__pyx_n_s__idx2)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 11, ((PyObject *)__pyx_n_s__idx2)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__idx2)); __Pyx_INCREF(((PyObject *)__pyx_n_s__n)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 12, ((PyObject *)__pyx_n_s__n)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__n)); __Pyx_INCREF(((PyObject *)__pyx_n_s__n1)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 13, ((PyObject *)__pyx_n_s__n1)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__n1)); __Pyx_INCREF(((PyObject *)__pyx_n_s__n2)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 14, ((PyObject *)__pyx_n_s__n2)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__n2)); __Pyx_INCREF(((PyObject *)__pyx_n_s__nex)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 15, ((PyObject *)__pyx_n_s__nex)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__nex)); __Pyx_INCREF(((PyObject *)__pyx_n_s__simu)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 16, ((PyObject *)__pyx_n_s__simu)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__simu)); __Pyx_INCREF(((PyObject *)__pyx_n_s__nsimu)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 17, ((PyObject *)__pyx_n_s__nsimu)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__nsimu)); __Pyx_INCREF(((PyObject *)__pyx_n_s__idx)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 18, ((PyObject *)__pyx_n_s__idx)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__idx)); __Pyx_INCREF(((PyObject *)__pyx_n_s__stat)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 19, ((PyObject *)__pyx_n_s__stat)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__stat)); __Pyx_INCREF(((PyObject *)__pyx_n_s__flag_stat)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 20, ((PyObject *)__pyx_n_s__flag_stat)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__flag_stat)); 
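/* __pyx_k_tuple_19 holds the local variable names for the code object of stat();
   together with __pyx_k_tuple_15 (count_permutations) and __pyx_k_tuple_21 (stat_mfx)
   it only affects introspection and tracebacks, not runtime behaviour. Illustrative
   usage of the Python-level API generated from nipy/labs/group/twosample.pyx, a sketch
   only -- the import path below is an assumption, not taken from this file; Y1 and Y2
   are NumPy arrays and n1, n2 unsigned ints:

       from nipy.labs.group import twosample   # assumed install location
       nperm = twosample.count_permutations(n1, n2)
       T = twosample.stat(Y1, Y2, id='student', axis=0)
*/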
__Pyx_INCREF(((PyObject *)__pyx_n_s__magic)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 21, ((PyObject *)__pyx_n_s__magic)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__magic)); __Pyx_INCREF(((PyObject *)__pyx_n_s__multi)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 22, ((PyObject *)__pyx_n_s__multi)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__multi)); __Pyx_INCREF(((PyObject *)__pyx_n_s__dims)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 23, ((PyObject *)__pyx_n_s__dims)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__dims)); __Pyx_INCREF(((PyObject *)__pyx_n_s__T)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 24, ((PyObject *)__pyx_n_s__T)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__T)); __Pyx_INCREF(((PyObject *)__pyx_n_s__i)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 25, ((PyObject *)__pyx_n_s__i)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__i)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_19)); __pyx_k_codeobj_20 = (PyObject*)__Pyx_PyCode_New(5, 0, 26, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_19, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_17, __pyx_n_s__stat, 66, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_20)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/group/twosample.pyx":149 * * * def stat_mfx(ndarray Y1, ndarray V1, ndarray Y2, ndarray V2, # <<<<<<<<<<<<<< * id='student_mfx', int axis=0, ndarray Magics=None, * unsigned int niter=5): */ __pyx_k_tuple_21 = PyTuple_New(32); if (unlikely(!__pyx_k_tuple_21)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 149; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_21); __Pyx_INCREF(((PyObject *)__pyx_n_s__Y1)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 0, ((PyObject *)__pyx_n_s__Y1)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Y1)); __Pyx_INCREF(((PyObject *)__pyx_n_s__V1)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 1, ((PyObject *)__pyx_n_s__V1)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__V1)); __Pyx_INCREF(((PyObject *)__pyx_n_s__Y2)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 2, ((PyObject *)__pyx_n_s__Y2)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Y2)); __Pyx_INCREF(((PyObject *)__pyx_n_s__V2)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 3, ((PyObject *)__pyx_n_s__V2)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__V2)); __Pyx_INCREF(((PyObject *)__pyx_n_s__id)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 4, ((PyObject *)__pyx_n_s__id)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__id)); __Pyx_INCREF(((PyObject *)__pyx_n_s__axis)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 5, ((PyObject *)__pyx_n_s__axis)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__axis)); __Pyx_INCREF(((PyObject *)__pyx_n_s__Magics)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 6, ((PyObject *)__pyx_n_s__Magics)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Magics)); __Pyx_INCREF(((PyObject *)__pyx_n_s__niter)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 7, ((PyObject *)__pyx_n_s__niter)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__niter)); __Pyx_INCREF(((PyObject *)__pyx_n_s__y1)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 8, ((PyObject *)__pyx_n_s__y1)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__y1)); __Pyx_INCREF(((PyObject *)__pyx_n_s__y2)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 9, ((PyObject *)__pyx_n_s__y2)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__y2)); __Pyx_INCREF(((PyObject *)__pyx_n_s__v1)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 10, ((PyObject *)__pyx_n_s__v1)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__v1)); __Pyx_INCREF(((PyObject *)__pyx_n_s__v2)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 11, ((PyObject *)__pyx_n_s__v2)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__v2)); __Pyx_INCREF(((PyObject *)__pyx_n_s__t)); 
PyTuple_SET_ITEM(__pyx_k_tuple_21, 12, ((PyObject *)__pyx_n_s__t)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__t)); __Pyx_INCREF(((PyObject *)__pyx_n_s__yp)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 13, ((PyObject *)__pyx_n_s__yp)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__yp)); __Pyx_INCREF(((PyObject *)__pyx_n_s__vp)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 14, ((PyObject *)__pyx_n_s__vp)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__vp)); __Pyx_INCREF(((PyObject *)__pyx_n_s__magics)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 15, ((PyObject *)__pyx_n_s__magics)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__magics)); __Pyx_INCREF(((PyObject *)__pyx_n_s__idx1)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 16, ((PyObject *)__pyx_n_s__idx1)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__idx1)); __Pyx_INCREF(((PyObject *)__pyx_n_s__idx2)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 17, ((PyObject *)__pyx_n_s__idx2)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__idx2)); __Pyx_INCREF(((PyObject *)__pyx_n_s__n)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 18, ((PyObject *)__pyx_n_s__n)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__n)); __Pyx_INCREF(((PyObject *)__pyx_n_s__n1)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 19, ((PyObject *)__pyx_n_s__n1)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__n1)); __Pyx_INCREF(((PyObject *)__pyx_n_s__n2)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 20, ((PyObject *)__pyx_n_s__n2)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__n2)); __Pyx_INCREF(((PyObject *)__pyx_n_s__nex)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 21, ((PyObject *)__pyx_n_s__nex)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__nex)); __Pyx_INCREF(((PyObject *)__pyx_n_s__simu)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 22, ((PyObject *)__pyx_n_s__simu)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__simu)); __Pyx_INCREF(((PyObject *)__pyx_n_s__nsimu)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 23, ((PyObject *)__pyx_n_s__nsimu)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__nsimu)); __Pyx_INCREF(((PyObject *)__pyx_n_s__idx)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 24, ((PyObject *)__pyx_n_s__idx)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__idx)); __Pyx_INCREF(((PyObject *)__pyx_n_s__stat)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 25, ((PyObject *)__pyx_n_s__stat)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__stat)); __Pyx_INCREF(((PyObject *)__pyx_n_s__flag_stat)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 26, ((PyObject *)__pyx_n_s__flag_stat)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__flag_stat)); __Pyx_INCREF(((PyObject *)__pyx_n_s__magic)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 27, ((PyObject *)__pyx_n_s__magic)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__magic)); __Pyx_INCREF(((PyObject *)__pyx_n_s__multi)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 28, ((PyObject *)__pyx_n_s__multi)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__multi)); __Pyx_INCREF(((PyObject *)__pyx_n_s__dims)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 29, ((PyObject *)__pyx_n_s__dims)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__dims)); __Pyx_INCREF(((PyObject *)__pyx_n_s__T)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 30, ((PyObject *)__pyx_n_s__T)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__T)); __Pyx_INCREF(((PyObject *)__pyx_n_s__i)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 31, ((PyObject *)__pyx_n_s__i)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__i)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_21)); __pyx_k_codeobj_22 = (PyObject*)__Pyx_PyCode_New(8, 0, 32, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_21, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_17, __pyx_n_s__stat_mfx, 149, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_22)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 149; __pyx_clineno = 
__LINE__; goto __pyx_L1_error;} __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_InitGlobals(void) { if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __pyx_int_15 = PyInt_FromLong(15); if (unlikely(!__pyx_int_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; return 0; __pyx_L1_error:; return -1; } #if PY_MAJOR_VERSION < 3 PyMODINIT_FUNC inittwosample(void); /*proto*/ PyMODINIT_FUNC inittwosample(void) #else PyMODINIT_FUNC PyInit_twosample(void); /*proto*/ PyMODINIT_FUNC PyInit_twosample(void) #endif { PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannyDeclarations #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit_twosample(void)", 0); if ( __Pyx_check_binary_version() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #ifdef __Pyx_CyFunction_USED if (__Pyx_CyFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? 
*/ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4(__Pyx_NAMESTR("twosample"), __pyx_methods, __Pyx_DOCSTR(__pyx_k_13), 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (!PyDict_GetItemString(modules, "nipy.labs.group.twosample")) { if (unlikely(PyDict_SetItemString(modules, "nipy.labs.group.twosample", __pyx_m) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } } #endif __pyx_b = PyImport_AddModule(__Pyx_NAMESTR(__Pyx_BUILTIN_MODULE_NAME)); if (unlikely(!__pyx_b)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #if CYTHON_COMPILING_IN_PYPY Py_INCREF(__pyx_b); #endif if (__Pyx_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; /*--- Initialize various global constants etc. ---*/ if (unlikely(__Pyx_InitGlobals() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (__pyx_module_is_main_nipy__labs__group__twosample) { if (__Pyx_SetAttrString(__pyx_m, "__name__", __pyx_n_s____main__) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; } /*--- Builtin init code ---*/ if (unlikely(__Pyx_InitCachedBuiltins() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Constants init code ---*/ if (unlikely(__Pyx_InitCachedConstants() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Global init code ---*/ /*--- Variable export code ---*/ /*--- Function export code ---*/ /*--- Type init code ---*/ /*--- Type import code ---*/ __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "type", #if CYTHON_COMPILING_IN_PYPY sizeof(PyTypeObject), #else sizeof(PyHeapTypeObject), #endif 0); if (unlikely(!__pyx_ptype_7cpython_4type_type)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_5numpy_dtype)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 155; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 169; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 178; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); 
if (unlikely(!__pyx_ptype_5numpy_ufunc)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 861; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Variable import code ---*/ /*--- Function import code ---*/ /*--- Execution code ---*/ /* "nipy/labs/group/twosample.pyx":9 * """ * * __version__ = '0.1' # <<<<<<<<<<<<<< * * */ if (PyObject_SetAttr(__pyx_m, __pyx_n_s____version__, ((PyObject *)__pyx_kp_s_14)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/group/twosample.pyx":49 * * # Initialize numpy * fffpy_import_array() # <<<<<<<<<<<<<< * import_array() * import numpy as np */ fffpy_import_array(); /* "nipy/labs/group/twosample.pyx":50 * # Initialize numpy * fffpy_import_array() * import_array() # <<<<<<<<<<<<<< * import numpy as np * */ import_array(); /* "nipy/labs/group/twosample.pyx":51 * fffpy_import_array() * import_array() * import numpy as np # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_Import(((PyObject *)__pyx_n_s__numpy), 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 51; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__np, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 51; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/group/twosample.pyx":55 * * # Stat dictionary * stats = {'student': FFF_TWOSAMPLE_STUDENT, # <<<<<<<<<<<<<< * 'wilcoxon': FFF_TWOSAMPLE_WILCOXON, * 'student_mfx': FFF_TWOSAMPLE_STUDENT_MFX} */ __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 55; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_1)); __pyx_t_2 = PyInt_FromLong(FFF_TWOSAMPLE_STUDENT); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 55; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__student), __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 55; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "nipy/labs/group/twosample.pyx":56 * # Stat dictionary * stats = {'student': FFF_TWOSAMPLE_STUDENT, * 'wilcoxon': FFF_TWOSAMPLE_WILCOXON, # <<<<<<<<<<<<<< * 'student_mfx': FFF_TWOSAMPLE_STUDENT_MFX} * */ __pyx_t_2 = PyInt_FromLong(FFF_TWOSAMPLE_WILCOXON); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__wilcoxon), __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 55; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "nipy/labs/group/twosample.pyx":57 * stats = {'student': FFF_TWOSAMPLE_STUDENT, * 'wilcoxon': FFF_TWOSAMPLE_WILCOXON, * 'student_mfx': FFF_TWOSAMPLE_STUDENT_MFX} # <<<<<<<<<<<<<< * * */ __pyx_t_2 = PyInt_FromLong(FFF_TWOSAMPLE_STUDENT_MFX); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__student_mfx), __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 55; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (PyObject_SetAttr(__pyx_m, __pyx_n_s__stats, ((PyObject *)__pyx_t_1)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 55; __pyx_clineno = 
__LINE__; goto __pyx_L1_error;} __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; /* "nipy/labs/group/twosample.pyx":60 * * * def count_permutations(unsigned int n1, unsigned int n2): # <<<<<<<<<<<<<< * cdef double n * fff_twosample_permutation(NULL, NULL, n1, n2, &n) */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_5group_9twosample_1count_permutations, NULL, __pyx_n_s_18); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 60; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__count_permutations, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 60; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/group/twosample.pyx":66 * * * def stat(ndarray Y1, ndarray Y2, id='student', int axis=0, ndarray Magics=None): # <<<<<<<<<<<<<< * """ * T = stat(Y1, Y2, id='student', axis=0, magics=None). */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_5group_9twosample_3stat, NULL, __pyx_n_s_18); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__stat, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/group/twosample.pyx":149 * * * def stat_mfx(ndarray Y1, ndarray V1, ndarray Y2, ndarray V2, # <<<<<<<<<<<<<< * id='student_mfx', int axis=0, ndarray Magics=None, * unsigned int niter=5): */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_5group_9twosample_5stat_mfx, NULL, __pyx_n_s_18); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 149; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__stat_mfx, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 149; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/group/twosample.pyx":1 * # -*- Mode: Python -*- Not really, but the syntax is close enough # <<<<<<<<<<<<<< * """ * Routines for massively univariate random-effect and mixed-effect */ __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_1)); if (PyObject_SetAttr(__pyx_m, __pyx_n_s____test__, ((PyObject *)__pyx_t_1)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; /* "numpy.pxd":975 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); if (__pyx_m) { __Pyx_AddTraceback("init nipy.labs.group.twosample", __pyx_clineno, __pyx_lineno, __pyx_filename); Py_DECREF(__pyx_m); __pyx_m = 0; } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init nipy.labs.group.twosample"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if PY_MAJOR_VERSION < 3 return; #else return __pyx_m; #endif } /* Runtime support code */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule((char *)modname); if (!m) goto end; p = PyObject_GetAttrString(m, (char 
*)"RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* CYTHON_REFNANNY */ static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name) { PyObject *result; result = PyObject_GetAttr(dict, name); if (!result) { if (dict != __pyx_b) { PyErr_Clear(); result = PyObject_GetAttr(__pyx_b, name); } if (!result) { PyErr_SetObject(PyExc_NameError, name); } } return result; } static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%s() takes %s %" CYTHON_FORMAT_SSIZE_T "d positional argument%s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? "" : "s", num_found); } static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 
1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%s() got an unexpected keyword argument '%s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } static int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, const char *name, int exact) { if (!type) { PyErr_Format(PyExc_SystemError, "Missing type object"); return 0; } if (none_allowed && obj == Py_None) return 1; else if (exact) { if (Py_TYPE(obj) == type) return 1; } else { if (PyObject_TypeCheck(obj, type)) return 1; } PyErr_Format(PyExc_TypeError, "Argument '%s' has incorrect type (expected %s, got %s)", name, type->tp_name, Py_TYPE(obj)->tp_name); return 0; } static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb) { #if CYTHON_COMPILING_IN_CPYTHON PyObject *tmp_type, *tmp_value, *tmp_tb; PyThreadState *tstate = PyThreadState_GET(); tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_Restore(type, value, tb); #endif } static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb) { #if CYTHON_COMPILING_IN_CPYTHON PyThreadState *tstate = PyThreadState_GET(); *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(type, value, tb); #endif } #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } #if PY_VERSION_HEX < 0x02050000 if (PyClass_Check(type)) { #else if (PyType_Check(type)) { #endif #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; #if PY_VERSION_HEX < 0x02050000 if (PyInstance_Check(type)) { type = (PyObject*) ((PyInstanceObject*)type)->in_class; Py_INCREF(type); } else { type = 0; PyErr_SetString(PyExc_TypeError, "raise: exception must be an old-style class or instance"); goto raise_error; } #else type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } #endif } __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); 
Py_XDECREF(tb); return; } #else /* Python 3+ */ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyEval_CallObject(type, args); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } if (cause && cause != Py_None) { PyObject *fixed_cause; if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { PyThreadState *tstate = PyThreadState_GET(); PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } } bad: Py_XDECREF(owned_instance); return; } #endif static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); } static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more than %" CYTHON_FORMAT_SSIZE_T "d value%s to unpack", index, (index == 1) ? 
"" : "s"); } static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); } static CYTHON_INLINE int __Pyx_IterFinish(void) { #if CYTHON_COMPILING_IN_CPYTHON PyThreadState *tstate = PyThreadState_GET(); PyObject* exc_type = tstate->curexc_type; if (unlikely(exc_type)) { if (likely(exc_type == PyExc_StopIteration) || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration)) { PyObject *exc_value, *exc_tb; exc_value = tstate->curexc_value; exc_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; Py_DECREF(exc_type); Py_XDECREF(exc_value); Py_XDECREF(exc_tb); return 0; } else { return -1; } } return 0; #else if (unlikely(PyErr_Occurred())) { if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) { PyErr_Clear(); return 0; } else { return -1; } } return 0; #endif } static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected) { if (unlikely(retval)) { Py_DECREF(retval); __Pyx_RaiseTooManyValuesError(expected); return -1; } else { return __Pyx_IterFinish(); } return 0; } static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { if (unlikely(!type)) { PyErr_Format(PyExc_SystemError, "Missing type object"); return 0; } if (likely(PyObject_TypeCheck(obj, type))) return 1; PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", Py_TYPE(obj)->tp_name, type->tp_name); return 0; } static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level) { PyObject *py_import = 0; PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; py_import = __Pyx_GetAttrString(__pyx_b, "__import__"); if (!py_import) goto bad; if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; #if PY_VERSION_HEX >= 0x02050000 { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if (strchr(__Pyx_MODULE_NAME, '.')) { /* try package relative import first */ PyObject *py_level = PyInt_FromLong(1); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; /* try absolute import on failure */ } #endif if (!module) { PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); } } #else if (level>0) { PyErr_SetString(PyExc_RuntimeError, "Relative import is not supported for Python <=2.4."); goto bad; } module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, NULL); #endif bad: Py_XDECREF(empty_list); Py_XDECREF(py_import); Py_XDECREF(empty_dict); return module; } static CYTHON_INLINE PyObject *__Pyx_PyInt_to_py_Py_intptr_t(Py_intptr_t val) { const Py_intptr_t neg_one = (Py_intptr_t)-1, const_zero = (Py_intptr_t)0; const int is_unsigned = const_zero < neg_one; if ((sizeof(Py_intptr_t) == sizeof(char)) || (sizeof(Py_intptr_t) == sizeof(short))) { return PyInt_FromLong((long)val); } else if ((sizeof(Py_intptr_t) == sizeof(int)) || (sizeof(Py_intptr_t) == sizeof(long))) { if (is_unsigned) return PyLong_FromUnsignedLong((unsigned long)val); else return 
PyInt_FromLong((long)val); } else if (sizeof(Py_intptr_t) == sizeof(PY_LONG_LONG)) { if (is_unsigned) return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG)val); else return PyLong_FromLongLong((PY_LONG_LONG)val); } else { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; return _PyLong_FromByteArray(bytes, sizeof(Py_intptr_t), little, !is_unsigned); } } #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return ::std::complex< float >(x, y); } #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return x + y*(__pyx_t_float_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { __pyx_t_float_complex z; z.real = x; z.imag = y; return z; } #endif #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex a, __pyx_t_float_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float denom = b.real * b.real + b.imag * b.imag; z.real = (a.real * b.real + a.imag * b.imag) / denom; z.imag = (a.imag * b.real - a.real * b.imag) / denom; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrtf(z.real*z.real + z.imag*z.imag); #else return hypotf(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { float denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(a, a); case 3: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(z, a); case 4: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } r = a.real; theta = 0; } else { r = __Pyx_c_absf(a); theta = atan2f(a.imag, a.real); } lnr = logf(r); z_r = expf(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cosf(z_theta); z.imag = z_r * 
sinf(z_theta); return z; } #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return ::std::complex< double >(x, y); } #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return x + y*(__pyx_t_double_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { __pyx_t_double_complex z; z.real = x; z.imag = y; return z; } #endif #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex a, __pyx_t_double_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double denom = b.real * b.real + b.imag * b.imag; z.real = (a.real * b.real + a.imag * b.imag) / denom; z.imag = (a.imag * b.real - a.real * b.imag) / denom; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrt(z.real*z.real + z.imag*z.imag); #else return hypot(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { double denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(a, a); case 3: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(z, a); case 4: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } r = a.real; theta = 0; } else { r = __Pyx_c_abs(a); theta = atan2(a.imag, a.real); } lnr = log(r); z_r = exp(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cos(z_theta); z.imag = z_r * sin(z_theta); return z; } #endif #endif static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject* x) { const unsigned char neg_one = (unsigned char)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(unsigned char) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(unsigned char)val)) 
{ if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to unsigned char" : "value too large to convert to unsigned char"); } return (unsigned char)-1; } return (unsigned char)val; } return (unsigned char)__Pyx_PyInt_AsUnsignedLong(x); } static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject* x) { const unsigned short neg_one = (unsigned short)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(unsigned short) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(unsigned short)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to unsigned short" : "value too large to convert to unsigned short"); } return (unsigned short)-1; } return (unsigned short)val; } return (unsigned short)__Pyx_PyInt_AsUnsignedLong(x); } static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject* x) { const unsigned int neg_one = (unsigned int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(unsigned int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(unsigned int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to unsigned int" : "value too large to convert to unsigned int"); } return (unsigned int)-1; } return (unsigned int)val; } return (unsigned int)__Pyx_PyInt_AsUnsignedLong(x); } static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject* x) { const char neg_one = (char)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(char) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(char)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to char" : "value too large to convert to char"); } return (char)-1; } return (char)val; } return (char)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject* x) { const short neg_one = (short)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(short) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(short)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to short" : "value too large to convert to short"); } return (short)-1; } return (short)val; } return (short)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject* x) { const int neg_one = (int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? 
"can't convert negative value to int" : "value too large to convert to int"); } return (int)-1; } return (int)val; } return (int)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject* x) { const signed char neg_one = (signed char)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(signed char) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(signed char)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to signed char" : "value too large to convert to signed char"); } return (signed char)-1; } return (signed char)val; } return (signed char)__Pyx_PyInt_AsSignedLong(x); } static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject* x) { const signed short neg_one = (signed short)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(signed short) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(signed short)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to signed short" : "value too large to convert to signed short"); } return (signed short)-1; } return (signed short)val; } return (signed short)__Pyx_PyInt_AsSignedLong(x); } static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject* x) { const signed int neg_one = (signed int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(signed int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(signed int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to signed int" : "value too large to convert to signed int"); } return (signed int)-1; } return (signed int)val; } return (signed int)__Pyx_PyInt_AsSignedLong(x); } static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject* x) { const int neg_one = (int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? 
"can't convert negative value to int" : "value too large to convert to int"); } return (int)-1; } return (int)val; } return (int)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject* x) { const unsigned long neg_one = (unsigned long)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned long"); return (unsigned long)-1; } return (unsigned long)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned long"); return (unsigned long)-1; } return (unsigned long)PyLong_AsUnsignedLong(x); } else { return (unsigned long)PyLong_AsLong(x); } } else { unsigned long val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (unsigned long)-1; val = __Pyx_PyInt_AsUnsignedLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject* x) { const unsigned PY_LONG_LONG neg_one = (unsigned PY_LONG_LONG)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned PY_LONG_LONG"); return (unsigned PY_LONG_LONG)-1; } return (unsigned PY_LONG_LONG)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned PY_LONG_LONG"); return (unsigned PY_LONG_LONG)-1; } return (unsigned PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); } else { return (unsigned PY_LONG_LONG)PyLong_AsLongLong(x); } } else { unsigned PY_LONG_LONG val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (unsigned PY_LONG_LONG)-1; val = __Pyx_PyInt_AsUnsignedLongLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject* x) { const long neg_one = (long)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long)-1; } return (long)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long)-1; } return (long)PyLong_AsUnsignedLong(x); } else { return (long)PyLong_AsLong(x); } } else { long val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (long)-1; val = __Pyx_PyInt_AsLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject* x) { const PY_LONG_LONG neg_one = (PY_LONG_LONG)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to PY_LONG_LONG"); return (PY_LONG_LONG)-1; } return (PY_LONG_LONG)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { 
PyErr_SetString(PyExc_OverflowError, "can't convert negative value to PY_LONG_LONG"); return (PY_LONG_LONG)-1; } return (PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); } else { return (PY_LONG_LONG)PyLong_AsLongLong(x); } } else { PY_LONG_LONG val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (PY_LONG_LONG)-1; val = __Pyx_PyInt_AsLongLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject* x) { const signed long neg_one = (signed long)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed long"); return (signed long)-1; } return (signed long)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed long"); return (signed long)-1; } return (signed long)PyLong_AsUnsignedLong(x); } else { return (signed long)PyLong_AsLong(x); } } else { signed long val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (signed long)-1; val = __Pyx_PyInt_AsSignedLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject* x) { const signed PY_LONG_LONG neg_one = (signed PY_LONG_LONG)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed PY_LONG_LONG"); return (signed PY_LONG_LONG)-1; } return (signed PY_LONG_LONG)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed PY_LONG_LONG"); return (signed PY_LONG_LONG)-1; } return (signed PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); } else { return (signed PY_LONG_LONG)PyLong_AsLongLong(x); } } else { signed PY_LONG_LONG val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (signed PY_LONG_LONG)-1; val = __Pyx_PyInt_AsSignedLongLong(tmp); Py_DECREF(tmp); return val; } } static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); #if PY_VERSION_HEX < 0x02050000 return PyErr_Warn(NULL, message); #else return PyErr_WarnEx(NULL, message, 1); #endif } return 0; } #ifndef __PYX_HAVE_RT_ImportModule #define __PYX_HAVE_RT_ImportModule static PyObject *__Pyx_ImportModule(const char *name) { PyObject *py_name = 0; PyObject *py_module = 0; py_name = __Pyx_PyIdentifier_FromString(name); if (!py_name) goto bad; py_module = PyImport_Import(py_name); Py_DECREF(py_name); return py_module; bad: Py_XDECREF(py_name); return 0; } #endif #ifndef __PYX_HAVE_RT_ImportType #define __PYX_HAVE_RT_ImportType static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict) { PyObject *py_module = 0; PyObject *result = 0; PyObject *py_name = 0; char warning[200]; 
py_module = __Pyx_ImportModule(module_name); if (!py_module) goto bad; py_name = __Pyx_PyIdentifier_FromString(class_name); if (!py_name) goto bad; result = PyObject_GetAttr(py_module, py_name); Py_DECREF(py_name); py_name = 0; Py_DECREF(py_module); py_module = 0; if (!result) goto bad; if (!PyType_Check(result)) { PyErr_Format(PyExc_TypeError, "%s.%s is not a type object", module_name, class_name); goto bad; } if (!strict && (size_t)((PyTypeObject *)result)->tp_basicsize > size) { PyOS_snprintf(warning, sizeof(warning), "%s.%s size changed, may indicate binary incompatibility", module_name, class_name); #if PY_VERSION_HEX < 0x02050000 if (PyErr_Warn(NULL, warning) < 0) goto bad; #else if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; #endif } else if ((size_t)((PyTypeObject *)result)->tp_basicsize != size) { PyErr_Format(PyExc_ValueError, "%s.%s has the wrong size, try recompiling", module_name, class_name); goto bad; } return (PyTypeObject *)result; bad: Py_XDECREF(py_module); Py_XDECREF(result); return NULL; } #endif static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = (start + end) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, new_max*sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( 
const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, /*int argcount,*/ 0, /*int kwonlyargcount,*/ 0, /*int nlocals,*/ 0, /*int stacksize,*/ 0, /*int flags,*/ __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, /*int firstlineno,*/ __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_globals = 0; PyFrameObject *py_frame = 0; py_code = __pyx_find_code_object(c_line ? c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? c_line : py_line, py_code); } py_globals = PyModule_GetDict(__pyx_m); if (!py_globals) goto bad; py_frame = PyFrame_New( PyThreadState_GET(), /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ py_globals, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; py_frame->f_lineno = py_line; PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else /* Python 3+ has unicode identifiers */ if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; ++t; } return 0; } /* Type Conversion Functions */ static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x) { PyNumberMethods *m; const char *name = NULL; PyObject *res = NULL; #if PY_VERSION_HEX < 0x03000000 if (PyInt_Check(x) || PyLong_Check(x)) #else if (PyLong_Check(x)) #endif return Py_INCREF(x), x; m = Py_TYPE(x)->tp_as_number; #if PY_VERSION_HEX < 0x03000000 if (m && m->nb_int) { name = "int"; res = PyNumber_Int(x); } else if (m && m->nb_long) { name = "long"; res = PyNumber_Long(x); } #else if (m && m->nb_int) { name = 
"int"; res = PyNumber_Long(x); } #endif if (res) { #if PY_VERSION_HEX < 0x03000000 if (!PyInt_Check(res) && !PyLong_Check(res)) { #else if (!PyLong_Check(res)) { #endif PyErr_Format(PyExc_TypeError, "__%s__ returned non-%s (type %.200s)", name, name, Py_TYPE(res)->tp_name); Py_DECREF(res); return NULL; } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject* x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { #if PY_VERSION_HEX < 0x02050000 if (ival <= LONG_MAX) return PyInt_FromLong((long)ival); else { unsigned char *bytes = (unsigned char *) &ival; int one = 1; int little = (int)*(unsigned char*)&one; return _PyLong_FromByteArray(bytes, sizeof(size_t), little, 0); } #else return PyInt_FromSize_t(ival); #endif } static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject* x) { unsigned PY_LONG_LONG val = __Pyx_PyInt_AsUnsignedLongLong(x); if (unlikely(val == (unsigned PY_LONG_LONG)-1 && PyErr_Occurred())) { return (size_t)-1; } else if (unlikely(val != (unsigned PY_LONG_LONG)(size_t)val)) { PyErr_SetString(PyExc_OverflowError, "value too large to convert to size_t"); return (size_t)-1; } return (size_t)val; } #endif /* Py_PYTHON_H */ nipy-0.3.0/nipy/labs/group/twosample.pyx000066400000000000000000000160021210344137400203000ustar00rootroot00000000000000# -*- Mode: Python -*- Not really, but the syntax is close enough """ Routines for massively univariate random-effect and mixed-effect analysis. Two-sample case. Author: Alexis Roche, 2008. """ __version__ = '0.1' # Includes from fff cimport * # Exports from fff_twosample_stat.h cdef extern from "fff_twosample_stat.h": ctypedef enum fff_twosample_stat_flag: FFF_TWOSAMPLE_STUDENT = 0, FFF_TWOSAMPLE_WILCOXON = 1, FFF_TWOSAMPLE_STUDENT_MFX = 10 ctypedef struct fff_twosample_stat: pass ctypedef struct fff_twosample_stat_mfx: unsigned int niter fff_twosample_stat* fff_twosample_stat_new(unsigned int n1, unsigned int n2, fff_twosample_stat_flag flag) void fff_twosample_stat_delete(fff_twosample_stat* thisone) double fff_twosample_stat_eval(fff_twosample_stat* thisone, fff_vector* x) fff_twosample_stat_mfx* fff_twosample_stat_mfx_new(unsigned int n1, unsigned int n2, fff_twosample_stat_flag flag) void fff_twosample_stat_mfx_delete(fff_twosample_stat_mfx* thisone) double fff_twosample_stat_mfx_eval(fff_twosample_stat_mfx* thisone, fff_vector* x, fff_vector* vx) unsigned int fff_twosample_permutation(unsigned int* idx1, unsigned int* idx2, unsigned int n1, unsigned int n2, double* magic) void fff_twosample_apply_permutation(fff_vector* px, fff_vector* pv, fff_vector* x1, fff_vector* v1, fff_vector* x2, fff_vector* v2, unsigned int i, unsigned int* idx1, unsigned int* idx2) # Initialize numpy fffpy_import_array() import_array() import numpy as np # Stat dictionary stats = {'student': FFF_TWOSAMPLE_STUDENT, 'wilcoxon': FFF_TWOSAMPLE_WILCOXON, 'student_mfx': FFF_TWOSAMPLE_STUDENT_MFX} def count_permutations(unsigned int n1, unsigned int n2): cdef double n fff_twosample_permutation(NULL, NULL, n1, n2, &n) return int(n) def stat(ndarray Y1, ndarray Y2, id='student', int axis=0, ndarray Magics=None): """ T = stat(Y1, Y2, id='student', axis=0, magics=None). Compute a two-sample test statistic (Y1>Y2) over a number of deterministic or random permutations. 
""" cdef fff_vector *y1, *y2, *t, *yp, *magics cdef fff_array *idx1, *idx2 cdef unsigned int n, n1, n2, nex cdef unsigned long int simu, nsimu, idx cdef fff_twosample_stat* stat cdef fff_twosample_stat_flag flag_stat = stats[id] cdef double magic cdef fffpy_multi_iterator* multi # Get number of observations n1 = Y1.shape[axis] n2 = Y2.shape[axis] n = n1 + n2 # Read out magic numbers if Magics == None: magics = fff_vector_new(1) magics.data[0] = 0 ## Just to make sure else: magics = fff_vector_fromPyArray(Magics) # Create output array nsimu = magics.size dims = [Y1.shape[i] for i in range(Y1.ndim)] dims[axis] = nsimu T = np.zeros(dims) # Create local structure yp = fff_vector_new(n) idx1 = fff_array_new1d(FFF_UINT, n1) idx2 = fff_array_new1d(FFF_UINT, n2) stat = fff_twosample_stat_new(n1, n2, flag_stat) # Multi-iterator multi = fffpy_multi_iterator_new(3, axis, Y1, Y2, T) # Vector views y1 = multi.vector[0] y2 = multi.vector[1] t = multi.vector[2] # Loop for simu from 0 <= simu < nsimu: # Set the magic number magic = magics.data[simu*magics.stride] # Generate permutation nex = fff_twosample_permutation(idx1.data, idx2.data, n1, n2, &magic) # Reset the multi-iterator fffpy_multi_iterator_reset(multi) # Perform the loop idx = simu*t.stride while(multi.index < multi.size): fff_twosample_apply_permutation(yp, NULL, y1, NULL, y2, NULL, nex, idx1.data, idx2.data) t.data[idx] = fff_twosample_stat_eval(stat, yp) fffpy_multi_iterator_update(multi) # Delete local structures fffpy_multi_iterator_delete(multi) fff_vector_delete(magics) fff_vector_delete(yp) fff_array_delete(idx1) fff_array_delete(idx2) fff_twosample_stat_delete(stat) # Return return T def stat_mfx(ndarray Y1, ndarray V1, ndarray Y2, ndarray V2, id='student_mfx', int axis=0, ndarray Magics=None, unsigned int niter=5): """ T = stat(Y1, V1, Y2, V2, id='student', axis=0, magics=None, niter=5). Compute a two-sample test statistic (Y1>Y2) over a number of deterministic or random permutations. 
""" cdef fff_vector *y1, *y2, *v1, *v2, *t, *yp, *vp, *magics cdef fff_array *idx1, *idx2 cdef unsigned int n, n1, n2, nex cdef unsigned long int simu, nsimu, idx cdef fff_twosample_stat_mfx* stat cdef fff_twosample_stat_flag flag_stat = stats[id] cdef double magic cdef fffpy_multi_iterator* multi # Get number of observations n1 = Y1.shape[axis] n2 = Y2.shape[axis] n = n1 + n2 # Read out magic numbers if Magics == None: magics = fff_vector_new(1) magics.data[0] = 0 ## Just to make sure else: magics = fff_vector_fromPyArray(Magics) # Create output array nsimu = magics.size dims = [Y1.shape[i] for i in range(Y1.ndim)] dims[axis] = nsimu T = np.zeros(dims) # Create local structure yp = fff_vector_new(n) vp = fff_vector_new(n) idx1 = fff_array_new1d(FFF_UINT, n1) idx2 = fff_array_new1d(FFF_UINT, n2) stat = fff_twosample_stat_mfx_new(n1, n2, flag_stat) stat.niter = niter # Multi-iterator multi = fffpy_multi_iterator_new(5, axis, Y1, V1, Y2, V2, T) # Vector views y1 = multi.vector[0] v1 = multi.vector[1] y2 = multi.vector[2] v2 = multi.vector[3] t = multi.vector[4] # Loop for simu from 0 <= simu < nsimu: # Set the magic number magic = magics.data[simu*magics.stride] # Generate permutation nex = fff_twosample_permutation(idx1.data, idx2.data, n1, n2, &magic) # Reset the multi-iterator fffpy_multi_iterator_reset(multi) # Perform the loop idx = simu*t.stride while(multi.index < multi.size): fff_twosample_apply_permutation(yp, vp, y1, v1, y2, v2, nex, idx1.data, idx2.data) t.data[idx] = fff_twosample_stat_mfx_eval(stat, yp, vp) fffpy_multi_iterator_update(multi) # Delete local structures fffpy_multi_iterator_delete(multi) fff_vector_delete(magics) fff_vector_delete(yp) fff_vector_delete(vp) fff_array_delete(idx1) fff_array_delete(idx2) fff_twosample_stat_mfx_delete(stat) # Return return T nipy-0.3.0/nipy/labs/mask.py000066400000000000000000000431571210344137400157070ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Utilities for extracting masks from EPI images and applying them to time series. """ # Major scientific libraries imports import numpy as np from scipy import ndimage # Neuroimaging libraries imports from nibabel import load, nifti1, save from nibabel.loadsave import read_img_data ############################################################################### # Operating on connect component ############################################################################### def largest_cc(mask): """ Return the largest connected component of a 3D mask array. Parameters ----------- mask: 3D boolean array 3D array indicating a mask. Returns -------- mask: 3D boolean array 3D array indicating a mask, with only one connected component. """ # We use asarray to be able to work with masked arrays. mask = np.asarray(mask) labels, label_nb = ndimage.label(mask) if not label_nb: raise ValueError('No non-zero values: no connected components') if label_nb == 1: return mask.astype(np.bool) label_count = np.bincount(labels.ravel()) # discard 0 the 0 label label_count[0] = 0 return labels == label_count.argmax() def threshold_connect_components(map, threshold, copy=True): """ Given a map with some coefficients set to zero, segment the connect components with number of voxels smaller than the threshold and set them to 0. Parameters ---------- map: ndarray, The spatial map to segment threshold: scalar, The minimum number of voxels to keep a cluster. 
copy: bool, optional If copy is false, the input array is modified inplace Returns ------- map: ndarray, the map with connected components removed """ labels, _ = ndimage.label(map) weights = np.bincount(labels.ravel()) if copy: map = map.copy() for label, weight in enumerate(weights): if label == 0: continue if weight < threshold: map[labels == label] = 0 return map ############################################################################### # Utilities to calculate masks ############################################################################### def compute_mask_files(input_filename, output_filename=None, return_mean=False, m=0.2, M=0.9, cc=1, exclude_zeros=False, opening=2): """ Compute a mask file from fMRI nifti file(s) Compute and write the mask of an image based on the grey level This is based on an heuristic proposed by T.Nichols: find the least dense point of the histogram, between fractions m and M of the total image histogram. In case of failure, it is usually advisable to increase m. Parameters ---------- input_filename : string nifti filename (4D) or list of filenames (3D). output_filename : string or None, optional path to save the output nifti image (if not None). return_mean : boolean, optional if True, and output_filename is None, return the mean image also, as a 3D array (2nd return argument). m : float, optional lower fraction of the histogram to be discarded. M: float, optional upper fraction of the histogram to be discarded. cc: boolean, optional if cc is True, only the largest connect component is kept. exclude_zeros: boolean, optional Consider zeros as missing values for the computation of the threshold. This option is useful if the images have been resliced with a large padding of zeros. opening: int, optional Size of the morphological opening performed as post-processing Returns ------- mask : 3D boolean array The brain mask mean_image : 3d ndarray, optional The main of all the images used to estimate the mask. Only provided if `return_mean` is True. """ if isinstance(input_filename, basestring): # One single filename or image nim = load(input_filename) # load the image from the path vol_arr = read_img_data(nim, prefer='unscaled') header = nim.get_header() affine = nim.get_affine() if vol_arr.ndim == 4: if isinstance(vol_arr, np.memmap): # Get rid of memmapping: it is faster. mean_volume = np.array(vol_arr, copy=True).mean(axis=-1) else: mean_volume = vol_arr.mean(axis=-1) # Make a copy, to avoid holding a reference on the full array, # and thus polluting the memory. 
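            # first_volume is used below as the reference_volume argument of
            # compute_mask: the threshold is estimated from the mean image
            # but applied to this first volume.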
first_volume = vol_arr[..., 0].copy() elif vol_arr.ndim == 3: mean_volume = first_volume = vol_arr else: raise ValueError('Need 4D file for mask') del vol_arr else: # List of filenames if len(list(input_filename)) == 0: raise ValueError('input_filename should be a non-empty ' 'list of file names') # We have several images, we do mean on the fly, # to avoid loading all the data in the memory # We do not use the unscaled data here?: # if the scalefactor is being used to record real # differences in intensity over the run this would break for index, filename in enumerate(input_filename): nim = load(filename) if index == 0: first_volume = nim.get_data().squeeze() mean_volume = first_volume.copy().astype(np.float32) header = nim.get_header() affine = nim.get_affine() else: mean_volume += nim.get_data().squeeze() mean_volume /= float(len(list(input_filename))) del nim if np.isnan(mean_volume).any(): tmp = mean_volume.copy() tmp[np.isnan(tmp)] = 0 mean_volume = tmp mask = compute_mask(mean_volume, first_volume, m, M, cc, opening=opening, exclude_zeros=exclude_zeros) if output_filename is not None: header['descrip'] = 'mask' output_image = nifti1.Nifti1Image(mask.astype(np.uint8), affine=affine, header=header) save(output_image, output_filename) if not return_mean: return mask else: return mask, mean_volume def compute_mask(mean_volume, reference_volume=None, m=0.2, M=0.9, cc=True, opening=2, exclude_zeros=False): """ Compute a mask file from fMRI data in 3D or 4D ndarrays. Compute and write the mask of an image based on the grey level This is based on an heuristic proposed by T.Nichols: find the least dense point of the histogram, between fractions m and M of the total image histogram. In case of failure, it is usually advisable to increase m. Parameters ---------- mean_volume : 3D ndarray mean EPI image, used to compute the threshold for the mask. reference_volume: 3D ndarray, optional reference volume used to compute the mask. If none is give, the mean volume is used. m : float, optional lower fraction of the histogram to be discarded. M: float, optional upper fraction of the histogram to be discarded. cc: boolean, optional if cc is True, only the largest connect component is kept. opening: int, optional if opening is larger than 0, an morphological opening is performed, to keep only large structures. This step is useful to remove parts of the skull that might have been included. exclude_zeros: boolean, optional Consider zeros as missing values for the computation of the threshold. This option is useful if the images have been resliced with a large padding of zeros. Returns ------- mask : 3D boolean ndarray The brain mask """ if reference_volume is None: reference_volume = mean_volume sorted_input = np.sort(mean_volume.reshape(-1)) if exclude_zeros: sorted_input = sorted_input[sorted_input != 0] limiteinf = np.floor(m * len(sorted_input)) limitesup = np.floor(M * len(sorted_input)) delta = sorted_input[limiteinf + 1:limitesup + 1] \ - sorted_input[limiteinf:limitesup] ia = delta.argmax() threshold = 0.5 * (sorted_input[ia + limiteinf] + sorted_input[ia + limiteinf + 1]) mask = (reference_volume >= threshold) if cc: mask = largest_cc(mask) if opening > 0: mask = ndimage.binary_opening(mask.astype(np.int), iterations=opening) return mask.astype(bool) def compute_mask_sessions(session_images, m=0.2, M=0.9, cc=1, threshold=0.5, exclude_zeros=False, return_mean=False, opening=2): """ Compute a common mask for several sessions of fMRI data. 
Uses the mask-finding algorithmes to extract masks for each session, and then keep only the main connected component of the a given fraction of the intersection of all the masks. Parameters ---------- session_images : list of (list of strings) or nipy image objects A list of images/list of nifti filenames. Each inner list/image represents a session. m : float, optional lower fraction of the histogram to be discarded. M: float, optional upper fraction of the histogram to be discarded. cc: boolean, optional if cc is True, only the largest connect component is kept. threshold : float, optional the inter-session threshold: the fraction of the total number of session in for which a voxel must be in the mask to be kept in the common mask. threshold=1 corresponds to keeping the intersection of all masks, whereas threshold=0 is the union of all masks. exclude_zeros: boolean, optional Consider zeros as missing values for the computation of the threshold. This option is useful if the images have been resliced with a large padding of zeros. return_mean: boolean, optional if return_mean is True, the mean image accross subjects is returned. opening: int, optional, size of the morphological opening Returns ------- mask : 3D boolean ndarray The brain mask mean : 3D float array The mean image """ mask, mean = None, None for index, session in enumerate(session_images): if hasattr(session, 'get_data'): mean = session.get_data() if mean.ndim > 3: mean = mean.mean(-1) this_mask = compute_mask(mean, None, m=m, M=M, cc=cc, opening=opening, exclude_zeros=exclude_zeros) if return_mean: this_mask = this_mask, mean else: this_mask = compute_mask_files( session, m=m, M=M, cc=cc, exclude_zeros=exclude_zeros, return_mean=return_mean, opening=opening) if return_mean: this_mask, this_mean = this_mask if mean is None: mean = this_mean.astype(np.float) else: mean += this_mean this_mask = this_mask.astype(np.int8) if mask is None: mask = this_mask else: mask += this_mask # Free memory early del this_mask # Take the "half-intersection", i.e. all the voxels that fall within # 50% of the individual masks. mask = (mask > threshold * len(list(session_images))) if cc: # Select the largest connected component (each mask is # connect, but the half-interesection may not be): mask = largest_cc(mask) mask = mask.astype(np.bool) if return_mean: # Divide by the number of sessions mean /= len(session_images) return mask, mean return mask def intersect_masks(input_masks, output_filename=None, threshold=0.5, cc=True): """ Given a list of input mask images, generate the output image which is the the threshold-level intersection of the inputs Parameters ---------- input_masks: list of strings or ndarrays paths of the input images nsubj set as len(input_mask_files), or individual masks. output_filename, string: Path of the output image, if None no file is saved. threshold: float within [0, 1[, optional gives the level of the intersection. threshold=1 corresponds to keeping the intersection of all masks, whereas threshold=0 is the union of all masks. 
cc: bool, optional If true, extract the main connected component Returns ------- grp_mask, boolean array of shape the image shape """ grp_mask = None if threshold > 1: raise ValueError('The threshold should be < 1') if threshold < 0: raise ValueError('The threshold should be > 0') threshold = min(threshold, 1 - 1.e-7) for this_mask in input_masks: if isinstance(this_mask, basestring): # We have a filename this_mask = load(this_mask).get_data() if grp_mask is None: grp_mask = this_mask.copy().astype(np.int) else: # If this_mask is floating point and grp_mask is integer, numpy 2 # casting rules raise an error for in-place addition. # Hence we do it long-hand. # XXX should the masks be coerced to int before addition? grp_mask = grp_mask + this_mask grp_mask = grp_mask > (threshold * len(list(input_masks))) if np.any(grp_mask > 0) and cc: grp_mask = largest_cc(grp_mask) if output_filename is not None: if isinstance(input_masks[0], basestring): nim = load(input_masks[0]) header = nim.get_header() affine = nim.get_affine() else: header = dict() affine = np.eye(4) header['descrip'] = 'mask image' output_image = nifti1.Nifti1Image(grp_mask.astype(np.uint8), affine=affine, header=header, ) save(output_image, output_filename) return grp_mask > 0 ############################################################################### # Time series extraction ############################################################################### def series_from_mask(filenames, mask, dtype=np.float32, smooth=False, ensure_finite=True): """ Read the time series from the given sessions filenames, using the mask. Parameters ----------- filenames: list of 3D nifti file names, or 4D nifti filename. Files are grouped by session. mask: 3d ndarray 3D mask array: true where a voxel should be used. smooth: False or float, optional If smooth is not False, it gives the size, in voxel of the spatial smoothing to apply to the signal. ensure_finite: boolean, optional If ensure_finite is True, the non-finite values (NaNs and infs) found in the images will be replaced by zeros Returns -------- session_series: ndarray 3D array of time course: (session, voxel, time) header: header object The header of the first file. Notes ----- When using smoothing, ensure_finite should be True: as elsewhere non finite values will spread accross the image. """ assert len(filenames) != 0, ( 'filenames should be a file name or a list of file names, ' '%s (type %s) was passed' % (filenames, type(filenames))) mask = mask.astype(np.bool) if smooth: # Convert from a sigma to a FWHM: smooth /= np.sqrt(8 * np.log(2)) if isinstance(filenames, basestring): # We have a 4D nifti file data_file = load(filenames) header = data_file.get_header() series = data_file.get_data() if ensure_finite: # SPM tends to put NaNs in the data outside the brain series[np.logical_not(np.isfinite(series))] = 0 series = series.astype(dtype) affine = data_file.get_affine()[:3, :3] del data_file if isinstance(series, np.memmap): series = np.asarray(series).copy() if smooth: vox_size = np.sqrt(np.sum(affine ** 2, axis=0)) smooth_sigma = smooth / vox_size for this_volume in np.rollaxis(series, -1): this_volume[...] 
= ndimage.gaussian_filter(this_volume, smooth_sigma) series = series[mask] else: nb_time_points = len(list(filenames)) series = np.zeros((mask.sum(), nb_time_points), dtype=dtype) for index, filename in enumerate(filenames): data_file = load(filename) data = data_file.get_data() if ensure_finite: # SPM tends to put NaNs in the data outside the brain data[np.logical_not(np.isfinite(data))] = 0 data = data.astype(dtype) if smooth is not False: affine = data_file.get_affine()[:3, :3] vox_size = np.sqrt(np.sum(affine ** 2, axis=0)) smooth_sigma = smooth / vox_size data = ndimage.gaussian_filter(data, smooth_sigma) series[:, index] = data[mask] # Free memory early del data if index == 0: header = data_file.get_header() return series, header nipy-0.3.0/nipy/labs/setup.py000066400000000000000000000063671210344137400161160ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import os from distutils import log # Global variables LIBS = os.path.realpath('libcstat') def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration, get_numpy_include_dirs from numpy.distutils.system_info import get_info config = Configuration('labs', parent_package, top_path) # cstat library config.add_include_dirs(os.path.join(LIBS,'fff')) config.add_include_dirs(os.path.join(LIBS,'wrapper')) config.add_include_dirs(get_numpy_include_dirs()) sources = [os.path.join(LIBS,'fff','*.c')] sources.append(os.path.join(LIBS,'wrapper','*.c')) # Link with lapack if found on the system # XXX: We need to better sort out the use of get_info() for Lapack, because # using 'lapack' and 'lapack_opt' returns different things even comparing # Ubuntu 8.10 machines on 32 vs 64 bit setups. On OSX # get_info('lapack_opt') does not return the keys: 'libraries' and # 'library_dirs', but get_info('lapack') does. # # For now this code should do the right thing on OSX and linux, but we # should ask on the numpy list for clarification on the proper approach. # XXX: If you modify these lines, remember to pass the information # along to the different .so in the neurospin build system. # First, try 'lapack_info', as that seems to provide more details on Linux # (both 32 and 64 bits): lapack_info = get_info('lapack_opt', 0) if 'libraries' not in lapack_info: # But on OSX that may not give us what we need, so try with 'lapack' # instead. NOTE: scipy.linalg uses lapack_opt, not 'lapack'... 
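    # An empty lapack_info dict means that no system LAPACK was found; in
    # that case the bundled, f2c-translated lapack_lite sources are added
    # to the build as a fallback (see below).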
lapack_info = get_info('lapack', 0) # If no lapack install is found, we use the rescue lapack lite # distribution included in the package (sources have been # translated to C using f2c) if not lapack_info: log.warn('No lapack installation found, using lapack lite distribution') sources.append(os.path.join(LIBS,'lapack_lite','*.c')) library_dirs = [] libraries = [] # Best-case scenario: lapack found else: library_dirs = lapack_info['library_dirs'] libraries = lapack_info['libraries'] if 'include_dirs' in lapack_info: config.add_include_dirs(lapack_info['include_dirs']) # Information message print('LAPACK build options:') print('library_dirs: %s ' % library_dirs) print('libraries: %s ' % libraries) print('lapack_info: %s ' % lapack_info) config.add_library('cstat', sources=sources, library_dirs=library_dirs, libraries=libraries, extra_info=lapack_info) # Subpackages config.add_subpackage('bindings') config.add_subpackage('glm') config.add_subpackage('group') config.add_subpackage('spatial_models') config.add_subpackage('utils') config.add_subpackage('viz_tools') config.add_subpackage('datasets') config.add_subpackage('tests') config.make_config_py() # installs __config__.py return config if __name__ == '__main__': print('This is the wrong setup.py file to run') nipy-0.3.0/nipy/labs/spatial_models/000077500000000000000000000000001210344137400173705ustar00rootroot00000000000000nipy-0.3.0/nipy/labs/spatial_models/__init__.py000066400000000000000000000001141210344137400214750ustar00rootroot00000000000000from nipy.testing import Tester test = Tester().test bench = Tester().bench nipy-0.3.0/nipy/labs/spatial_models/bayesian_structural_analysis.py000066400000000000000000000520421210344137400257330ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ The main routine of this package that aims at performing the extraction of ROIs from multisubject dataset using the localization and activation strength of extracted regions. This has been published in: - Thirion et al. High level group analysis of FMRI data based on Dirichlet process mixture models, IPMI 2007 - Thirion et al. 
Accurate Definition of Brain Regions Position Through the Functional Landmark Approach, MICCAI 2010 Author : Bertrand Thirion, 2006-2011 """ import numpy as np import scipy.stats as st from .structural_bfls import build_LR from nipy.algorithms.graph import wgraph_from_coo_matrix from ...algorithms.statistics.empirical_pvalue import \ NormalEmpiricalNull, three_classes_GMM_fit, gamma_gaussian_fit from .hroi import HROI_as_discrete_domain_blobs #################################################################### # Ancillary functions #################################################################### def _relabel_(label, nl=None): """ Simple utilisity to relabel a pre-existing label vector Parameters ---------- label: array of shape(n) nl: array of shape(p), where p<= label.max(), optional if None, the output is -1*np.ones(n) Returns ------- new_label : array of shape (n) """ if label.max() + 1 < np.size(nl): raise ValueError('incompatible values for label of nl') new_label = - np.ones(np.shape(label)) if nl is not None: aux = np.arange(label.max() + 1) aux[0: np.size(nl)] = nl new_label[label > - 1] = aux[label[label > - 1]] return new_label def signal_to_pproba(test, learn=None, method='prior', alpha=0.01, verbose=0): """Convert a set of z-values to posterior probabilities of not being active Parameters ---------- test: array pf shape(n_samples), data that is assessed learn: array pf shape(n_samples), optional data to learn a mixture model method: string, optional, to be chosen within ['gauss_mixture', 'emp_null', 'gam_gauss', 'prior'] alpha: float in the [0,1], optional, parameter that yields the prior probability that a region is active should be chosen close to 0 """ if method == 'gauss_mixture': prior_strength = 100 fixed_scale = True bfp = three_classes_GMM_fit( learn, test, alpha, prior_strength, verbose, fixed_scale) bf0 = bfp[:, 1] elif method == 'emp_null': enn = NormalEmpiricalNull(learn) enn.learn() bf0 = np.reshape(enn.fdr(test), np.size(test)) elif method == 'gam_gauss': bfp = gamma_gaussian_fit(learn, test, verbose) bf0 = bfp[:, 1] elif method == 'prior': y0 = st.norm.pdf(test) shape_, scale_ = 3., 2. 
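        # Two-component mixture: null z-values follow the standard normal
        # density y0 computed above, active ones a Gamma(shape=3, scale=2)
        # density y1; bf0 below is the posterior probability of the null
        # component, alpha being the prior probability of being active.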
y1 = st.gamma.pdf(test, shape_, scale=scale_) bf0 = np.ravel((1 - alpha) * y0 / (alpha * y1 + (1 - alpha) * y0)) else: raise ValueError('Unknown method') return bf0 def compute_individual_regions(domain, lbeta, smin=5, theta=3.0, method='gauss_mixture', verbose=0, reshuffle=0, criterion='size', assign_val='weighted_mean'): """ Compute the individual regions that are real activation candidates Parameters ---------- dom : StructuredDomain instance, generic descriptor of the space domain lbeta: an array of shape (nbnodes, subjects) the multi-subject statistical maps smin: int, optional minimal size of the regions to validate them theta: float, optional first level threshold method: string, optional, method that is used to provide priori significance can be 'prior', 'gauss_mixture', 'gam_gauss' or 'emp_null' verbose: verbosity mode, optional reshuffle: bool, otpional, if nonzero, reshuffle the positions; this affects bf and gfc criterion: string, optional, 'size' or 'volume', thresholdding criterion assign_val: string, optional, to be chosen in 'weighted mean', 'mean', 'min', 'max' heuristic to assigna blob-level signal Returns ------- bf list of nipy.labs.spatial_models.hroi.Nroi instances representing individual ROIs let nr be the number of terminal regions across subjects gf0, array of shape (nr) the mixture-based prior probability that the terminal regions are false positives sub, array of shape (nr) the subject index associated with the terminal regions gfc, array of shape (nr, coord.shape[1]) the coordinates of the of the terminal regions Fixme ----- Should allow for subject specific domains """ bf = [] gfc = [] gf0 = [] sub = [] n_subj = lbeta.shape[1] nvox = lbeta.shape[0] for s in range(n_subj): # description in terms of blobs beta = np.reshape(lbeta[:, s], (nvox, 1)) nroi = HROI_as_discrete_domain_blobs( domain, beta, threshold=theta, smin=smin) if nroi is not None and nroi.k > 0: bfm = nroi.representative_feature('signal', 'weighted mean') bfm = bfm[[nroi.select_id(id) for id in nroi.get_leaves_id()]] # get the regions position if reshuffle: nroi.reduce_to_leaves() ## randomize the positions ## by taking any local maximum of the image temp = np.argsort(np.random.rand(nvox))[:nroi.k] bfc = domain.coord[temp] else: mean_pos = np.asarray( [np.mean(coords, 0) for coords in nroi.get_coord()]) nroi.set_roi_feature('position', mean_pos) bfc = mean_pos[[nroi.select_id(id) for id in nroi.get_leaves_id()]] gfc.append(bfc) # compute the prior proba of being null learn = np.squeeze(beta[beta != 0]) bf0 = signal_to_pproba(bfm, learn, method) gf0.append(bf0) sub.append(s * np.ones(np.size(bfm))) bf.append(nroi) return bf, gf0, sub, gfc def dpmm(gfc, alpha, g0, g1, dof, prior_precision, gf1, sub, burnin, spatial_coords=None, nis=1000, co_clust=False, verbose=False): """Apply the dpmm analysis to compute clusters from regions coordinates """ from nipy.algorithms.clustering.imm import MixedIMM dim = gfc.shape[1] migmm = MixedIMM(alpha, dim) migmm.set_priors(gfc) migmm.set_constant_densities(null_dens=g0, prior_dens=g1) migmm._prior_dof = dof migmm._prior_scale = np.diag(prior_precision[0] / dof) migmm._inv_prior_scale_ = [np.diag(dof * 1. 
/ (prior_precision[0]))] migmm.sample(gfc, null_class_proba=1 - gf1, niter=burnin, init=False, kfold=sub) if verbose: print 'number of components: ', migmm.k # sampling if co_clust: like, pproba, co_clust = migmm.sample( gfc, null_class_proba=1 - gf1, niter=nis, sampling_points=spatial_coords, kfold=sub, co_clustering=co_clust) if verbose: print 'number of components: ', migmm.k return like, 1 - pproba, co_clust else: like, pproba = migmm.sample( gfc, null_class_proba=1 - gf1, niter=nis, sampling_points=spatial_coords, kfold=sub, co_clustering=co_clust) if verbose: print 'number of components: ', migmm.k return like, 1 - pproba def bsa_dpmm(bf, gf0, sub, gfc, dmax, thq, ths, verbose=0): """ Estimation of the population level model of activation density using dpmm and inference Parameters ---------- bf list of nipy.labs.spatial_models.hroi.HierarchicalROI instances representing individual ROIs let nr be the number of terminal regions across subjects gf0, array of shape (nr) the mixture-based prior probability that the terminal regions are true positives sub, array of shape (nr) the subject index associated with the terminal regions gfc, array of shape (nr, coord.shape[1]) the coordinates of the of the terminal regions dmax float>0: expected cluster std in the common space in units of coord thq = 0.5 (float in the [0,1] interval) p-value of the prevalence test ths=0, float in the rannge [0,nsubj] null hypothesis on region prevalence that is rejected during inference verbose=0, verbosity mode Returns ------- crmap: array of shape (nnodes): the resulting group-level labelling of the space LR: a instance of sbf.LandmarkRegions that describes the ROIs found in inter-subject inference If no such thing can be defined LR is set to None bf: List of nipy.labs.spatial_models.hroi.Nroi instances representing individual ROIs p: array of shape (nnodes): likelihood of the data under H1 over some sampling grid """ from nipy.algorithms.graph.field import field_from_coo_matrix_and_data dom = bf[0].domain n_subj = len(bf) crmap = - np.ones(dom.size, np.int) LR = None p = np.zeros(dom.size) if len(sub) < 1: return crmap, LR, bf, p sub = np.concatenate(sub).astype(np.int) gfc = np.concatenate(gfc) gf0 = np.concatenate(gf0) g0 = 1. / dom.local_volume.sum() # prepare the DPMM dim = dom.em_dim g1 = g0 prior_precision = 1. 
/ (dmax * dmax) * np.ones((1, dim)) dof = 10 burnin = 100 nis = 1000 # nis = number of iterations to estimate p p, q = dpmm(gfc, 0.5, g0, g1, dof, prior_precision, 1 - gf0, sub, burnin, dom.coord, nis) if verbose: h1, c1 = np.histogram((1 - gf0), bins=100) h2, c2 = np.histogram(q, bins=100) try: import matplotlib.pylab as pl pl.figure() pl.plot(1 - gf0, q, '.') pl.figure() pl.bar(c1[:len(h1)], h1, width=0.005) pl.bar(c2[:len(h2)] + 0.003, h2, width=0.005, color='r') except ImportError: pass print 'Number of candidate regions %i, regions found %i' % ( np.size(q), q.sum()) Fbeta = field_from_coo_matrix_and_data(dom.topology, p) _, label = Fbeta.custom_watershed(0, g0) # append some information to the hroi in each subject for s in range(n_subj): bfs = bf[s] if bfs.k > 0: leaves_pos = [bfs.select_id(k) for k in bfs.get_leaves_id()] us = - np.ones(bfs.k).astype(np.int) # set posterior proba lq = np.zeros(bfs.k) lq[leaves_pos] = q[sub == s] bfs.set_roi_feature('posterior_proba', lq) # set prior proba lq = np.zeros(bfs.k) lq[leaves_pos] = 1 - gf0[sub == s] bfs.set_roi_feature('prior_proba', lq) pos = np.asarray( [np.mean(coords, 0) for coords in bfs.get_coord()]) midx = [np.argmin(np.sum((dom.coord - pos[k]) ** 2, 1)) for k in range(bfs.k)] j = label[np.array(midx)] us[leaves_pos] = j[leaves_pos] # when parent regions has similarly labelled children, # include it also us = bfs.make_forest().propagate_upward(us) bfs.set_roi_feature('label', us) # derive the group-level landmarks # with a threshold on the number of subjects # that are represented in each one LR, nl = build_LR(bf, thq, ths, dmax, verbose=verbose) # make a group-level map of the landmark position crmap = _relabel_(label, nl) return crmap, LR, bf, p def bsa_dpmm2(bf, gf0, sub, gfc, dmax, thq, ths, verbose): """ Estimation of the population level model of activation density using dpmm and inference Parameters ---------- bf list of nipy.labs.spatial_models.hroi.HierarchicalROI instances representing individual ROIs let nr be the number of terminal regions across subjects gf0, array of shape (nr) the mixture-based prior probability that the terminal regions are false positives sub, array of shape (nr) the subject index associated with the terminal regions gfc, array of shape (nr, coord.shape[1]) the coordinates of the of the terminal regions dmax float>0: expected cluster std in the common space in units of coord thq = 0.5 (float in the [0,1] interval) p-value of the prevalence test ths=0, float in the rannge [0,nsubj] null hypothesis on region prevalence that is rejected during inference verbose=0, verbosity mode Returns ------- crmap: array of shape (nnodes): the resulting group-level labelling of the space LR: a instance of sbf.LandmarkRegions that describes the ROIs found in inter-subject inference If no such thing can be defined LR is set to None bf: List of nipy.labs.spatial_models.hroi.Nroi instances representing individual ROIs Coclust: array of shape (nr,nr): co-labelling matrix that gives for each pair of inputs how likely they are in the same class according to the model """ dom = bf[0].domain n_subj = len(bf) crmap = - np.ones(dom.size, np.int) LR = None p = np.zeros(dom.size) if len(sub) < 1: return crmap, LR, bf, p sub = np.concatenate(sub).astype(np.int) gfc = np.concatenate(gfc) gf0 = np.concatenate(gf0) # prepare the DPMM g0 = 1. / (np.sum(dom.local_volume)) g1 = g0 prior_precision = 1. 
/ (dmax * dmax) * np.ones((1, dom.em_dim), np.float) dof = 10 burnin = 100 nis = 300 q, p, CoClust = dpmm(gfc, .5, g0, g1, dof, prior_precision, 1 - gf0, sub, burnin, nis=nis, co_clust=True) cg = wgraph_from_coo_matrix(CoClust) if cg.E > 0: cg.remove_edges(cg.weights > .5) u = cg.cc() u[p < g0] = u.max() + 1 + np.arange(np.sum(p < g0)) if verbose: cg.show(gfc) # append some information to the hroi in each subject for s in range(n_subj): bfs = bf[s] if bfs is not None: leaves = np.asarray( [bfs.select_id(id) for id in bfs.get_leaves_id()]) us = - np.ones(bfs.k).astype(np.int) lq = np.zeros(bfs.k) lq[leaves] = q[sub == s] bfs.set_roi_feature('posterior_proba', lq) lq = np.zeros(bfs.k) lq[leaves] = 1 - gf0[sub == s] bfs.set_roi_feature('prior_proba', lq) us[leaves] = u[sub == s] # when parent regions has similarly labelled children, # include it also us = bfs.make_forest().propagate_upward(us) bfs.set_roi_feature('label', us) # derive the group-level landmarks # with a threshold on the number of subjects # that are represented in each one LR, nl = build_LR(bf, thq, ths, dmax, verbose=verbose) # make a group-level map of the landmark position crmap = - np.ones(dom.size) # not implemented at the moment return crmap, LR, bf, CoClust ########################################################################### # Main functions ########################################################################### def compute_BSA_simple(dom, lbeta, dmax, thq=0.5, smin=5, ths=0, theta=3.0, method='prior', verbose=0): """ Compute the Bayesian Structural Activation paterns simplified version Parameters ---------- dom : StructuredDomain instance, Description of the spatial context of the data lbeta: an array of shape (nbnodes, subjects): the multi-subject statistical maps dmax float>0: expected cluster std in the common space in units of coord thq = 0.5 (float): posterior significance threshold should be in the [0,1] interval smin = 5 (int): minimal size of the regions to validate them theta = 3.0 (float): first level threshold method: string, optional, the method used to assess the prior significance of the regions verbose=0: verbosity mode Returns ------- crmap: array of shape (nnodes): the resulting group-level labelling of the space LR: a instance of sbf.LandmarkRegions that describes the ROIs found in inter-subject inference If no such thing can be defined LR is set to None bf: List of nipy.labs.spatial_models.hroi.Nroi instances representing individual ROIs p: array of shape (nnodes): likelihood of the data under H1 over some sampling grid Notes ----- In that case, the DPMM is used to derive a spatial density of significant local maxima in the volume. 
Each terminal (leaf) region which is a posteriori significant enough is assigned to the nearest mode of this distribution fixme ----- The number of itertions should become a parameter """ bf, gf0, sub, gfc = compute_individual_regions( dom, lbeta, smin, theta, 'prior', verbose) crmap, LR, bf, p = bsa_dpmm(bf, gf0, sub, gfc, dmax, thq, ths, verbose) return crmap, LR, bf, p def compute_BSA_quick(dom, lbeta, dmax, thq=0.5, smin=5, ths=0, theta=3.0, verbose=0): """Idem compute_BSA_simple, but this one does not estimate the full density (on small datasets, it can be much faster) Parameters ---------- dom : StructuredDomain instance, Description of the spatial context of the data lbeta: an array of shape (nbnodes, subjects): the multi-subject statistical maps dmax float>0: expected cluster std in the common space in units of coord thq = 0.5 (float): posterior significance threshold should be in the [0,1] interval smin = 5 (int): minimal size of the regions to validate them theta = 3.0 (float): first level threshold method: string, optional, the method used to assess the prior significance of the regions verbose=0: verbosity mode Returns ------- crmap: array of shape (nnodes): the resulting group-level labelling of the space LR: a instance of sbf.LandmarkRegions that describes the ROIs found in inter-subject inference If no such thing can be defined LR is set to None bf: List of nipy.labs.spatial_models.hroi.Nroi instances representing individual ROIs coclust: array of shape (nr, nr): co-labelling matrix that gives for each pair of cross_subject regions how likely they are in the same class according to the model """ bf, gf0, sub, gfc = compute_individual_regions( dom, lbeta, smin, theta, 'prior', verbose) crmap, LR, bf, co_clust = bsa_dpmm2( bf, gf0, sub, gfc, dmax, thq, ths, verbose) return crmap, LR, bf, co_clust def compute_BSA_loo(dom, lbeta, dmax, thq=0.5, smin=5, ths=0, theta=3.0, verbose=0): """ Compute the Bayesian Structural Activation paterns - with statistical validation Parameters ---------- dom: StructuredDomain instance, Description of the spatial context of the data lbeta: an array of shape (nbnodes, subjects): the multi-subject statistical maps dmax float>0: expected cluster std in the common space in units of coord thq = 0.5 (float): posterior significance threshold should be in the [0,1] interval smin = 5 (int): minimal size of the regions to validate them theta = 3.0 (float): first level threshold method: string, optional, the method used to assess the prior significance of the regions verbose=0: verbosity mode Returns ------- mll : float the average cross-validated log-likelihood across subjects ml0 : float the log-likelihood of the model under a global null hypothesis """ n_subj = lbeta.shape[1] nvox = dom.size bf, gf0, sub, gfc = compute_individual_regions( dom, lbeta, smin, theta, 'gauss_mixture', verbose) p = np.zeros(nvox) g0 = 1. / (np.sum(dom.local_volume)) if len(sub) < 1: return np.log(g0), np.log(g0) sub = np.concatenate(sub).astype(np.int) gfc = np.concatenate(gfc) gf0 = np.concatenate(gf0) # prepare the DPMM g1 = g0 dim = dom.em_dim prior_precision = 1. 
/ (dmax * dmax) * np.ones((1, dim), np.float) dof = 10 burnin = 100 nis = 300 ll0 = [] ll2 = [] for s in range(n_subj): if np.sum(sub == s) > 0: spatial_coords = gfc[sub == s] p, q = dpmm( gfc[sub != s], 0.5, g0, g1, dof, prior_precision, 1 - gf0[sub != s], sub[sub != s], burnin, spatial_coords, nis) pp = gf0[sub == s] * g0 + p * (1 - gf0[sub == s]) ll2.append(np.mean(np.log(pp))) ll0.append(np.mean(np.log(g0))) ml0 = np.mean(np.array(ll0)) mll = np.mean(np.array(ll2)) if verbose: print 'average cross-validated log likelihood' print 'null model: ', ml0, ' alternative model: ', mll return mll, ml0 nipy-0.3.0/nipy/labs/spatial_models/bsa_io.py000066400000000000000000000166301210344137400212040ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This module is the interface to the bayesian_structural_analysis (bsa) module It handles the images provided as input and produces result images. """ import numpy as np import os.path as op from nibabel import load, save, Nifti1Image from ..mask import intersect_masks from .bayesian_structural_analysis import (compute_BSA_simple, compute_BSA_quick, compute_BSA_loo) from .discrete_domain import domain_from_image def make_bsa_image( mask_images, betas, theta=3., dmax=5., ths=0, thq=0.5, smin=0, swd=None, method='simple', subj_id=None, nbeta='default', dens_path=None, cr_path=None, verbose=0, reshuffle=False): """ Main function for performing bsa on a set of images. It creates the some output images in the given directory Parameters ---------- mask_images: A list of image paths that yield binary images, one for each subject the number os subjects, nsubj, is taken as len(mask_images) betas: A list of image paths that yields the activation images, one for each subject theta=3., threshold used to ignore all the image data that si below dmax=5., prior width of the spatial model corresponds to multi-subject uncertainty ths=0: threshold on the representativity measure of the obtained regions thq=0.5: p-value of the representativity test: test = p(representativity>ths)>thq smin=0: minimal size (in voxels) of the extracted blobs smaller blobs are merged into larger ones swd: string, optional if not None, output directory method='simple': applied region detection method; to be chose among 'simple', 'quick', 'loo' subj_id=None: list of strings, identifiers of the subjects. 
by default it is range(nsubj) nbeta='default', string, identifier of the contrast dens_path=None, string, path of the output density image if False, no image is written if None, the path is computed from swd, nbeta cr_path=None, string, path of the (4D) output label image if False, no ime is written if None, many images are written, with paths computed from swd, subj_id and nbeta reshuffle: bool, optional if true, randomly swap the sign of the data Returns ------- AF: an nipy.labs.spatial_models.structural_bfls.landmark_regions instance that describes the structures found at the group level None is returned if nothing has been found significant at the group level BF : a list of nipy.labs.spatial_models.hroi.Nroi instances (one per subject) that describe the individual coounterpart of AF if method=='loo', the output is different: mll, float, the average likelihood of the data under the model after cross validation ll0, float the log-likelihood of the data under the global null fixme ===== unique mask should be allowed """ # Sanity check if len(mask_images) != len(betas): raise ValueError("the number of masks and activation images" \ "should be the same") nsubj = len(mask_images) if subj_id == None: subj_id = [str(i) for i in range(nsubj)] # Read the referential information nim = load(mask_images[0]) ref_dim = nim.shape[:3] affine = nim.get_affine() # Read the masks and compute the "intersection" mask = np.reshape(intersect_masks(mask_images), ref_dim).astype('u8') # encode it as a domain dom = domain_from_image(Nifti1Image(mask, affine), nn=18) nvox = dom.size # read the functional images lbeta = [] for s in range(nsubj): rbeta = load(betas[s]) beta = np.reshape(rbeta.get_data(), ref_dim) lbeta.append(beta[mask > 0]) lbeta = np.array(lbeta).T if reshuffle: rswap = 2 * (np.random.randn(nsubj) > 0.5) - 1 lbeta = np.dot(lbeta, np.diag(rswap)) # launch the method crmap = np.zeros(nvox) p = np.zeros(nvox) AF = None BF = [None for s in range(nsubj)] if method == 'simple': crmap, AF, BF, p = compute_BSA_simple( dom, lbeta, dmax, thq, smin, ths, theta, verbose=verbose) if method == 'quick': crmap, AF, BF, co_clust = compute_BSA_quick( dom, lbeta, dmax, thq, smin, ths, theta, verbose=verbose) density = np.zeros(nvox) crmap = AF.map_label(dom.coord, 0.95, dmax) if method == 'loo': mll, ll0 = compute_BSA_loo( dom, lbeta, dmax, thq, smin, ths, theta, verbose=verbose) return mll, ll0 # Write the results as images # the spatial density image if dens_path is not False: density = np.zeros(ref_dim) density[mask > 0] = p wim = Nifti1Image(density, affine) wim.get_header()['descrip'] = 'group-level spatial density \ of active regions' if dens_path == None: dens_path = op.join(swd, "density_%s.nii" % nbeta) save(wim, dens_path) if cr_path == False: return AF, BF default_idx = AF.k + 2 if cr_path == None and swd == None: return AF, BF if cr_path == None: # write a 3D image for group-level labels cr_path = op.join(swd, "CR_%s.nii" % nbeta) labels = - 2 * np.ones(ref_dim) labels[mask > 0] = crmap wim = Nifti1Image(labels.astype('int16'), affine) wim.get_header()['descrip'] = 'group Level labels from bsa procedure' save(wim, cr_path) # write a prevalence image cr_path = op.join(swd, "prevalence_%s.nii" % nbeta) prev = np.zeros(ref_dim) prev[mask > 0] = AF.prevalence_density() wim = Nifti1Image(prev, affine) wim.get_header()['descrip'] = 'Weighted prevalence image' save(wim, cr_path) # write 3d images for the subjects for s in range(nsubj): label_image = op.join(swd, "AR_s%s_%s.nii" % (subj_id[s], nbeta)) labels = 
- 2 * np.ones(ref_dim) labels[mask > 0] = -1 if BF[s] is not None: nls = BF[s].get_roi_feature('label') nls[nls == - 1] = default_idx lab = BF[s].label lab[lab > - 1] = nls[lab[lab > - 1]] labels[mask > 0] = lab wim = Nifti1Image(labels.astype('int16'), affine) wim.get_header()['descrip'] = \ 'Individual label image from bsa procedure' save(wim, label_image) else: # write everything in a single 4D image wdim = (ref_dim[0], ref_dim[1], ref_dim[2], nsubj + 1) labels = - 2 * np.ones(wdim, 'int16') labels[mask > 0, 0] = crmap for s in range(nsubj): labels[mask > 0, s + 1] = - 1 if BF[s] is not None: nls = BF[s].get_roi_feature('label') nls[nls == - 1] = default_idx lab = BF[s].label lab[lab > - 1] = nls[lab[lab > - 1]] labels[mask > 0, s + 1] = lab wim = Nifti1Image(labels, affine) wim.get_header()['descrip'] = 'group Level and individual labels\ from bsa procedure' save(wim, cr_path) return AF, BF nipy-0.3.0/nipy/labs/spatial_models/discrete_domain.py000066400000000000000000000556021210344137400231030ustar00rootroot00000000000000""" This module defines the StructuredDomain class, that represents a generic neuroimaging kind of domain This is meant to provide a unified API to deal with n-d imaged and meshes. Author: Bertrand Thirion, 2010 """ import numpy as np import scipy.sparse as sp from nibabel import load, Nifti1Image, save from nipy.algorithms.graph import WeightedGraph, wgraph_from_coo_matrix, \ wgraph_from_3d_grid ############################################################## # Ancillary functions ############################################################## def smatrix_from_3d_array(mask, nn=18): """Create a sparse adjacency matrix from an array Parameters ---------- mask : 3d array, input array, interpreted as a mask nn: int, optional 3d neighboring system to be chosen within {6, 18, 26} Returns ------- coo_mat: a sparse coo matrix, adjacency of the neighboring system """ ijk = np.array(np.where(mask)).T return smatrix_from_3d_idx(ijk, nn) def smatrix_from_3d_idx(ijk, nn=18): """Create a sparse adjacency matrix from 3d index system Parameters ---------- idx:array of shape (n_samples, 3), type int indexes of certain positions in a 3d space nn: int, optional 3d neighboring system to be chosen within {6, 18, 26} Returns ------- coo_mat: a sparse coo matrix, adjacency of the neighboring system """ G = wgraph_from_3d_grid(ijk, nn) return G.to_coo_matrix() def smatrix_from_nd_array(mask, nn=0): """Create a sparse adjacency matrix from an arbitrary nd array Parameters ---------- mask : nd array, input array, interpreted as a mask nn: int, optional nd neighboring system, unsused at the moment Returns ------- coo_mat: a sparse coo matrix, adjacency of the neighboring system """ idx = np.array(np.where(mask)).T return smatrix_from_nd_idx(idx, nn) def smatrix_from_nd_idx(idx, nn=0): """Create a sparse adjacency matrix from nd index system Parameters ---------- idx:array of shape (n_samples, dim), type int indexes of certain positions in a nd space nn: int, optional nd neighboring system, unused at the moment Returns ------- coo_mat: a sparse coo matrix, adjacency of the neighboring system """ n = idx.shape[0] dim = idx.shape[1] nidx = idx - idx.min(0) eA = [] eB = [] # compute the edges in each possible direction for d in range(dim): mi = nidx.max(0) + 2 a = np.hstack((1, np.cumprod(mi[:dim - 1]))) v1 = np.dot(nidx, a) assert(np.size(v1) == np.size(np.unique(v1))) o1 = np.argsort(v1) sv1 = v1[o1] nz = np.squeeze(np.nonzero(sv1[:n - 1] - sv1[1:] == - 1)) nz = np.reshape(nz, np.size(nz)) 
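        # Consecutive sorted linear indices that differ by exactly one are
        # grid neighbours along the axis with unit stride in the linear
        # encoding; nidx is rolled at the end of each iteration so that
        # every axis is visited in turn.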
eA.append(o1[nz]) eB.append(o1[nz + 1]) nidx = np.roll(nidx, 1, 1) eA = np.concatenate(eA) eB = np.concatenate(eB) E = 2 * np.size(eA) # create a graph structure if E == 0: return sp.coo_matrix((n, n)) edges = np.vstack((np.hstack((eA, eB)), np.hstack((eB, eA)))).T weights = np.ones(E) G = WeightedGraph(n, edges, weights) return G.to_coo_matrix() def array_affine_coord(mask, affine): """Compute coordinates from a boolean array and an affine transform Parameters ---------- mask: nd array, input array, interpreted as a mask affine: (n+1, n+1) matrix, affine transform that maps the mask points to some embedding space Returns ------- coords: array of shape(sum(mask>0), n), the computed coordinates """ idx = np.array(np.where(mask)).T return idx_affine_coord(idx, affine) def idx_affine_coord(idx, affine): """Compute coordinates from a set of indexes and an affine transform Parameters ---------- idx:array of shape (n_samples, dim), type int indexes of certain positions in a nd space affine: (n+1, n+1) matrix, affine transform that maps the mask points to some embedding space Returns ------- coords: array of shape(sum(mask>0), n), the computed coordinates """ size = idx.shape[0] hidx = np.hstack((idx, np.ones((size, 1)))) coord = np.dot(hidx, affine.T)[:, 0:-1] return coord def reduce_coo_matrix(mat, mask): """Reduce a supposedly coo_matrix to the vertices in the mask Parameters ---------- mat: sparse coo_matrix, input matrix mask: boolean array of shape mat.shape[0], desired elements """ G = wgraph_from_coo_matrix(mat) K = G.subgraph(mask) if K is None: return None return K.to_coo_matrix() ################################################################# # Functions to instantiate StructuredDomains ################################################################# def domain_from_binary_array(mask, affine=None, nn=0): """Return a StructuredDomain from an n-d array Parameters ---------- mask: np.array instance a supposedly boolean array that repesents the domain affine: np.array, optional affine transform that maps the array coordinates to some embedding space by default, this is np.eye(dim+1, dim+1) nn: neighboring system considered unsued at the moment """ dim = len(mask.shape) if affine is None: affine = np.eye(dim + 1) mask = mask > 0 vol = np.absolute(np.linalg.det(affine)) * np.ones(np.sum(mask)) coord = array_affine_coord(mask, affine) topology = smatrix_from_nd_array(mask) return StructuredDomain(dim, coord, vol, topology) def domain_from_image(mim, nn=18): """Return a StructuredDomain instance from the input mask image Parameters ---------- mim: NiftiIImage instance, or string path toward such an image supposedly a mask (where is used to crate the DD) nn: int, optional neighboring system considered from the image can be 6, 18 or 26 Returns ------- The corresponding StructuredDomain instance """ if isinstance(mim, basestring): iim = load(mim) else: iim = mim return domain_from_binary_array(iim.get_data(), iim.get_affine(), nn) def grid_domain_from_binary_array(mask, affine=None, nn=0): """Return a NDGridDomain from an n-d array Parameters ---------- mask: np.array instance a supposedly boolean array that repesents the domain affine: np.array, optional affine transform that maps the array coordinates to some embedding space by default, this is np.eye(dim+1, dim+1) nn: neighboring system considered unsued at the moment """ dim = len(mask.shape) shape = mask.shape if affine is None: affine = np.eye(dim + 1) mask = mask > 0 ijk = np.array(np.where(mask)).T vol = 
np.absolute(np.linalg.det(affine[:3, 0:3])) * np.ones(np.sum(mask)) topology = smatrix_from_nd_idx(ijk, nn) return NDGridDomain(dim, ijk, shape, affine, vol, topology) def grid_domain_from_image(mim, nn=18): """Return a NDGridDomain instance from the input mask image Parameters ---------- mim: NiftiIImage instance, or string path toward such an image supposedly a mask (where is used to crate the DD) nn: int, optional neighboring system considered from the image can be 6, 18 or 26 Returns ------- The corresponding NDGridDomain instance """ if isinstance(mim, basestring): iim = load(mim) else: iim = mim return grid_domain_from_binary_array(iim.get_data(), iim.get_affine(), nn) def grid_domain_from_shape(shape, affine=None): """Return a NDGridDomain from an n-d array Parameters ---------- shape: tuple the shape of a rectangular domain. affine: np.array, optional affine transform that maps the array coordinates to some embedding space. By default, this is np.eye(dim+1, dim+1) """ dim = len(shape) if affine is None: affine = np.eye(dim + 1) rect = np.ones(shape) ijk = np.array(np.where(rect)).T vol = np.absolute(np.linalg.det(affine[:3, 0:3])) * np.ones(np.sum(rect)) topology = smatrix_from_nd_idx(ijk, 0) return NDGridDomain(dim, ijk, shape, affine, vol, topology) ################################################################ # Domain from mesh ################################################################ class MeshDomain(object): """ temporary class to handle meshes """ def __init__(self, coord, triangles): """Initialize mesh domain instance Parameters ---------- coord: array of shape (n_vertices, 3), the node coordinates triangles: array of shape(n_triables, 3), indices of the nodes per triangle """ self.coord = coord self.triangles = triangles self.V = len(coord) # fixme: implement consistency checks def area(self): """Return array of areas for each node Returns ------- area: array of shape self.V, area of each node """ E = len(self.triangles) narea = np.zeros(self.V) def _area(a, b): """Area spanned by the vectors(a,b) in 3D """ c = np.array([a[1] * b[2] - a[2] * b[1], - a[0] * b[2] + a[2] * b[0], a[0] * b[1] - a[1] * b[0]]) return np.sqrt((c ** 2).sum()) for e in range(E): i, j, k = self.triangles[e] a = self.coord[i] - self.coord[k] b = self.coord[j] - self.coord[k] ar = _area(a, b) narea[i] += ar narea[j] += ar narea[k] += ar narea /= 6 # because division by 2 has been 'forgotten' in area computation # the area of a triangle is divided into the 3 vertices return narea def topology(self): """Returns a sparse matrix that represents the connectivity in self """ E = len(self.triangles) edges = np.zeros((3 * E, 2)) weights = np.ones(3 * E) for i in range(E): sa, sb, sc = self.triangles[i] edges[3 * i] = np.array([sa, sb]) edges[3 * i + 1] = np.array([sa, sc]) edges[3 * i + 2] = np.array([sb, sc]) G = WeightedGraph(self.V, edges, weights) # symmeterize the graph G = G.symmeterize() # remove redundant edges G = G.cut_redundancies() # make it a metric graph G.set_euclidian(self.coord) return G.to_coo_matrix() def domain_from_mesh(mesh): """Instantiate a StructuredDomain from a gifti mesh Parameters ---------- mesh: nibabel gifti mesh instance, or path to such a mesh """ if isinstance(mesh, basestring): from nibabel.gifti import read mesh_ = read(mesh) else: mesh_ = mesh if len(mesh_.darrays) == 2: cor, tri = mesh_.darrays elif len(mesh_.darrays) == 3: cor, nor, tri = mesh_.darrays else: raise Exception("%d arrays in gifti file (case not handled)" \ % len(mesh_.darrays)) mesh_dom = 
MeshDomain(cor.data, tri.data) vol = mesh_dom.area() topology = mesh_dom.topology() dim = 2 return StructuredDomain(dim, mesh_dom.coord, vol, topology) ################################################################ # StructuredDomain class ################################################################ class DiscreteDomain(object): """ Descriptor of a certain domain that consists of discrete elements that are characterized by a coordinate system and a topology: the coordinate system is specified through a coordinate array the topology encodes the neighboring system """ def __init__(self, dim, coord, local_volume, id='', referential=''): """Initialize discrete domain instance Parameters ---------- dim: int, the (physical) dimension of the domain coord: array of shape(size, em_dim), explicit coordinates of the domain sites local_volume: array of shape(size), yields the volume associated with each site id: string, optional, domain identifier referential: string, optional, identifier of the referential of the coordinates system Caveat ------ em_dim may be greater than dim e.g. (meshes coordinate in 3D) """ # dimension self.dim = dim # number of discrete elements self.size = coord.shape[0] # coordinate system if np.size(coord) == coord.shape[0]: coord = np.reshape(coord, (np.size(coord), 1)) if np.size(coord) == 0: self.em_dim = dim else: self.em_dim = coord.shape[1] if self.em_dim < dim: raise ValueError('Embedding dimension cannot be smaller than dim') self.coord = coord # volume if np.size(local_volume) != self.size: raise ValueError("Inconsistent Volume size") if (local_volume < 0).any(): raise ValueError('Volume should be positive') self.local_volume = np.ravel(local_volume) self.id = id self.referential = referential self.features = {} def copy(self): """Returns a copy of self """ new_dom = DiscreteDomain(self.dim, self.coord.copy(), self.local_volume.copy(), self.id, self.referential) for fid in self.features.keys(): new_dom.set_feature(fid, self.get_feature(fid).copy()) return new_dom def get_coord(self): """Returns self.coord """ return self.coord def get_volume(self): """Returns self.local_volume """ return self.local_volume def connected_components(self): """returns a labelling of the domain into connected components """ if self.topology is not None: return wgraph_from_coo_matrix(self.topology).cc() else: return [] def mask(self, bmask, id=''): """Returns an DiscreteDomain instance that has been further masked """ if bmask.size != self.size: raise ValueError('Invalid mask size') svol = self.local_volume[bmask] scoord = self.coord[bmask] DD = DiscreteDomain(self.dim, scoord, svol, id, self.referential) for fid in self.features.keys(): f = self.features.pop(fid) DD.set_feature(fid, f[bmask]) return DD def set_feature(self, fid, data, override=True): """Append a feature 'fid' Parameters ---------- fid: string, feature identifier data: array of shape(self.size, p) or self.size the feature data """ if data.shape[0] != self.size: raise ValueError('Wrong data size') if (fid in self.features) & (override == False): return self.features.update({fid: data}) def get_feature(self, fid): """Return self.features[fid] """ return self.features[fid] def representative_feature(self, fid, method): """Compute a statistical representative of the within-Foain feature Parameters ---------- fid: string, feature id method: string, method used to compute a representative to be chosen among 'mean', 'max', 'median', 'min' """ f = self.get_feature(fid) if method == "mean": return np.mean(f, 0) if method == 
"min": return np.min(f, 0) if method == "max": return np.max(f, 0) if method == "median": return np.median(f, 0) def integrate(self, fid): """Integrate certain feature over the domain and returns the result Parameters ---------- fid : string, feature identifier, by default, the 1 function is integrataed, yielding domain volume Returns ------- lsum = array of shape (self.feature[fid].shape[1]), the result """ if fid == None: return np.sum(self.local_volume) ffid = self.features[fid] if np.size(ffid) == ffid.shape[0]: ffid = np.reshape(ffid, (self.size, 1)) slv = np.reshape(self.local_volume, (self.size, 1)) return np.sum(ffid * slv, 0) class StructuredDomain(DiscreteDomain): """ Besides DiscreteDomain attributed, StructuredDomain has a topology, which allows many operations (morphology etc.) """ def __init__(self, dim, coord, local_volume, topology, did='', referential=''): """Initialize structured domain instance Parameters ---------- dim: int, the (physical) dimension of the domain coord: array of shape(size, em_dim), explicit coordinates of the domain sites local_volume: array of shape(size), yields the volume associated with each site topology: sparse binary coo_matrix of shape (size, size), that yields the neighboring locations in the domain did: string, optional, domain identifier referential: string, optional, identifier of the referential of the coordinates system """ DiscreteDomain.__init__(self, dim, coord, local_volume, id, referential) # topology if topology is not None: if topology.shape != (self.size, self.size): raise ValueError('Incompatible shape for topological model') self.topology = topology def mask(self, bmask, did=''): """Returns a StructuredDomain instance that has been further masked """ td = DiscreteDomain.mask(self, bmask) stopo = reduce_coo_matrix(self.topology, bmask) dd = StructuredDomain(self.dim, td.coord, td.local_volume, stopo, did, self.referential) for fid in td.features.keys(): dd.set_feature(fid, td.features.pop(fid)) return dd class NDGridDomain(StructuredDomain): """ Particular instance of StructuredDomain, that receives 3 additional variables: affine: array of shape (dim+1, dim+1), affine transform that maps points to a coordinate system shape: dim-tuple, shape of the domain ijk: array of shape(size, dim), int grid coordinates of the points This is to allow easy conversion to images when dim==3, and for compatibility with previous classes """ def __init__(self, dim, ijk, shape, affine, local_volume, topology, referential=''): """Initialize ndgrid domain instance Parameters ---------- dim: int, the (physical) dimension of the domain ijk: array of shape(size, dim), int grid coordinates of the points shape: dim-tuple, shape of the domain affine: array of shape (dim+1, dim+1), affine transform that maps points to a coordinate system local_volume: array of shape(size), yields the volume associated with each site topology: sparse binary coo_matrix of shape (size, size), that yields the neighboring locations in the domain referential: string, optional, identifier of the referential of the coordinates system Fixme ----- local_volume might be computed on-the-fly as |det(affine)| """ # shape if len(shape) != dim: raise ValueError('Incompatible shape and dim') self.shape = shape # affine if affine.shape != (dim + 1, dim + 1): raise ValueError('Incompatible dim and affine') self.affine = affine # ijk if (np.size(ijk) == ijk.shape[0]) & (np.size(ijk) > 0): ijk = np.reshape(ijk, (ijk.size, 1)) if (ijk.max(0) + 1 > shape).any(): raise ValueError('Provided indices 
do not fit within shape') self.ijk = ijk # coord coord = idx_affine_coord(ijk, affine) StructuredDomain.__init__(self, dim, coord, local_volume, topology) def mask(self, bmask): """Returns an instance of self that has been further masked """ if bmask.size != self.size: raise ValueError('Invalid mask size') svol = self.local_volume[bmask] stopo = reduce_coo_matrix(self.topology, bmask) sijk = self.ijk[bmask] DD = NDGridDomain(self.dim, sijk, self.shape, self.affine, svol, stopo, self.referential) for fid in self.features.keys(): f = self.features.pop(fid) DD.set_feature(fid, f[bmask]) return DD def to_image(self, path=None, data=None): """Write itself as a binary image, and returns it Parameters ---------- path: string, path of the output image, if any data: array of shape self.size, data to put in the nonzer-region of the image """ if data is None: wdata = np.zeros(self.shape, np.int8) else: wdata = np.zeros(self.shape, data.dtype) wdata[self.ijk[:, 0], self.ijk[:, 1], self.ijk[:, 2]] = 1 if data is not None: if data.size != self.size: raise ValueError('incorrect data size') wdata[wdata > 0] = data nim = Nifti1Image(wdata, self.affine) nim.get_header()['descrip'] = 'mask image representing domain %s' \ % self.id if path is not None: save(nim, path) return nim def make_feature_from_image(self, path, fid=''): """Extract the information from an image to make it a domain a feature Parameters ---------- path: string or Nifti1Image instance, the image from which one wished to extract data fid: string, optional identifier of the resulting feature. if '', the feature is not stored Returns ------- the correponding set of values """ if isinstance(path, basestring): nim = load(path) else: nim = path if (nim.get_affine() != self.affine).any(): raise ValueError('nim and self do not have the same referential') data = nim.get_data() feature = data[self.ijk[:, 0], self.ijk[:, 1], self.ijk[:, 2]] if fid is not '': self.features[fid] = feature return feature nipy-0.3.0/nipy/labs/spatial_models/hierarchical_parcellation.py000066400000000000000000000377101210344137400251250ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Computation of parcellations using a hierarchical approach. Author: Bertrand Thirion, 2008 """ import numpy as np from numpy.random import rand from nipy.algorithms.clustering.utils import kmeans, voronoi from .parcellation import MultiSubjectParcellation from nipy.algorithms.graph.field import Field from nipy.algorithms.graph.graph import wgraph_from_coo_matrix def _jointly_reduce_data(data1, data2, chunksize): lnvox = data1.shape[0] aux = np.argsort(rand(lnvox)) [:np.minimum(chunksize, lnvox)] rdata1 = data1[aux] rdata2 = data2[aux] return rdata1, rdata2 def _reduce_and_concatenate(data1, data2, chunksize): nb_subj = len(data1) rset1 = [] rset2 = [] for s in range(nb_subj): rdata1, rdata2 = _jointly_reduce_data(data1[s], data2[s], chunksize / nb_subj) rset1.append(rdata1) rset2.append(rdata2) rset1 = np.concatenate(rset1) rset2 = np.concatenate(rset2) return rset1, rset2 def _field_gradient_jac(ref, target): """ Given a reference field ref and a target field target compute the jacobian of the target with respect to ref Parameters ---------- ref: Field instance that yields the topology of the space target : array of shape(ref.V,dim) Returns ------- fgj: array of shape (ref.V) that gives the jacobian implied by the ref.field->target transformation. 
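Notes
-----
For each node ``i`` with at least ``dim`` neighbours ``j``, the local
transformation is estimated by least squares as
``FG = pinv(xyz[j] - xyz[i]) . (target[j] - target[i])`` and
``fgj[i] = det(FG)``; nodes with fewer neighbours get a Jacobian of 1.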
""" import numpy.linalg as nl n = ref.V xyz = ref.field dim = xyz.shape[1] fgj = [] ln = ref.list_of_neighbors() for i in range(n): j = ln[i] if np.size(j) > dim - 1: dx = np.squeeze(xyz[j] - xyz[i]) df = np.squeeze(target[j] - target[i]) FG = np.dot(nl.pinv(dx), df) fgj.append(nl.det(FG)) else: fgj.append(1) fgj = np.array(fgj) return fgj def _exclusion_map_dep(i, ref, target, targeti): """ ancillary function to determine admissible values of some position within some predefined values Parameters ---------- i (int): index of the structure under consideration ref: Field that represent the topological structure of parcels and their standard position target: array of shape (ref.V,3): current posistion of the parcels targeti array of shape (n,3): possible new positions for the ith item Returns ------- emap: aray of shape (n): a potential that yields the fitness of the proposed positions given the current configuration rmin (double): ancillary parameter """ xyz = ref.field ln = ref.list_of_neighbors() j = ln[i] if np.size(j) > 0: dx = xyz[j] - xyz[i] dx = np.squeeze(dx) rmin = np.min(np.sum(dx ** 2, 1)) / 4 u0 = xyz[i] + np.mean(target[j] - xyz[j], 1) emap = - np.sum((targeti - u0) ** 2, 1) + rmin else: emap = np.zeros(targeti.shape[0]) return emap def _exclusion_map(i, ref, target, targeti): """Ancillary function to determin admissible values of some position within some predefined values Parameters ---------- i (int): index of the structure under consideration ref: Field that represent the topological structure of parcels and their standard position target= array of shape (ref.V,3): current posistion of the parcels targeti array of shape (n,3): possible new positions for the ith item Returns ------- emap: aray of shape (n): a potential that yields the fitness of the proposed positions given the current configuration rmin (double): ancillary parameter """ xyz = ref.field fd = target.shape[1] ln = ref.list_of_neighbors() j = ln[i] j = np.reshape(j, np.size(j)) rmin = 0 if np.size(j) > 0: dx = np.reshape(xyz[j] - xyz[i], (np.size(j), fd)) rmin = np.mean(np.sum(dx ** 2, 1)) / 4 u0 = xyz[i] + np.mean(target[j] - xyz[j], 0) emap = rmin - np.sum((targeti - u0) ** 2, 1) for k in j: amap = np.sum((targeti - target[k]) ** 2, 1) - rmin / 4 emap[amap < 0] = amap[amap < 0] else: emap = np.zeros(targeti.shape[0]) return emap, rmin def _field_gradient_jac_Map_(i, ref, target, targeti): """ Given a reference field ref and a target field target compute the jacobian of the target with respect to ref """ import scipy.linalg as nl xyz = ref.field fgj = [] ln = ref.list_of_neighbors() j = ln[i] if np.size(j) > 0: dx = xyz[j] - xyz[i] dx = np.squeeze(dx) idx = nl.pinv(dx) for k in range(targeti.shape[0]): df = target[j] - targeti[k] df = np.squeeze(df) fg = np.dot(idx, df) fgj.append(nl.det(fg)) else: fgj = np.zeros(targeti.shape[0]) fgj = np.array(fgj) return fgj def _field_gradient_jac_Map(i, ref, target, targeti): """ Given a reference field ref and a target field target compute the jacobian of the target with respect to ref """ import scipy.linalg as nl xyz = ref.field fgj = [] ln = ref.list_of_neighbors() j = ln[i] if np.size(j) > 0: dx = xyz[j] - xyz[i] dx = np.squeeze(dx) idx = nl.pinv(dx) for k in range(targeti.shape[0]): df = target[j] - targeti[k] df = np.squeeze(df) fg = np.dot(idx, df) fgj.append(nl.det(fg)) fgj = np.array(fgj) for ij in np.squeeze(j): aux = [] jj = np.squeeze(ln[ij]) dx = xyz[jj] - xyz[ij] dx = np.squeeze(dx) idx = nl.pinv(dx) ji = np.nonzero(jj == i) for k in 
range(targeti.shape[0]): df = target[jj] - target[ij] df[ji] = targeti[k] - target[ij] df = np.squeeze(df) fg = np.dot(idx, df) aux.append(nl.det(fg)) aux = np.array(aux) fgj = np.minimum(fgj, aux) else: fgj = np.zeros(targeti.shape[0]) return fgj def _optim_hparcel(feature, domain, graphs, nb_parcel, lamb=1., dmax=10., niter=5, initial_mask=None, chunksize=1.e5, verbose=0): """ Core function of the heirrachical parcellation procedure. Parameters ---------- feature: list of subject-related feature arrays Pa : parcellation instance that is updated graphs: graph that represents the topology of the parcellation anat_coord: array of shape (nvox,3) space defining set of coordinates nb_parcel: int the number of desrired parcels lamb=1.0: parameter to weight position and feature impact on the algorithm dmax = 10: locality parameter (in the space of anat_coord) to limit surch volume (CPU save) chunksize = int, optional niter = 5: number of iterations in the algorithm verbose=0: verbosity level Returns ------- U: list of arrays of length nsubj subject-dependent parcellations Proto_anat: array of shape (nvox) labelling of the common space (template parcellation) """ nb_subj = len(feature) # a1. perform a rough clustering of the data to make prototype indiv_coord = np.array([domain.coord[initial_mask[:, s] > - 1] for s in range(nb_subj)]) reduced_anat, reduced_feature = _reduce_and_concatenate( indiv_coord, feature, chunksize) _, labs, _ = kmeans(reduced_feature, nb_parcel, Labels=None, maxiter=10) proto_anat = [np.mean(reduced_anat[labs == k], 0) for k in range(nb_parcel)] proto_anat = np.array(proto_anat) proto = [np.mean(reduced_feature[labs == k], 0) for k in range(nb_parcel)] proto = np.array(proto) # a2. topological model of the parcellation # group-level part spatial_proto = Field(nb_parcel) spatial_proto.set_field(proto_anat) spatial_proto.voronoi_diagram(proto_anat, domain.coord) spatial_proto.set_gaussian(proto_anat) spatial_proto.normalize() for git in range(niter): LP = [] LPA = [] U = [] Energy = 0 for s in range(nb_subj): # b.subject-specific instances of the model # b.0 subject-specific information Fs = feature[s] lac = indiv_coord[s] target = proto_anat.copy() lseeds = np.zeros(nb_parcel, np.int) aux = np.argsort(rand(nb_parcel)) toto = np.zeros(lac.shape[0]) for j in range(nb_parcel): # b.1 speed-up :only take a small ball i = aux[j] dx = lac - target[i] iz = np.nonzero(np.sum(dx ** 2, 1) < dmax ** 2) iz = np.reshape(iz, np.size(iz)) if np.size(iz) == 0: iz = np.array([np.argmin(np.sum(dx ** 2, 1))]) # b.2: anatomical constraints lanat = np.reshape(lac[iz], (np.size(iz), domain.coord.shape[1])) pot = np.zeros(np.size(iz)) JM, rmin = _exclusion_map(i, spatial_proto, target, lanat) pot[JM < 0] = np.inf pot[JM >= 0] = - JM[JM >= 0] # b.3: add feature discrepancy df = Fs[iz] - proto[i] df = np.reshape(df, (np.size(iz), proto.shape[1])) pot += lamb * np.sum(df ** 2, 1) # b.4: solution if np.sum(np.isinf(pot)) == np.size(pot): pot = np.sum(dx[iz] ** 2, 1) sol = iz[np.argmin(pot)] target[i] = lac[sol] lseeds[i] = sol toto[sol] = 1 if verbose > 1: jm = _field_gradient_jac(spatial_proto, target) print jm.min(), jm.max(), np.sum(toto > 0) # c.subject-specific parcellation g = graphs[s] f = Field(g.V, g.edges, g.weights, Fs) U.append(f.constrained_voronoi(lseeds)) Energy += np.sum((Fs - proto[U[-1]]) ** 2) / \ np.sum(initial_mask[:, s] > - 1) # recompute the prototypes # (average in subject s) lproto = [np.mean(Fs[U[-1] == k], 0) for k in range(nb_parcel)] lproto = np.array(lproto) lproto_anat 
= np.array([np.mean(lac[U[-1] == k], 0) for k in range(nb_parcel)]) LP.append(lproto) LPA.append(lproto_anat) # recompute the prototypes across subjects proto_mem = proto.copy() proto = np.mean(np.array(LP), 0) proto_anat = np.mean(np.array(LPA), 0) displ = np.sqrt(np.sum((proto_mem - proto) ** 2, 1).max()) if verbose: print 'energy', Energy, 'displacement', displ # recompute the topological model spatial_proto.set_field(proto_anat) spatial_proto.voronoi_diagram(proto_anat, domain.coord) spatial_proto.set_gaussian(proto_anat) spatial_proto.normalize() if displ < 1.e-4 * dmax: break return U, proto_anat def hparcel(domain, ldata, nb_parcel, nb_perm=0, niter=5, mu=10., dmax=10., lamb=100.0, chunksize=1.e5, verbose=0, initial_mask=None): """ Function that performs the parcellation by optimizing the inter-subject similarity while retaining the connectedness within subject and some consistency across subjects. Parameters ---------- domain: discrete_domain.DiscreteDomain instance, yields all the spatial information on the parcelled domain ldata: list of (n_subj) arrays of shape (domain.size, dim) the feature data used to inform the parcellation nb_parcel: int, the number of parcels nb_perm: int, optional, the number of times the parcellation and prfx computation is performed on sign-swaped data niter: int, optional, number of iterations to obtain the convergence of the method information in the clustering algorithm mu: float, optional, relative weight of anatomical information dmax: float optional, radius of allowed deformations lamb: float optional parameter to control the relative importance of space vs function chunksize; int, optional number of points used in internal sub-sampling verbose: bool, optional, verbosity mode initial_mask: array of shape (domain.size, nb_subj), optional initial subject-depedent masking of the domain Returns ------- Pa: the resulting parcellation structure appended with the labelling """ # a various parameters nbvox = domain.size nb_subj = len(ldata) if initial_mask is None: initial_mask = np.ones((nbvox, nb_subj), np.int) graphs = [] feature = [] for s in range(nb_subj): # build subject-specific models of the data lnvox = np.sum(initial_mask[:, s] > - 1) lac = domain.coord[initial_mask[:, s] > - 1] beta = np.reshape(ldata[s], (lnvox, ldata[s].shape[1])) lf = np.hstack((beta, mu * lac / (1.e-15 + np.std(domain.coord, 0)))) feature.append(lf) g = wgraph_from_coo_matrix(domain.topology) g.remove_trivial_edges() graphs.append(g) # main function all_labels, proto_anat = _optim_hparcel( feature, domain, graphs, nb_parcel, lamb, dmax, niter, initial_mask, chunksize=chunksize, verbose=verbose) # write the individual labelling labels = - np.ones((nbvox, nb_subj)).astype(np.int) for s in range(nb_subj): labels[initial_mask[:, s] > -1, s] = all_labels[s] # compute the group-level labels template_labels = voronoi(domain.coord, proto_anat) # create the parcellation pcl = MultiSubjectParcellation(domain, individual_labels=labels, template_labels=template_labels, nb_parcel=nb_parcel) pcl.make_feature('functional', np.rollaxis(np.array(ldata), 1, 0)) if nb_perm > 0: prfx0 = perm_prfx(domain, graphs, feature, nb_parcel, ldata, initial_mask, nb_perm, niter, dmax, lamb, chunksize) return pcl, prfx0 else: return pcl def perm_prfx(domain, graphs, features, nb_parcel, ldata, initial_mask=None, nb_perm=100, niter=5, dmax=10., lamb=100.0, chunksize=1.e5, verbose=1): """ caveat: assumes that the functional dimension is 1 """ from ..utils.reproducibility_measures import ttest # 
permutations for the assesment of the results prfx0 = [] adim = domain.coord.shape[1] nb_subj = len(ldata) for q in range(nb_perm): feature = [] sldata = [] for s in range(nb_subj): lf = features[s].copy() swap = (rand() > 0.5) * 2 - 1 lf[:, 0:-adim] = swap * lf[:, 0:-adim] sldata.append(swap * ldata[s]) feature.append(lf) # optimization part all_labels, proto_anat = _optim_hparcel( feature, domain, graphs, nb_parcel, lamb, dmax, niter, initial_mask, chunksize=chunksize) labels = - np.ones((domain.size, nb_subj)).astype(np.int) for s in range(nb_subj): labels[initial_mask[:, s] > -1, s] = all_labels[s] # compute the group-level labels template_labels = voronoi(domain.coord, proto_anat) # create the parcellation pcl = MultiSubjectParcellation(domain, individual_labels=labels, template_labels=template_labels) pdata = pcl.make_feature('functional', np.rollaxis(np.array(ldata), 1, 0)) prfx = ttest(np.squeeze(pdata)) if verbose: print q, prfx.max(0) prfx0.append(prfx.max(0)) return prfx0 nipy-0.3.0/nipy/labs/spatial_models/hroi.py000066400000000000000000000530661210344137400207150ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This module contains the specification of 'hierarchical ROI' object, Which is used in spatial models of the library such as structural analysis The connection with other classes is not completely satisfactory at the moment: there should be some intermediate classes between 'Fields' and 'hroi' Author : Bertrand Thirion, 2009-2011 Virgile Fritsch """ import numpy as np from nipy.algorithms.graph.graph import WeightedGraph from nipy.algorithms.graph.forest import Forest from nipy.algorithms.graph.field import field_from_coo_matrix_and_data from .mroi import SubDomains NINF = - np.inf def hroi_agglomeration(input_hroi, criterion='size', smin=0): """Performs an agglomeration then a selection of regions so that a certain size or volume criterion is satisfied. Parameters ---------- input_hroi: HierarchicalROI instance The input hROI criterion: str, optional To be chosen among 'size' or 'volume' smin: float, optional The applied criterion Returns ------- output_hroi: HierarchicalROI instance """ if criterion not in ['size', 'volume']: return ValueError('unknown criterion') output_hroi = input_hroi.copy() k = 2 * output_hroi.k if criterion == 'size': value = output_hroi.get_size() if criterion == 'volume': value = output_hroi.get_volume() # iteratively agglomerate regions that are too small while k > output_hroi.k: k = output_hroi.k # regions agglomeration output_hroi.merge_ascending(output_hroi.get_id()[value <= smin]) # suppress parents nodes having only one child output_hroi.merge_descending() # early stopping 1 if output_hroi.k == 0: break # early stopping 2 if criterion == 'size': value = output_hroi.get_size() if criterion == 'volume': value = output_hroi.get_volume() if value.max() < smin: break # finally remove those regions for which the criterion cannot be matched output_hroi.select_roi(output_hroi.get_id()[value > smin]) return output_hroi def HROI_as_discrete_domain_blobs(domain, data, threshold=NINF, smin=0, criterion='size'): """Instantiate an HierarchicalROI as the blob decomposition of data in a certain domain. Parameters ---------- domain : discrete_domain.StructuredDomain instance, Definition of the spatial context. data : array of shape (domain.size) The corresponding data field. threshold : float, optional Thresholding level. 
criterion : string, optional To be chosen among 'size' or 'volume'. smin: float, optional A threshold on the criterion. Returns ------- nroi: HierachicalROI instance with a `signal` feature. """ if threshold > data.max(): # return an empty HROI structure label = - np.ones(data.shape) parents = np.array([]) return HierarchicalROI(domain, label, parents) # check size df = field_from_coo_matrix_and_data(domain.topology, data) idx, parents, label = df.threshold_bifurcations(th=threshold) nroi = HierarchicalROI(domain, label, parents) # create a signal feature data = np.ravel(data) signal = [data[nroi.select_id(id, roi=False)] for id in nroi.get_id()] nroi.set_feature('signal', signal) # agglomerate regions in order to compact the structure if necessary nroi = hroi_agglomeration(nroi, criterion=criterion, smin=smin) return nroi def HROI_from_watershed(domain, data, threshold=NINF): """Instantiate an HierarchicalROI as the watershed of a certain dataset Parameters ---------- domain: discrete_domain.StructuredDomain instance Definition of the spatial context. data: array of shape (domain.size) The corresponding data field. threshold: float, optional Thresholding level. Returns ------- nroi : ``HierarchichalROI`` instance The HierachicalROI instance with a ``seed`` feature. """ if threshold > data.max(): # return an empty HROI structure label = - np.ones(data.shape) parents = np.array([]) return HierarchicalROI(domain, label, parents) df = field_from_coo_matrix_and_data(domain.topology, data) idx, label = df.custom_watershed(0, threshold) parents = np.arange(idx.size).astype(int) nroi = HierarchicalROI(domain, label, parents) nroi.set_roi_feature('seed', idx) return nroi ######################################################################## # Hierarchical ROI ######################################################################## class HierarchicalROI(SubDomains): """Class that handles hierarchical ROIs Parameters ---------- k : int Number of ROI in the SubDomains object label : array of shape (domain.size), dtype=np.int An array use to define which voxel belongs to which ROI. The label values greater than -1 correspond to subregions labelling. The labels are recomputed so as to be consecutive integers. The labels should not be accessed outside this class. One has to use the API mapping methods instead. features : dict {str: list of object, length=self.k} Describe the voxels features, grouped by ROI roi_features : dict {str: array-like, shape=(self.k, roi_feature_dim) Describe the ROI features. A special feature, `id`, is read-only and is used to give an unique identifier for region, which is persistent through the MROI objects manipulations. On should access the different ROI's features using ids. parents : np.ndarray, shape(self.k) self.parents[i] is the index of the parent of the i-th ROI. TODO: have the parents as a list of id rather than a list of indices. """ def __init__(self, domain, label, parents, id=None): """Building the HierarchicalROI """ SubDomains.__init__(self, domain, label, id=id) self.parents = np.ravel(parents).astype(np.int) ### # Getters for very basic features or roi features ### def get_volume(self, id=None, ignore_children=True): """Get ROI volume Parameters ---------- id: any hashable type, optional Id of the ROI from which we want to get the volume. Can be None (default) if we want all ROIs's volumes. ignore_children : bool, optional Specify if the volume of the node should include (ignore_children = False) or not the one of its children (ignore_children = True). 
Returns ------- volume : float if an id is provided, or list of float if no id provided (default) """ if ignore_children: # volume of the children is not included volume = SubDomains.get_volume(self, id) else: # volume of the children is included if id is not None: volume = SubDomains.get_volume(self, id) desc = self.make_forest().get_descendants( self.select_id(id), exclude_self=True) # get children volume for k in desc: volume = volume + SubDomains.get_volume( self, self.get_id()[k]) else: volume = [] for id in self.get_id(): roi_volume = SubDomains.get_volume(self, id) desc = self.make_forest().get_descendants( self.select_id(id), exclude_self=True) # get children volume for k in desc: roi_volume = roi_volume + SubDomains.get_volume( self, self.get_id()[k]) volume.append(roi_volume) return volume def get_size(self, id=None, ignore_children=True): """Get ROI size (counted in terms of voxels) Parameters ---------- id: any hashable type, optional Id of the ROI from which we want to get the size. Can be None (default) if we want all ROIs's sizes. ignore_children: bool, optional Specify if the size of the node should include (ignore_children = False) or not the one of its children (ignore_children = True). Returns ------- size: int if an id is provided, or list of int if no id provided (default) """ if ignore_children: # size of the children is not included size = SubDomains.get_size(self, id) else: # size of the children is included if id is not None: size = SubDomains.get_size(self, id) desc = self.make_forest().get_descendants( self.select_id(id), exclude_self=True) # get children size for k in desc: size = size + SubDomains.get_size(self, self.get_id()[k]) else: size = [] for id in self.get_id(): roi_size = SubDomains.get_size(self, id) desc = self.make_forest().get_descendants( self.select_id(id), exclude_self=True) # get children size for k in desc: roi_size = roi_size + SubDomains.get_size( self, self.get_id()[k]) size.append(roi_size) return size def select_roi(self, id_list): """Returns an instance of HROI with only the subset of chosen ROIs. The hierarchy is set accordingly. Parameters ---------- id_list: list of id (any hashable type) The id of the ROI to be kept in the structure. """ valid = np.asarray([int(i in id_list) for i in self.get_id()]) if np.size(id_list) == 0: # handle the case of an empty selection new_parents = np.array([]) self = HierarchicalROI( self.domain, -np.ones(self.label.size), np.array([])) else: # get new parents new_parents = Forest(self.k, self.parents).subforest( valid.astype(np.bool)).parents.astype(np.int) SubDomains.select_roi(self, id_list) self.parents = new_parents self.recompute_labels() def make_graph(self): """Output an nipy graph structure to represent the ROI hierarchy. """ if self.k == 0: return None weights = np.ones(self.k) edges = (np.vstack((np.arange(self.k), self.parents))).T return WeightedGraph(self.k, edges, weights) def make_forest(self): """Output an nipy forest structure to represent the ROI hierarchy. """ if self.k == 0: return None G = Forest(self.k, self.parents) return G def merge_ascending(self, id_list, pull_features=None): """Remove the non-valid ROIs by including them in their parents when it exists. Parameters ---------- id_list: list of id (any hashable type) The id of the ROI to be merged into their parents. Nodes that are their own parent are unmodified. pull_features: list of str List of the ROI features that will be pooled from the children when they are merged into their parents. 
Otherwise, the receiving parent would keep its own ROI feature. """ if pull_features is None: pull_features = [] if self.k == 0: return id_list = [k for k in self.get_id() if k in id_list] # relabel maps old labels to new labels relabel = np.arange(self.k) # merge nodes, one at a time for c_id in id_list: # define alias for clearer indexing c_pos = self.select_id(c_id) p_pos = self.parents[c_pos] p_id = self.get_id()[p_pos] if p_pos != c_pos: # this will be used in many places mask_pos = np.ones(self.k, np.bool) mask_pos[c_pos] = False # set new parents self.parents = self.parents[mask_pos] self.parents[self.parents == c_pos] = p_pos self.parents[self.parents > c_pos] -= 1 self.k -= 1 # merge labels relabel[relabel == c_id] = p_id # compute new features for fid in self.features.keys(): # replace feature # (without the API since self is in an inconsistent state) dj = self.get_feature(fid) dj[p_pos] = np.hstack((dj[self.select_id(c_id)], dj[self.select_id(p_id)])) del dj[c_pos] self.features[fid] = dj # compute new roi features for fid in self.roi_features.keys(): dj = self.get_roi_feature(fid) if fid in pull_features: # modify only if `pull` requested dj[p_pos] = dj[c_pos] self.roi_features[fid] = dj[mask_pos] # update the labels self.label[self.label > -1] = relabel[self.label[self.label > - 1]] self.recompute_labels() def merge_descending(self, pull_features=None): """ Remove the items with only one son by including them in their son Parameters ---------- methods indicates the way possible features are dealt with (not implemented yet) Caveat ------ if roi_features have been defined, they will be removed """ if pull_features is None: pull_features = [] if self.k == 0: return # relabel maps old labels to new labels relabel = np.arange(self.k) # merge nodes, one at a time id_list = self.get_id()[:: - 1] for p_id in id_list: p_pos = self.select_id(p_id) p_children = np.nonzero(self.parents == p_pos)[0] if p_pos in p_children: # remove current node from its children list p_children = p_children[p_children != p_pos] if p_children.size == 1: # merge node if it has only one child c_pos = p_children[0] c_id = self.get_id()[c_pos] mask_pos = np.ones(self.k, np.bool) mask_pos[p_pos] = False # set new parents self.parents[c_pos] = self.parents[p_pos] if self.parents[c_pos] == p_pos: self.parents[c_pos] = c_pos self.parents = self.parents[mask_pos] self.parents[self.parents > p_pos] -= 1 # merge labels relabel[relabel == p_pos] = relabel[c_pos] self.k -= 1 # compute new features for fid in self.features.keys(): # replace feature # (without the API since self is in an inconsistent state) dj = self.get_feature(fid) dj[c_pos] = np.hstack((dj[self.select_id(c_id)], dj[self.select_id(p_id)])) del dj[p_pos] self.features[fid] = dj # compute new roi features for fid in self.roi_features.keys(): dj = self.get_roi_feature(fid) if fid in pull_features: # modify only if `pull` requested dj[c_pos] = dj[p_pos] self.roi_features[fid] = dj[mask_pos] # update HROI structure self.label[self.label > -1] = relabel[self.label[self.label > - 1]] self.recompute_labels() def get_parents(self): """Return the parent of each node in the hierarchy The parents are represented by their position in the nodes flat list. TODO: The purpose of this class API is not to rely on this order, so we should have self.parents as a list of ids instead of a list of positions """ return self.parents def get_leaves_id(self): """Return the ids of the leaves. 
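Returns
-------
leaves_id: np.ndarray
    Ids of the regions that have no child other than themselves.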
""" if self.k == 0: return np.array([]) # locate the positions of the children of each node is_leaf_aux = [np.where(self.parents == k)[0] for k in range(self.k)] # select nodes that has no child (different from themselves) is_leaf = np.asarray( [(len(child) == 0) or (len(child) == 1 and child[0] == i) for i, child in enumerate(is_leaf_aux)]) # finaly return ids return self.get_id()[is_leaf] def reduce_to_leaves(self): """Create a new set of rois which are only the leaves of self. Modification of the structure is done in place. One way therefore want to work on a copy a of a given HROI oject. """ if self.k == 0: # handle the empy HROI case return HierarchicalROI( self.domain, -np.ones(self.domain.size), np.array([])) leaves_id = self.get_leaves_id() self.select_roi(leaves_id) def copy(self): """ Returns a copy of self. self.domain is not copied. """ cp = HierarchicalROI( self.domain, self.label.copy(), self.parents.copy(), self.get_id()) # copy features for fid in self.features.keys(): cp.set_feature(fid, self.get_feature(fid)) # copy ROI features for fid in self.roi_features.keys(): cp.set_roi_feature(fid, self.get_roi_feature(fid)) return cp def representative_feature(self, fid, method='mean', id=None, ignore_children=True, assess_quality=True): """Compute a ROI representative of a given feature. Parameters ---------- fid: str, Feature id method: str, Method used to compute a representative. Chosen among 'mean' (default), 'max', 'median', 'min', 'weighted mean'. id: any hashable type Id of the ROI from which we want to extract a representative feature. Can be None (default) if we want to get all ROIs's representatives. ignore_children: bool, Specify if the volume of the node should include (ignore_children = False) or not the one of its children (ignore_children = True). assess_quality: bool If True, a new roi feature is created, which represent the quality of the feature representative (the number of non-nan value for the feature over the ROI size). Default is False. 
""" rf = [] eps = 1.e-15 feature_quality = np.zeros(self.k) for i, k in enumerate(self.get_id()): f = self.get_feature(fid, k) p_pos = self.select_id(k) if not ignore_children: # also include the children features desc = np.nonzero(self.parents == p_pos)[0] if p_pos in desc: desc = desc[desc != p_pos] for c in desc: f = np.concatenate( (f, self.get_feature(fid, self.get_id()[c]))) # NaN-resistant representative if f.ndim == 2: nan = np.isnan(f.sum(1)) else: nan = np.isnan(f) # feature quality feature_quality[i] = (~nan).sum() / float(nan.size) # compute representative if method == "mean": rf.append(np.mean(f[~nan], 0)) if method == "weighted mean": lvk = self.get_local_volume(k) if not ignore_children: # append weights for children's voxels for c in desc: lvk = np.concatenate( (lvk, self.get_local_volume(fid, self.select_id(c)))) tmp = np.dot(lvk[~nan], f[~nan].reshape((-1, 1))) / \ np.maximum(eps, np.sum(lvk[~nan])) rf.append(tmp) if method == "min": rf.append(np.min(f[~nan])) if method == "max": rf.append(np.max(f[~nan])) if method == "median": rf.append(np.median(f[~nan], 0)) if id is not None: summary_feature = rf[self.select_id(id)] else: summary_feature = rf if assess_quality: self.set_roi_feature('%s_quality' % fid, feature_quality) return np.array(summary_feature) def make_hroi_from_subdomain(sub_domain, parents): """Instantiate an HROi from a SubDomain instance and parents """ hroi = HierarchicalROI(sub_domain.domain, sub_domain.label, parents) # set features for fid in sub_domain.features.keys(): hroi.set_feature(fid, sub_domain.get_feature(fid)) # set ROI features for fid in sub_domain.roi_features.keys(): hroi.set_roi_feature(fid, sub_domain.get_roi_feature(fid)) return hroi nipy-0.3.0/nipy/labs/spatial_models/mroi.py000066400000000000000000000675241210344137400207260ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import numpy as np from nibabel import load, Nifti1Image from . import discrete_domain as ddom ############################################################################## # class SubDomains ############################################################################## class SubDomains(object): """ This is a class to represent multiple ROI objects, where the reference to a given domain is explicit. A mutliple ROI object is a set of ROI defined on a given domain, each having its own 'region-level' characteristics (ROI features). Every voxel of the domain can have its own characteristics yet, defined at the 'voxel-level', but those features can only be accessed familywise (i.e. the values are grouped by ROI). Parameters ---------- k : int Number of ROI in the SubDomains object label : array of shape (domain.size), dtype=np.int An array use to define which voxel belongs to which ROI. The label values greater than -1 correspond to subregions labelling. The labels are recomputed so as to be consecutive integers. The labels should not be accessed outside this class. One has to use the API mapping methods instead. features : dict {str: list of object, length=self.k} Describe the voxels features, grouped by ROI roi_features : dict {str: array-like, shape=(self.k, roi_feature_dim) Describe the ROI features. A special feature, `id`, is read-only and is used to give an unique identifier for region, which is persistent through the MROI objects manipulations. On should access the different ROI's features using ids. 
""" def __init__(self, domain, label, id=None): """Initialize subdomains instance Parameters ---------- domain: ROI instance defines the spatial context of the SubDomains label: array of shape (domain.size), dtype=np.int, An array use to define which voxel belongs to which ROI. The label values greater than -1 correspond to subregions labelling. The labels are recomputed so as to be consecutive integers. The labels should not be accessed outside this class. One has to use the select_id() mapping method instead. id: array of shape (n_roi) Define the ROI identifiers. Once an id has been associated to a ROI it becomes impossible to change it using the API. Hence, one should access ROI through their id to avoid hazardous manipulations. """ # check that label size is consistent with domain if np.size(label) != domain.size: raise ValueError('inconsistent labels and domains specification') self.domain = domain self.label = np.reshape(label, label.size).astype(np.int) # use continuous labels self.recompute_labels() # initialize empty feature/roi_feature dictionaries self.features = {} self.roi_features = {} # set `id` feature: unique and persistent identifier for each roi if id is None: # ids correspond to initial labels self.set_roi_feature('id', np.arange(self.k)) else: # use user-provided ids if len(id) != self.k: raise ValueError("incorrect shape for `id`") else: self.set_roi_feature('id', id) ### # Methods for internal use: id and labels manipulations ### def recompute_labels(self): """Redefine labels so that they are consecutive integers. Labels are used as a map to associate voxels to a given ROI. It is an inner object that should not be accessed outside this class. The number of nodes is updated appropriately. Notes ----- This method must be called everytime the MROI structure is modified. """ lmap = np.unique(self.label[self.label > - 1]) for i, k in enumerate(lmap): self.label[self.label == k] = i # number of ROIs: number of labels > -1 self.k = np.amax(self.label) + 1 def get_id(self): """Return ROI's id list. Users must access ROIs with the use of the identifiers of this list and the methods that give access to their properties/features. """ return self.get_roi_feature('id') def select_id(self, id, roi=True): """Convert a ROI id into an index to be used to index features safely. Parameters ---------- id : any hashable type, must be in self.get_id() The id of the region one wants to access. roi : bool If True (default), return the ROI index in the ROI list. If False, return the indices of the voxels of the ROI with the given id. That way, internal access to self.label can be made. Returns ------- index : int or np.array of shape (roi.size, ) Either the position of the ROI in the ROI list (if roi == True), or the positions of the voxels of the ROI with id `id` with respect to the self.label array. """ if id not in self.get_id(): raise ValueError("Unexisting `id` provided") if roi: index = int(np.where(self.get_id() == id)[0]) else: index = np.where(self.label == np.where(self.get_id() == id)[0])[0] return index ### # General purpose methods ### def copy(self): """Returns a copy of self. Note that self.domain is not copied. 
""" cp = SubDomains(self.domain, self.label.copy(), id=self.get_id()) for fid in self.features.keys(): f = self.get_feature(fid) sf = [np.array(f[k]).copy() for k in range(self.k)] cp.set_feature(fid, sf) for fid in self.roi_features.keys(): cp.set_roi_feature(fid, self.get_roi_feature(fid).copy()) return cp ### # Getters for very basic features or roi features ### def get_coord(self, id=None): """Get coordinates of ROI's voxels Parameters ---------- id: any hashable type Id of the ROI from which we want the voxels' coordinates. Can be None (default) if we want all ROIs's voxels coordinates. Returns ------- coords: array-like, shape=(roi_size, domain_dimension) if an id is provided, or list of arrays of shape(roi_size, domain_dimension) if no id provided (default) """ if id is not None: coords = self.domain.coord[self.select_id(id, roi=False)] else: coords = [self.domain.coord[self.select_id(k, roi=False)] for k in self.get_id()] return coords def get_size(self, id=None): """Get ROI size (counted in terms of voxels) Parameters ---------- id: any hashable type Id of the ROI from which we want to get the size. Can be None (default) if we want all ROIs's sizes. Returns ------- size: int if an id is provided, or list of int if no id provided (default) """ if id is not None: size = np.size(self.select_id(id, roi=False)) else: size = np.array( [np.size(self.select_id(k, roi=False)) for k in self.get_id()]) return size def get_local_volume(self, id=None): """Get volume of ROI's voxels Parameters ---------- id: any hashable type Id of the ROI from which we want the voxels' volumes. Can be None (default) if we want all ROIs's voxels volumes. Returns ------- loc_volume: array-like, shape=(roi_size, ), if an id is provided, or list of arrays of shape(roi_size, ) if no id provided (default) """ if id is not None: loc_volume = self.domain.local_volume[ self.select_id(id, roi=False)] else: loc_volume = [self.domain.local_volume[ self.select_id(k, roi=False)] for k in self.get_id()] return loc_volume def get_volume(self, id=None): """Get ROI volume Parameters ---------- id: any hashable type Id of the ROI from which we want to get the volume. Can be None (default) if we want all ROIs's volumes. Returns ------- volume : float if an id is provided, or list of float if no id provided (default) """ if id is not None: volume = np.sum(self.get_local_volume(id)) else: volume = np.asarray([np.sum(k) for k in self.get_local_volume()]) return volume ### # Methods for features manipulation (user level) ### def get_feature(self, fid, id=None): """Return a voxel-wise feature, grouped by ROI. Parameters ---------- fid: str, Feature to be returned id: any hashable type Id of the ROI from which we want to get the feature. Can be None (default) if we want all ROIs's features. Returns ------- feature: array-like, shape=(roi_size, feature_dim) if an id is provided, or list of arrays, shape=(roi_size, feature_dim) if no id provided (default) """ if fid not in self.features: raise ValueError("the `%s` feature does not exist" % fid) if id is not None: feature = np.asarray(self.features[fid][self.select_id(id)]) else: feature = self.features[fid] return feature def set_feature(self, fid, data, id=None, override=False): """Append or modify a feature Parameters ---------- fid : str feature identifier data: list or array The feature data. Can be a list of self.k arrays of shape(self.size[k], p) or array of shape(self.size[k]) id: any hashable type, optional Id of the ROI from which we want to set the feature. 
Can be None (default) if we want to set all ROIs's features. override: bool, optional Allow feature overriding Note that we cannot create a feature having the same name than a ROI feature. """ # ensure that the `id` field will not be modified if fid == 'id': override = False # check the feature is already present if setting a single roi if fid not in self.features and len(data) != self.k: raise ValueError("`%s` feature does not exist, create it first" % fid) if fid in self.roi_features: raise ValueError("a roi_feature called `%s` already exists" % fid) # check we will not override anything if fid in self.features and not override: #TODO: raise a warning return # modify one particular region if id is not None: # check data size roi_size = self.get_size(id) if len(data) != roi_size: raise ValueError("data for region `%i` should have length %i" % (id, roi_size)) # update feature the_feature = self.get_feature(fid, id) the_feature[self.select_id(id)] = data # modify all regions else: # check data size if len(data) != self.k: raise ValueError("data should have length %i" % self.k) for k in self.get_id(): if len(data[self.select_id(k)]) != self.get_size(k): raise ValueError('Wrong data size for region `%i`' % k) self.features.update({fid: data}) def representative_feature(self, fid, method='mean', id=None, assess_quality=False): """Compute a ROI representative of a given feature. Parameters ---------- fid : str Feature id method : str, optional Method used to compute a representative. Chosen among 'mean' (default), 'max', 'median', 'min', 'weighted mean'. id : any hashable type, optional Id of the ROI from which we want to extract a representative feature. Can be None (default) if we want to get all ROIs's representatives. assess_quality: bool, optional If True, a new roi feature is created, which represent the quality of the feature representative (the number of non-nan value for the feature over the ROI size). Default is False. Returns ------- summary_feature: np.ndarray, shape=(self.k, feature_dim) Representative feature computed according to `method`. """ rf = [] eps = 1.e-15 feature_quality = np.zeros(self.k) for i, k in enumerate(self.get_id()): f = self.get_feature(fid, k) # NaN-resistant representative if f.ndim == 2: nan = np.isnan(f.sum(1)) else: nan = np.isnan(f) # feature quality feature_quality[i] = (~nan).sum() / float(nan.size) # compute representative if method == "mean": rf.append(np.mean(f[~nan], 0)) if method == "weighted mean": lvk = self.get_local_volume(k)[~nan] tmp = np.dot(lvk, f[~nan].reshape((-1, 1))) / \ np.maximum(eps, np.sum(lvk)) rf.append(tmp) if method == "min": rf.append(np.min(f[~nan])) if method == "max": rf.append(np.max(f[~nan])) if method == "median": rf.append(np.median(f[~nan], 0)) if id is not None: summary_feature = rf[self.select_id(id)] else: summary_feature = rf if assess_quality: self.set_roi_feature('%s_quality' % fid, feature_quality) return np.array(summary_feature) def remove_feature(self, fid): """Remove a certain feature Parameters ---------- fid: str Feature id Returns ------- f : object The removed feature. """ return self.features.pop(fid) def feature_to_voxel_map(self, fid, roi=False, method="mean"): """Convert a feature to a flat voxel-mapping array. Parameters ---------- fid: str Identifier of the feature to be mapped. roi: bool, optional If True, compute the map from a ROI feature. method: str, optional Representative feature computation method if `fid` is a feature and `roi` is True. 
Returns ------- res: array-like, shape=(domain.size, feature_dim) A flat array, giving the correspondence between voxels and the feature. """ res = np.zeros(self.label.size) if not roi: f = self.get_feature(fid) for id in self.get_id(): res[self.select_id(id, roi=False)] = f[self.select_id(id)] else: if fid in self.roi_features.keys(): f = self.get_roi_feature(fid) for id in self.get_id(): res[self.select_id(id, roi=False)] = f[self.select_id(id)] elif fid in self.features.keys(): f = self.representative_feature(fid, method=method) for id in self.get_id(): res[self.select_id(id, roi=False)] = f[self.select_id(id)] else: raise ValueError("Wrong feature id provided") return res def integrate(self, fid=None, id=None): """Integrate certain feature on each ROI and return the k results Parameters ---------- fid : str Feature identifier. By default, the 1 function is integrated, yielding ROI volumes. id: any hashable type The ROI on which we want to integrate. Can be None if we want the results for every region. Returns ------- lsum = array of shape (self.k, self.feature[fid].shape[1]), The results """ if fid == None: # integrate the 1 function if no feature id provided if id is not None: lsum = self.get_volume(id) else: lsum = [self.get_volume(k) for k in self.get_id()] else: if id is not None: slvk = np.expand_dims(self.get_local_volume(id), 1) sfk = self.get_feature(fid, id) sfk = np.reshape(sfk, (-1, 1)) lsum = np.sum(sfk * slvk, 0) else: lsum = [] for k in self.get_id(): slvk = np.expand_dims(self.get_local_volume(k), 1) sfk = self.get_feature(fid, k) sfk = np.reshape(sfk, (-1, 1)) sumk = np.sum(sfk * slvk, 0) lsum.append(sumk) return np.array(lsum) def plot_feature(self, fid, ax=None): """Boxplot the distribution of features within ROIs. Note that this assumes 1-d features. Parameters ---------- fid: string the feature identifier ax: axis handle, optional """ f = self.get_feature(fid) if ax is None: import matplotlib.pylab as mp mp.figure() ax = mp.subplot(111) ax.boxplot(f) ax.set_title('ROI-level distribution for feature %s' % fid) ax.set_xlabel('Region index') ax.set_xticks(np.arange(1, self.k + 1)) return ax ### # Methods for ROI features manipulation (user level) ### def get_roi_feature(self, fid, id=None): """ """ if id is not None: feature = self.roi_features[fid][self.select_id(id)] else: feature = np.asarray(self.roi_features[fid]) return feature def set_roi_feature(self, fid, data, id=None, override=False): """Append or modify a ROI feature Parameters ---------- fid: str, feature identifier data: list of self.k features or a single feature The ROI feature data id: any hashable type Id of the ROI of which we want to set the ROI feature. Can be None (default) if we want to set all ROIs's ROI features. override: bool, optional, Allow feature overriding Note that we cannot create a ROI feature having the same name than a feature. Note that the `id` feature cannot be modified as an internal component. 
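Examples
--------
Assuming ``sd`` is a SubDomains instance with two regions (``sd.k == 2``)::

    import numpy as np
    sd.set_roi_feature('mean_signal', np.array([0.5, 1.2]))
    sd.get_roi_feature('mean_signal', sd.get_id()[1])   # 1.2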
""" # check we do not modify the `id` feature if 'id' in self.roi_features.keys() and fid == 'id': return # check we will not override anything if fid in self.roi_features and not override: #TODO: raise a warning return # check the feature is already present if setting a single roi if fid not in self.roi_features and len(data) != self.k: raise ValueError("`%s` feature does not exist, create it first") if fid in self.features: raise ValueError("a feature called `%s` already exists" % fid) # modify one particular region if id is not None: # check data size if len(data) != 1: raise ValueError("data for region `%i` should have length 1") # update feature the_feature = self.get_roi_feature(fid) the_feature[self.select_id(id)] = data else: # check data size if len(data) != self.k: raise ValueError("data should have length %i" % self.k) self.roi_features.update({fid: data}) def remove_roi_feature(self, fid): """Remove a certain ROI feature. The `id` ROI feature cannot be removed. Returns ------- f : object The removed Roi feature. """ if fid != 'id': feature = self.roi_features.pop(fid) else: feature = self.get_id() return feature #TODO: raise a warning otherwise def to_image(self, fid=None, roi=False, method="mean", descrip=None): """Generates a label image that represents self. Parameters ---------- fid: str, Feature to be represented. If None, a binary image of the MROI domain will be we created. roi: bool, Whether or not to write the desired feature as a ROI one. (i.e. a ROI feature corresponding to `fid` will be looked upon, and if not found, a representative feature will be computed from the `fid` feature). method: str, If a feature is written as a ROI feature, this keyword tweaks the way the representative feature is computed. descrip: str, Description of the image, to be written in its header. Notes ----- Requires that self.dom is an ddom.NDGridDomain Returns ------- nim : nibabel nifti image Nifti image corresponding to the ROI feature to be written. """ if not isinstance(self.domain, ddom.NDGridDomain): print 'self.domain is not an NDGridDomain; nothing was written.' return None if fid is None: # write a binary representation of the domain if no fid provided nim = self.domain.to_image(data=(self.label != -1).astype(np.int32)) if descrip is None: descrip = 'binary representation of MROI' else: data = -np.ones(self.label.size, dtype=np.int32) tmp_image = self.domain.to_image() mask = tmp_image.get_data().copy().astype(bool) if not roi: # write a feature if fid not in self.features: raise ValueError("`%s` feature could not be found" % fid) for i in self.get_id(): data[self.select_id(i, roi=False)] = \ self.get_feature(fid, i) else: # write a roi feature if fid in self.roi_features: # write from existing roi feature for i in self.get_id(): data[self.select_id(i, roi=False)] = \ self.get_roi_feature( fid, i) elif fid in self.features: # write from representative feature summary_feature = self.representative_feature( fid, method=method) for i in self.get_id(): data[self.select_id(i, roi=False)] = \ summary_feature[self.select_id(i)] # MROI object was defined on a masked image: we square it back. wdata = -np.ones(mask.shape, data.dtype) wdata[mask] = data nim = Nifti1Image(wdata, tmp_image.get_affine()) # set description of the image if descrip is not None: nim.get_header()['descrip'] = descrip return nim ### # ROIs structure manipulation ### def select_roi(self, id_list): """Returns an instance of MROI with only the subset of chosen ROIs. 
Parameters ---------- id_list: list of id (any hashable type) The id of the ROI to be kept in the structure. """ # handle the case of an empty selection if len(id_list) == 0: self = SubDomains(self.domain, -np.ones(self.label.size)) return # convert id to indices id_list_pos = np.ravel([self.select_id(k) for k in id_list]) # set new labels (= map between voxels and ROI) for id in self.get_id(): if id not in id_list: self.label[self.select_id(id, roi=False)] = -1 self.recompute_labels() self.roi_features['id'] = np.ravel([id_list]) # set new features # (it's ok to do that after labels and id modification since we are # poping out the former features and use the former id indices) for fid in self.features.keys(): f = self.remove_feature(fid) sf = [f[id] for id in id_list_pos] self.set_feature(fid, sf) # set new ROI features # (it's ok to do that after labels and id modification since we are # poping out the former features and use the former id indices) for fid in self.roi_features.keys(): if fid != 'id': f = self.remove_roi_feature(fid) sf = np.ravel(f[id_list_pos]) self.set_roi_feature(fid, sf) def subdomain_from_array(labels, affine=None, nn=0): """Return a SubDomain from an n-d int array Parameters ---------- label: np.array instance A supposedly boolean array that yields the regions. affine: np.array, optional Affine transform that maps the array coordinates to some embedding space by default, this is np.eye(dim+1, dim+1). nn: int, Neighboring system considered. Unused at the moment. Notes ----- Only labels > -1 are considered. """ dom = ddom.grid_domain_from_binary_array( np.ones(labels.shape), affine=affine, nn=nn) return SubDomains(dom, labels.astype(np.int)) def subdomain_from_image(mim, nn=18): """Return a SubDomain instance from the input mask image. Parameters ---------- mim: NiftiIImage instance, or string path toward such an image supposedly a label image nn: int, optional Neighboring system considered from the image can be 6, 18 or 26. Returns ------- The MultipleROI instance Notes ----- Only labels > -1 are considered """ if isinstance(mim, basestring): iim = load(mim) else: iim = mim return subdomain_from_array(iim.get_data(), iim.get_affine(), nn) def subdomain_from_position_and_image(nim, pos): """Keep the set of labels of the image corresponding to a certain index so that their position is closest to the prescribed one. Parameters ---------- mim: NiftiIImage instance, or string path toward such an image supposedly a label image pos: array of shape(3) or list of length 3, the prescribed position """ tmp = subdomain_from_image(nim) coord = np.array([tmp.domain.coord[tmp.label == k].mean(0) for k in range(tmp.k)]) idx = ((coord - pos) ** 2).sum(1).argmin() return subdomain_from_array(nim.get_data() == idx, nim.get_affine()) def subdomain_from_balls(domain, positions, radii): """Create discrete ROIs as a set of balls within a certain coordinate systems. 
    Parameters
    ----------
    domain: StructuredDomain instance,
        the description of a discrete domain
    positions: array of shape(k, dim):
        the positions of the balls
    radii: array of shape(k):
        the sphere radii
    """
    # checks
    if np.size(positions) == positions.shape[0]:
        positions = np.reshape(positions, (positions.size, 1))
    if positions.shape[1] != domain.em_dim:
        raise ValueError('incompatible dimensions for domain and positions')
    if positions.shape[0] != np.size(radii):
        raise ValueError('incompatible positions and radii provided')
    label = - np.ones(domain.size)
    for k in range(radii.size):
        supp = np.sum((domain.coord - positions[k]) ** 2, 1) < radii[k] ** 2
        label[supp] = k
    return SubDomains(domain, label)
nipy-0.3.0/nipy/labs/spatial_models/parcel_io.py000066400000000000000000000301721210344137400217020ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Utility functions for multi-subject parcellation:
this basically uses the nipy io lib to perform IO operations
in parcel definition processes
"""

import numpy as np
import os.path

from nibabel import load, save, Nifti1Image

from nipy.algorithms.clustering.utils import kmeans

from .discrete_domain import grid_domain_from_image
from .mroi import SubDomains
from ..mask import intersect_masks


def mask_parcellation(mask_images, nb_parcel, threshold=0, output_image=None):
    """ Performs the parcellation of a certain mask

    Parameters
    ----------
    mask_images: string or Nifti1Image or list of strings/Nifti1Images,
        paths of mask image(s) that define(s) the common space.
    nb_parcel: int,
        number of desired parcels
    threshold: float, optional,
        level of intersection of the masks
    output_image: string, optional
        path of the output image

    Returns
    -------
    wim: Nifti1Image instance,
        representing the resulting parcellation
    """
    if isinstance(mask_images, basestring):
        mask = mask_images
    elif isinstance(mask_images, Nifti1Image):
        mask = mask_images
    else:
        # mask_images should be a list
        mask_data = intersect_masks(mask_images, threshold=0) > 0
        mask = Nifti1Image(mask_data.astype('u8'),
                           load(mask_images[0]).get_affine())
    domain = grid_domain_from_image(mask)
    cent, labels, J = kmeans(domain.coord, nb_parcel)
    sub_dom = SubDomains(domain, labels)
    # get id (or labels) image
    wim = sub_dom.to_image(fid='id', roi=True)
    return wim


def parcel_input(mask_images, learning_images, ths=.5, fdim=None):
    """Instantiating a Parcel structure from a given set of inputs

    Parameters
    ----------
    mask_images: string or Nifti1Image or list of strings/Nifti1Images,
        paths of mask image(s) that define(s) the common space.
    learning_images: (nb_subject-) list of (nb_feature-) list of strings,
        paths of feature images used as input to the parcellation procedure
    ths=.5: threshold to select the regions that are common across subjects.
if ths = .5, thethreshold is half the number of subjects fdim: int, optional if nb_feature (the dimension of the data) used in subsequent analyses if greater than fdim, a PCA is perfomed to reduce the information in the data Byd efault, no reduction is performed Returns ------- domain : discrete_domain.DiscreteDomain instance that stores the spatial information on the parcelled domain feature: (nb_subect-) list of arrays of shape (domain.size, fdim) feature information available to parcellate the data """ nb_subj = len(learning_images) # get a group-level mask if isinstance(mask_images, basestring): mask = mask_images elif isinstance(mask_images, Nifti1Image): mask = mask_images else: # mask_images should be a list grp_mask = intersect_masks(mask_images, threshold=ths) > 0 mask = Nifti1Image(grp_mask.astype('u8'), load(mask_images[0]).get_affine()) # build the domain domain = grid_domain_from_image(mask, nn=6) #nn = 6 for speed up and stability # load the functional data feature = [] nbeta = len(learning_images[0]) for s in range(nb_subj): if len(learning_images[s]) != nbeta: raise ValueError('Inconsistent number of dimensions') feature.append(np.array([domain.make_feature_from_image(b) for b in learning_images[s]]).T) # Possibly reduce the dimension of the functional data if (len(feature[0].shape) == 1) or (fdim is None): return domain, feature if fdim < feature[0].shape[1]: import numpy.linalg as nl subj = np.concatenate([s * np.ones(feature[s].shape[0]) \ for s in range(nb_subj)]) cfeature = np.concatenate(feature) cfeature -= np.mean(cfeature, 0) m1, m2, m3 = nl.svd(cfeature, 0) cfeature = np.dot(m1, np.diag(m2)) cfeature = cfeature[:, 0:fdim] feature = [cfeature[subj == s] for s in range(nb_subj)] return domain, feature def write_parcellation_images(Pa, template_path=None, indiv_path=None, subject_id=None, swd=None): """ Write images that describe the spatial structure of the parcellation Parameters ---------- Pa : MultiSubjectParcellation instance, the description of the parcellation template_path: string, optional, path of the group-level parcellation image indiv_path: list of strings, optional paths of the individual parcellation images subject_id: list of strings of length Pa.nb_subj subject identifiers, used to infer the paths when not available swd: string, optional output directory used to infer the paths when these are not available """ # argument check if swd == None: from tempfile import mkdtemp swd = mkdtemp() if subject_id == None: subject_id = ['subj_%04d' % s for s in range(Pa.nb_subj)] if len(subject_id) != Pa.nb_subj: raise ValueError('subject_id does not match parcellation') # If necessary, generate the paths if template_path is None: template_path = os.path.join(swd, "template_parcel.nii") if indiv_path is None: indiv_path = [os.path.join(swd, "parcel%s.nii" % subject_id[s]) for s in range(Pa.nb_subj)] # write the template image tlabs = Pa.template_labels.astype(np.int16) template = SubDomains(Pa.domain, tlabs) template_img = template.to_image( fid='id', roi=True, descrip='Intra-subject parcellation template') save(template_img, template_path) # write subject-related stuff for s in range(Pa.nb_subj): # write the individual label images labs = Pa.individual_labels[:, s] parcellation = SubDomains(Pa.domain, labs) parcellation_img = parcellation.to_image( fid='id', roi=True, descrip='Intra-subject parcellation') save(parcellation_img, indiv_path[s]) def parcellation_based_analysis(Pa, test_images, test_id='one_sample', rfx_path=None, condition_id='', swd=None): """ This 
    function computes parcel averages and RFX at the parcel-level

    Parameters
    ----------
    Pa: MultiSubjectParcellation instance
        the description of the parcellation
    test_images: (Pa.nb_subj-) list of paths
        paths of images used in the inference procedure
    test_id: string, optional,
        if test_id == 'one_sample', the one-sample statistic is computed;
        otherwise, the parcel-based signal averages are returned
    rfx_path: string, optional,
        path of the resulting one-sample test image, if applicable
    swd: string, optional
        output directory used to compute output path if rfx_path is not given
    condition_id: string, optional,
        contrast/condition id used to compute output path

    Returns
    -------
    test_data: array of shape(Pa.nb_parcel, Pa.nb_subj)
        the parcel-level signal average if test_id is not 'one_sample'
    prfx: array of shape(Pa.nb_parcel),
        the one-sample t-value if test_id is 'one_sample'
    """
    nb_subj = Pa.nb_subj

    # 1. read the test data
    if len(test_images) != nb_subj:
        raise ValueError('Inconsistent number of test images')
    test = np.array([Pa.domain.make_feature_from_image(ti)
                     for ti in test_images]).T
    test_data = Pa.make_feature('', np.array(test))
    if test_id != 'one_sample':
        return test_data

    # 2. perform one-sample test
    # computation
    from ..utils.reproducibility_measures import ttest
    prfx = ttest(test_data)

    # Write the stuff
    template = SubDomains(Pa.domain, Pa.template_labels)
    template.set_roi_feature('prfx', prfx)
    wim = template.to_image('prfx', roi=True)
    hdr = wim.get_header()
    hdr['descrip'] = 'parcel-based random effects image (in t-variate)'
    if rfx_path is not None:
        save(wim, rfx_path)
    return prfx


def fixed_parcellation(mask_image, betas, nbparcel, nn=6, method='ward',
                       write_dir=None, mu=10., verbose=0, fullpath=None):
    """ Fixed parcellation of a given dataset

    Parameters
    ----------
    domain/mask_image
    betas: list of paths to activation images from the subject
    nbparcel, int : number of desired parcels
    nn=6: number of nearest neighbors to define the image topology
          (6, 18 or 26)
    method='ward': clustering method used, to be chosen among
                   'ward', 'gkm', 'ward_and_gkm'
                   'ward': Ward's clustering algorithm
                   'gkm': Geodesic k-means algorithm, random initialization
                   'ward_and_gkm': idem, initialized by Ward's clustering
    write_dir: string, optional,
        write directory. If fullpath is None too, then no file output.
    mu = 10., float: the relative weight of anatomical information
    verbose=0: verbosity mode
    fullpath=None, string,
        path of the output image
        If write_dir and fullpath are None then no file output.
        If only fullpath is None then it is the write dir + a name
        depending on the method.
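    Examples
    --------
    Illustrative sketch only (not a doctest): the contrast image path below
    is hypothetical and must point to an existing file on disk:

    >>> import numpy as np
    >>> from nibabel import Nifti1Image
    >>> mask_image = Nifti1Image(np.ones((10, 10, 10)), np.eye(4))
    >>> lpa = fixed_parcellation(mask_image, ['contrast.nii'], nbparcel=10,
    ...                          nn=6, method='ward')  # doctest: +SKIP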
Notes ----- Ward's method takes time (about 6 minutes for a 60K voxels dataset) Geodesic k-means is 'quick and dirty' Ward's + GKM is expensive but quite good To reduce CPU time, rather use nn=6 (especially with Ward) """ from nipy.algorithms.graph.field import field_from_coo_matrix_and_data if method not in ['ward', 'gkm', 'ward_and_gkm', 'kmeans']: raise ValueError('unknown method') if nn not in [6, 18, 26]: raise ValueError('nn should be 6,18 or 26') # step 1: load the data ---------------------------- # 1.1 the domain domain = grid_domain_from_image(mask_image, nn) if method is not 'kmeans': # 1.2 get the main cc of the graph # to remove the small connected components pass coord = domain.coord # 1.3 read the functional data beta = np.array([domain.make_feature_from_image(b) for b in betas]) if len(beta.shape) > 2: beta = np.squeeze(beta) if beta.shape[0] != domain.size: beta = beta.T feature = np.hstack((beta, mu * coord / np.std(coord))) #step 2: parcellate the data --------------------------- if method is not 'kmeans': g = field_from_coo_matrix_and_data(domain.topology, feature) if method == 'kmeans': _, u, _ = kmeans(feature, nbparcel) if method == 'ward': u, _ = g.ward(nbparcel) if method == 'gkm': seeds = np.argsort(np.random.rand(g.V))[:nbparcel] _, u, _ = g.geodesic_kmeans(seeds) if method == 'ward_and_gkm': w, _ = g.ward(nbparcel) _, u, _ = g.geodesic_kmeans(label=w) lpa = SubDomains(domain, u) if verbose: var_beta = np.array( [np.var(beta[lpa.label == k], 0).sum() for k in range(lpa.k)]) var_coord = np.array( [np.var(coord[lpa.label == k], 0).sum() for k in range(lpa.k)]) size = lpa.get_size() vf = np.dot(var_beta, size) / size.sum() va = np.dot(var_coord, size) / size.sum() print nbparcel, "functional variance", vf, "anatomical variance", va # step3: write the resulting label image if fullpath is not None: label_image = fullpath elif write_dir is not None: label_image = os.path.join(write_dir, "parcel_%s.nii" % method) else: label_image = None if label_image is not None: lpa_img = lpa.to_image( fid='id', roi=True, descrip='Intra-subject parcellation image') save(lpa_img, label_image) if verbose: print "Wrote the parcellation images as %s" % label_image return lpa nipy-0.3.0/nipy/labs/spatial_models/parcellation.py000066400000000000000000000152401210344137400224210ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: #autoindent """ Generic Parcellation class: Contains all the items that define a multi-subject parcellation Author : Bertrand Thirion, 2005-2008 TODO : add a method 'global field', i.e. 
non-subject-specific info """ import numpy as np ################################################################### # Parcellation class ################################################################### class MultiSubjectParcellation(object): """ MultiSubjectParcellation class are used to represent parcels that can have different spatial different contours in a given group of subject It consists of self.domain: the specification of a domain self.template_labels the specification of a template parcellation self.individual_labels the specification of individual parcellations fixme:should inherit from mroi.MultiROI """ def __init__(self, domain, template_labels=None, individual_labels=None, nb_parcel=None): """ Initialize multi-subject parcellation Parameters ---------- domain: discrete_domain.DiscreteDomain instance, definition of the space considered in the parcellation template_labels: array of shape domain.size, optional definition of the template labelling individual_labels: array of shape (domain.size, nb_subjects), optional, the individual parcellations corresponding to the template nb_parcel: int, optional, number of parcels in the model can be inferred as template_labels.max()+1, or 1 by default cannot be smaller than template_labels.max()+1 """ self.domain = domain self.template_labels = template_labels self.individual_labels = individual_labels self.nb_parcel = 1 if template_labels is not None: self.nb_parcel = template_labels.max() + 1 if nb_parcel is not None: self.nb_parcel = nb_parcel self.check() self.nb_subj = 0 if individual_labels is not None: if individual_labels.shape[0] == individual_labels.size: self.individual_labels = individual_labels[:, np.newaxis] self.nb_subj = self.individual_labels.shape[1] self.features = {} def copy(self): """ Returns a copy of self """ msp = MultiSubjectParcellation(self.domain.copy(), self.template_labels.copy(), self.individual_labels.copy(), self.nb_parcel) for fid in self.features.keys(): msp.set_feature(fid, self.get_feature(fid).copy()) return msp def check(self): """ Performs an elementary check on self """ size = self.domain.size if self.template_labels is not None: nvox = np.size(self.template_labels) if size != nvox: raise ValueError("template labels not consistent with domain") if self.individual_labels is not None: n2 = self.individual_labels.shape[0] if size != n2: raise ValueError( "Individual labels not consistent with domain") if self.nb_parcel < self.template_labels.max() + 1: raise ValueError("too many labels in template") if self.nb_parcel < self.individual_labels.max() + 1: raise ValueError("Too many labels in individual models") def set_template_labels(self, template_labels): """ """ self.template_labels = template_labels self.check() def set_individual_labels(self, individual_labels): """ """ self.individual_labels = individual_labels self.check() self.nb_subj = self.individual_labels.shape[1] def population(self): """ Returns the counting of labels per voxel per subject Returns ------- population: array of shape (self.nb_parcel, self.nb_subj) """ population = np.zeros((self.nb_parcel, self.nb_subj)).astype(np.int) for ns in range(self.nb_subj): for k in range(self.nb_parcel): population[k, ns] = np.sum(self.individual_labels[:, ns] == k) return population def make_feature(self, fid, data): """ Compute parcel-level averages of data Parameters ---------- fid: string, the feature identifier data: array of shape (self.domain.size, self.nb_subj, dim) or (self.domain.sire, self.nb_subj) Some information at the voxel level 
Returns ------- pfeature: array of shape(self.nb_parcel, self.nbsubj, dim) the computed feature data """ if len(data.shape) < 2: raise ValueError("Data array should at least have dimension 2") if len(data.shape) > 3: raise ValueError("Data array should have <4 dimensions") if ((data.shape[0] != self.domain.size) or (data.shape[1] != self.nb_subj)): raise ValueError('incorrect feature size') if len(data.shape) == 3: dim = data.shape[2] pfeature = np.zeros((self.nb_parcel, self.nb_subj, dim)) else: pfeature = np.zeros((self.nb_parcel, self.nb_subj)) for k in range(self.nb_parcel): for s in range(self.nb_subj): dsk = data[self.individual_labels[:, s] == k, s] pfeature[k, s] = np.mean(dsk, 0) self.set_feature(fid, pfeature) return pfeature def set_feature(self, fid, data): """ Set feature defined by `fid` and `data` into ``self`` Parameters ---------- fid: string the feature identifier data: array of shape (self.nb_parcel, self.nb_subj, dim) or (self.nb_parcel, self.nb_subj) the data to be set as parcel- and subject-level information """ if len(data.shape) < 2: raise ValueError("Data array should at least have dimension 2") if (data.shape[0] != self.nb_parcel) or \ (data.shape[1] != self.nb_subj): raise ValueError('incorrect feature size') else: self.features.update({fid: data}) def get_feature(self, fid): """ Get feature defined by `fid` Parameters ---------- fid: string, the feature identifier """ return self.features[fid] nipy-0.3.0/nipy/labs/spatial_models/setup.py000066400000000000000000000004741210344137400211070ustar00rootroot00000000000000 def configuration(parent_package='', top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('spatial_models', parent_package, top_path) config.add_subpackage('tests') return config if __name__ == '__main__': print('This is the wrong setup.py file to run') nipy-0.3.0/nipy/labs/spatial_models/structural_bfls.py000066400000000000000000000346031210344137400231660ustar00rootroot00000000000000# vi: set ft=python sts=4 ts=4 sw=4 et: # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- """ The main routine of this module implement the LandmarkRegions class, that is used to represent Regions of interest at the population level (in a template space). This has been used in Thirion et al. Structural Analysis of fMRI Data Revisited: Improving the Sensitivity and Reliability of fMRI Group Studies. IEEE TMI 2007 Author : Bertrand Thirion, 2006-2010 """ #autoindent import numpy as np from scipy import stats class LandmarkRegions(object): """ This class is intended to represent a set of inter-subject regions It should inherit from some abstract multiple ROI class, not implemented yet. """ def __init__(self, domain, k, indiv_coord, subj, id=''): """ Building the landmark_region Parameters ---------- domain: ROI instance defines the spatial context of the SubDomains k: int, the number of regions considered indiv_coord: k-length list of arrays, optional, coordinates of the nodes in some embedding space. 
subj: k-length list of integers these correspond to and ROI feature: the subject index of individual regions id: string, optional, identifier """ self.domain = domain self.k = int(k) self.id = id self.features = {} self.set_feature('position', indiv_coord) self.set_feature('subjects', subj) def set_feature(self, fid, data): """ """ if len(data) != self.k: raise ValueError('data should have length k') self.features.update({fid: data}) def get_feature(self, fid): return self.features[fid] def centers(self): """returns the average of the coordinates for each region """ pos = self.get_feature('position') centers = np.array([np.mean(pos[k], 0) for k in range(self.k)]) return centers def homogeneity(self): """ returns the mean distance between points within each LR """ from ...algorithms.utils.fast_distance import euclidean_distance coord = self.get_feature('position') h = np.zeros(self.k) for k in range(self.k): pk = coord[k] sk = pk.shape[0] if sk < 2: h[k] = 0 else: edk = euclidean_distance(pk) h[k] = edk.sum() / (sk * (sk - 1)) return h def density(self, k, coord=None, dmax=1., dof=10): """Posterior density of component k Parameters ---------- k: int, less or equal to self.k reference component coord: array of shape(n, self.dom.em_dim), optional a set of input coordinates dmax: float, optional regularizaing constant for the variance estimation dof: float, optional, strength of the regularization Returns ------- pd: array of shape(n) the posterior density that has been computed delta: array of shape(n) the quadratic term in the gaussian model Fixme ----- instead of dof/dmax, use Raftery's regularization """ from scipy.linalg import svd if k > self.k: raise ValueError('wrong region index') pos = self.get_feature('position')[k] center = pos.mean(0) dim = self.domain.em_dim if coord == None: coord = self.domain.coord if coord.shape[1] != dim: raise ValueError("incompatible dimensions") n_points = pos.shape[0] dx = pos - center covariance = np.dot(dx.T, dx) / n_points U, S, V = svd(covariance, 0) S = (n_points * S + dmax ** 2 * np.ones(dim) * dof) / (n_points + dof) sqrts = 1. 
/ np.sqrt(S) dx = coord - center dx = np.dot(dx, U) dx = np.dot(dx, np.diag(sqrts)) delta = np.sum(dx ** 2, 1) lcst = - np.log(2 * np.pi) * dim / 2 + (np.log(sqrts)).sum() pd = np.exp(lcst - delta / 2) return pd, delta def hpd(self, k, coord=None, pval=0.95, dmax=1.0): """Sample the posterior probability of being in k on a grid defined by cs, assuming that the roi is an ellipsoid Parameters ---------- k: int, less or equal to self.k reference component coord: array of shape(n,dim), optional a set of input coordinates pval: float<1, optional, cutoff for the CR dmax=1.0: an upper bound for the spatial variance to avoid degenerate variance Returns ------- hpd array of shape(n) that yields the value """ hpd, delta = self.density(k, coord, dmax) import scipy.special as sp gamma = 2 * sp.erfinv(pval) ** 2 # #--- all the following is to solve the equation #--- erf(x/sqrt(2))-x*exp(-x**2/2)/sqrt(pi/2) = alpha #--- should better be put elsewhere def dicho_solve_lfunc(alpha, eps=1.e-7): if alpha > 1: raise ValueError("no solution for alpha>1") if alpha > 1 - 1.e-15: return np.inf if alpha < 0: raise ValueError("no solution for alpha<0") if alpha < 1.e-15: return 0 xmin = sp.erfinv(alpha) * np.sqrt(2) xmax = 2 * xmin while lfunc(xmax) < alpha: xmax *= 2 xmin *= 2 return (dichomain_lfunc(xmin, xmax, eps, alpha)) def dichomain_lfunc(xmin, xmax, eps, alpha): x = (xmin + xmax) / 2 if xmax < xmin + eps: return x else: if lfunc(x) > alpha: return dichomain_lfunc(xmin, x, eps, alpha) else: return dichomain_lfunc(x, xmax, eps, alpha) def lfunc(x): return sp.erf(x / np.sqrt(2)) - x * np.exp(-x ** 2 / 2) / \ np.sqrt(np.pi / 2) gamma = dicho_solve_lfunc(pval) ** 2 hpd[delta > gamma] = 0 return hpd def map_label(self, coord=None, pval=0.95, dmax=1.): """Sample the set of landmark regions on the proposed coordiante set cs, assuming a Gaussian shape Parameters ---------- coord: array of shape(n,dim), optional, a set of input coordinates pval: float in [0,1]), optional cutoff for the CR, i.e. highest posterior density threshold dmax: an upper bound for the spatial variance to avoid degenerate variance Returns ------- label: array of shape (n): the posterior labelling """ if coord == None: coord = self.domain.coord label = - np.ones(coord.shape[0]) if self.k > 0: aux = - np.ones((coord.shape[0], self.k)) for k in range(self.k): aux[:, k] = self.hpd(k, coord, pval, dmax) maux = np.max(aux, 1) label[maux > 0] = np.argmax(aux, 1)[maux > 0] return label def show(self): """function to print basic information on self """ centers = self.centers() subj = self.get_feature('subjects') prevalence = self.roi_prevalence() print "index", "prevalence", "mean_position", "individuals" for i in range(self.k): print i, prevalence[i], centers[i], np.unique(subj[i]) def roi_confidence(self, ths=0, fid='confidence'): """ assuming that a certain feature fid field has been set as a discrete feature, this creates an approximate p-value that states how confident one might that the LR is defined in at least ths individuals if conficence is not defined as a discrete_feature, it is assumed to be 1. 
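        In short, for each region the per-subject detection probabilities
        are summed into an expected subject count `mp` and its variance
        `vp`, and the reported value is the normal survival function
        ``stats.norm.sf(ths, mp, np.sqrt(vp))``. A hedged numerical
        illustration of that last step only, with made-up values:

        >>> import numpy as np
        >>> from scipy import stats
        >>> mp, vp, ths = 4.2, 0.8, 3    # hypothetical count, variance, cutoff
        >>> pval = stats.norm.sf(ths, mp, np.sqrt(vp))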
Parameters ---------- ths: integer that yields the representativity threshold Returns ------- pvals: array of shape self.k the p-values corresponding to the ROIs """ pvals = np.zeros(self.k) subj = self.get_feature('subjects') if fid not in self.features: # the feature has not been defined print 'using per ROI subject counts' for j in range(self.k): pvals[j] = np.size(np.unique(subj[j])) pvals = pvals > ths + 0.5 * (pvals == ths) else: for j in range(self.k): subjj = subj[j] conf = self.get_feature(fid)[j] mp = 0. vp = 0. for ls in np.unique(subjj): lmj = 1 - np.prod(1 - conf[subjj == ls]) lvj = lmj * (1 - lmj) mp = mp + lmj vp = vp + lvj # If noise is too low the variance is 0: ill-defined: vp = max(vp, 1e-14) pvals[j] = stats.norm.sf(ths, mp, np.sqrt(vp)) return pvals def roi_prevalence(self, fid='confidence'): """ assuming that fid='confidence' field has been set as a discrete feature, this creates the expectancy of the confidence measure i.e. expected numberof detection of the roi in the observed group Returns ------- confid: array of shape self.k the population_prevalence """ confid = np.zeros(self.k) subj = self.get_feature('subjects') if fid not in self.features: for j in range(self.k): subjj = subj[j] confid[j] = np.size(np.unique(subjj)) else: for j in range(self.k): subjj = subj[j] conf = self.get_feature(fid)[j] for ls in np.unique(subjj): lmj = 1 - np.prod(1 - conf[subjj == ls]) confid[j] += lmj return confid def weighted_feature_density(self, feature): """ Given a set of feature values, produce a weighted feature map, where roi-levle features are mapped smoothly based on the density of the components Parameters ---------- feature: array of shape (self.k), the information to map Returns ------- wsm: array of shape(self.shape) """ if np.size(feature) != self.k: raise ValueError('Incompatible feature dimension') cs = self.domain.coord aux = np.zeros((cs.shape[0], self.k)) for k in range(self.k): aux[:, k], _ = self.density(k, cs) wsum = np.dot(aux, feature) return wsum def prevalence_density(self): """Returns a weighted map of self.prevalence Returns ------- wp: array of shape(n_samples) """ return self.weighted_feature_density(self.roi_prevalence()) def build_LR(bf, thq=0.95, ths=0, dmax=1., verbose=0): """ Given a list of hierarchical ROIs, and an associated labelling, this creates an Amer structure wuch groups ROIs with the same label. Parameters ---------- bf : list of nipy.labs.spatial_models.hroi.Nroi instances it is assumd that each list corresponds to one subject each HierarchicalROI is assumed to have the roi_features 'position', 'label' and 'posterior_proba' defined thq=0.95, ths=0 defines the condition (c): (c) A label should be present in ths subjects with a probability>thq in order to be valid dmax: float optional, regularizing constant that defines a prior on the region extent Returns ------- LR : None or structural_bfls.LR instance describing a cross-subject set of ROIs. 
If inference yields a null result, LR is set to None newlabel: a relabelling of the individual ROIs, similar to u, which discards labels that do not fulfill the condition (c) """ dim = bf[0].domain.em_dim # prepare various variables to ease information manipulation nbsubj = np.size(bf) subj = np.concatenate([s * np.ones(bf[s].k, np.int) for s in range(nbsubj)]) u = np.concatenate([bf[s].get_roi_feature('label') for s in range(nbsubj)if bf[s].k > 0]) u = np.squeeze(u) if 'prior_proba' in bf[0].roi_features: conf = np.concatenate([bf[s].get_roi_feature('prior_proba') for s in range(nbsubj)if bf[s].k > 0]) else: conf = np.ones(u.size) intrasubj = np.concatenate([np.arange(bf[s].k) for s in range(nbsubj)]) coords = [] subjs = [] pps = [] n_labels = int(u.max() + 1) valid = np.zeros(n_labels).astype(np.int) # do some computation to find which regions are worth reporting for i in np.unique(u[u > - 1]): mp = 0. vp = 0. subjj = subj[u == i] for ls in np.unique(subjj): lmj = 1 - np.prod(1 - conf[(u == i) * (subj == ls)]) lvj = lmj * (1 - lmj) mp = mp + lmj vp = vp + lvj # If noise is too low the variance is 0: ill-defined: vp = max(vp, 1e-14) # if above threshold, get some information to create the LR if verbose: print 'lr', i, valid.sum(), ths, mp, thq if stats.norm.sf(ths, mp, np.sqrt(vp)) > thq: sj = np.size(subjj) coord = np.zeros((sj, dim)) for (k, s, a) in zip(intrasubj[u == i], subj[u == i], range(sj)): coord[a] = bf[s].get_roi_feature('position')[k] valid[i] = 1 coords.append(coord) subjs.append(subjj) pps.append(conf[u == i]) # relabel the ROIs maplabel = - np.ones(n_labels).astype(np.int) maplabel[valid > 0] = np.cumsum(valid[valid > 0]) - 1 for s in range(nbsubj): if bf[s].k > 0: us = bf[s].get_roi_feature('label') us[us > - 1] = maplabel[us[us > - 1]] bf[s].set_roi_feature('label', us) # create the landmark regions structure k = np.sum(valid) LR = LandmarkRegions(bf[0].domain, k, indiv_coord=coords, subj=subjs) LR.set_feature('confidence', pps) return LR, maplabel nipy-0.3.0/nipy/labs/spatial_models/tests/000077500000000000000000000000001210344137400205325ustar00rootroot00000000000000nipy-0.3.0/nipy/labs/spatial_models/tests/__init__.py000066400000000000000000000000501210344137400226360ustar00rootroot00000000000000# Init to make test directory a package nipy-0.3.0/nipy/labs/spatial_models/tests/test_bsa.py000066400000000000000000000060631210344137400227150ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Tests for bayesian_structural_analysis Author : Bertrand Thirion, 2009 """ #autoindent import numpy as np import scipy.stats as st from nose.tools import assert_true from nipy.testing import dec from ...utils.simul_multisubject_fmri_dataset import surrogate_2d_dataset from ..bayesian_structural_analysis import compute_BSA_simple from ..discrete_domain import domain_from_binary_array def make_bsa_2d(betas, theta=3., dmax=5., ths=0, thq=0.5, smin=0, nbeta=[0], method='simple'): """ Function for performing bayesian structural analysis on a set of images. 
Fixme: 'quick' is not tested """ ref_dim = np.shape(betas[0]) n_subj = betas.shape[0] xyz = np.array(np.where(betas[:1])).T nvox = np.size(xyz, 0) # get the functional information lbeta = np.array([np.ravel(betas[k]) for k in range(n_subj)]).T # the voxel volume is 1.0 g0 = 1.0 / (1.0 * nvox) bdensity = 1 dom = domain_from_binary_array(np.ones(ref_dim)) if method == 'simple': group_map, AF, BF, likelihood = \ compute_BSA_simple(dom, lbeta, dmax, thq, smin, ths, theta, g0, bdensity) return AF, BF @dec.slow def test_bsa_methods(): # generate the data n_subj = 5 shape = (40, 40) pos = np.array([[12, 14], [20, 20], [30, 35]]) # make a dataset with a nothing feature null_ampli = np.array([0, 0, 0]) null_betas = surrogate_2d_dataset(n_subj=n_subj, shape=shape, pos=pos, ampli=null_ampli, width=5.0, seed=1) #null_betas = np.reshape(null_dataset, (n_subj, shape[0], shape[1])) # make a dataset with a something feature pos_ampli = np.array([5, 7, 6]) pos_betas = surrogate_2d_dataset(n_subj=n_subj, shape=shape, pos=pos, ampli=pos_ampli, width=5.0, seed=2) #pos_betas = np.reshape(pos_dataset, (n_subj, shape[0], shape[1])) # set various parameters theta = float(st.t.isf(0.01, 100)) dmax = 5. / 1.5 half_subjs = n_subj / 2 thq = 0.9 smin = 5 # tuple of tuples with each tuple being # (name_of_method, ths_value, data_set, test_function) algs_tests = ( ('simple', half_subjs, null_betas, lambda AF, BF: AF.k == 0), ('simple', 1, pos_betas, lambda AF, BF: AF.k > 1)) for name, ths, betas, test_func in algs_tests: # run the algo AF, BF = make_bsa_2d(betas, theta, dmax, ths, thq, smin, method=name) yield assert_true, test_func(AF, BF) if __name__ == '__main__': import nose nose.run(argv=['', __file__]) nipy-0.3.0/nipy/labs/spatial_models/tests/test_discrete_domain.py000066400000000000000000000173021210344137400252770ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Test the discrete_domain utilities. 
Caveat assumes that the MNI template image is available at in ~/.nipy/tests/data """ import numpy as np from numpy.testing import assert_almost_equal, assert_equal from ..discrete_domain import smatrix_from_nd_idx, smatrix_from_3d_array, \ smatrix_from_nd_array, domain_from_binary_array, domain_from_image, \ domain_from_mesh, grid_domain_from_binary_array, grid_domain_from_image, \ grid_domain_from_shape from nibabel import Nifti1Image import nibabel.gifti as nbg from nipy.testing.decorators import skipif GOOD_GIFTI = hasattr(nbg, 'GiftiDataArray') shape = np.array([5, 6, 7, 8, 9]) def generate_dataset(shape): """Generate a dataset with the described shape """ dim = len(shape) idx = np.reshape(np.indices(shape), (dim, -1)).T return idx def test_smatrix_1d(): """Test the 1-d topological domain """ idx = generate_dataset(shape[:1]) sm = smatrix_from_nd_idx(idx, nn=0) assert_equal(sm.data.size, 2 * shape[0] - 2) def test_smatrix_2d(): """Test the 2-d topological domain """ idx = generate_dataset(shape[:2]) sm = smatrix_from_nd_idx(idx, nn=0) ne = 2 * (2 * np.prod(shape[:2]) - shape[0] - shape[1]) assert_equal(sm.data.size, ne) def test_smatrix_3d(): """Test the 3-d topological domain """ idx = generate_dataset(shape[:3]) sm = smatrix_from_nd_idx(idx) ne = 2 * (3 * np.prod(shape[:3]) - shape[0] * shape[1] - shape[0] * shape[2] - shape[1] * shape[2]) assert_equal(sm.data.size, ne) def test_smatrix_4d(): """Test the 4-d topological domain """ idx = generate_dataset(shape[:4]) sm = smatrix_from_nd_idx(idx) ne = 4 * np.prod(shape[:4]) for d in range(4): ne -= np.prod(shape[:4]) / shape[d] ne *= 2 assert_equal(sm.data.size, ne) def test_smatrix_5d(): """Test the 5-d topological domain """ idx = generate_dataset(shape) sm = smatrix_from_nd_idx(idx) ne = 5 * np.prod(shape) for d in range(5): ne -= np.prod(shape) / shape[d] ne *= 2 assert_equal(sm.data.size, ne) def test_smatrix_5d_bis(): """Test the 5-d topological domain """ toto = np.ones(shape) sm = smatrix_from_nd_array(toto) ne = 5 * np.prod(shape) for d in range(5): ne -= np.prod(shape) / shape[d] ne *= 2 assert_equal(sm.data.size, ne) def test_matrix_from_3d_array(): """Test the topology using the nipy.graph approach """ toto = np.ones(shape[:3]) sm = smatrix_from_3d_array(toto, 6) ne = 3 * np.prod(shape[:3]) for d in range(3): ne -= np.prod(shape[:3]) / shape[d] ne *= 2 print sm.data, ne assert_equal((sm.data > 0).sum(), ne) def test_array_domain(): """Test the construction of domain based on array """ toto = np.ones(shape) ddom = domain_from_binary_array(toto) assert_equal(np.sum(ddom.local_volume), np.prod(shape)) def test_connected_components(): """Test the estimation of connected components """ toto = np.ones(shape) ddom = domain_from_binary_array(toto) assert_equal(ddom.connected_components(), np.zeros(ddom.size)) def test_image_domain(): """Test the construction of domain based on image """ toto = np.ones(shape[:3]) affine = np.random.randn(4, 4) affine[3:, 0:3] = 0 nim = Nifti1Image(toto, affine) ddom = domain_from_image(nim) ref = np.sum(toto) * np.absolute(np.linalg.det(affine)) assert_almost_equal(np.sum(ddom.local_volume), ref) def test_image_feature(): """Test the construction of domain based on image and related feature """ mask = np.random.randn(*shape[:3]) > .5 noise = np.random.randn(*shape[:3]) affine = np.eye(4) mim = Nifti1Image(mask.astype('u8'), affine) nim = Nifti1Image(noise, affine) ddom = grid_domain_from_image(mim) ddom.make_feature_from_image(nim, 'noise') assert_almost_equal(ddom.features['noise'], noise[mask]) def 
test_array_grid_domain(): """Test the construction of grid domain based on array """ toto = np.ones(shape) ddom = grid_domain_from_binary_array(toto) assert_equal(np.sum(ddom.local_volume), np.prod(shape)) def test_image_grid_domain(): """Test the construction of grid domain based on image """ toto = np.ones(shape[:3]) affine = np.random.randn(4, 4) affine[3:, 0:3] = 0 nim = Nifti1Image(toto, affine) ddom = grid_domain_from_image(nim) ref = np.sum(toto) * np.absolute(np.linalg.det(affine[:3, 0:3])) assert_almost_equal(np.sum(ddom.local_volume), ref) def test_shape_grid_domain(): """ """ ddom = grid_domain_from_shape(shape) assert_equal(np.sum(ddom.local_volume), np.prod(shape)) def test_feature(): """ test feature inclusion """ toto = np.random.rand(*shape) ddom = domain_from_binary_array(toto) ddom.set_feature('data', np.ravel(toto)) plop = ddom.get_feature('data') assert_almost_equal(plop, np.ravel(toto)) def test_mask_feature(): """ test_feature_masking """ toto = np.random.rand(*shape) ddom = domain_from_binary_array(toto) ddom.set_feature('data', np.ravel(toto)) mdom = ddom.mask(np.ravel(toto > .5)) plop = mdom.get_feature('data') assert_almost_equal(plop, toto[toto > .5]) def test_domain_mask(): """test domain masking """ toto = np.random.rand(*shape) ddom = domain_from_binary_array(toto) mdom = ddom.mask(np.ravel(toto > .5)) assert_equal(mdom.size, np.sum(toto > .5)) def test_grid_domain_mask(): """test grid domain masking """ toto = np.random.rand(*shape) ddom = grid_domain_from_binary_array(toto) mdom = ddom.mask(np.ravel(toto > .5)) assert_equal(mdom.size, np.sum(toto > .5)) @skipif(not GOOD_GIFTI) def test_domain_from_mesh(): """Test domain_from_mesh method """ coords = np.array([[0., 0., 0.], [0., 0., 1.], [0., 1., 0.], [1., 0., 0.]]) triangles = np.asarray([[0, 1, 2], [0, 1, 3], [0, 2, 3], [1, 2, 3]]) darrays = [nbg.GiftiDataArray(coords)] + [nbg.GiftiDataArray(triangles)] toy_image = nbg.GiftiImage(darrays=darrays) domain = domain_from_mesh(toy_image) # if we get there, we could build the domain, and that's what we wanted. assert_equal(domain.get_coord(), coords) def test_representative(): """ test representative computation """ toto = np.random.rand(*shape) ddom = domain_from_binary_array(toto) ddom.set_feature('data', np.ravel(toto)) dmean = toto.mean() dmin = toto.min() dmax = toto.max() dmed = np.median(toto) assert_almost_equal(ddom.representative_feature('data', 'mean'), dmean) assert_almost_equal(ddom.representative_feature('data', 'min'), dmin) assert_almost_equal(ddom.representative_feature('data', 'max'), dmax) assert_almost_equal(ddom.representative_feature('data', 'median'), dmed) def test_integrate_1d(): """ test integration in 1d """ toto = np.random.rand(*shape) ddom = domain_from_binary_array(toto) ddom.set_feature('data', np.ravel(toto)) assert_almost_equal(ddom.integrate('data'), toto.sum()) def test_integrate_2d(): """test integration in 2d """ toto = np.random.rand(*shape) ddom = domain_from_binary_array(toto) ftoto = np.ravel(toto) f2 = np.vstack((ftoto, ftoto)).T ddom.set_feature('data', f2) ts = np.ones(2) * toto.sum() assert_almost_equal(ddom.integrate('data'), ts) if __name__ == "__main__": import nose nose.run(argv=['', __file__]) nipy-0.3.0/nipy/labs/spatial_models/tests/test_hroi.py000066400000000000000000000140561210344137400231120ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Test the discrete_domain utilities. 
Caveat assumes that the MNI template image is available at in ~/.nipy/tests/data In those tests, we often access some ROI directly by a fixed index instead of using the utility functions such as get_id() or select_id(). """ import numpy as np from numpy.testing import assert_equal from ..hroi import HROI_as_discrete_domain_blobs, make_hroi_from_subdomain from ..mroi import subdomain_from_array from ..discrete_domain import domain_from_binary_array shape = (5, 6, 7) def make_domain(): """Create a multiple ROI instance """ labels = np.ones(shape) dom = domain_from_binary_array(labels, affine=None) return dom ####################################################################### # Test on hierarchical ROI ####################################################################### def make_hroi(empty=False): """Create a multiple ROI instance """ labels = np.zeros(shape) if not empty: labels[4:, 5:, 6:] = 1 labels[:2, 0:2, 0:2] = 2 labels[:2, 5:, 6:] = 3 labels[:2, 0:2, 6:] = 4 labels[4:, 0:2, 6:] = 5 labels[4:, 0:2, 0:2] = 6 labels[4:, 5:, 0:2] = 7 labels[:2, 5:, 0:2] = 8 parents = np.zeros(9) else: labels = -np.ones(shape) parents = np.array([]) sd = subdomain_from_array(labels, affine=None, nn=0) hroi = make_hroi_from_subdomain(sd, parents) return hroi def test_hroi(): """ """ hroi = make_hroi() assert_equal(hroi.k, 9) def test_hroi_isleaf(): """ Test basic construction of a tree of isolated leaves """ hroi = make_hroi() hroi.select_roi([0] + range(2, 9)) assert_equal(hroi.k, 8) def test_hroi_isleaf_2(): """Test tree pruning, with parent remapping """ hroi = make_hroi() #import pdb; pdb.set_trace() hroi.select_roi(range(1, 9)) assert_equal(hroi.parents, np.arange(8).astype(np.int)) def test_asc_merge(): """ Test ascending merge """ hroi = make_hroi() s1 = hroi.get_size(0) + hroi.get_size(1) total_size = np.sum([hroi.get_size(id) for id in hroi.get_id()]) assert_equal(hroi.get_size(0, ignore_children=False), total_size) hroi.merge_ascending([1]) assert_equal(hroi.get_size(0), s1) def test_asc_merge_2(): """ Test ascending merge Test that ROI being their own parent are inchanged. 
""" hroi = make_hroi() s1 = hroi.get_size(0) hroi.merge_ascending([0]) assert_equal(hroi.k, 9) assert_equal(hroi.get_size(0), s1) def test_asc_merge_3(): """Test ascending merge """ hroi = make_hroi() hroi.set_roi_feature('labels', np.arange(9)) hroi.set_roi_feature('labels2', np.arange(9)) hroi.merge_ascending([1], pull_features=['labels2']) assert_equal(hroi.get_roi_feature('labels', 0), 0) assert_equal(hroi.get_roi_feature('labels2', 0), 1) def test_asc_merge_4(): """Test ascending merge """ hroi = make_hroi() hroi.set_roi_feature('labels', range(9)) hroi.set_roi_feature('labels2', range(9)) parents = np.arange(9) - 1 parents[0] = 0 hroi.parents = parents labels3 = [hroi.label[hroi.label == k] for k in range(hroi.k)] hroi.set_feature('labels3', labels3) hroi.merge_ascending([1], pull_features=['labels2']) assert_equal(hroi.k, 8) assert_equal(hroi.get_roi_feature('labels', 0), 0) assert_equal(hroi.get_roi_feature('labels2', 0), 1) assert_equal(len(hroi.get_feature('labels3')), hroi.k) assert_equal(hroi.get_roi_feature('labels2').size, hroi.k) def test_desc_merge(): """ Test descending merge """ hroi = make_hroi() parents = np.arange(hroi.k) parents[1] = 0 hroi.parents = parents s1 = hroi.get_size(0) + hroi.get_size(1) hroi.merge_descending() assert_equal(hroi.get_size()[0], s1) def test_desc_merge_2(): """ Test descending merge """ hroi = make_hroi() parents = np.arange(-1, hroi.k - 1) parents[0] = 0 hroi.parents = parents hroi.set_roi_feature('labels', np.arange(hroi.k)) labels2 = [hroi.label[hroi.label == k] for k in range(hroi.k)] hroi.set_feature('labels2', labels2) hroi.merge_descending() assert_equal(hroi.k, 1) assert_equal(len(hroi.get_feature('labels2')), hroi.k) assert_equal(hroi.get_roi_feature('labels').size, hroi.k) def test_desc_merge_3(): """ Test descending merge """ hroi = make_hroi() parents = np.minimum(np.arange(1, hroi.k + 1), hroi.k - 1) hroi.parents = parents hroi.merge_descending() assert_equal(hroi.k, 1) def test_leaves(): """ Test leaves """ hroi = make_hroi() size = hroi.get_size()[1:].copy() lroi = hroi.copy() lroi.reduce_to_leaves() assert_equal(lroi.k, 8) assert_equal(lroi.get_size(), size) assert_equal(lroi.get_leaves_id(), np.arange(1, 9)) def test_leaves_empty(): """Test the reduce_to_leaves method on an HROI containing no node """ hroi = make_hroi(empty=True) lroi = hroi.reduce_to_leaves() assert_equal(lroi.k, 0) def test_hroi_from_domain(): dom = make_domain() data = np.random.rand(*shape) data[:2, 0:2, 0:2] = 2 rdata = np.reshape(data, (data.size, 1)) hroi = HROI_as_discrete_domain_blobs(dom, rdata, threshold=1., smin=0) assert_equal(hroi.k, 1) def test_sd_representative(): """Test the computation of representative features """ hroi = make_hroi() hroi.parents = np.arange(9) hroi.parents[2] = 1 data = [[k] * hroi.get_size(k) for k in hroi.get_id()] hroi.set_feature('data', data) sums = hroi.representative_feature('data') for k in hroi.get_id(): assert_equal(sums[hroi.select_id(k)], k) sums2 = hroi.representative_feature('data', ignore_children=False) for k in hroi.get_id(): if k != 1: assert_equal(sums2[hroi.select_id(k)], k) else: assert_equal(sums2[1], 17. / 9) if __name__ == "__main__": import nose nose.run(argv=['', __file__]) nipy-0.3.0/nipy/labs/spatial_models/tests/test_mroi.py000066400000000000000000000132361210344137400231160ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Test the discrete_domain utilities. 
Caveat assumes that the MNI template image is available at in ~/.nipy/tests/data """ import numpy as np from ..mroi import subdomain_from_array, subdomain_from_balls from ..discrete_domain import domain_from_binary_array from numpy.testing import assert_equal shape = (5, 6, 7) ########################################################### # SubDomains tests ########################################################### def make_subdomain(): """Create a multiple ROI instance """ labels = np.zeros(shape) labels[4:, 5:, 6:] = 1 labels[:2, 0:2, 0:2] = 2 labels[:2, 5:, 6:] = 3 labels[:2, 0:2, 6:] = 4 labels[4:, 0:2, 6:] = 5 labels[4:, 0:2, 0:2] = 6 labels[4:, 5:, 0:2] = 7 labels[:2, 5:, 0:2] = 8 mroi = subdomain_from_array(labels - 1, affine=None) return mroi def test_subdomain(): """Test basic construction of multiple_roi """ mroi = make_subdomain() assert_equal(mroi.k, 8) def test_subdomain2(): """Test mroi.size """ mroi = make_subdomain() assert_equal(len(mroi.get_size()), 8) for k in mroi.get_id(): assert_equal(mroi.get_size(k), np.sum(mroi.label == mroi.select_id(k))) def test_copy_subdomain(): """Test basic construction of multiple_roi """ mroi = make_subdomain() foo_feature = [[i] * j for i, j in enumerate(mroi.get_size())] foo_roi_feature = np.arange(mroi.k) mroi.set_feature('a', foo_feature) mroi.set_roi_feature('b', foo_roi_feature) mroi_copy = mroi.copy() # check some properties of mroi assert_equal(mroi.k, 8) for k in mroi.get_id(): assert_equal(mroi.get_feature('a', k), foo_feature[mroi.select_id(k)]) assert_equal(mroi.get_roi_feature('b'), foo_roi_feature) # delete mroi del mroi # check mroi_copy assert_equal(mroi_copy.k, 8) for k in mroi_copy.get_id(): assert_equal(mroi_copy.get_feature('a', k), foo_feature[mroi_copy.select_id(k)]) assert_equal(mroi_copy.get_roi_feature('b'), foo_roi_feature) def test_select_roi(): """ """ mroi = make_subdomain() aux = np.random.randn(np.prod(shape)) data = [aux[mroi.label == k] for k in range(8)] mroi.set_feature('data', data) mroi.set_roi_feature('data_mean', range(8)) mroi.select_roi([0]) assert(mroi.k == 1) assert_equal(mroi.get_roi_feature('data_mean', 0), 0) def test_roi_features(): """ """ mroi = make_subdomain() dshape = (8, 3) data = np.random.randn(*dshape) mroi.set_roi_feature('data_mean', data) assert mroi.roi_features['data_mean'].shape == dshape def test_subdomain_feature(): """Test the basic construction of features """ mroi = make_subdomain() aux = np.random.randn(np.prod(shape)) data = [aux[mroi.label == k] for k in range(8)] mroi.set_feature('data', data) assert_equal(mroi.features['data'][0], data[0]) def test_sd_integrate(): """Test the integration """ mroi = make_subdomain() aux = np.random.randn(np.prod(shape)) data = [aux[mroi.label == k] for k in range(8)] mroi.set_feature('data', data) sums = mroi.integrate('data') for k in range(8): assert_equal(sums[k], np.sum(data[k])) def test_sd_integrate2(): """Test the integration """ mroi = make_subdomain() for k in mroi.get_id(): assert_equal(mroi.get_volume(k), mroi.integrate(id=k)) volume_from_integration = mroi.integrate() volume_from_feature = mroi.get_volume() for i in range(mroi.k): assert_equal(volume_from_feature[i], volume_from_integration[i]) def test_sd_representative(): """Test the computation of representative features """ mroi = make_subdomain() data = [[k] * mroi.get_size(k) for k in mroi.get_id()] mroi.set_feature('data', data) sums = mroi.representative_feature('data') for k in mroi.get_id(): assert_equal(sums[mroi.select_id(k)], k) def test_sd_from_ball(): dom = 
domain_from_binary_array(np.ones((10, 10))) radii = np.array([2, 2, 2]) positions = np.array([[3, 3], [3, 7], [7, 7]]) subdomain = subdomain_from_balls(dom, positions, radii) assert_equal(subdomain.k, 3) assert_equal(subdomain.get_size(), np.array([9, 9, 9])) def test_set_feature(): """Test the feature building capability """ mroi = make_subdomain() data = np.random.randn(np.prod(shape)) feature_data = [data[mroi.select_id(k, roi=False)] for k in mroi.get_id()] mroi.set_feature('data', feature_data) get_feature_output = mroi.get_feature('data') assert_equal([len(k) for k in mroi.get_feature('data')], mroi.get_size()) for k in mroi.get_id(): assert_equal(mroi.get_feature('data', k), data[mroi.select_id(k, roi=False)]) assert_equal(get_feature_output[k], data[mroi.select_id(k, roi=False)]) def test_set_feature2(): """ """ mroi = make_subdomain() data = np.random.randn(np.prod(shape)) feature_data = [data[mroi.select_id(k, roi=False)] for k in mroi.get_id()] mroi.set_feature('data', feature_data) mroi.set_feature('data', np.asarray([1000]), id=0, override=True) assert_equal(mroi.get_feature('data', 0), [1000]) def test_get_coord(): """ """ mroi = make_subdomain() for k in mroi.get_id(): assert_equal(mroi.get_coord(k), mroi.domain.coord[mroi.select_id(k, roi=False)]) if __name__ == "__main__": import nose nose.run(argv=['', __file__]) nipy-0.3.0/nipy/labs/spatial_models/tests/test_parcel.py000066400000000000000000000122241210344137400234120ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import numpy as np from nipy.algorithms.graph.field import field_from_coo_matrix_and_data from ..hierarchical_parcellation import hparcel from ...utils.simul_multisubject_fmri_dataset import surrogate_2d_dataset from ..parcellation import MultiSubjectParcellation from ..discrete_domain import grid_domain_from_binary_array def test_parcel_interface(): """ Simply test parcellation interface """ # prepare some data shape = (5, 5, 5) nb_parcel = 10 data = np.random.randn(np.prod(shape)) domain = grid_domain_from_binary_array(np.ones(shape)) g = field_from_coo_matrix_and_data(domain.topology, data) u, J0 = g.ward(nb_parcel) tmp = np.array([np.sum(u == k) for k in range(nb_parcel)]) #instantiate a parcellation msp = MultiSubjectParcellation(domain, u, u) assert msp.nb_parcel == nb_parcel assert msp.nb_subj == 1 assert (msp.population().ravel() == tmp).all() def test_parcel_interface_multi_subj(): """ test parcellation interface, with multiple subjects """ # prepare some data shape = (5, 5, 5) nb_parcel = 10 nb_subj = 5 v = [] for s in range(nb_subj): data = np.random.randn(np.prod(shape)) domain = grid_domain_from_binary_array(np.ones(shape)) g = field_from_coo_matrix_and_data(domain.topology, data) u, J0 = g.ward(nb_parcel) v.append(u) v = np.array(v).T tmp = np.array([np.sum(v == k, 0) for k in range(nb_parcel)]) #instantiate a parcellation msp = MultiSubjectParcellation(domain, u, v) assert msp.nb_parcel == nb_parcel assert msp.nb_subj == nb_subj assert (msp.population() == tmp).all() def test_parcel_feature(): """ Simply test parcellation feature interface """ # prepare some data shape = (5, 5, 5) nb_parcel = 10 data = np.random.randn(np.prod(shape), 1) domain = grid_domain_from_binary_array(np.ones(shape)) g = field_from_coo_matrix_and_data(domain.topology, data) u, J0 = g.ward(nb_parcel) #instantiate a parcellation msp = MultiSubjectParcellation(domain, u, u) msp.make_feature('data', data) assert 
msp.get_feature('data').shape == (nb_parcel, 1) # test with a copy msp2 = msp.copy() assert (msp2.get_feature('data') == msp2.get_feature('data')).all() # test a multi_dimensional feature dim = 4 msp.make_feature('new', np.random.randn(np.prod(shape), 1, dim)) assert msp.get_feature('new').shape == (nb_parcel, 1, dim) def test_parcel_feature_multi_subj(): """ Test parcellation feature interface with multiple subjects """ # prepare some data shape = (5, 5, 5) nb_parcel = 10 nb_subj = 5 v = [] for s in range(nb_subj): data = np.random.randn(np.prod(shape)) domain = grid_domain_from_binary_array(np.ones(shape)) g = field_from_coo_matrix_and_data(domain.topology, data) u, J0 = g.ward(nb_parcel) v.append(u) v = np.array(v).T msp = MultiSubjectParcellation(domain, u, v) # test a multi_dimensional feature # dimension 1 msp.make_feature('data', np.random.randn(np.prod(shape), nb_subj)) assert msp.get_feature('data').shape == (nb_parcel, nb_subj) #dimension>1 dim = 4 msp.make_feature('data', np.random.randn(np.prod(shape), nb_subj, dim)) assert msp.get_feature('data').shape == (nb_parcel, nb_subj, dim) # msp.features['data'] has been overriden assert msp.features.keys() == ['data'] def test_parcel_hierarchical(): """Test the algorithm for hierrachical parcellation """ # step 1: generate some synthetic data n_subj = 10 shape = (30, 30) dataset = surrogate_2d_dataset(n_subj=n_subj, shape=shape) # step 2 : prepare all the information for the parcellation nb_parcel = 10 domain = grid_domain_from_binary_array(dataset[0] ** 2, np.eye(3)) ldata = np.reshape(dataset, (n_subj, np.prod(shape), 1)) # step 3 : run the algorithm Pa = hparcel(domain, ldata, nb_parcel) # step 4: look at the results Label = Pa.individual_labels control = True for s in range(n_subj): control *= (np.unique(Label[:, s]) == np.arange(nb_parcel)).all() assert(control) def test_prfx(): """Test the ability to construct parcel features and random effects models """ # step 1: generate some synthetic data n_subj = 10 shape = (30, 30) dataset = surrogate_2d_dataset(n_subj=n_subj, shape=shape) # step 2 : prepare all the information for the parcellation nb_parcel = 10 domain = grid_domain_from_binary_array(dataset[0] ** 2, np.eye(3)) ldata = np.reshape(dataset, (n_subj, np.prod(shape), 1)) # step 3 : run the algorithm Pa = hparcel(domain, ldata, nb_parcel) pdata = Pa.make_feature('functional', np.rollaxis(np.array(ldata), 1, 0)) one_sample = np.squeeze(pdata.mean(0) / pdata.std(0)) assert np.shape(one_sample) == tuple([nb_parcel]) assert one_sample.mean() < 1 assert one_sample.mean() > -1 if __name__ == '__main__': import nose nose.run(argv=['', __file__]) nipy-0.3.0/nipy/labs/spatial_models/tests/test_parcel_io.py000066400000000000000000000104611210344137400241020ustar00rootroot00000000000000from __future__ import with_statement from os.path import exists import numpy as np from nibabel import Nifti1Image, save from numpy.testing import assert_equal from ...utils.simul_multisubject_fmri_dataset import surrogate_3d_dataset from ..parcel_io import (mask_parcellation, fixed_parcellation, parcellation_based_analysis) from ..hierarchical_parcellation import hparcel from ..discrete_domain import grid_domain_from_shape from nibabel.tmpdirs import InTemporaryDirectory def test_mask_parcel(): """ Test that mask parcellation performs correctly """ n_parcels = 20 shape = (10, 10, 10) mask_image = Nifti1Image(np.ones(shape), np.eye(4)) wim = mask_parcellation(mask_image, n_parcels) assert_equal(np.unique(wim.get_data()), np.arange(n_parcels)) def 
test_mask_parcel_multi_subj(): """ Test that mask parcellation performs correctly on several subject masks """ rng = np.random.RandomState(0) n_parcels = 20 shape = (10, 10, 10) n_subjects = 5 mask_images = [] with InTemporaryDirectory(): for subject in range(n_subjects): path = 'mask%s.nii' % subject save(Nifti1Image((rng.rand(*shape) > .1).astype('u8'), np.eye(4)), path) mask_images.append(path) wim = mask_parcellation(mask_images, n_parcels) assert_equal(np.unique(wim.get_data()), np.arange(n_parcels)) def test_parcel_intra_from_3d_image(): """Test that a parcellation is generated, starting from an input 3D image """ # Generate an image shape = (10, 10, 10) n_parcel, nn, mu = 10, 6, 1. mask_image = Nifti1Image(np.ones(shape), np.eye(4)) with InTemporaryDirectory() as dir_context: surrogate_3d_dataset(mask=mask_image, out_image_file='image.nii') # run the algorithm for method in ['ward', 'kmeans', 'gkm']: osp = fixed_parcellation(mask_image, ['image.nii'], n_parcel, nn, method, dir_context, mu) result = 'parcel_%s.nii' % method assert exists(result) assert_equal(osp.k, n_parcel) def test_parcel_intra_from_3d_images_list(): """Test that a parcellation is generated, starting from a list of 3D images """ # Generate an image shape = (10, 10, 10) n_parcel, nn, mu = 10, 6, 1. method = 'ward' mask_image = Nifti1Image(np.ones(shape), np.eye(4)) with InTemporaryDirectory() as dir_context: data_image = ['image_%d.nii' % i for i in range(5)] for datim in data_image: surrogate_3d_dataset(mask=mask_image, out_image_file=datim) # run the algorithm osp = fixed_parcellation(mask_image, data_image, n_parcel, nn, method, dir_context, mu) assert exists('parcel_%s.nii' % method) assert_equal(osp.k, n_parcel) def test_parcel_intra_from_4d_image(): """Test that a parcellation is generated, starting from a 4D image """ # Generate an image shape = (10, 10, 10) n_parcel, nn, mu = 10, 6, 1. method = 'ward' mask_image = Nifti1Image(np.ones(shape), np.eye(4)) with InTemporaryDirectory() as dir_context: surrogate_3d_dataset(n_subj=10, mask=mask_image, out_image_file='image.nii') osp = fixed_parcellation(mask_image, ['image.nii'], n_parcel, nn, method, dir_context, mu) assert exists('parcel_%s.nii' % method) assert_equal(osp.k, n_parcel) def test_parcel_based_analysis(): # Generate an image shape = (7, 8, 4) n_subj = 5 n_parcel, nn, mu = 10, 6, 1.
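# Descriptive note (added comment): the block below writes surrogate subject images into a temporary directory, derives a hierarchical parcellation of the grid domain with hparcel, then runs a parcel-level one-sample (random effects) analysis and checks that the resulting RFX map is written and takes bounded values.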
with InTemporaryDirectory() as dir_context: data_image = ['image_%d.nii' % i for i in range(5)] for datim in data_image: surrogate_3d_dataset(shape=shape, out_image_file=datim) ldata = np.random.randn(n_subj, np.prod(shape), 1) domain = grid_domain_from_shape(shape) parcels = hparcel(domain, ldata, n_parcel, mu=3.0) prfx = parcellation_based_analysis( parcels, data_image, test_id='one_sample', rfx_path='prfx.nii', condition_id='', swd=dir_context) assert exists('prfx.nii') assert prfx.max() < 10 assert prfx.min() > - 10 if __name__ == "__main__": import nose nose.run(argv=['', __file__]) nipy-0.3.0/nipy/labs/statistical_mapping.py000066400000000000000000000356031210344137400210100ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import numpy as np import scipy.stats as sp_stats # Use the nibabel image object from nibabel import Nifti1Image as Image from nibabel.affines import apply_affine from ..algorithms.graph.field import field_from_graph_and_data from ..algorithms.graph.graph import wgraph_from_3d_grid from ..algorithms.statistics import empirical_pvalue from .glm import glm from .group.permutation_test import \ permutation_test_onesample, permutation_test_twosample # FIXME: rename permutation_test_onesample class #so that name starts with upper case ############################################################################### # Cluster statistics ############################################################################### def bonferroni(p, n): return np.minimum(1., p * n) def simulated_pvalue(t, simu_t): return 1 - np.searchsorted(simu_t, t) / float(np.size(simu_t)) def cluster_stats(zimg, mask, height_th, height_control='fpr', cluster_th=0, nulls={}): """ Return a list of clusters, each cluster being represented by a dictionary. Clusters are sorted by descending size order. Within each cluster, local maxima are sorted by descending depth order. 
Parameters ---------- zimg: z-score image mask: mask image height_th: cluster forming threshold height_control: string false positive control meaning of cluster forming threshold: 'fpr'|'fdr'|'bonferroni'|'none' cluster_th: cluster size threshold null_s : cluster-level calibration method: None|'rft'|array Notes ----- This works only with three dimensional data """ # Masking if len(mask.shape) > 3: xyz = np.where((mask.get_data() > 0).squeeze()) zmap = zimg.get_data().squeeze()[xyz] else: xyz = np.where(mask.get_data() > 0) zmap = zimg.get_data()[xyz] xyz = np.array(xyz).T nvoxels = np.size(xyz, 0) # Thresholding if height_control == 'fpr': zth = sp_stats.norm.isf(height_th) elif height_control == 'fdr': zth = empirical_pvalue.gaussian_fdr_threshold(zmap, height_th) elif height_control == 'bonferroni': zth = sp_stats.norm.isf(height_th / nvoxels) else: ## Brute-force thresholding zth = height_th pth = sp_stats.norm.sf(zth) above_th = zmap > zth if len(np.where(above_th)[0]) == 0: return None, None ## FIXME zmap_th = zmap[above_th] xyz_th = xyz[above_th] # Clustering ## Extract local maxima and connex components above some threshold ff = field_from_graph_and_data(wgraph_from_3d_grid(xyz_th, k=18), zmap_th) maxima, depth = ff.get_local_maxima(th=zth) labels = ff.cc() ## Make list of clusters, each cluster being a dictionary clusters = [] for k in range(labels.max() + 1): s = np.sum(labels == k) if s >= cluster_th: in_cluster = labels[maxima] == k m = maxima[in_cluster] d = depth[in_cluster] sorted = d.argsort()[::-1] clusters.append({'size': s, 'maxima': m[sorted], 'depth': d[sorted]}) ## Sort clusters by descending size order clusters.sort(key=lambda c : c['size'], reverse=True) # FDR-corrected p-values fdr_pvalue = empirical_pvalue.gaussian_fdr(zmap)[above_th] # Default "nulls" if not 'zmax' in nulls: nulls['zmax'] = 'bonferroni' if not 'smax' in nulls: nulls['smax'] = None if not 's' in nulls: nulls['s'] = None # Report significance levels in each cluster for c in clusters: maxima = c['maxima'] zscore = zmap_th[maxima] pval = sp_stats.norm.sf(zscore) # Replace array indices with real coordinates c['maxima'] = apply_affine(zimg.get_affine(), xyz_th[maxima]) c['zscore'] = zscore c['pvalue'] = pval c['fdr_pvalue'] = fdr_pvalue[maxima] # Voxel-level corrected p-values p = None if nulls['zmax'] == 'bonferroni': p = bonferroni(pval, nvoxels) elif isinstance(nulls['zmax'], np.ndarray): p = simulated_pvalue(zscore, nulls['zmax']) c['fwer_pvalue'] = p # Cluster-level p-values (corrected) p = None if isinstance(nulls['smax'], np.ndarray): p = simulated_pvalue(c['size'], nulls['smax']) c['cluster_fwer_pvalue'] = p # Cluster-level p-values (uncorrected) p = None if isinstance(nulls['s'], np.ndarray): p = simulated_pvalue(c['size'], nulls['s']) c['cluster_pvalue'] = p # General info info = {'nvoxels': nvoxels, 'threshold_z': zth, 'threshold_p': pth, 'threshold_pcorr': bonferroni(pth, nvoxels)} return clusters, info ############################################################################### # Peak_extraction ############################################################################### def get_3d_peaks(image, mask=None, threshold=0., nn=18, order_th=0): """ returns all the peaks of image that are with the mask and above the provided threshold Parameters ---------- image, (3d) test image mask=None, (3d) mask image By default no masking is performed threshold=0., float, threshold value above which peaks are considered nn=18, int, number of neighbours of the topological spatial model order_th=0, int, 
threshold on topological order to validate the peaks Returns ------- peaks, a list of dictionaries, where each dict has the fields: vals, map value at the peak order, topological order of the peak ijk, array of shape (1,3) grid coordinate of the peak pos, array of shape (n_maxima,3) mm coordinates (mapped by affine) of the peaks """ # Masking if mask is not None: bmask = mask.get_data().ravel() data = image.get_data().ravel()[bmask > 0] xyz = np.array(np.where(bmask > 0)).T else: shape = image.shape data = image.get_data().ravel() xyz = np.reshape(np.indices(shape), (3, np.prod(shape))).T affine = image.get_affine() if not (data > threshold).any(): return None # Extract local maxima and connex components above some threshold ff = field_from_graph_and_data(wgraph_from_3d_grid(xyz, k=18), data) maxima, order = ff.get_local_maxima(th=threshold) # retain only the maxima greater than the specified order maxima = maxima[order > order_th] order = order[order > order_th] n_maxima = len(maxima) if n_maxima == 0: # should not occur ? return None # reorder the maxima to have decreasing peak value vals = data[maxima] idx = np.argsort(- vals) maxima = maxima[idx] order = order[idx] vals = data[maxima] ijk = xyz[maxima] pos = np.dot(np.hstack((ijk, np.ones((n_maxima, 1)))), affine.T)[:, :3] peaks = [{'val': vals[k], 'order': order[k], 'ijk': ijk[k], 'pos': pos[k]} for k in range(n_maxima)] return peaks ############################################################################### # Statistical tests ############################################################################### def prepare_arrays(data_images, vardata_images, mask_images): from .mask import intersect_masks # Compute mask intersection mask = intersect_masks(mask_images, threshold=1.) # Compute xyz coordinates from mask xyz = np.array(np.where(mask > 0)) # Prepare data & vardata arrays data = np.array([(d.get_data()[xyz[0], xyz[1], xyz[2]]).squeeze() for d in data_images]).squeeze() if vardata_images == None: vardata = None else: vardata = np.array([(d.get_data()[xyz[0], xyz[1], xyz[2]]).squeeze() for d in vardata_images]).squeeze() return data, vardata, xyz, mask def onesample_test(data_images, vardata_images, mask_images, stat_id, permutations=0, cluster_forming_th=0.01): """ Helper function for permutation-based mass univariate onesample group analysis. 
""" # Prepare arrays data, vardata, xyz, mask = prepare_arrays(data_images, vardata_images, mask_images) # Create one-sample permutation test instance ptest = permutation_test_onesample(data, xyz, vardata=vardata, stat_id=stat_id) # Compute z-map image zmap = np.zeros(data_images[0].shape).squeeze() zmap[list(xyz)] = ptest.zscore() zimg = Image(zmap, data_images[0].get_affine()) # Compute mask image maskimg = Image(mask.astype(np.int8), data_images[0].get_affine()) # Multiple comparisons if permutations <= 0: return zimg, maskimg else: # Cluster definition: (threshold, diameter) cluster_def = (ptest.height_threshold(cluster_forming_th), None) # Calibration voxel_res, cluster_res, region_res = \ ptest.calibrate(nperms=permutations, clusters=[cluster_def]) nulls = {} nulls['zmax'] = ptest.zscore(voxel_res['perm_maxT_values']) nulls['s'] = cluster_res[0]['perm_size_values'] nulls['smax'] = cluster_res[0]['perm_maxsize_values'] # Return z-map image, mask image and dictionary of null distribution # for cluster sizes (s), max cluster size (smax) and max z-score (zmax) return zimg, maskimg, nulls def twosample_test(data_images, vardata_images, mask_images, labels, stat_id, permutations=0, cluster_forming_th=0.01): """ Helper function for permutation-based mass univariate twosample group analysis. Labels is a binary vector (1-2). Regions more active for group 1 than group 2 are inferred. """ # Prepare arrays data, vardata, xyz, mask = prepare_arrays(data_images, vardata_images, mask_images) # Create two-sample permutation test instance if vardata_images == None: ptest = permutation_test_twosample( data[labels == 1], data[labels == 2], xyz, stat_id=stat_id) else: ptest = permutation_test_twosample( data[labels == 1], data[labels == 2], xyz, vardata1=vardata[labels == 1], vardata2=vardata[labels == 2], stat_id=stat_id) # Compute z-map image zmap = np.zeros(data_images[0].shape).squeeze() zmap[list(xyz)] = ptest.zscore() zimg = Image(zmap, data_images[0].get_affine()) # Compute mask image maskimg = Image(mask, data_images[0].get_affine()) # Multiple comparisons if permutations <= 0: return zimg, maskimg else: # Cluster definition: (threshold, diameter) cluster_def = (ptest.height_threshold(cluster_forming_th), None) # Calibration voxel_res, cluster_res, region_res = \ ptest.calibrate(nperms=permutations, clusters=[cluster_def]) nulls = {} nulls['zmax'] = ptest.zscore(voxel_res['perm_maxT_values']) nulls['s'] = cluster_res[0]['perm_size_values'] nulls['smax'] = cluster_res[0]['perm_maxsize_values'] # Return z-map image, mask image and dictionary of null # distribution for cluster sizes (s), max cluster size (smax) # and max z-score (zmax) return zimg, maskimg, nulls ############################################################################### # Linear model ############################################################################### def linear_model_fit(data_images, mask_images, design_matrix, vector): """ Helper function for group data analysis using arbitrary design matrix """ # Prepare arrays data, vardata, xyz, mask = prepare_arrays(data_images, None, mask_images) # Create glm instance G = glm(data, design_matrix) # Compute requested contrast c = G.contrast(vector) # Compute z-map image zmap = np.zeros(data_images[0].shape).squeeze() zmap[list(xyz)] = c.zscore() zimg = Image(zmap, data_images[0].get_affine()) return zimg class LinearModel(object): def_model = 'spherical' def_niter = 2 def __init__(self, data, design_matrix, mask=None, formula=None, model=def_model, method=None, 
niter=def_niter): # Convert input data and design into sequences if not hasattr(data, '__iter__'): data = [data] if not hasattr(design_matrix, '__iter__'): design_matrix = [design_matrix] # configure spatial properties # the 'sampling' direction is assumed to be the last # TODO: check that all input images have the same shape and # that it's consistent with the mask nomask = mask == None if nomask: self.xyz = None self.axis = len(data[0].shape) - 1 else: self.xyz = np.where(mask.get_data() > 0) self.axis = 1 self.spatial_shape = data[0].shape[0: -1] self.affine = data[0].get_affine() self.glm = [] for i in range(len(data)): if not isinstance(design_matrix[i], np.ndarray): raise ValueError('Invalid design matrix') if nomask: Y = data[i].get_data() else: Y = data[i].get_data()[self.xyz] X = design_matrix[i] self.glm.append(glm(Y, X, axis=self.axis, formula=formula, model=model, method=method, niter=niter)) def dump(self, filename): """Dump GLM fit as npz file. """ models = len(self.glm) if models == 1: self.glm[0].save(filename) else: for i in range(models): self.glm[i].save(filename + str(i)) def contrast(self, vector): """Compute images of contrast and contrast variance. """ # Compute the overall contrast across models c = self.glm[0].contrast(vector) for g in self.glm[1:]: c += g.contrast(vector) def affect_inmask(dest, src, xyz): if xyz == None: dest = src else: dest[xyz] = src return dest con = np.zeros(self.spatial_shape) con_img = Image(affect_inmask(con, c.effect, self.xyz), self.affine) vcon = np.zeros(self.spatial_shape) vcon_img = Image(affect_inmask(vcon, c.variance, self.xyz), self.affine) z = np.zeros(self.spatial_shape) z_img = Image(affect_inmask(z, c.zscore(), self.xyz), self.affine) dof = c.dof return con_img, vcon_img, z_img, dof ############################################################################### # Hack to have nose skip onesample_test, which is not a unit test onesample_test.__test__ = False twosample_test.__test__ = False nipy-0.3.0/nipy/labs/tests/000077500000000000000000000000001210344137400155325ustar00rootroot00000000000000nipy-0.3.0/nipy/labs/tests/__init__.py000066400000000000000000000000001210344137400176310ustar00rootroot00000000000000nipy-0.3.0/nipy/labs/tests/test_mask.py000066400000000000000000000122201210344137400200730ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Test the mask-extracting utilities. """ from __future__ import with_statement import numpy as np import nibabel as nib from nibabel.tmpdirs import InTemporaryDirectory from .. import mask as nnm from ..mask import largest_cc, threshold_connect_components, \ series_from_mask from nipy.testing import assert_equal, assert_true, \ assert_array_equal, anatfile, assert_false def test_largest_cc(): """ Check the extraction of the largest connected component. 
""" a = np.zeros((6, 6, 6)) a[1:3, 1:3, 1:3] = 1 yield assert_equal, a, largest_cc(a) b = a.copy() b[5, 5, 5] = 1 yield assert_equal, a, largest_cc(b) def test_threshold_connect_components(): a = np.zeros((10, 10)) a[0, 0] = 1 a[3, 4] = 1 a = threshold_connect_components(a, 2) yield assert_true, np.all(a == 0) a[0, 0:3] = 1 b = threshold_connect_components(a, 2) yield assert_true, np.all(a == b) def test_mask(): mean_image = np.ones((9, 9)) mean_image[3:-3, 3:-3] = 10 mean_image[5, 5] = 100 mask1 = nnm.compute_mask(mean_image) mask2 = nnm.compute_mask(mean_image, exclude_zeros=True) # With an array with no zeros, exclude_zeros should not make # any difference yield assert_array_equal, mask1, mask2 # Check that padding with zeros does not change the extracted mask mean_image2 = np.zeros((30, 30)) mean_image2[:9, :9] = mean_image mask3 = nnm.compute_mask(mean_image2, exclude_zeros=True) yield assert_array_equal, mask1, mask3[:9, :9] # However, without exclude_zeros, it does mask3 = nnm.compute_mask(mean_image2) yield assert_false, np.allclose(mask1, mask3[:9, :9]) # check that opening is 2 by default mask4 = nnm.compute_mask(mean_image, exclude_zeros=True, opening=2) yield assert_array_equal, mask1, mask4 # check that opening has an effect mask5 = nnm.compute_mask(mean_image, exclude_zeros=True, opening=0) yield assert_true, mask5.sum() > mask4.sum() def test_mask_files(): with InTemporaryDirectory(): # Make a 4D file from the anatomical example img = nib.load(anatfile) arr = img.get_data() a2 = np.zeros(arr.shape + (2, )) a2[:, :, :, 0] = arr a2[:, :, :, 1] = arr img = nib.Nifti1Image(a2, np.eye(4)) a_fname = 'fourd_anat.nii' nib.save(img, a_fname) # check 4D mask msk1, mean1 = nnm.compute_mask_files(a_fname, return_mean=True) # and mask from identical list of 3D files msk2, mean2 = nnm.compute_mask_files([anatfile, anatfile], return_mean=True) yield assert_array_equal, msk1, msk2 yield assert_array_equal, mean1, mean2 def test_series_from_mask(): """ Test the smoothing of the timeseries extraction """ # A delta in 3D data = np.zeros((40, 40, 40, 2)) data[20, 20, 20] = 1 mask = np.ones((40, 40, 40), dtype=np.bool) with InTemporaryDirectory(): for affine in (np.eye(4), np.diag((1, 1, -1, 1)), np.diag((.5, 1, .5, 1))): img = nib.Nifti1Image(data, affine) nib.save(img, 'testing.nii') series, header = series_from_mask('testing.nii', mask, smooth=9) series = np.reshape(series[:, 0], (40, 40, 40)) vmax = series.max() # We are expecting a full-width at half maximum of # 9mm/voxel_size: above_half_max = series > .5*vmax for axis in (0, 1, 2): proj = np.any(np.any(np.rollaxis(above_half_max, axis=axis), axis=-1), axis=-1) assert_equal(proj.sum(), 9/np.abs(affine[axis, axis])) # Check that NaNs in the data do not propagate data[10, 10, 10] = np.NaN img = nib.Nifti1Image(data, affine) nib.save(img, 'testing.nii') series, header = series_from_mask('testing.nii', mask, smooth=9) assert_true(np.all(np.isfinite(series))) def test_compute_mask_sessions(): """Test that the mask computes well on multiple sessions """ with InTemporaryDirectory(): # Make a 4D file from the anatomical example img = nib.load(anatfile) arr = img.get_data() a2 = np.zeros(arr.shape + (2, )) a2[:, :, :, 0] = arr a2[:, :, :, 1] = arr img = nib.Nifti1Image(a2, np.eye(4)) a_fname = 'fourd_anat.nii' nib.save(img, a_fname) a3 = a2.copy() a3[:10, :10, :10] = 0 img2 = nib.Nifti1Image(a3, np.eye(4)) # check 4D mask msk1 = nnm.compute_mask_sessions([img2, img2]) msk2 = nnm.compute_mask_sessions([img2, a_fname]) assert_array_equal(msk1, msk2) 
msk3 = nnm.compute_mask_sessions([img2, a_fname], threshold=.9) msk4 = nnm.compute_mask_sessions([img2, a_fname], threshold=0) msk5 = nnm.compute_mask_sessions([a_fname, a_fname]) assert_array_equal(msk1, msk3) assert_array_equal(msk4, msk5) if __name__ == "__main__": import nose nose.run(argv=['', __file__]) nipy-0.3.0/nipy/labs/tests/test_statistical_mapping.py000066400000000000000000000050171210344137400232050ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import numpy as np from nibabel import Nifti1Image from ..utils.simul_multisubject_fmri_dataset import \ surrogate_2d_dataset from ..statistical_mapping import cluster_stats def make_surrogate_data(): """ Return a single deterministic 3D image """ shape = (40, 40) pos = np.array([[ 2, 10], [10, 4], [20, 30], [30, 20]]) ampli = np.array([5, 5, 5, 5]) data = surrogate_2d_dataset(n_subj=1, pos=pos, shape=shape, noise_level=0, ampli=ampli, spatial_jitter=0, signal_jitter=0).squeeze() data = np.reshape(data, (shape[0], shape[1], 1)) return Nifti1Image(data, np.eye(4)) def test1(): img = make_surrogate_data() clusters, info = cluster_stats(img, img, height_th=3., height_control='None', cluster_th=0, nulls={}) assert len(clusters)==4 def test2(): img = make_surrogate_data() clusters, info = cluster_stats(img, img, height_th=3., height_control='None', cluster_th=5, nulls={}) assert len(clusters)==4 def test3(): img = make_surrogate_data() clusters, info = cluster_stats(img, img, height_th=3., height_control='None', cluster_th=10, nulls={}) assert len(clusters)==0 def test_4(): img = make_surrogate_data() clusters, info = cluster_stats(img, img, height_th=.001, height_control='fpr', cluster_th=0, nulls={}) assert len(clusters)==4 def test_5(): img = make_surrogate_data() clusters, info = cluster_stats(img, img, height_th=.05, height_control='bonferroni', cluster_th=0, nulls={}) assert len(clusters)==4 def test_6(): img = make_surrogate_data() clusters, info = cluster_stats(img, img, height_th=.05, height_control='fdr', cluster_th=0, nulls={}) print len(clusters), sum([c['size'] for c in clusters]) assert len(clusters)==4 def test7(): img = make_surrogate_data() clusters, info = cluster_stats(img, img, height_th=3., height_control='None', cluster_th=0, nulls={}) nstv = sum([c['size'] for c in clusters]) assert nstv==36 def test_8(): img = make_surrogate_data() clusters, info = cluster_stats(img, img, height_th=.001, height_control='fpr', cluster_th=0, nulls={}) nstv = sum([c['size'] for c in clusters]) assert nstv==36 if __name__ == "__main__": import nose nose.run(argv=['', __file__]) nipy-0.3.0/nipy/labs/utils/000077500000000000000000000000001210344137400155305ustar00rootroot00000000000000nipy-0.3.0/nipy/labs/utils/__init__.py000066400000000000000000000005241210344137400176420ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from .routines import (quantile, median, mahalanobis, svd, permutations, combinations, gamln, psi) from .zscore import zscore from nipy.testing import Tester test = Tester().test bench = Tester().bench nipy-0.3.0/nipy/labs/utils/mask.py000066400000000000000000000005071210344137400170370ustar00rootroot00000000000000""" Compatibility module """ import warnings warnings.warn(DeprecationWarning( "This module (nipy.labs.utils.mask) has been moved and " "is depreciated. 
Please update your code to import from " "'nipy.labs.mask'.")) # Absolute import, as 'import *' doesnot work with relative imports from nipy.labs.mask import * nipy-0.3.0/nipy/labs/utils/random_threshold.py000066400000000000000000000631531210344137400214460ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: ############################################################################## # Random Thresholding Procedure (after M. Lavielle and C. Ludena) import numpy as np import scipy.stats as st from nipy.algorithms.graph import wgraph_from_3d_grid from ..group.routines import add_lines tol = 1e-10 ############################################################################## # Wrappers def randthresh_main(Y, K, XYZ=None, p=np.inf, varwind=False, knownull=True, stop=False, verbose=False): """ Wrapper for random threshold functions Parameters ---------- Y: array of shape (n,),Observations K: int, Some positive integer (lower bound on the number of null hypotheses) XYZ: array of shape (3, n) voxel coordinates. If not empty, connexity constraints are used on the non-null set p: float, optional, lp norm varwind: bool, Varying window variant (vs. fixed window, with width K) knownull: bool, optional, Known null distribution (observations assumed Exp(1) under H0) versus unknown (observations assumed Gaussian under H0) stop: bool, optional Stop when minimum is attained (save computation time) verbose: bool, 'Chatty' mode Returns ------- D : dict containing the following fields: * "C" (n-K) array Lp norm of partial sums fluctuation about their conditional expectation * "thresh" Detection threshold * "detect" (k,) Index of detected activations Notes ----- Random thresholding is performed only if null hypothesis of no activations is rejected at level 5% """ if XYZ == None: return randthresh(Y, K, p, stop, verbose, varwind, knownull) else: return randthresh_connex(Y, K, XYZ, p, stop, verbose, varwind, knownull) def randthresh(Y, K, p=np.inf, stop=False, verbose=False, varwind=False, knownull=True): """ Wrapper for random threshold functions (without connexity constraints) Parameters ---------- Y: array of shape (n,) Observations K: int, Some positive integer (lower bound on the number of null hypotheses) p: float, lp norm stop Stop when minimum is attained (save computation time) verbose 'Chatty' mode varwind Varying window variant (vs. 
fixed window, with width K) knownull Known null distribution (observations assumed Exp(1) under H0) versus unknown (observations assumed Gaussian under H0) Returns ------- A dictionary D containing the following fields: "C" (n-K) Lp norm of partial sums fluctuation about their conditional expectation "thresh" Detection threshold "detect" (k,) Index of detected activations "v" Estimated null variance (if knownull is False) Notes ----- Random thresholding is performed only if null hypothesis of no activations is rejected at level 5% """ D = {} # Test presence of activity if knownull: X = Y else: v = np.square(Y).mean() X = np.clip( - np.log(1 - st.chi2.cdf(Y ** 2, 1, 0, scale=v)), 0, 1 / tol) D["v"] = v T = test_stat(X, p=np.inf) if T <= 0.65: print "No activity detected at 5% level" D["detect"] = np.array([]) D["thresh"] = np.inf else: # Find optimal threshold if varwind: if knownull: C = randthresh_varwind_knownull(Y, K, p, stop, verbose) else: C, V = randthresh_varwind_gaussnull( Y, K, p, stop, one_sided=False, verbose=verbose) else: if knownull: C = randthresh_fixwind_knownull(Y, K, p, stop, verbose) else: C, V = randthresh_fixwind_gaussnull( Y, K, p, stop, one_sided=False, verbose=verbose) n = len(X) if stop: I = np.where(C > 0)[0] if len(I) > 0: ncoeffs = I[-1] else: ncoeffs = n - K else: I = np.where((C[2:] > C[1:-1]) * (C[1:-1] < C[:-2]))[0] if len(I) > 0: ncoeffs = I[np.argmin(C[1: -1][I])] + 1 else: ncoeffs = n - K thresh = np.sort(np.abs(Y))[ - ncoeffs] # Detected activations detect = np.where(np.abs(Y) > thresh)[0] D["C"] = C[2:] D["thresh"] = thresh D["detect"] = detect if not knownull: D["v"] = V[2:] return D def randthresh_connex(Y, K, XYZ, p=np.inf, stop=False, verbose=False, varwind=False, knownull=True): """ Wrapper for random threshold functions under connexity constraints Parameters ---------- Y (n,) Observations K Some positive integer (lower bound on the number of null hypotheses) XYZ (3,n) voxel coordinates p lp norm stop Stop when minimum is attained (save computation time) verbose 'Chatty' mode varwind Varying window variant (vs. 
fixed window, with width K) knownull Known null distribution (observations assumed Exp(1) under H0) versus unknown (observations assumed Gaussian under H0) Returns ------- A dictionary D containing the following fields: "C" (n-K) Lp norm of partial sums fluctuation about their conditional expectation "thresh" Detection threshold "detect" (ncoeffs,) Index of detected voxels Notes ----- Random thresholding is performed only if null hypothesis of no activations is rejected at level 5% """ # Test presence of activity D = {} if knownull: X = Y else: v = np.square(Y).mean() X = np.clip( - np.log(1 - st.chi2.cdf(Y ** 2, 1, 0, scale=v)), 0, 1 / tol) D["v"] = v T = test_stat(X, p=np.inf) if T <= 0.65: print "No activity detected at 5% level" D["detect"] = np.array([]) D["thresh"] = np.inf else: # Find optimal threshold if varwind: if knownull: C = randthresh_varwind_knownull_connex( Y, K, XYZ, p, stop, verbose) else: C, V = randthresh_varwind_gaussnull_connex( Y, K, XYZ, p, stop, verbose=verbose) else: if knownull: C = randthresh_fixwind_knownull_connex( Y, K, XYZ, p, stop, verbose) else: C, V = randthresh_fixwind_gaussnull_connex( Y, K, XYZ, p, stop, verbose=verbose) n = len(X) if stop: I = np.where(C > 0)[0] if len(I) > 0: ncoeffs = I[-1] else: ncoeffs = n - K else: I = np.where((C[2:] > C[1:-1]) * (C[1:-1] < C[:-2]))[0] if len(I) > 0: ncoeffs = I[np.argmin(C[1:-1][I])] + 1 else: ncoeffs = n - K thresh = np.sort(np.abs(Y))[ - ncoeffs] detect = np.where(np.abs(Y) > thresh)[0] # Remove isolated voxels iso = isolated(XYZ[:, detect]) detect[iso] = -1 detect = detect[detect != -1] D["C"] = C[2:] D["thresh"] = thresh D["detect"] = detect if knownull == False: Ynull = np.square(Y).copy() Ynull[detect] = np.nan Ynull = Ynull[np.isnan(Ynull) == False] D["v"] = V[2:] return D ######################################################################### # random threshold functions without connexity constraints def randthresh_fixwind_knownull(X, K, p=np.inf, stop=False, verbose=False): """Random threshold with fixed-window and known null distribution Parameters ========== X (n,): Observations (must be Exp(1) under H0) K : Some positive integer (lower bound on the number of null hypotheses) p : Lp norm stop : Stop when minimum is attained (save computation time) Returns ======= C (n-K): Lp norm of partial sums fluctuation about their conditional expectation """ n = len(X) I = 1.0 / np.arange(1, n + 1) # Sort data sortX = np.sort(X)[:: - 1] C = np.zeros(n - K, float) T = np.cumsum(sortX) for k in xrange(2, n - K): #Ratio of expectations B = np.arange(1, K + 1) * (1 + I[:n - 1 - k].sum() - I[:K].cumsum()) B /= float(K) * ( 1 + I[K: n - 1 - k].sum() ) #Partial sums Tk = T[k + 1: k + K + 1] - T[k] #Conditional expectations Q = B * Tk[-1] if p == np.inf: C[k] = np.abs(Tk - Q).max() / np.sqrt(n) else: C[k] = ( np.abs(Tk - Q) ** p ).sum() / n ** (p / 2.0 + 1) if verbose: print "k :", k, "C[k]:", C[k] if C[k] > C[k - 1] and C[k - 1] < C[k - 2] and stop: break return C def randthresh_varwind_knownull(X, K, p=np.inf, stop=False, verbose=False): """Random threshold with varying window and known null distribution Parameters ========== X (n,): Observations (Exp(1) under H0) K : Some positive integer (lower bound on the number of null hypotheses) p : lp norm stop : Stop when minimum is attained (save computation time) Returns ======= C (n-K) Lp norm of partial sums fluctuation about their conditional expectation """ n = len(X) I = 1.0 / np.arange(1, n + 1) #Sort data sortX = np.sort(X)[:: - 1] T = np.cumsum(sortX) C = 
np.zeros(n - K, float) for k in xrange(2, n - K): #Ratio of expectations B = np.arange(1, n - k) * ( 1 + I[:n - 1 - k].sum() - I[:n - k - 1].cumsum()) B /= float(n - k - 1) #Partial sums Tk = T[k + 1:] - T[k] #Conditional expectations Q = B * Tk[ - 1] if p == np.inf: C[k] = np.abs(Tk - Q).max() / np.sqrt(n - k - 1) else: C[k] = ( np.abs(Tk - Q) ** p).sum() / (n - k - 1) ** (p / 2.0 + 1) if verbose: print "k:", k, "C[k]:", C[k] if C[k] > C[k - 1] and C[k - 1] < C[k - 2] and stop: break return C def randthresh_fixwind_gaussnull(Y, K, p=np.inf, stop=False, one_sided=False, verbose=False): """ Random threshold with fixed window and null gaussian distribution Parameters ========== Y array of shape (n,) Observations (assumed Gaussian under H0, with unknown variance) K, int, Some positive integer (lower bound on the number of null hypotheses) p, float, lp norm stop: bool, Stop when minimum is attained (save computation time) one_sided: bool, If nonzero means are positive only (vs. positive or negative) Returns ======= C array of shape (n-K) Lp norm of partial sums fluctuation about their conditional expectation """ n = len(Y) I = 1.0 / np.arange(1, n + 1) if one_sided: sortY = np.sort(Y) std = np.sqrt((np.sum(sortY[1:K] ** 2) + np.cumsum(sortY[K: n] ** 2))\ * 1.0 / np.arange(K, n)) std = std[:: - 1] else: sortY = np.sort(np.square(Y)) V = (np.sum(sortY[1: K]) + np.cumsum(sortY[K: n])) * \ 1.0 / np.arange(K, n) V = V[:: - 1] C = np.zeros(n - K, float) sortY = sortY[:: - 1] for k in xrange(2, n - K): if one_sided: X = np.clip( - np.log(1 - st.norm.cdf(sortY[k + 1: k + K + 1], scale=std[k])), 0, 1 / tol) else: X = np.clip( - np.log(1 - st.chi2.cdf(sortY[k + 1: k + K + 1], 1, 0, scale=V[k])), 0, 1 / tol) # Ratio of expectations B = np.arange(1, K + 1) * (1 + I[:n - 1 - k].sum() - I[: K].cumsum()) B /= float(K) * (1 + I[K: n - 1 - k].sum()) # Partial sums T = X.cumsum() # Conditional expectations Q = B * T[-1] if p == np.inf: C[k] = np.abs(T - Q).max() / np.sqrt(n) else: C[k] = ( np.abs(T - Q) ** p ).sum() / n ** ( p / 2.0 + 1) if verbose: print "k:", k, "C[k]:", C[k] if C[k] > C[k-1] and C[k-1] < C[k-2] and stop: break return C, V def randthresh_varwind_gaussnull(Y, K, p=np.inf, stop=False, one_sided=False, verbose=False): """Random threshold with fixed window and gaussian null distribution Parameters ========== Y (n,) Observations (assumed Gaussian under H0, with unknown variance) K Some positive integer (lower bound on the number of null hypotheses) p lp norm stop Stop when minimum is attained (save computation time) one_sided If nonzero means are positive only (vs. 
positive or negative) Returns ======= C (n-K) Lp norm of partial sums fluctuation about their conditional expectation """ n = len(Y) I = 1.0 / np.arange(1, n + 1) if one_sided: sortY = np.sort(Y) std = np.sqrt((np.sum(sortY[1: K] ** 2) + np.cumsum(sortY[K: n] ** 2)) * 1.0 / np.arange(K, n)) std = std[:: - 1] else: sortY = np.sort(np.square(Y)) V = (np.sum(sortY[1: K]) + np.cumsum(sortY[K: n]))\ * 1.0 / np.arange(K, n) V = V[:: - 1] C = np.zeros(n - K, float) sortY = sortY[:: - 1] for k in xrange(2, n - K): if one_sided: X = np.clip( - np.log(1 - st.norm.cdf(sortY[k + 1:], scale=std[k])), 0, 1 / tol) else: X = np.clip( - np.log(1 - st.chi2.cdf(sortY[k + 1:], 1, 0, scale=V[k])), 0, 1 / tol) # Ratio of expectations B = np.arange(1, n - k) * ( 1 + I[: n - 1 - k].sum() - \ I[: n - k - 1].cumsum() ) B /= float(n - k - 1) # Partial sums T = X.cumsum() # Conditional expectations Q = B * T[ - 1] if p == np.inf: C[k] = np.abs(T - Q).max() / np.sqrt(n) else: C[k] = ( np.abs(T - Q) ** p ).sum() / n ** (p / 2.0 + 1) if verbose: print "k:", k, "C[k]:", C[k] if C[k] > C[k - 1] and C[k - 1] < C[k - 2] and stop: break return C, V ############################################################################### # random threshold functions with connexity constraints def randthresh_fixwind_knownull_connex(X, K, XYZ, p=np.inf, stop=False, verbose=False): """Random threshold with fixed-window and known null distribution, using connexity constraint on non-null set. Parameters ========== X (n,): Observations (must be Exp(1) under H0) XYZ (3,n): voxel coordinates K : Some positive integer (lower bound on the number of null hypotheses) p : Lp norm stop : Stop when minimum is attained (save computation time) Returns ======= C (n-K): Lp norm of partial sums fluctuation about their conditional expectation """ n = len(X) I = 1.0 / np.arange(1, n + 1) #Sort data J = np.argsort(X)[:: - 1] sortX = X[J] C = np.zeros(n - K, float) T = np.zeros(K, float) L = np.zeros(n, int) L[J[0]] = 1 for k in xrange(2, n - K): #Ratio of expectations B = np.arange(1, K + 1) * (1 + I[: n - 1 - k].sum() - I[: K].cumsum()) B /= float(K) * ( 1 + I[K: n - 1 - k].sum() ) Jk = J[:k] #Suprathreshold voxels connected to new voxel XYZk = np.abs(XYZ[:, Jk] - XYZ[:, J[k - 1]].reshape(3, 1)) Lk = np.where((XYZk.sum(axis=0) <= 2) * (XYZk.max(axis=0) <= 1))[0]\ [: - 1] if len(Lk) == 0: L[J[k - 1]] = 1 else: L[J[Lk]] = 0 Ik = np.where(L[Jk] == 1)[0] nk = len(Ik) #Partial sums if nk >= K: T = sortX[Ik[:K]].cumsum() elif nk == 0: T = sortX[k + 1: k + K + 1].cumsum() else: T[:nk] = sortX[Ik].cumsum() T[nk:] = T[nk - 1] + sortX[k + 1:k + K - nk + 1].cumsum() # Conditional expectations Q = B * T[-1] if p == np.inf: C[k] = np.abs(T - Q).max() / np.sqrt(n) else: C[k] = (np.abs(T - Q) ** p).sum() / n ** (p / 2.0 + 1) if verbose: print "k:", k, "nk:", nk, "C[k]:", C[k] if C[k] > C[k - 1] and C[k - 1] < C[k - 2] and stop: break return C def randthresh_varwind_knownull_connex(X, K, XYZ, p=np.inf, stop=False, verbose=False): """Random threshold with varying window and known null distribution Parameters ========== X (n,): Observations (Exp(1) under H0) K : Some positive integer (lower bound on the number of null hypotheses) XYZ (3,n): voxel coordinates p : lp norm stop : Stop when minimum is attained (save computation time) Returns ======= C (n-K) Lp norm of partial sums fluctuation about their conditional expectation """ n = len(X) I = 1.0 / np.arange(1, n + 1) #Sort data J = np.argsort(X)[:: - 1] sortX = X[J] C = np.zeros(n - K, float) L = np.zeros(n, int) L[J[0]] = 
1 for k in xrange(2, n - K): Jk = J[:k] #Suprathreshold voxels connected to new voxel XYZk = np.abs(XYZ[:, Jk] - XYZ[:, J[k-1]].reshape(3, 1)) Lk = np.where((XYZk.sum(axis=0) <= 2) * (XYZk.max(axis=0) <= 1))\ [0][:-1] if len(Lk) == 0: L[J[k - 1]] = 1 else: L[J[Lk]] = 0 Ik = np.where(L[Jk] == 1)[0] #Ik = isolated(XYZ[:, Jk]) nk = len(Ik) #Ratio of expectations B = np.arange(1, n - k + nk) * ( 1 + I[:n - 1 - k + nk].sum() - I[:n - k - 1 + nk].cumsum()) B /= float(n - k - 1 + nk) #Partial sums if nk == 0: T = sortX[k + 1:].cumsum() else: T = np.zeros(n - k + nk - 1, float) T[:nk] = sortX[Ik].cumsum() T[nk:] = T[nk - 1] + sortX[k + 1:].cumsum() #Conditional expectations Q = B * T[-1] if p == np.inf: C[k] = np.abs(T - Q).max() / np.sqrt(n - k - 1 + nk) else: C[k] = ( np.abs(T - Q) ** p ).sum() / (n - k - 1 + nk) **\ (p / 2.0 + 1) if C[k] > C[k - 1] and C[k - 1] < C[k - 2] and stop: break if verbose: print "k:", k, "nk:", nk, "C[k]:", C[k] return C def randthresh_fixwind_gaussnull_connex(X, K, XYZ, p=np.inf, stop=False, verbose=False): """Random threshold with fixed-window and gaussian null distribution, using connexity constraint on non-null set. Parameters ========== X (n,): Observations (assumed Gaussian under H0) XYZ (3,n): voxel coordinates K : Some positive integer (lower bound on the number of null hypotheses) p : Lp norm stop : Stop when minimum is attained (save computation time) Returns ======= C (n-K): Lp norm of partial sums fluctuation about their conditional expectation """ n = len(X) I = 1.0 / np.arange(1, n + 1) #Sort data J = np.argsort(X ** 2)[:: - 1] sortX = np.square(X)[J] C = np.zeros(n - K, float) V = np.zeros(n - K, float) T = np.zeros(K, float) L = np.zeros(n, int) L[J[0]] = 1 for k in xrange(2, n - K): #Ratio of expectations B = np.arange(1, K + 1) * ( 1 + I[:n - 1 - k].sum() - I[:K].cumsum()) B /= float(K) * ( 1 + I[K:n - 1 - k].sum()) Jk = J[:k] #Suprathreshold voxels connected to new voxel XYZk = np.abs(XYZ[:, Jk] - XYZ[:, J[k - 1]].reshape(3, 1)) Lk = np.where((XYZk.sum(axis=0) <= 2) * (XYZk.max(axis=0) <= 1))[0][: - 1] if len(Lk) == 0: L[J[k - 1]] = 1 else: L[J[Lk]] = 0 Ik = np.where(L[Jk] == 1)[0] nk = len(Ik) #Null variance V[k] = (sortX[Ik].sum() + sortX[k + 1:].sum()) / float(nk + n - k - 1) #Partial sums if nk >= K: T = np.clip( - np.log(1 - st.chi2.cdf( sortX[Ik[:K]], 1, 0, scale=V[k])), 0, 1 / tol).cumsum() elif nk == 0: T = np.clip( - np.log(1 - st.chi2.cdf(sortX[k + 1:k + K + 1], 1, 0, scale=V[k])), 0, 1 / tol).cumsum() else: T[:nk] = np.clip( - np.log(1 - st.chi2.cdf(sortX[Ik], 1, 0, scale=V[k])), 0, 1 / tol).cumsum() T[nk:] = T[nk - 1] + np.clip( - np.log(1 - st.chi2.cdf(sortX[k + 1:k + K - nk + 1], 1, 0, scale=V[k])), 0, 1 / tol).cumsum() # Conditional expectations Q = B * T[ - 1] if p == np.inf: C[k] = np.abs(T - Q).max() / np.sqrt(n) else: C[k] = (np.abs(T - Q) ** p).sum() / n ** (p / 2.0 + 1) if verbose: print "k:", k, "nk:", nk, "C[k]:", C[k] if C[k] > C[k - 1] and C[k - 1] < C[k - 2] and stop: break return C, V def randthresh_varwind_gaussnull_connex(X, K, XYZ, p=np.inf, stop=False, verbose=False): """Random threshold with fixed-window and gaussian null distribution, using connexity constraint on non-null set. 
Parameters ========== X (n,): Observations (assumed Gaussian under H0) XYZ (3,n): voxel coordinates K : Some positive integer (lower bound on the number of null hypotheses) p : Lp norm stop : Stop when minimum is attained (save computation time) Returns ======= C (n-K): Lp norm of partial sums fluctuation about their conditional expectation """ n = len(X) I = 1.0 / np.arange(1, n + 1) #Sort data J = np.argsort(X ** 2)[:: - 1] sortX = np.square(X)[J] C = np.zeros(n - K, float) V = np.zeros(n - K, float) T = np.zeros(K, float) L = np.zeros(n, int) L[J[0]] = 1 for k in xrange(2, n - K): Jk = J[:k] #Suprathreshold voxels connected to new voxel XYZk = np.abs(XYZ[:, Jk] - XYZ[:, J[k - 1]].reshape(3, 1)) Lk = np.where((XYZk.sum(axis=0) <= 2) * (XYZk.max(axis=0) <= 1))[0][: - 1] if len(Lk) == 0: L[J[k - 1]] = 1 else: L[J[Lk]] = 0 Ik = np.where(L[Jk] == 1)[0] #Ik = isolated(XYZ[:, Jk]) nk = len(Ik) #Ratio of expectations B = np.arange(1, n - k + nk) * ( 1 + I[:n - 1 - k + nk].sum() - I[:n - k - 1 + nk].cumsum()) B /= float(n - k - 1 + nk) #Null variance V[k] = (sortX[Ik].sum() + sortX[k + 1:].sum()) / float(nk + n - k - 1) #Partial sums if nk == 0: T = np.clip( - np.log(1 - st.chi2.cdf(sortX[k + 1:], 1, 0, scale=V[k])), 0, 1 / tol).cumsum() else: T = np.zeros(n - k + nk - 1, float) T[:nk] = np.clip( - np.log(1 - st.chi2.cdf(sortX[Ik], 1, 0, scale=V[k])), 0, 1 / tol).cumsum() T[nk:] = T[nk-1] + np.clip( - np.log(1 - st.chi2.cdf(sortX[k + 1:], 1, 0, scale=V[k])), 0, 1 / tol).cumsum() #Conditional expectations Q = B * T[-1] if p == np.inf: C[k] = np.abs(T - Q).max() / np.sqrt(n - k - 1 + nk) else: C[k] = ( np.abs(T - Q) ** p).sum() / \ (n - k - 1 + nk) ** (p / 2.0 + 1) if verbose: print "k:", k, "nk:", nk, "C[k]:", C[k] if C[k] > C[k - 1] and C[k - 1] < C[k - 2] and stop: break return C, V ############################################################################# # Miscellanous functions def test_stat(X, p=np.inf): """Test statistic of global null hypothesis that all observations have zero-mean Parameters ========== X (n,) : X[j] = -log(1-F(|Y[j]|)) where F: cdf of |Y[j]| under null hypothesis (must be computed beforehand) p : Lp norm (<= inf) to use for computing test statistic Returns ======= D : test statistic """ n = len(X) I = 1.0 / np.arange(1, n + 1) #Partial sums T = np.cumsum(np.sort(X)[:: - 1]) #Expectation of partial sums E = np.arange(1, n + 1) * (1 + I.sum() - I.cumsum()) #Conditional expectation of partial sums Q = E / n * T[ - 1] #Test statistic if p == np.inf: return np.max( ( np.abs(T - Q) ) / np.sqrt(n) ) else: return sum(np.abs(T - Q) ** p) / (n ** (0.5 * p + 1)) def isolated(XYZ, k=18): """ Outputs an index I of isolated points from their integer coordinates, XYZ (3, n), and under k-connectivity, k = 6, 18 or 26. """ label = wgraph_from_3d_grid(XYZ.T, k).cc() # Isolated points ncc = label.max() + 1 p = XYZ.shape[1] size = np.zeros(ncc, float) ones = np.ones((p, 1), float) add_lines(ones, size.reshape(ncc, 1), label) return np.where(size[label] == 1)[0] nipy-0.3.0/nipy/labs/utils/reproducibility_measures.py000066400000000000000000000606471210344137400232340ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ These are several functions for computing reproducibility measures. A use script should be appended soon on the repository. In general thuis proceeds as follows: The dataset is subject to jacknife subampling ('splitting'), each subsample being analysed independently. 
A reproducibility measure is then derived; All is used to produce the work described in Analysis of a large fMRI cohort: Statistical and methodological issues for group analyses. Thirion B, Pinel P, Meriaux S, Roche A, Dehaene S, Poline JB. Neuroimage. 2007 Mar;35(1):105-20. Bertrand Thirion, 2009-2010 """ import numpy as np from nipy.labs.spatial_models.discrete_domain import \ grid_domain_from_binary_array # --------------------------------------------------------- # ----- cluster handling functions ------------------------ # --------------------------------------------------------- def histo_repro(h): """ Given the histogram h, compute a standardized reproducibility measure Parameters ---------- h array of shape(xmax+1), the histogram values Returns ------- hr, float: the measure """ k = np.size(h) - 1 if k == 1: return 0. nf = np.dot(h, np.arange(k + 1)) / k if nf == 0: return 0. n1k = np.arange(1, k + 1) res = 1.0 * np.dot(h[1:], n1k * (n1k - 1)) / (k * (k - 1)) return res / nf def cluster_threshold(stat_map, domain, th, csize): """Perform a thresholding of a map at the cluster-level Parameters ---------- stat_map: array of shape(nbvox) the input data domain: Nifti1Image instance, referential- and domain-defining image th (float): cluster-forming threshold cisze (int>0): cluster size threshold Returns ------- binary array of shape (nvox): the binarized thresholded map Notes ----- Should be replaced by a more standard function in the future """ if stat_map.shape[0] != domain.size: raise ValueError('incompatible dimensions') # first build a domain of supra_threshold regions thresholded_domain = domain.mask(stat_map > th) # get the connected components label = thresholded_domain.connected_components() binary = - np.ones(domain.size) binary[stat_map > th] = label nbcc = len(np.unique(label)) for i in range(nbcc): if np.sum(label == i) < csize: binary[binary == i] = - 1 binary = (binary > -1) return binary def get_cluster_position_from_thresholded_map(stat_map, domain, thr=3.0, csize=10): """ the clusters above thr of size greater than csize in 18-connectivity are computed Parameters ---------- stat_map : array of shape (nbvox), map to threshold mask: Nifti1Image instance, referential- and domain-defining image thr: float, optional, cluster-forming threshold cisze=10: int cluster size threshold Returns ------- positions array of shape(k,anat_dim): the cluster positions in physical coordinates where k= number of clusters if no such cluster exists, None is returned """ # if no supra-threshold voxel, return if (stat_map <= thr).all(): return None # first build a domain of supra_threshold regions thresholded_domain = domain.mask(stat_map > thr) # get the connected components label = thresholded_domain.connected_components() # get the coordinates coord = thresholded_domain.get_coord() # get the barycenters baryc = [] for i in range(label.max() + 1): if np.sum(label == i) >= csize: baryc.append(np.mean(coord[label == i], 0)) if len(baryc) == 0: return None baryc = np.vstack(baryc) return baryc def get_peak_position_from_thresholded_map(stat_map, domain, threshold): """The peaks above thr in 18-connectivity are computed Parameters ---------- stat_map: array of shape (nbvox): map to threshold deomain: referential- and domain-defining image thr, float: cluster-forming threshold Returns ------- positions array of shape(k,anat_dim): the cluster positions in physical coordinates where k= number of clusters if no such cluster exists, None is returned """ from ..statistical_mapping import get_3d_peaks # 
create an image to represent stat_map simage = domain.to_image(data=stat_map) # extract the peaks peaks = get_3d_peaks(simage, threshold=threshold, order_th=2) if peaks == None: return None pos = np.array([p['pos'] for p in peaks]) return pos # --------------------------------------------------------- # ----- data splitting functions ------------------------ # --------------------------------------------------------- def bootstrap_group(nsubj, ngroups): """Split the proposed group into redundant subgroups by bootstrap Parameters ---------- nsubj (int) the number of subjects in the population ngroups(int) Number of subbgroups to be drawn Returns ------- samples: a list of ngroups arrays containing the indexes of the subjects in each subgroup """ groupsize = nsubj samples = [(groupsize * np.random.rand(groupsize)).astype(np.int) for i in range(ngroups)] return samples def split_group(nsubj, ngroups): """Split the proposed group into random disjoint subgroups Parameters ---------- nsubj (int) the number of subjects to be split ngroups(int) Number of subbgroups to be drawn Returns ------- samples: a list of ngroups arrays containing the indexes of the subjects in each subgroup """ groupsize = int(np.floor(nsubj / ngroups)) rperm = np.argsort(np.random.rand(nsubj)) samples = [rperm[i * groupsize: (i + 1) * groupsize] for i in range(ngroups)] return samples # --------------------------------------------------------- # ----- statistic computation ----------------------------- # --------------------------------------------------------- def conjunction(x, vx, k): """Returns a conjunction statistic as the sum of the k lowest t-values Parameters ---------- x: array of shape(nrows, ncols), effect matrix vx: array of shape(nrows, ncols), variance matrix k: int, number of subjects in the conjunction Returns ------- t array of shape(nrows): conjunction statistic """ t = np.sort(x / np.sqrt(np.maximum(vx, 1.e-15))) cjt = np.sum(t[:, :k], 1) return cjt def ttest(x): """Returns the t-test for each row of the data x """ from ..group.onesample import stat t = stat(x.T, id='student', axis=0) return np.squeeze(t) def fttest(x, vx): """Assuming that x and vx represent a effect and variance estimates, returns a cumulated ('fixed effects') t-test of the data over each row Parameters ---------- x: array of shape(nrows, ncols): effect matrix vx: array of shape(nrows, ncols): variance matrix Returns ------- t array of shape(nrows): fixed effect statistics array """ if np.shape(x) != np.shape(vx): raise ValueError("incompatible dimensions for x and vx") n = x.shape[1] t = x / np.sqrt(np.maximum(vx, 1.e-15)) t = t.mean(1) * np.sqrt(n) return t def mfx_ttest(x, vx): """Idem fttest, but returns a mixed-effects statistic Parameters ---------- x: array of shape(nrows, ncols): effect matrix vx: array of shape(nrows, ncols): variance matrix Returns ------- t array of shape(nrows): mixed effect statistics array """ from ..group.onesample import stat_mfx t = stat_mfx(x.T, vx.T, id='student_mfx', axis=0) return np.squeeze(t) def voxel_thresholded_ttest(x, threshold): """Returns a binary map of the ttest>threshold """ t = ttest(x) return t > threshold def statistics_from_position(target, data, sigma=1.0): """ Return a number characterizing how close data is from target using a kernel-based statistic Parameters ---------- target: array of shape(nt,anat_dim) or None the target positions data: array of shape(nd,anat_dim) or None the data position sigma=1.0 (float), kernel parameter or a distance that say how good good is Returns 
------- sensitivity (float): how well the targets are fitted by the data in [0,1] interval 1 is good 0 is bad """ from ...algorithms.utils.fast_distance import euclidean_distance as ed if data == None: if target == None: return 0.# could be 1.0 ? else: return 0. if target == None: return 0. dmatrix = ed(data, target) / sigma sensitivity = dmatrix.min(0) sensitivity = np.exp( - 0.5 * sensitivity ** 2) sensitivity = np.mean(sensitivity) return sensitivity # ------------------------------------------------------- # ---------- The main functions ----------------------------- # ------------------------------------------------------- def voxel_reproducibility(data, vardata, domain, ngroups, method='crfx', swap=False, verbose=0, **kwargs): """ return a measure of voxel-level reproducibility of activation patterns Parameters ---------- data: array of shape (nvox,nsubj) the input data from which everything is computed vardata: array of shape (nvox,nsubj) the corresponding variance information ngroups (int): Number of subbgroups to be drawn domain: referential- and domain-defining image ngourps: int, number of groups to be used in the resampling procedure method: string, to be chosen among 'crfx', 'cmfx', 'cffx' inference method under study verbose: bool, verbosity mode Returns ------- kappa (float): the desired reproducibility index """ rmap = map_reproducibility(data, vardata, domain, ngroups, method, swap, verbose, **kwargs) h = np.array([np.sum(rmap == i) for i in range(ngroups + 1)]) hr = histo_repro(h) return hr def draw_samples(nsubj, ngroups, split_method='default'): """ Draw randomly ngroups sets of samples from [0..nsubj-1] Parameters ---------- nsubj, int, the total number of items ngroups, int, the number of desired groups split_method: string, optional, to be chosen among 'default', 'bootstrap', 'jacknife' if 'bootstrap', then each group will be nsubj drawn with repetitions among nsubj if 'jacknife' the population is divided into ngroups disjoint equally-sized subgroups if 'default', 'bootstrap' is used when nsubj < 10 * ngroups otherwise jacknife is used Returns ------- samples, a list of ngroups array that represent the subsets. fixme : this should allow variable bootstrap, i.e. 
draw ngroups of groupsize among nsubj """ if split_method == 'default': if nsubj > 10 * ngroups: samples = split_group(nsubj, ngroups) else: samples = bootstrap_group(nsubj, ngroups) elif split_method == 'bootstrap': samples = bootstrap_group(nsubj, ngroups) elif split_method == '': samples = split_group(nsubj, ngroups) else: raise ValueError('unknown splitting method') return samples def map_reproducibility(data, vardata, domain, ngroups, method='crfx', swap=False, verbose=0, **kwargs): """ Return a reproducibility map for the given method Parameters ---------- data: array of shape (nvox,nsubj) the input data from which everything is computed vardata: array of the same size the corresponding variance information domain: referential- and domain-defining image ngroups (int): the size of each subrgoup to be studied threshold (float): binarization threshold (makes sense only if method==rfx) method='crfx', string to be chosen among 'crfx', 'cmfx', 'cffx' inference method under study verbose=0 : verbosity mode Returns ------- rmap: array of shape(nvox) the reproducibility map """ nsubj = data.shape[1] nvox = data.shape[0] samples = draw_samples(nsubj, ngroups) rmap = np.zeros(nvox) for i in range(ngroups): x = data[:, samples[i]] if swap: # randomly swap the sign of x x *= (2 * (np.random.rand(len(samples[i])) > 0.5) - 1) if method is not 'crfx': vx = vardata[:, samples[i]] csize = kwargs['csize'] threshold = kwargs['threshold'] # compute the statistical maps according to the method you like if method == 'crfx': stat_map = ttest(x) elif method == 'cffx': stat_map = fttest(x, vx) elif method == 'cmfx': stat_map = mfx_ttest(x, vx) elif method == 'cjt': # if kwargs.has_key('k'): if 'k' in kwargs: k = kwargs['k'] else: k = nsubj / 2 stat_map = conjunction(x, vx, k) else: raise ValueError('unknown method') # add the binarized map to a reproducibility map rmap += cluster_threshold(stat_map, domain, threshold, csize) > 0 return rmap def peak_reproducibility(data, vardata, domain, ngroups, sigma, method='crfx', swap=False, verbose=0, **kwargs): """ Return a measure of cluster-level reproducibility of activation patterns (i.e. how far clusters are from each other) Parameters ---------- data: array of shape (nvox,nsubj) the input data from which everything is computed vardata: array of shape (nvox,nsubj) the variance of the data that is also available domain: refenrtial- and domain-defining image ngroups (int), Number of subbgroups to be drawn sigma: float, parameter that encodes how far far is threshold: float, binarization threshold method: string to be chosen among 'crfx', 'cmfx' or 'cffx', inference method under study swap = False: if True, a random sign swap of the data is performed This is used to simulate a null hypothesis on the data. 
verbose=0 : verbosity mode Returns ------- score (float): the desired cluster-level reproducibility index """ tiny = 1.e-15 nsubj = data.shape[1] samples = draw_samples(nsubj, ngroups) all_pos = [] # compute the positions in the different subgroups for i in range(ngroups): x = data[:, samples[i]] if swap: # apply a random sign swap to x x *= (2 * (np.random.rand(len(samples[i])) > 0.5) - 1) if method is not 'crfx': vx = vardata[:, samples[i]] if method is not 'bsa': threshold = kwargs['threshold'] if method == 'crfx': stat_map = ttest(x) elif method == 'cmfx': stat_map = mfx_ttest(x, vx) elif method == 'cffx': stat_map = fttest(x, vx) elif method == 'cjt': if 'k' in kwargs: k = kwargs['k'] else: k = nsubj / 2 stat_map = conjunction(x, vx, k) pos = get_peak_position_from_thresholded_map( stat_map, domain, threshold) all_pos.append(pos) else: # method='bsa' is a special case tx = x / (tiny + np.sqrt(vx)) afname = kwargs['afname'] theta = kwargs['theta'] dmax = kwargs['dmax'] ths = kwargs['ths'] thq = kwargs['thq'] smin = kwargs['smin'] niter = kwargs['niter'] afname = afname + '_%02d_%04d.pic' % (niter, i) pos = coord_bsa(domain, tx, theta, dmax, ths, thq, smin, afname) all_pos.append(pos) # derive a kernel-based goodness measure from the pairwise comparison # of sets of positions score = 0 for i in range(ngroups): for j in range(i): score += statistics_from_position(all_pos[i], all_pos[j], sigma) score += statistics_from_position(all_pos[j], all_pos[i], sigma) score /= (ngroups * (ngroups - 1)) return score def cluster_reproducibility(data, vardata, domain, ngroups, sigma, method='crfx', swap=False, verbose=0, **kwargs): """Returns a measure of cluster-level reproducibility of activation patterns (i.e. how far clusters are from each other) Parameters ---------- data: array of shape (nvox,nsubj) the input data from which everything is computed vardata: array of shape (nvox,nsubj) the variance of the data that is also available domain: referential- and domain- defining image instance ngroups (int), Number of subbgroups to be drawn sigma (float): parameter that encodes how far far is threshold (float): binarization threshold method='crfx', string to be chosen among 'crfx', 'cmfx' or 'cffx' inference method under study swap = False: if True, a random sign swap of the data is performed This is used to simulate a null hypothesis on the data. 
verbose=0 : verbosity mode Returns ------- score (float): the desired cluster-level reproducibility index """ tiny = 1.e-15 nsubj = data.shape[1] samples = draw_samples(nsubj, ngroups) all_pos = [] # compute the positions in the different subgroups for i in range(ngroups): x = data[:, samples[i]] if swap: # apply a random sign swap to x x *= (2 * (np.random.rand(len(samples[i])) > 0.5) - 1) if method != 'crfx': vx = vardata[:, samples[i]] if method != 'bsa': csize = kwargs['csize'] threshold = kwargs['threshold'] if method == 'crfx': stat_map = ttest(x) elif method == 'cmfx': stat_map = mfx_ttest(x, vx) elif method == 'cffx': stat_map = fttest(x, vx) elif method == 'cjt': if 'k' in kwargs: k = kwargs['k'] else: k = nsubj // 2 stat_map = conjunction(x, vx, k) pos = get_cluster_position_from_thresholded_map(stat_map, domain, threshold, csize) all_pos.append(pos) else: # method='bsa' is a special case tx = x / (tiny + np.sqrt(vx)) afname = kwargs['afname'] theta = kwargs['theta'] dmax = kwargs['dmax'] ths = kwargs['ths'] thq = kwargs['thq'] smin = kwargs['smin'] niter = kwargs['niter'] afname = afname + '_%02d_%04d.pic' % (niter, i) pos = coord_bsa(domain, tx, theta, dmax, ths, thq, smin, afname) all_pos.append(pos) # derive a kernel-based goodness measure from the pairwise comparison # of sets of positions score = 0 for i in range(ngroups): for j in range(i): score += statistics_from_position(all_pos[i], all_pos[j], sigma) score += statistics_from_position(all_pos[j], all_pos[i], sigma) score /= (ngroups * (ngroups - 1)) return score def group_reproducibility_metrics( mask_images, contrast_images, variance_images, thresholds, ngroups, method, cluster_threshold=10, number_of_samples=10, sigma=6., do_clusters=True, do_voxels=True, do_peaks=True, swap=False): """ Main function to perform reproducibility analysis, including nifti1 io Parameters ---------- thresholds: list or 1-d array, the thresholds to be tested Returns ------- cluster_rep_results: dictionary, results of cluster-level reproducibility analysis voxel_rep_results: dictionary, results of voxel-level reproducibility analysis peak_rep_results: dictionary, results of peak-level reproducibility analysis """ from nibabel import load from ..mask import intersect_masks if (len(variance_images) == 0) and (method != 'crfx'): raise ValueError('Variance images are necessary') nsubj = len(contrast_images) # compute the group mask affine = load(mask_images[0]).get_affine() mask = intersect_masks(mask_images, threshold=0) > 0 domain = grid_domain_from_binary_array(mask, affine) # read the data group_con = [] group_var = [] for s in range(nsubj): group_con.append(load(contrast_images[s]).get_data()[mask]) if len(variance_images) > 0: group_var.append(load(variance_images[s]).get_data()[mask]) group_con = np.squeeze(np.array(group_con)).T group_con[np.isnan(group_con)] = 0 if len(variance_images) > 0: group_var = np.squeeze(np.array(group_var)).T group_var[np.isnan(group_var)] = 0 group_var = np.maximum(group_var, 1.e-15) # perform the analysis voxel_rep_results = {} cluster_rep_results = {} peak_rep_results = {} for ng in ngroups: if do_voxels: voxel_rep_results.update({ng: {}}) if do_clusters: cluster_rep_results.update({ng: {}}) if do_peaks: peak_rep_results.update({ng: {}}) for th in thresholds: kappa = [] cls = [] pk = [] kwargs = {'threshold': th, 'csize': cluster_threshold} for i in range(number_of_samples): if do_voxels: kappa.append(voxel_reproducibility( group_con, group_var, domain, ng, method, swap, **kwargs)) if do_clusters:
cls.append(cluster_reproducibility( group_con, group_var, domain, ng, sigma, method, swap, **kwargs)) if do_peaks: pk.append(peak_reproducibility( group_con, group_var, domain, ng, sigma, method, swap, **kwargs)) if do_voxels: voxel_rep_results[ng].update({th: np.array(kappa)}) if do_clusters: cluster_rep_results[ng].update({th: np.array(cls)}) if do_peaks: peak_rep_results[ng].update({th: np.array(pk)}) return voxel_rep_results, cluster_rep_results, peak_rep_results # ------------------------------------------------------- # ---------- BSA stuff ---------------------------------- # ------------------------------------------------------- def coord_bsa(domain, betas, theta=3., dmax=5., ths=0, thq=0.5, smin=0, afname=None): """ main function for performing bsa on a dataset where bsa = nipy.labs.spatial_models.bayesian_structural_analysis Parameters ---------- domain: image instance, referential- and domain-defining image betas: array of shape (nbnodes, subjects), the multi-subject statistical maps theta: float, optional first level threshold dmax: float>0, optional expected cluster std in the common space in units of coord ths: int (>=0), optional representativity threshold thq: float, optional, posterior significance threshold should be in [0,1] smin: int, optional, minimal size of the regions to validate them afname: string, optional path where intermediate results can be pickled Returns ------- afcoord array of shape(number_of_regions,3): coordinates of the found landmark regions """ from ..spatial_models.bayesian_structural_analysis import compute_BSA_quick crmap, AF, BF, p = compute_BSA_quick( domain, betas, dmax, thq, smin, ths, theta, verbose=0) if AF is None: return None if afname is not None: import pickle pickle.dump(AF, open(afname, 'wb')) afcoord = AF.discrete_to_roi_features('position') return afcoord nipy-0.3.0/nipy/labs/utils/routines.c000066400000000000000000011205211210344137400175460ustar00rootroot00000000000000/* Generated by Cython 0.17.4 on Sat Jan 12 17:27:40 2013 */ #define PY_SSIZE_T_CLEAN #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02040000 #error Cython requires Python 2.4+. #else #include <stddef.h> /* For offsetof */ #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #endif #if PY_VERSION_HEX < 0x02050000 typedef int Py_ssize_t; #define PY_SSIZE_T_MAX INT_MAX #define PY_SSIZE_T_MIN INT_MIN #define PY_FORMAT_SIZE_T "" #define CYTHON_FORMAT_SSIZE_T "" #define PyInt_FromSsize_t(z) PyInt_FromLong(z) #define PyInt_AsSsize_t(o) __Pyx_PyInt_AsInt(o) #define PyNumber_Index(o) ((PyNumber_Check(o) && !PyFloat_Check(o)) ?
PyNumber_Int(o) : \ (PyErr_Format(PyExc_TypeError, \ "expected index value, got %.200s", Py_TYPE(o)->tp_name), \ (PyObject*)0)) #define __Pyx_PyIndex_Check(o) (PyNumber_Check(o) && !PyFloat_Check(o) && \ !PyComplex_Check(o)) #define PyIndex_Check __Pyx_PyIndex_Check #define PyErr_WarnEx(category, message, stacklevel) PyErr_Warn(category, message) #define __PYX_BUILD_PY_SSIZE_T "i" #else #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #define __Pyx_PyIndex_Check PyIndex_Check #endif #if PY_VERSION_HEX < 0x02060000 #define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt) #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type) #define Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size) #define PyVarObject_HEAD_INIT(type, size) \ PyObject_HEAD_INIT(type) size, #define PyType_Modified(t) typedef struct { void *buf; PyObject *obj; Py_ssize_t len; Py_ssize_t itemsize; int readonly; int ndim; char *format; Py_ssize_t *shape; Py_ssize_t *strides; Py_ssize_t *suboffsets; void *internal; } Py_buffer; #define PyBUF_SIMPLE 0 #define PyBUF_WRITABLE 0x0001 #define PyBUF_FORMAT 0x0004 #define PyBUF_ND 0x0008 #define PyBUF_STRIDES (0x0010 | PyBUF_ND) #define PyBUF_C_CONTIGUOUS (0x0020 | PyBUF_STRIDES) #define PyBUF_F_CONTIGUOUS (0x0040 | PyBUF_STRIDES) #define PyBUF_ANY_CONTIGUOUS (0x0080 | PyBUF_STRIDES) #define PyBUF_INDIRECT (0x0100 | PyBUF_STRIDES) #define PyBUF_RECORDS (PyBUF_STRIDES | PyBUF_FORMAT | PyBUF_WRITABLE) #define PyBUF_FULL (PyBUF_INDIRECT | PyBUF_FORMAT | PyBUF_WRITABLE) typedef int (*getbufferproc)(PyObject *, Py_buffer *, int); typedef void (*releasebufferproc)(PyObject *, Py_buffer *); #endif #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \ PyCode_New(a, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #endif #if PY_MAJOR_VERSION < 3 && PY_MINOR_VERSION < 6 #define PyUnicode_FromString(s) PyUnicode_Decode(s, strlen(s), "UTF-8", "strict") #endif #if PY_MAJOR_VERSION >= 3 #define Py_TPFLAGS_CHECKTYPES 0 #define Py_TPFLAGS_HAVE_INDEX 0 #endif #if (PY_VERSION_HEX < 0x02060000) || (PY_MAJOR_VERSION >= 3) #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ? 
\ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #else #define CYTHON_PEP393_ENABLED 0 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_READ(k, d, i) ((k=k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #endif #if PY_VERSION_HEX < 0x02060000 #define PyBytesObject PyStringObject #define PyBytes_Type PyString_Type #define PyBytes_Check PyString_Check #define PyBytes_CheckExact PyString_CheckExact #define PyBytes_FromString PyString_FromString #define PyBytes_FromStringAndSize PyString_FromStringAndSize #define PyBytes_FromFormat PyString_FromFormat #define PyBytes_DecodeEscape PyString_DecodeEscape #define PyBytes_AsString PyString_AsString #define PyBytes_AsStringAndSize PyString_AsStringAndSize #define PyBytes_Size PyString_Size #define PyBytes_AS_STRING PyString_AS_STRING #define PyBytes_GET_SIZE PyString_GET_SIZE #define PyBytes_Repr PyString_Repr #define PyBytes_Concat PyString_Concat #define PyBytes_ConcatAndDel PyString_ConcatAndDel #endif #if PY_VERSION_HEX < 0x02060000 #define PySet_Check(obj) PyObject_TypeCheck(obj, &PySet_Type) #define PyFrozenSet_Check(obj) PyObject_TypeCheck(obj, &PyFrozenSet_Type) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_VERSION_HEX < 0x03020000 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if (PY_MAJOR_VERSION < 3) || (PY_VERSION_HEX >= 0x03010300) #define __Pyx_PySequence_GetSlice(obj, a, b) PySequence_GetSlice(obj, a, b) #define __Pyx_PySequence_SetSlice(obj, a, b, value) PySequence_SetSlice(obj, a, b, value) #define __Pyx_PySequence_DelSlice(obj, a, b) PySequence_DelSlice(obj, a, b) #else #define __Pyx_PySequence_GetSlice(obj, a, b) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), (PyObject*)0) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_GetSlice(obj, a, b)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object is unsliceable", (obj)->ob_type->tp_name), (PyObject*)0))) #define __Pyx_PySequence_SetSlice(obj, a, b, value) (unlikely(!(obj)) ? 
\ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_SetSlice(obj, a, b, value)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice assignment", (obj)->ob_type->tp_name), -1))) #define __Pyx_PySequence_DelSlice(obj, a, b) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_DelSlice(obj, a, b)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice deletion", (obj)->ob_type->tp_name), -1))) #endif #if PY_MAJOR_VERSION >= 3 #define PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func)) #endif #if PY_VERSION_HEX < 0x02050000 #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),((char *)(n))) #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),((char *)(n)),(a)) #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),((char *)(n))) #else #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),(n)) #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),(n),(a)) #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),(n)) #endif #if PY_VERSION_HEX < 0x02050000 #define __Pyx_NAMESTR(n) ((char *)(n)) #define __Pyx_DOCSTR(n) ((char *)(n)) #else #define __Pyx_NAMESTR(n) (n) #define __Pyx_DOCSTR(n) (n) #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include #define __PYX_HAVE__nipy__labs__utils__routines #define __PYX_HAVE_API__nipy__labs__utils__routines #include "stdio.h" #include "stdlib.h" #include "numpy/arrayobject.h" #include "numpy/ufuncobject.h" #include "fff_base.h" #include "fff_vector.h" #include "fff_matrix.h" #include "fff_array.h" #include "fffpy.h" #include "fff_gen_stats.h" #include "fff_specfun.h" #include "fff_lapack.h" #ifdef _OPENMP #include #endif /* _OPENMP */ #ifdef PYREX_WITHOUT_ASSERTIONS #define CYTHON_WITHOUT_ASSERTIONS #endif /* inline attribute */ #ifndef CYTHON_INLINE #if defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif /* unused attribute */ #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif typedef struct {PyObject **p; char *s; const long n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; /*proto*/ /* Type Conversion Predeclarations */ #define __Pyx_PyBytes_FromUString(s) PyBytes_FromString((char*)s) #define __Pyx_PyBytes_AsUString(s) ((unsigned char*) PyBytes_AsString(s)) #define __Pyx_Owned_Py_None(b) (Py_INCREF(Py_None), 
Py_None) #define __Pyx_PyBool_FromLong(b) ((b) ? (Py_INCREF(Py_True), Py_True) : (Py_INCREF(Py_False), Py_False)) static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x); static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject*); #if CYTHON_COMPILING_IN_CPYTHON #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #ifdef __GNUC__ /* Test for GCC > 2.95 */ #if __GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* __GNUC__ > 2 ... */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ > 2 ... */ #else /* __GNUC__ */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static PyObject *__pyx_m; static PyObject *__pyx_b; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= __FILE__; static const char *__pyx_filename; #if !defined(CYTHON_CCOMPLEX) #if defined(__cplusplus) #define CYTHON_CCOMPLEX 1 #elif defined(_Complex_I) #define CYTHON_CCOMPLEX 1 #else #define CYTHON_CCOMPLEX 0 #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus #include #else #include #endif #endif #if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__) #undef _Complex_I #define _Complex_I 1.0fj #endif static const char *__pyx_f[] = { "routines.pyx", "numpy.pxd", "type.pxd", }; /* "numpy.pxd":723 * # in Cython to enable them only on the right systems. 
* * ctypedef npy_int8 int8_t # <<<<<<<<<<<<<< * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t */ typedef npy_int8 __pyx_t_5numpy_int8_t; /* "numpy.pxd":724 * * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t # <<<<<<<<<<<<<< * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t */ typedef npy_int16 __pyx_t_5numpy_int16_t; /* "numpy.pxd":725 * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t # <<<<<<<<<<<<<< * ctypedef npy_int64 int64_t * #ctypedef npy_int96 int96_t */ typedef npy_int32 __pyx_t_5numpy_int32_t; /* "numpy.pxd":726 * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t # <<<<<<<<<<<<<< * #ctypedef npy_int96 int96_t * #ctypedef npy_int128 int128_t */ typedef npy_int64 __pyx_t_5numpy_int64_t; /* "numpy.pxd":730 * #ctypedef npy_int128 int128_t * * ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<< * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t */ typedef npy_uint8 __pyx_t_5numpy_uint8_t; /* "numpy.pxd":731 * * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<< * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t */ typedef npy_uint16 __pyx_t_5numpy_uint16_t; /* "numpy.pxd":732 * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<< * ctypedef npy_uint64 uint64_t * #ctypedef npy_uint96 uint96_t */ typedef npy_uint32 __pyx_t_5numpy_uint32_t; /* "numpy.pxd":733 * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<< * #ctypedef npy_uint96 uint96_t * #ctypedef npy_uint128 uint128_t */ typedef npy_uint64 __pyx_t_5numpy_uint64_t; /* "numpy.pxd":737 * #ctypedef npy_uint128 uint128_t * * ctypedef npy_float32 float32_t # <<<<<<<<<<<<<< * ctypedef npy_float64 float64_t * #ctypedef npy_float80 float80_t */ typedef npy_float32 __pyx_t_5numpy_float32_t; /* "numpy.pxd":738 * * ctypedef npy_float32 float32_t * ctypedef npy_float64 float64_t # <<<<<<<<<<<<<< * #ctypedef npy_float80 float80_t * #ctypedef npy_float128 float128_t */ typedef npy_float64 __pyx_t_5numpy_float64_t; /* "numpy.pxd":747 * # The int types are mapped a bit surprising -- * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t # <<<<<<<<<<<<<< * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t */ typedef npy_long __pyx_t_5numpy_int_t; /* "numpy.pxd":748 * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t * ctypedef npy_longlong long_t # <<<<<<<<<<<<<< * ctypedef npy_longlong longlong_t * */ typedef npy_longlong __pyx_t_5numpy_long_t; /* "numpy.pxd":749 * ctypedef npy_long int_t * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<< * * ctypedef npy_ulong uint_t */ typedef npy_longlong __pyx_t_5numpy_longlong_t; /* "numpy.pxd":751 * ctypedef npy_longlong longlong_t * * ctypedef npy_ulong uint_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t */ typedef npy_ulong __pyx_t_5numpy_uint_t; /* "numpy.pxd":752 * * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulonglong_t * */ typedef npy_ulonglong __pyx_t_5numpy_ulong_t; /* "numpy.pxd":753 * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<< * * ctypedef npy_intp intp_t */ typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t; /* "numpy.pxd":755 * ctypedef npy_ulonglong ulonglong_t * * ctypedef npy_intp intp_t # 
<<<<<<<<<<<<<< * ctypedef npy_uintp uintp_t * */ typedef npy_intp __pyx_t_5numpy_intp_t; /* "numpy.pxd":756 * * ctypedef npy_intp intp_t * ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<< * * ctypedef npy_double float_t */ typedef npy_uintp __pyx_t_5numpy_uintp_t; /* "numpy.pxd":758 * ctypedef npy_uintp uintp_t * * ctypedef npy_double float_t # <<<<<<<<<<<<<< * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t */ typedef npy_double __pyx_t_5numpy_float_t; /* "numpy.pxd":759 * * ctypedef npy_double float_t * ctypedef npy_double double_t # <<<<<<<<<<<<<< * ctypedef npy_longdouble longdouble_t * */ typedef npy_double __pyx_t_5numpy_double_t; /* "numpy.pxd":760 * ctypedef npy_double float_t * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cfloat cfloat_t */ typedef npy_longdouble __pyx_t_5numpy_longdouble_t; /* "fff.pxd":9 * * # Redefine size_t * ctypedef unsigned long int size_t # <<<<<<<<<<<<<< * * */ typedef unsigned long __pyx_t_3fff_size_t; #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< float > __pyx_t_float_complex; #else typedef float _Complex __pyx_t_float_complex; #endif #else typedef struct { float real, imag; } __pyx_t_float_complex; #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< double > __pyx_t_double_complex; #else typedef double _Complex __pyx_t_double_complex; #endif #else typedef struct { double real, imag; } __pyx_t_double_complex; #endif /*--- Type declarations ---*/ /* "numpy.pxd":762 * ctypedef npy_longdouble longdouble_t * * ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<< * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t */ typedef npy_cfloat __pyx_t_5numpy_cfloat_t; /* "numpy.pxd":763 * * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<< * ctypedef npy_clongdouble clongdouble_t * */ typedef npy_cdouble __pyx_t_5numpy_cdouble_t; /* "numpy.pxd":764 * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cdouble complex_t */ typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t; /* "numpy.pxd":766 * ctypedef npy_clongdouble clongdouble_t * * ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew1(a): */ typedef npy_cdouble __pyx_t_5numpy_complex_t; #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); /*proto*/ #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define __Pyx_RefNannySetupContext(name, acquire_gil) \ if (acquire_gil) { \ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \ PyGILState_Release(__pyx_gilstate_save); \ } else { \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil) \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext() \ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define 
__Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif /* CYTHON_REFNANNY */ #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /*proto*/ static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /*proto*/ static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[], \ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, \ const char* function_name); /*proto*/ #define __Pyx_SetItemInt(o, i, v, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? \ __Pyx_SetItemInt_Fast(o, i, v) : \ __Pyx_SetItemInt_Generic(o, to_py_func(i), v)) static CYTHON_INLINE int __Pyx_SetItemInt_Generic(PyObject *o, PyObject *j, PyObject *v) { int r; if (!j) return -1; r = PyObject_SetItem(o, j, v); Py_DECREF(j); return r; } static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObject *v) { #if CYTHON_COMPILING_IN_CPYTHON if (PyList_CheckExact(o)) { Py_ssize_t n = (likely(i >= 0)) ? i : i + PyList_GET_SIZE(o); if (likely((n >= 0) & (n < PyList_GET_SIZE(o)))) { PyObject* old = PyList_GET_ITEM(o, n); Py_INCREF(v); PyList_SET_ITEM(o, n, v); Py_DECREF(old); return 1; } } else { /* inlined PySequence_SetItem() */ PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; if (likely(m && m->sq_ass_item)) { if (unlikely(i < 0) && likely(m->sq_length)) { Py_ssize_t l = m->sq_length(o); if (unlikely(l < 0)) return -1; i += l; } return m->sq_ass_item(o, i, v); } } #else #if CYTHON_COMPILING_IN_PYPY if (PySequence_Check(o) && !PyDict_Check(o)) { #else if (PySequence_Check(o)) { #endif return PySequence_SetItem(o, i, v); } #endif return __Pyx_SetItemInt_Generic(o, PyInt_FromSsize_t(i), v); } static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name); /*proto*/ static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { PyObject *r; if (!j) return NULL; r = PyObject_GetItem(o, j); Py_DECREF(j); return r; } #define __Pyx_GetItemInt_List(o, i, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? 
\ __Pyx_GetItemInt_List_Fast(o, i) : \ __Pyx_GetItemInt_Generic(o, to_py_func(i))) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i) { #if CYTHON_COMPILING_IN_CPYTHON if (likely((0 <= i) & (i < PyList_GET_SIZE(o)))) { PyObject *r = PyList_GET_ITEM(o, i); Py_INCREF(r); return r; } else if ((-PyList_GET_SIZE(o) <= i) & (i < 0)) { PyObject *r = PyList_GET_ITEM(o, PyList_GET_SIZE(o) + i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } #define __Pyx_GetItemInt_Tuple(o, i, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? \ __Pyx_GetItemInt_Tuple_Fast(o, i) : \ __Pyx_GetItemInt_Generic(o, to_py_func(i))) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i) { #if CYTHON_COMPILING_IN_CPYTHON if (likely((0 <= i) & (i < PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, i); Py_INCREF(r); return r; } else if ((-PyTuple_GET_SIZE(o) <= i) & (i < 0)) { PyObject *r = PyTuple_GET_ITEM(o, PyTuple_GET_SIZE(o) + i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } #define __Pyx_GetItemInt(o, i, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? \ __Pyx_GetItemInt_Fast(o, i) : \ __Pyx_GetItemInt_Generic(o, to_py_func(i))) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i) { #if CYTHON_COMPILING_IN_CPYTHON if (PyList_CheckExact(o)) { Py_ssize_t n = (likely(i >= 0)) ? i : i + PyList_GET_SIZE(o); if (likely((n >= 0) & (n < PyList_GET_SIZE(o)))) { PyObject *r = PyList_GET_ITEM(o, n); Py_INCREF(r); return r; } } else if (PyTuple_CheckExact(o)) { Py_ssize_t n = (likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o); if (likely((n >= 0) & (n < PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, n); Py_INCREF(r); return r; } } else { /* inlined PySequence_GetItem() */ PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; if (likely(m && m->sq_item)) { if (unlikely(i < 0) && likely(m->sq_length)) { Py_ssize_t l = m->sq_length(o); if (unlikely(l < 0)) return NULL; i += l; } return m->sq_item(o, i); } } #else if (PySequence_Check(o)) { return PySequence_GetItem(o, i); } #endif return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); } static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb); /*proto*/ static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb); /*proto*/ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /*proto*/ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); static CYTHON_INLINE int __Pyx_IterFinish(void); /*proto*/ static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected); /*proto*/ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /*proto*/ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level); /*proto*/ #if CYTHON_CCOMPLEX #ifdef __cplusplus #define __Pyx_CREAL(z) ((z).real()) #define __Pyx_CIMAG(z) ((z).imag()) #else #define __Pyx_CREAL(z) (__real__(z)) #define __Pyx_CIMAG(z) (__imag__(z)) #endif #else #define __Pyx_CREAL(z) ((z).real) #define __Pyx_CIMAG(z) ((z).imag) #endif #if defined(_WIN32) && defined(__cplusplus) && CYTHON_CCOMPLEX #define __Pyx_SET_CREAL(z,x) ((z).real(x)) #define 
__Pyx_SET_CIMAG(z,y) ((z).imag(y)) #else #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) #endif static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float); #if CYTHON_CCOMPLEX #define __Pyx_c_eqf(a, b) ((a)==(b)) #define __Pyx_c_sumf(a, b) ((a)+(b)) #define __Pyx_c_difff(a, b) ((a)-(b)) #define __Pyx_c_prodf(a, b) ((a)*(b)) #define __Pyx_c_quotf(a, b) ((a)/(b)) #define __Pyx_c_negf(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zerof(z) ((z)==(float)0) #define __Pyx_c_conjf(z) (::std::conj(z)) #if 1 #define __Pyx_c_absf(z) (::std::abs(z)) #define __Pyx_c_powf(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zerof(z) ((z)==0) #define __Pyx_c_conjf(z) (conjf(z)) #if 1 #define __Pyx_c_absf(z) (cabsf(z)) #define __Pyx_c_powf(a, b) (cpowf(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex); static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex); #if 1 static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex, __pyx_t_float_complex); #endif #endif static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double); #if CYTHON_CCOMPLEX #define __Pyx_c_eq(a, b) ((a)==(b)) #define __Pyx_c_sum(a, b) ((a)+(b)) #define __Pyx_c_diff(a, b) ((a)-(b)) #define __Pyx_c_prod(a, b) ((a)*(b)) #define __Pyx_c_quot(a, b) ((a)/(b)) #define __Pyx_c_neg(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zero(z) ((z)==(double)0) #define __Pyx_c_conj(z) (::std::conj(z)) #if 1 #define __Pyx_c_abs(z) (::std::abs(z)) #define __Pyx_c_pow(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zero(z) ((z)==0) #define __Pyx_c_conj(z) (conj(z)) #if 1 #define __Pyx_c_abs(z) (cabs(z)) #define __Pyx_c_pow(a, b) (cpow(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex); static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex); #if 1 static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex, __pyx_t_double_complex); #endif #endif static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject *); static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject 
*); static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject *); static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject *); static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject *); static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject *); static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject *); static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject *); static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject *); static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject *); static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject *); static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject *); static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject *); static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject *); static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject *); static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject *); static int __Pyx_check_binary_version(void); #if !defined(__Pyx_PyIdentifier_FromString) #if PY_MAJOR_VERSION < 3 #define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s) #else #define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s) #endif #endif static PyObject *__Pyx_ImportModule(const char *name); /*proto*/ static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict); /*proto*/ typedef struct { int code_line; PyCodeObject* code_object; } __Pyx_CodeObjectCacheEntry; struct __Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); /*proto*/ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/ /* Module declarations from 'cpython.buffer' */ /* Module declarations from 'cpython.ref' */ /* Module declarations from 'libc.stdio' */ /* Module declarations from 'cpython.object' */ /* Module declarations from '__builtin__' */ /* Module declarations from 'cpython.type' */ static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0; /* Module declarations from 'libc.stdlib' */ /* Module declarations from 'numpy' */ /* Module declarations from 'numpy' */ static PyTypeObject *__pyx_ptype_5numpy_dtype = 0; static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0; static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0; static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0; static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0; static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/ /* Module declarations from 'fff' */ /* Module declarations from 'nipy.labs.utils.routines' */ #define __Pyx_MODULE_NAME "nipy.labs.utils.routines" int __pyx_module_is_main_nipy__labs__utils__routines = 0; /* Implementation of 'nipy.labs.utils.routines' */ static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_range; static PyObject *__pyx_builtin_RuntimeError; static PyObject *__pyx_pf_4nipy_4labs_5utils_8routines_quantile(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_X, double __pyx_v_ratio, int __pyx_v_interp, int __pyx_v_axis); /* proto */ static PyObject 
*__pyx_pf_4nipy_4labs_5utils_8routines_2median(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_x, PyObject *__pyx_v_axis); /* proto */ static PyObject *__pyx_pf_4nipy_4labs_5utils_8routines_4mahalanobis(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_X, PyObject *__pyx_v_VX); /* proto */ static PyObject *__pyx_pf_4nipy_4labs_5utils_8routines_6svd(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_X); /* proto */ static PyObject *__pyx_pf_4nipy_4labs_5utils_8routines_8permutations(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_n, unsigned int __pyx_v_m, unsigned long __pyx_v_magic); /* proto */ static PyObject *__pyx_pf_4nipy_4labs_5utils_8routines_10combinations(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_k, unsigned int __pyx_v_n, unsigned int __pyx_v_m, unsigned long __pyx_v_magic); /* proto */ static PyObject *__pyx_pf_4nipy_4labs_5utils_8routines_12gamln(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_x); /* proto */ static PyObject *__pyx_pf_4nipy_4labs_5utils_8routines_14psi(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_x); /* proto */ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */ static char __pyx_k_1[] = "ndarray is not C contiguous"; static char __pyx_k_3[] = "ndarray is not Fortran contiguous"; static char __pyx_k_5[] = "Non-native byte order not supported"; static char __pyx_k_7[] = "unknown dtype code in numpy.pxd (%d)"; static char __pyx_k_8[] = "Format string allocated too short, see comment in numpy.pxd"; static char __pyx_k_11[] = "Format string allocated too short."; static char __pyx_k_13[] = "\nMiscellaneous fff routines.\n\nAuthor: Alexis Roche, 2008.\n"; static char __pyx_k_14[] = "0.1"; static char __pyx_k_17[] = "/Users/mb312/dev_trees/nipy/nipy/labs/utils/routines.pyx"; static char __pyx_k_18[] = "nipy.labs.utils.routines"; static char __pyx_k__B[] = "B"; static char __pyx_k__C[] = "C"; static char __pyx_k__H[] = "H"; static char __pyx_k__I[] = "I"; static char __pyx_k__L[] = "L"; static char __pyx_k__O[] = "O"; static char __pyx_k__P[] = "P"; static char __pyx_k__Q[] = "Q"; static char __pyx_k__S[] = "S"; static char __pyx_k__U[] = "U"; static char __pyx_k__X[] = "X"; static char __pyx_k__Y[] = "Y"; static char __pyx_k__b[] = "b"; static char __pyx_k__d[] = "d"; static char __pyx_k__f[] = "f"; static char __pyx_k__g[] = "g"; static char __pyx_k__h[] = "h"; static char __pyx_k__i[] = "i"; static char __pyx_k__k[] = "k"; static char __pyx_k__l[] = "l"; static char __pyx_k__m[] = "m"; static char __pyx_k__n[] = "n"; static char __pyx_k__p[] = "p"; static char __pyx_k__q[] = "q"; static char __pyx_k__s[] = "s"; static char __pyx_k__x[] = "x"; static char __pyx_k__y[] = "y"; static char __pyx_k__D2[] = "D2"; static char __pyx_k__Sx[] = "Sx"; static char __pyx_k__VX[] = "VX"; static char __pyx_k__Vt[] = "Vt"; static char __pyx_k__Zd[] = "Zd"; static char __pyx_k__Zf[] = "Zf"; static char __pyx_k__Zg[] = "Zg"; static char __pyx_k__d2[] = "d2"; static char __pyx_k__np[] = "np"; static char __pyx_k__pi[] = "pi"; static char __pyx_k__vx[] = "vx"; static char __pyx_k__Aux[] = "Aux"; static char __pyx_k__dim[] = "dim"; static char __pyx_k__psi[] = "psi"; static char __pyx_k__svd[] = "svd"; static char __pyx_k__axis[] = "axis"; static char __pyx_k__dims[] = "dims"; static char __pyx_k__dmax[] = "dmax"; static char 
__pyx_k__dmin[] = "dmin"; static char __pyx_k__info[] = "info"; static char __pyx_k__work[] = "work"; static char __pyx_k__gamln[] = "gamln"; static char __pyx_k__iwork[] = "iwork"; static char __pyx_k__lwork[] = "lwork"; static char __pyx_k__magic[] = "magic"; static char __pyx_k__multi[] = "multi"; static char __pyx_k__numpy[] = "numpy"; static char __pyx_k__range[] = "range"; static char __pyx_k__ratio[] = "ratio"; static char __pyx_k__s_tmp[] = "s_tmp"; static char __pyx_k__shape[] = "shape"; static char __pyx_k__x_tmp[] = "x_tmp"; static char __pyx_k__zeros[] = "zeros"; static char __pyx_k__Sx_tmp[] = "Sx_tmp"; static char __pyx_k__X_flat[] = "X_flat"; static char __pyx_k__endims[] = "endims"; static char __pyx_k__interp[] = "interp"; static char __pyx_k__liwork[] = "liwork"; static char __pyx_k__median[] = "median"; static char __pyx_k__vx_tmp[] = "vx_tmp"; static char __pyx_k__x_flat[] = "x_flat"; static char __pyx_k__VX_flat[] = "VX_flat"; static char __pyx_k__pi_view[] = "pi_view"; static char __pyx_k__reshape[] = "reshape"; static char __pyx_k____main__[] = "__main__"; static char __pyx_k____test__[] = "__test__"; static char __pyx_k__quantile[] = "quantile"; static char __pyx_k__ValueError[] = "ValueError"; static char __pyx_k__x_flat_tmp[] = "x_flat_tmp"; static char __pyx_k____version__[] = "__version__"; static char __pyx_k__mahalanobis[] = "mahalanobis"; static char __pyx_k__RuntimeError[] = "RuntimeError"; static char __pyx_k__combinations[] = "combinations"; static char __pyx_k__permutations[] = "permutations"; static PyObject *__pyx_kp_u_1; static PyObject *__pyx_kp_u_11; static PyObject *__pyx_kp_s_14; static PyObject *__pyx_kp_s_17; static PyObject *__pyx_n_s_18; static PyObject *__pyx_kp_u_3; static PyObject *__pyx_kp_u_5; static PyObject *__pyx_kp_u_7; static PyObject *__pyx_kp_u_8; static PyObject *__pyx_n_s__Aux; static PyObject *__pyx_n_s__C; static PyObject *__pyx_n_s__D2; static PyObject *__pyx_n_s__P; static PyObject *__pyx_n_s__RuntimeError; static PyObject *__pyx_n_s__S; static PyObject *__pyx_n_s__Sx; static PyObject *__pyx_n_s__Sx_tmp; static PyObject *__pyx_n_s__U; static PyObject *__pyx_n_s__VX; static PyObject *__pyx_n_s__VX_flat; static PyObject *__pyx_n_s__ValueError; static PyObject *__pyx_n_s__Vt; static PyObject *__pyx_n_s__X; static PyObject *__pyx_n_s__X_flat; static PyObject *__pyx_n_s__Y; static PyObject *__pyx_n_s____main__; static PyObject *__pyx_n_s____test__; static PyObject *__pyx_n_s____version__; static PyObject *__pyx_n_s__axis; static PyObject *__pyx_n_s__combinations; static PyObject *__pyx_n_s__d2; static PyObject *__pyx_n_s__dim; static PyObject *__pyx_n_s__dims; static PyObject *__pyx_n_s__dmax; static PyObject *__pyx_n_s__dmin; static PyObject *__pyx_n_s__endims; static PyObject *__pyx_n_s__gamln; static PyObject *__pyx_n_s__i; static PyObject *__pyx_n_s__info; static PyObject *__pyx_n_s__interp; static PyObject *__pyx_n_s__iwork; static PyObject *__pyx_n_s__k; static PyObject *__pyx_n_s__liwork; static PyObject *__pyx_n_s__lwork; static PyObject *__pyx_n_s__m; static PyObject *__pyx_n_s__magic; static PyObject *__pyx_n_s__mahalanobis; static PyObject *__pyx_n_s__median; static PyObject *__pyx_n_s__multi; static PyObject *__pyx_n_s__n; static PyObject *__pyx_n_s__np; static PyObject *__pyx_n_s__numpy; static PyObject *__pyx_n_s__p; static PyObject *__pyx_n_s__permutations; static PyObject *__pyx_n_s__pi; static PyObject *__pyx_n_s__pi_view; static PyObject *__pyx_n_s__psi; static PyObject *__pyx_n_s__quantile; static PyObject 
*__pyx_n_s__range; static PyObject *__pyx_n_s__ratio; static PyObject *__pyx_n_s__reshape; static PyObject *__pyx_n_s__s; static PyObject *__pyx_n_s__s_tmp; static PyObject *__pyx_n_s__shape; static PyObject *__pyx_n_s__svd; static PyObject *__pyx_n_s__vx; static PyObject *__pyx_n_s__vx_tmp; static PyObject *__pyx_n_s__work; static PyObject *__pyx_n_s__x; static PyObject *__pyx_n_s__x_flat; static PyObject *__pyx_n_s__x_flat_tmp; static PyObject *__pyx_n_s__x_tmp; static PyObject *__pyx_n_s__y; static PyObject *__pyx_n_s__zeros; static PyObject *__pyx_int_0; static PyObject *__pyx_int_1; static PyObject *__pyx_int_15; static PyObject *__pyx_k_tuple_2; static PyObject *__pyx_k_tuple_4; static PyObject *__pyx_k_tuple_6; static PyObject *__pyx_k_tuple_9; static PyObject *__pyx_k_tuple_10; static PyObject *__pyx_k_tuple_12; static PyObject *__pyx_k_tuple_15; static PyObject *__pyx_k_tuple_19; static PyObject *__pyx_k_tuple_21; static PyObject *__pyx_k_tuple_23; static PyObject *__pyx_k_tuple_25; static PyObject *__pyx_k_tuple_27; static PyObject *__pyx_k_tuple_29; static PyObject *__pyx_k_tuple_31; static PyObject *__pyx_k_codeobj_16; static PyObject *__pyx_k_codeobj_20; static PyObject *__pyx_k_codeobj_22; static PyObject *__pyx_k_codeobj_24; static PyObject *__pyx_k_codeobj_26; static PyObject *__pyx_k_codeobj_28; static PyObject *__pyx_k_codeobj_30; static PyObject *__pyx_k_codeobj_32; /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_5utils_8routines_1quantile(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_4labs_5utils_8routines_quantile[] = "\n q = quantile(data, ratio, interp=False, axis=0).\n\n Partial sorting algorithm, very fast!!!\n "; static PyMethodDef __pyx_mdef_4nipy_4labs_5utils_8routines_1quantile = {__Pyx_NAMESTR("quantile"), (PyCFunction)__pyx_pw_4nipy_4labs_5utils_8routines_1quantile, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4nipy_4labs_5utils_8routines_quantile)}; static PyObject *__pyx_pw_4nipy_4labs_5utils_8routines_1quantile(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_X = 0; double __pyx_v_ratio; int __pyx_v_interp; int __pyx_v_axis; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("quantile (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__X,&__pyx_n_s__ratio,&__pyx_n_s__interp,&__pyx_n_s__axis,0}; PyObject* values[4] = {0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__X)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__ratio)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("quantile", 0, 2, 4, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 45; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__interp); if (value) { values[2] = value; kw_args--; } } case 3: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__axis); if (value) { values[3] = value; 
kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "quantile") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 45; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_X = values[0]; __pyx_v_ratio = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_ratio == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 45; __pyx_clineno = __LINE__; goto __pyx_L3_error;} if (values[2]) { __pyx_v_interp = __Pyx_PyInt_AsInt(values[2]); if (unlikely((__pyx_v_interp == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 45; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else { /* "nipy/labs/utils/routines.pyx":45 * # This is faster than scipy.stats.scoreatpercentile due to partial * # sorting * def quantile(X, double ratio, int interp=False, int axis=0): # <<<<<<<<<<<<<< * """ * q = quantile(data, ratio, interp=False, axis=0). */ __pyx_v_interp = ((int)0); } if (values[3]) { __pyx_v_axis = __Pyx_PyInt_AsInt(values[3]); if (unlikely((__pyx_v_axis == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 45; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else { __pyx_v_axis = ((int)0); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("quantile", 0, 2, 4, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 45; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.labs.utils.routines.quantile", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4nipy_4labs_5utils_8routines_quantile(__pyx_self, __pyx_v_X, __pyx_v_ratio, __pyx_v_interp, __pyx_v_axis); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4nipy_4labs_5utils_8routines_quantile(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_X, double __pyx_v_ratio, int __pyx_v_interp, int __pyx_v_axis) { fff_vector *__pyx_v_x; fff_vector *__pyx_v_y; fffpy_multi_iterator *__pyx_v_multi; PyObject *__pyx_v_dims = NULL; PyObject *__pyx_v_Y = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("quantile", 0); /* "nipy/labs/utils/routines.pyx":55 * * # Allocate output array Y * dims = list(X.shape) # <<<<<<<<<<<<<< * dims[axis] = 1 * Y = np.zeros(dims) */ __pyx_t_1 = PyObject_GetAttr(__pyx_v_X, __pyx_n_s__shape); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 55; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 55; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyObject_Call(((PyObject *)((PyObject*)(&PyList_Type))), ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 55; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; __pyx_v_dims = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/utils/routines.pyx":56 * # Allocate output array Y * dims = list(X.shape) * dims[axis] = 1 # <<<<<<<<<<<<<< * Y = np.zeros(dims) * */ if (__Pyx_SetItemInt(((PyObject *)__pyx_v_dims), __pyx_v_axis, __pyx_int_1, sizeof(int), PyInt_FromLong) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/utils/routines.pyx":57 * dims = list(X.shape) * dims[axis] = 1 * Y = np.zeros(dims) # <<<<<<<<<<<<<< * * # Create a new array iterator */ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__zeros); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)__pyx_v_dims)); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_v_dims)); __Pyx_GIVEREF(((PyObject *)__pyx_v_dims)); __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __pyx_v_Y = __pyx_t_3; __pyx_t_3 = 0; /* "nipy/labs/utils/routines.pyx":60 * * # Create a new array iterator * multi = fffpy_multi_iterator_new(2, axis, X, Y) # <<<<<<<<<<<<<< * * # Create vector views on both X and Y */ __pyx_v_multi = fffpy_multi_iterator_new(2, __pyx_v_axis, ((void *)__pyx_v_X), ((void *)__pyx_v_Y)); /* "nipy/labs/utils/routines.pyx":63 * * # Create vector views on both X and Y * x = multi.vector[0] # <<<<<<<<<<<<<< * y = multi.vector[1] * */ __pyx_v_x = (__pyx_v_multi->vector[0]); /* "nipy/labs/utils/routines.pyx":64 * # Create vector views on both X and Y * x = multi.vector[0] * y = multi.vector[1] # <<<<<<<<<<<<<< * * # Loop */ __pyx_v_y = (__pyx_v_multi->vector[1]); /* "nipy/labs/utils/routines.pyx":67 * * # Loop * while(multi.index < multi.size): # <<<<<<<<<<<<<< * y.data[0] = fff_vector_quantile(x, ratio, interp) * fffpy_multi_iterator_update(multi) */ while (1) { __pyx_t_4 = (__pyx_v_multi->index < __pyx_v_multi->size); if (!__pyx_t_4) break; /* "nipy/labs/utils/routines.pyx":68 * # Loop * while(multi.index < multi.size): * y.data[0] = fff_vector_quantile(x, ratio, interp) # <<<<<<<<<<<<<< * fffpy_multi_iterator_update(multi) * */ (__pyx_v_y->data[0]) = fff_vector_quantile(__pyx_v_x, __pyx_v_ratio, __pyx_v_interp); /* "nipy/labs/utils/routines.pyx":69 * while(multi.index < multi.size): * y.data[0] = fff_vector_quantile(x, ratio, interp) * fffpy_multi_iterator_update(multi) # <<<<<<<<<<<<<< * * # Delete local structures */ fffpy_multi_iterator_update(__pyx_v_multi); } /* "nipy/labs/utils/routines.pyx":72 * * # Delete local structures * fffpy_multi_iterator_delete(multi) # <<<<<<<<<<<<<< * return Y * */ fffpy_multi_iterator_delete(__pyx_v_multi); /* "nipy/labs/utils/routines.pyx":73 * # Delete local 
structures * fffpy_multi_iterator_delete(multi) * return Y # <<<<<<<<<<<<<< * * # This is faster than numpy.stats */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_Y); __pyx_r = __pyx_v_Y; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("nipy.labs.utils.routines.quantile", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_dims); __Pyx_XDECREF(__pyx_v_Y); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_5utils_8routines_3median(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_4labs_5utils_8routines_2median[] = "\n median(x, axis=0).\n Equivalent to: quantile(x, ratio=0.5, interp=True, axis=axis).\n "; static PyMethodDef __pyx_mdef_4nipy_4labs_5utils_8routines_3median = {__Pyx_NAMESTR("median"), (PyCFunction)__pyx_pw_4nipy_4labs_5utils_8routines_3median, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4nipy_4labs_5utils_8routines_2median)}; static PyObject *__pyx_pw_4nipy_4labs_5utils_8routines_3median(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_x = 0; PyObject *__pyx_v_axis = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("median (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__x,&__pyx_n_s__axis,0}; PyObject* values[2] = {0,0}; values[1] = ((PyObject *)__pyx_int_0); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__x)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__axis); if (value) { values[1] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "median") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_x = values[0]; __pyx_v_axis = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("median", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.labs.utils.routines.median", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4nipy_4labs_5utils_8routines_2median(__pyx_self, __pyx_v_x, __pyx_v_axis); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/labs/utils/routines.pyx":78 * # due to the underlying algorithm that relies on * # partial sorting as opposed to full sorting. * def median(x, axis=0): # <<<<<<<<<<<<<< * """ * median(x, axis=0). 
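 *
 * Illustrative usage sketch (not part of routines.pyx; assumes the compiled
 * nipy.labs.utils.routines extension is importable and X is a float array):
 *
 *     import numpy as np
 *     from nipy.labs.utils import routines
 *     X = np.random.randn(100, 5)
 *     q75 = routines.quantile(X, ratio=0.75, interp=1, axis=0)  # shape (1, 5)
 *     med = routines.median(X, axis=0)  # equivalent to quantile(X, ratio=0.5, interp=True)
 *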
*/ static PyObject *__pyx_pf_4nipy_4labs_5utils_8routines_2median(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_x, PyObject *__pyx_v_axis) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("median", 0); /* "nipy/labs/utils/routines.pyx":83 * Equivalent to: quantile(x, ratio=0.5, interp=True, axis=axis). * """ * return quantile(x, axis=axis, ratio=0.5, interp=True) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__quantile); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_v_x); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_x); __Pyx_GIVEREF(__pyx_v_x); __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_3)); if (PyDict_SetItem(__pyx_t_3, ((PyObject *)__pyx_n_s__axis), __pyx_v_axis) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_4 = PyFloat_FromDouble(0.5); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); if (PyDict_SetItem(__pyx_t_3, ((PyObject *)__pyx_n_s__ratio), __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyBool_FromLong(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); if (PyDict_SetItem(__pyx_t_3, ((PyObject *)__pyx_n_s__interp), __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_2), ((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("nipy.labs.utils.routines.median", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_5utils_8routines_5mahalanobis(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_4labs_5utils_8routines_4mahalanobis[] = "\n d2 = mahalanobis(X, VX).\n\n ufunc-like function to compute Mahalanobis squared distances\n x'*inv(Vx)*x. \n\n axis == 0 assumed. 
If X is shaped (d,K), VX must be shaped\n (d,d,K).\n "; static PyMethodDef __pyx_mdef_4nipy_4labs_5utils_8routines_5mahalanobis = {__Pyx_NAMESTR("mahalanobis"), (PyCFunction)__pyx_pw_4nipy_4labs_5utils_8routines_5mahalanobis, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4nipy_4labs_5utils_8routines_4mahalanobis)}; static PyObject *__pyx_pw_4nipy_4labs_5utils_8routines_5mahalanobis(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_X = 0; PyObject *__pyx_v_VX = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("mahalanobis (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__X,&__pyx_n_s__VX,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__X)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__VX)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("mahalanobis", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 86; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "mahalanobis") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 86; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_X = values[0]; __pyx_v_VX = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("mahalanobis", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 86; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.labs.utils.routines.mahalanobis", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4nipy_4labs_5utils_8routines_4mahalanobis(__pyx_self, __pyx_v_X, __pyx_v_VX); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/labs/utils/routines.pyx":86 * * * def mahalanobis(X, VX): # <<<<<<<<<<<<<< * """ * d2 = mahalanobis(X, VX). 
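 *
 * Illustrative usage sketch (not part of routines.pyx; assumes the compiled
 * nipy.labs.utils.routines extension is importable; shapes follow the docstring):
 *
 *     import numpy as np
 *     from nipy.labs.utils import routines
 *     d, K = 3, 10
 *     X = np.random.randn(d, K)
 *     VX = np.array([np.eye(d)] * K).transpose(1, 2, 0)  # shape (d, d, K)
 *     d2 = routines.mahalanobis(X, VX)                   # shape (K,)
 *     # with identity covariances, d2 should equal (X ** 2).sum(axis=0)
 *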
*/ static PyObject *__pyx_pf_4nipy_4labs_5utils_8routines_4mahalanobis(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_X, PyObject *__pyx_v_VX) { fff_vector *__pyx_v_x; fff_vector *__pyx_v_vx; fff_vector *__pyx_v_x_tmp; fff_vector *__pyx_v_vx_tmp; fff_vector *__pyx_v_d2; fff_matrix __pyx_v_Sx; fff_matrix *__pyx_v_Sx_tmp; fffpy_multi_iterator *__pyx_v_multi; int __pyx_v_axis; int __pyx_v_n; PyObject *__pyx_v_dims = NULL; PyObject *__pyx_v_dim = NULL; PyObject *__pyx_v_D2 = NULL; PyObject *__pyx_v_VX_flat = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("mahalanobis", 0); /* "nipy/labs/utils/routines.pyx":100 * cdef fff_matrix *Sx_tmp * cdef fffpy_multi_iterator* multi * cdef int axis=0, n # <<<<<<<<<<<<<< * * # Allocate output array */ __pyx_v_axis = 0; /* "nipy/labs/utils/routines.pyx":103 * * # Allocate output array * dims = list(X.shape) # <<<<<<<<<<<<<< * dim = dims[0] * dims[0] = 1 */ __pyx_t_1 = PyObject_GetAttr(__pyx_v_X, __pyx_n_s__shape); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 103; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 103; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyObject_Call(((PyObject *)((PyObject*)(&PyList_Type))), ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 103; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; __pyx_v_dims = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/utils/routines.pyx":104 * # Allocate output array * dims = list(X.shape) * dim = dims[0] # <<<<<<<<<<<<<< * dims[0] = 1 * D2 = np.zeros(dims) */ __pyx_t_1 = __Pyx_GetItemInt_List(((PyObject *)__pyx_v_dims), 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 104; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_v_dim = __pyx_t_1; __pyx_t_1 = 0; /* "nipy/labs/utils/routines.pyx":105 * dims = list(X.shape) * dim = dims[0] * dims[0] = 1 # <<<<<<<<<<<<<< * D2 = np.zeros(dims) * */ if (__Pyx_SetItemInt(((PyObject *)__pyx_v_dims), 0, __pyx_int_1, sizeof(long), PyInt_FromLong) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 105; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/utils/routines.pyx":106 * dim = dims[0] * dims[0] = 1 * D2 = np.zeros(dims) # <<<<<<<<<<<<<< * * # Flatten input variance array */ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 106; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__zeros); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 106; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 106; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); 
__Pyx_INCREF(((PyObject *)__pyx_v_dims)); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_v_dims)); __Pyx_GIVEREF(((PyObject *)__pyx_v_dims)); __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 106; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __pyx_v_D2 = __pyx_t_3; __pyx_t_3 = 0; /* "nipy/labs/utils/routines.pyx":109 * * # Flatten input variance array * VX_flat = VX.reshape( [dim*dim]+list(VX.shape[2:]) ) # <<<<<<<<<<<<<< * * # Create a new array iterator */ __pyx_t_3 = PyObject_GetAttr(__pyx_v_VX, __pyx_n_s__reshape); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_1 = PyNumber_Multiply(__pyx_v_dim, __pyx_v_dim); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); PyList_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyObject_GetAttr(__pyx_v_VX, __pyx_n_s__shape); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __Pyx_PySequence_GetSlice(__pyx_t_1, 2, PY_SSIZE_T_MAX); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyObject_Call(((PyObject *)((PyObject*)(&PyList_Type))), ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __pyx_t_1 = PyNumber_Add(((PyObject *)__pyx_t_2), __pyx_t_4); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_1)); __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_t_1)); __Pyx_GIVEREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __pyx_t_1 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; __pyx_v_VX_flat = __pyx_t_1; __pyx_t_1 = 0; /* "nipy/labs/utils/routines.pyx":112 * * # Create a new array iterator * multi = fffpy_multi_iterator_new(3, axis, X, VX_flat, D2) # <<<<<<<<<<<<<< * * # Allocate local structures */ __pyx_v_multi = 
fffpy_multi_iterator_new(3, __pyx_v_axis, ((void *)__pyx_v_X), ((void *)__pyx_v_VX_flat), ((void *)__pyx_v_D2)); /* "nipy/labs/utils/routines.pyx":115 * * # Allocate local structures * n = X.shape[axis] # <<<<<<<<<<<<<< * x_tmp = fff_vector_new(n) * vx_tmp = fff_vector_new(n*n) */ __pyx_t_1 = PyObject_GetAttr(__pyx_v_X, __pyx_n_s__shape); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 115; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __Pyx_GetItemInt(__pyx_t_1, __pyx_v_axis, sizeof(int), PyInt_FromLong); if (!__pyx_t_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 115; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_5 = __Pyx_PyInt_AsInt(__pyx_t_4); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 115; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_v_n = ((int)__pyx_t_5); /* "nipy/labs/utils/routines.pyx":116 * # Allocate local structures * n = X.shape[axis] * x_tmp = fff_vector_new(n) # <<<<<<<<<<<<<< * vx_tmp = fff_vector_new(n*n) * Sx_tmp = fff_matrix_new(n, n) */ __pyx_v_x_tmp = fff_vector_new(__pyx_v_n); /* "nipy/labs/utils/routines.pyx":117 * n = X.shape[axis] * x_tmp = fff_vector_new(n) * vx_tmp = fff_vector_new(n*n) # <<<<<<<<<<<<<< * Sx_tmp = fff_matrix_new(n, n) * */ __pyx_v_vx_tmp = fff_vector_new((__pyx_v_n * __pyx_v_n)); /* "nipy/labs/utils/routines.pyx":118 * x_tmp = fff_vector_new(n) * vx_tmp = fff_vector_new(n*n) * Sx_tmp = fff_matrix_new(n, n) # <<<<<<<<<<<<<< * * # Create vector views on X, VX_flat and D2 */ __pyx_v_Sx_tmp = fff_matrix_new(__pyx_v_n, __pyx_v_n); /* "nipy/labs/utils/routines.pyx":121 * * # Create vector views on X, VX_flat and D2 * x = multi.vector[0] # <<<<<<<<<<<<<< * vx = multi.vector[1] * d2 = multi.vector[2] */ __pyx_v_x = (__pyx_v_multi->vector[0]); /* "nipy/labs/utils/routines.pyx":122 * # Create vector views on X, VX_flat and D2 * x = multi.vector[0] * vx = multi.vector[1] # <<<<<<<<<<<<<< * d2 = multi.vector[2] * */ __pyx_v_vx = (__pyx_v_multi->vector[1]); /* "nipy/labs/utils/routines.pyx":123 * x = multi.vector[0] * vx = multi.vector[1] * d2 = multi.vector[2] # <<<<<<<<<<<<<< * * # Loop */ __pyx_v_d2 = (__pyx_v_multi->vector[2]); /* "nipy/labs/utils/routines.pyx":126 * * # Loop * while(multi.index < multi.size): # <<<<<<<<<<<<<< * fff_vector_memcpy(x_tmp, x) * fff_vector_memcpy(vx_tmp, vx) */ while (1) { __pyx_t_6 = (__pyx_v_multi->index < __pyx_v_multi->size); if (!__pyx_t_6) break; /* "nipy/labs/utils/routines.pyx":127 * # Loop * while(multi.index < multi.size): * fff_vector_memcpy(x_tmp, x) # <<<<<<<<<<<<<< * fff_vector_memcpy(vx_tmp, vx) * Sx = fff_matrix_view(vx_tmp.data, n, n, n) # OK because vx_tmp is contiguous */ fff_vector_memcpy(__pyx_v_x_tmp, __pyx_v_x); /* "nipy/labs/utils/routines.pyx":128 * while(multi.index < multi.size): * fff_vector_memcpy(x_tmp, x) * fff_vector_memcpy(vx_tmp, vx) # <<<<<<<<<<<<<< * Sx = fff_matrix_view(vx_tmp.data, n, n, n) # OK because vx_tmp is contiguous * d2.data[0] = fff_mahalanobis(x_tmp, &Sx, Sx_tmp) */ fff_vector_memcpy(__pyx_v_vx_tmp, __pyx_v_vx); /* "nipy/labs/utils/routines.pyx":129 * fff_vector_memcpy(x_tmp, x) * fff_vector_memcpy(vx_tmp, vx) * Sx = fff_matrix_view(vx_tmp.data, n, n, n) # OK because vx_tmp is contiguous # <<<<<<<<<<<<<< * d2.data[0] = fff_mahalanobis(x_tmp, &Sx, Sx_tmp) * fffpy_multi_iterator_update(multi) */ __pyx_v_Sx = 
fff_matrix_view(__pyx_v_vx_tmp->data, __pyx_v_n, __pyx_v_n, __pyx_v_n); /* "nipy/labs/utils/routines.pyx":130 * fff_vector_memcpy(vx_tmp, vx) * Sx = fff_matrix_view(vx_tmp.data, n, n, n) # OK because vx_tmp is contiguous * d2.data[0] = fff_mahalanobis(x_tmp, &Sx, Sx_tmp) # <<<<<<<<<<<<<< * fffpy_multi_iterator_update(multi) * */ (__pyx_v_d2->data[0]) = fff_mahalanobis(__pyx_v_x_tmp, (&__pyx_v_Sx), __pyx_v_Sx_tmp); /* "nipy/labs/utils/routines.pyx":131 * Sx = fff_matrix_view(vx_tmp.data, n, n, n) # OK because vx_tmp is contiguous * d2.data[0] = fff_mahalanobis(x_tmp, &Sx, Sx_tmp) * fffpy_multi_iterator_update(multi) # <<<<<<<<<<<<<< * * # Delete local structs and views */ fffpy_multi_iterator_update(__pyx_v_multi); } /* "nipy/labs/utils/routines.pyx":134 * * # Delete local structs and views * fff_vector_delete(x_tmp) # <<<<<<<<<<<<<< * fff_vector_delete(vx_tmp) * fff_matrix_delete(Sx_tmp) */ fff_vector_delete(__pyx_v_x_tmp); /* "nipy/labs/utils/routines.pyx":135 * # Delete local structs and views * fff_vector_delete(x_tmp) * fff_vector_delete(vx_tmp) # <<<<<<<<<<<<<< * fff_matrix_delete(Sx_tmp) * fffpy_multi_iterator_delete(multi) */ fff_vector_delete(__pyx_v_vx_tmp); /* "nipy/labs/utils/routines.pyx":136 * fff_vector_delete(x_tmp) * fff_vector_delete(vx_tmp) * fff_matrix_delete(Sx_tmp) # <<<<<<<<<<<<<< * fffpy_multi_iterator_delete(multi) * */ fff_matrix_delete(__pyx_v_Sx_tmp); /* "nipy/labs/utils/routines.pyx":137 * fff_vector_delete(vx_tmp) * fff_matrix_delete(Sx_tmp) * fffpy_multi_iterator_delete(multi) # <<<<<<<<<<<<<< * * # Return */ fffpy_multi_iterator_delete(__pyx_v_multi); /* "nipy/labs/utils/routines.pyx":140 * * # Return * D2 = D2.reshape(VX.shape[2:]) # <<<<<<<<<<<<<< * return D2 * */ __pyx_t_4 = PyObject_GetAttr(__pyx_v_D2, __pyx_n_s__reshape); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 140; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_1 = PyObject_GetAttr(__pyx_v_VX, __pyx_n_s__shape); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 140; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PySequence_GetSlice(__pyx_t_1, 2, PY_SSIZE_T_MAX); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 140; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 140; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 140; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_v_D2); __pyx_v_D2 = __pyx_t_3; __pyx_t_3 = 0; /* "nipy/labs/utils/routines.pyx":141 * # Return * D2 = D2.reshape(VX.shape[2:]) * return D2 # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_D2); __pyx_r = __pyx_v_D2; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("nipy.labs.utils.routines.mahalanobis", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; 
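/* Rough NumPy equivalent of the iterator loop above (illustrative only, assuming
 * X shaped (d, K) and VX shaped (d, d, K) as stated in the docstring):
 *
 *     for k in range(K):
 *         d2[k] = X[:, k].dot(np.linalg.inv(VX[:, :, k])).dot(X[:, k])
 *
 * i.e. the squared Mahalanobis distance x' inv(Vx) x, computed slice by slice. */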
__pyx_L0:; __Pyx_XDECREF(__pyx_v_dims); __Pyx_XDECREF(__pyx_v_dim); __Pyx_XDECREF(__pyx_v_D2); __Pyx_XDECREF(__pyx_v_VX_flat); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_5utils_8routines_7svd(PyObject *__pyx_self, PyObject *__pyx_v_X); /*proto*/ static char __pyx_doc_4nipy_4labs_5utils_8routines_6svd[] = " Singular value decomposition of array `X`\n\n Y = svd(X)\n\n ufunc-like svd. Given an array X (m, n, K), perform an SV decomposition.\n\n Parameters\n ----------\n X : 2D array\n\n Returns\n -------\n S : (min(m,n), K)\n "; static PyMethodDef __pyx_mdef_4nipy_4labs_5utils_8routines_7svd = {__Pyx_NAMESTR("svd"), (PyCFunction)__pyx_pw_4nipy_4labs_5utils_8routines_7svd, METH_O, __Pyx_DOCSTR(__pyx_doc_4nipy_4labs_5utils_8routines_6svd)}; static PyObject *__pyx_pw_4nipy_4labs_5utils_8routines_7svd(PyObject *__pyx_self, PyObject *__pyx_v_X) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("svd (wrapper)", 0); __pyx_r = __pyx_pf_4nipy_4labs_5utils_8routines_6svd(__pyx_self, ((PyObject *)__pyx_v_X)); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/labs/utils/routines.pyx":144 * * * def svd(X): # <<<<<<<<<<<<<< * """ Singular value decomposition of array `X` * */ static PyObject *__pyx_pf_4nipy_4labs_5utils_8routines_6svd(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_X) { int __pyx_v_axis; int __pyx_v_m; int __pyx_v_n; int __pyx_v_dmin; int __pyx_v_dmax; int __pyx_v_lwork; int __pyx_v_liwork; CYTHON_UNUSED int __pyx_v_info; fff_vector *__pyx_v_work; fff_vector *__pyx_v_x_flat; fff_vector *__pyx_v_x_flat_tmp; fff_vector *__pyx_v_s; fff_vector *__pyx_v_s_tmp; fff_matrix __pyx_v_x; fff_array *__pyx_v_iwork; fff_matrix *__pyx_v_Aux; fff_matrix *__pyx_v_U; fff_matrix *__pyx_v_Vt; fffpy_multi_iterator *__pyx_v_multi; PyObject *__pyx_v_endims = NULL; PyObject *__pyx_v_S = NULL; PyObject *__pyx_v_X_flat = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_t_3; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("svd", 0); /* "nipy/labs/utils/routines.pyx":159 * S : (min(m,n), K) * """ * cdef int axis=0 # <<<<<<<<<<<<<< * cdef int m, n, dmin, dmax, lwork, liwork, info * cdef fff_vector *work, *x_flat, *x_flat_tmp, *s, *s_tmp */ __pyx_v_axis = 0; /* "nipy/labs/utils/routines.pyx":168 * * # Shape of matrices * m = X.shape[0] # <<<<<<<<<<<<<< * n = X.shape[1] * if m > n: */ __pyx_t_1 = PyObject_GetAttr(__pyx_v_X, __pyx_n_s__shape); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 168; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_GetItemInt(__pyx_t_1, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 168; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_3 = __Pyx_PyInt_AsInt(__pyx_t_2); if (unlikely((__pyx_t_3 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 168; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v_m = ((int)__pyx_t_3); /* "nipy/labs/utils/routines.pyx":169 * # Shape of matrices * m = X.shape[0] * n = X.shape[1] # <<<<<<<<<<<<<< * if m > n: * dmin = n */ __pyx_t_2 = PyObject_GetAttr(__pyx_v_X, __pyx_n_s__shape); if 
(unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 169; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __Pyx_GetItemInt(__pyx_t_2, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 169; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_3 = __Pyx_PyInt_AsInt(__pyx_t_1); if (unlikely((__pyx_t_3 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 169; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_n = ((int)__pyx_t_3); /* "nipy/labs/utils/routines.pyx":170 * m = X.shape[0] * n = X.shape[1] * if m > n: # <<<<<<<<<<<<<< * dmin = n * dmax = m */ __pyx_t_4 = (__pyx_v_m > __pyx_v_n); if (__pyx_t_4) { /* "nipy/labs/utils/routines.pyx":171 * n = X.shape[1] * if m > n: * dmin = n # <<<<<<<<<<<<<< * dmax = m * else: */ __pyx_v_dmin = __pyx_v_n; /* "nipy/labs/utils/routines.pyx":172 * if m > n: * dmin = n * dmax = m # <<<<<<<<<<<<<< * else: * dmin = m */ __pyx_v_dmax = __pyx_v_m; goto __pyx_L3; } /*else*/ { /* "nipy/labs/utils/routines.pyx":174 * dmax = m * else: * dmin = m # <<<<<<<<<<<<<< * dmax = n * */ __pyx_v_dmin = __pyx_v_m; /* "nipy/labs/utils/routines.pyx":175 * else: * dmin = m * dmax = n # <<<<<<<<<<<<<< * * # Create auxiliary arrays */ __pyx_v_dmax = __pyx_v_n; } __pyx_L3:; /* "nipy/labs/utils/routines.pyx":178 * * # Create auxiliary arrays * lwork = 4*dmin*(dmin+1) # <<<<<<<<<<<<<< * if dmax > lwork: * lwork = dmax */ __pyx_v_lwork = ((4 * __pyx_v_dmin) * (__pyx_v_dmin + 1)); /* "nipy/labs/utils/routines.pyx":179 * # Create auxiliary arrays * lwork = 4*dmin*(dmin+1) * if dmax > lwork: # <<<<<<<<<<<<<< * lwork = dmax * lwork = 2*(3*dmin*dmin + lwork) */ __pyx_t_4 = (__pyx_v_dmax > __pyx_v_lwork); if (__pyx_t_4) { /* "nipy/labs/utils/routines.pyx":180 * lwork = 4*dmin*(dmin+1) * if dmax > lwork: * lwork = dmax # <<<<<<<<<<<<<< * lwork = 2*(3*dmin*dmin + lwork) * liwork = 8*dmin */ __pyx_v_lwork = __pyx_v_dmax; goto __pyx_L4; } __pyx_L4:; /* "nipy/labs/utils/routines.pyx":181 * if dmax > lwork: * lwork = dmax * lwork = 2*(3*dmin*dmin + lwork) # <<<<<<<<<<<<<< * liwork = 8*dmin * work = fff_vector_new(lwork) */ __pyx_v_lwork = (2 * (((3 * __pyx_v_dmin) * __pyx_v_dmin) + __pyx_v_lwork)); /* "nipy/labs/utils/routines.pyx":182 * lwork = dmax * lwork = 2*(3*dmin*dmin + lwork) * liwork = 8*dmin # <<<<<<<<<<<<<< * work = fff_vector_new(lwork) * iwork = fff_array_new1d(FFF_INT, liwork) */ __pyx_v_liwork = (8 * __pyx_v_dmin); /* "nipy/labs/utils/routines.pyx":183 * lwork = 2*(3*dmin*dmin + lwork) * liwork = 8*dmin * work = fff_vector_new(lwork) # <<<<<<<<<<<<<< * iwork = fff_array_new1d(FFF_INT, liwork) * Aux = fff_matrix_new(dmax, dmax) */ __pyx_v_work = fff_vector_new(__pyx_v_lwork); /* "nipy/labs/utils/routines.pyx":184 * liwork = 8*dmin * work = fff_vector_new(lwork) * iwork = fff_array_new1d(FFF_INT, liwork) # <<<<<<<<<<<<<< * Aux = fff_matrix_new(dmax, dmax) * U = fff_matrix_new(m, m) */ __pyx_v_iwork = fff_array_new1d(FFF_INT, __pyx_v_liwork); /* "nipy/labs/utils/routines.pyx":185 * work = fff_vector_new(lwork) * iwork = fff_array_new1d(FFF_INT, liwork) * Aux = fff_matrix_new(dmax, dmax) # <<<<<<<<<<<<<< * U = fff_matrix_new(m, m) * Vt = fff_matrix_new(n, n) */ __pyx_v_Aux = fff_matrix_new(__pyx_v_dmax, __pyx_v_dmax); /* "nipy/labs/utils/routines.pyx":186 * iwork = fff_array_new1d(FFF_INT, liwork) * Aux = fff_matrix_new(dmax, dmax) * U = 
fff_matrix_new(m, m) # <<<<<<<<<<<<<< * Vt = fff_matrix_new(n, n) * x_flat_tmp = fff_vector_new(m*n) */ __pyx_v_U = fff_matrix_new(__pyx_v_m, __pyx_v_m); /* "nipy/labs/utils/routines.pyx":187 * Aux = fff_matrix_new(dmax, dmax) * U = fff_matrix_new(m, m) * Vt = fff_matrix_new(n, n) # <<<<<<<<<<<<<< * x_flat_tmp = fff_vector_new(m*n) * s_tmp = fff_vector_new(dmin) */ __pyx_v_Vt = fff_matrix_new(__pyx_v_n, __pyx_v_n); /* "nipy/labs/utils/routines.pyx":188 * U = fff_matrix_new(m, m) * Vt = fff_matrix_new(n, n) * x_flat_tmp = fff_vector_new(m*n) # <<<<<<<<<<<<<< * s_tmp = fff_vector_new(dmin) * */ __pyx_v_x_flat_tmp = fff_vector_new((__pyx_v_m * __pyx_v_n)); /* "nipy/labs/utils/routines.pyx":189 * Vt = fff_matrix_new(n, n) * x_flat_tmp = fff_vector_new(m*n) * s_tmp = fff_vector_new(dmin) # <<<<<<<<<<<<<< * * # Allocate output array */ __pyx_v_s_tmp = fff_vector_new(__pyx_v_dmin); /* "nipy/labs/utils/routines.pyx":192 * * # Allocate output array * endims = list(X.shape[2:]) # <<<<<<<<<<<<<< * S = np.zeros([dmin]+endims) * */ __pyx_t_1 = PyObject_GetAttr(__pyx_v_X, __pyx_n_s__shape); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 192; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PySequence_GetSlice(__pyx_t_1, 2, PY_SSIZE_T_MAX); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 192; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 192; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyObject_Call(((PyObject *)((PyObject*)(&PyList_Type))), ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 192; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __pyx_v_endims = ((PyObject*)__pyx_t_2); __pyx_t_2 = 0; /* "nipy/labs/utils/routines.pyx":193 * # Allocate output array * endims = list(X.shape[2:]) * S = np.zeros([dmin]+endims) # <<<<<<<<<<<<<< * * # Flatten input array */ __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 193; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__zeros); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 193; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyInt_FromLong(__pyx_v_dmin); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 193; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_5 = PyList_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 193; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); PyList_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyNumber_Add(((PyObject *)__pyx_t_5), ((PyObject *)__pyx_v_endims)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 193; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_2)); __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 193; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_t_2)); __Pyx_GIVEREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; __pyx_t_2 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 193; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; __pyx_v_S = __pyx_t_2; __pyx_t_2 = 0; /* "nipy/labs/utils/routines.pyx":196 * * # Flatten input array * X_flat = X.reshape([m*n]+endims) # <<<<<<<<<<<<<< * * # Create a new array iterator */ __pyx_t_2 = PyObject_GetAttr(__pyx_v_X, __pyx_n_s__reshape); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 196; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_5 = PyInt_FromLong((__pyx_v_m * __pyx_v_n)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 196; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 196; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); PyList_SET_ITEM(__pyx_t_1, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyNumber_Add(((PyObject *)__pyx_t_1), ((PyObject *)__pyx_v_endims)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 196; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_5)); __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 196; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_t_5)); __Pyx_GIVEREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; __pyx_t_5 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 196; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __pyx_v_X_flat = __pyx_t_5; __pyx_t_5 = 0; /* "nipy/labs/utils/routines.pyx":199 * * # Create a new array iterator * multi = fffpy_multi_iterator_new(2, axis, X_flat, S) # <<<<<<<<<<<<<< * * # Create vector views */ __pyx_v_multi = fffpy_multi_iterator_new(2, __pyx_v_axis, ((void *)__pyx_v_X_flat), ((void *)__pyx_v_S)); /* "nipy/labs/utils/routines.pyx":202 * * # Create vector views * x_flat = multi.vector[0] # <<<<<<<<<<<<<< * s = multi.vector[1] * */ __pyx_v_x_flat = (__pyx_v_multi->vector[0]); /* "nipy/labs/utils/routines.pyx":203 * # Create vector views * x_flat = multi.vector[0] * s = multi.vector[1] # <<<<<<<<<<<<<< * * # Loop */ __pyx_v_s = (__pyx_v_multi->vector[1]); /* "nipy/labs/utils/routines.pyx":206 * * # Loop * while(multi.index < multi.size): # <<<<<<<<<<<<<< * fff_vector_memcpy(x_flat_tmp, x_flat) * fff_vector_memcpy(s_tmp, s) */ while (1) { __pyx_t_4 = (__pyx_v_multi->index < __pyx_v_multi->size); if (!__pyx_t_4) break; /* "nipy/labs/utils/routines.pyx":207 * # Loop * while(multi.index < multi.size): * fff_vector_memcpy(x_flat_tmp, x_flat) # <<<<<<<<<<<<<< * fff_vector_memcpy(s_tmp, s) * x = fff_matrix_view(x_flat_tmp.data, m, n, n) # OK because x_flat_tmp is contiguous */ fff_vector_memcpy(__pyx_v_x_flat_tmp, 
__pyx_v_x_flat); /* "nipy/labs/utils/routines.pyx":208 * while(multi.index < multi.size): * fff_vector_memcpy(x_flat_tmp, x_flat) * fff_vector_memcpy(s_tmp, s) # <<<<<<<<<<<<<< * x = fff_matrix_view(x_flat_tmp.data, m, n, n) # OK because x_flat_tmp is contiguous * info = fff_lapack_dgesdd(&x, s_tmp, U, Vt, work, iwork, Aux ) */ fff_vector_memcpy(__pyx_v_s_tmp, __pyx_v_s); /* "nipy/labs/utils/routines.pyx":209 * fff_vector_memcpy(x_flat_tmp, x_flat) * fff_vector_memcpy(s_tmp, s) * x = fff_matrix_view(x_flat_tmp.data, m, n, n) # OK because x_flat_tmp is contiguous # <<<<<<<<<<<<<< * info = fff_lapack_dgesdd(&x, s_tmp, U, Vt, work, iwork, Aux ) * fff_vector_memcpy(s, s_tmp) */ __pyx_v_x = fff_matrix_view(__pyx_v_x_flat_tmp->data, __pyx_v_m, __pyx_v_n, __pyx_v_n); /* "nipy/labs/utils/routines.pyx":210 * fff_vector_memcpy(s_tmp, s) * x = fff_matrix_view(x_flat_tmp.data, m, n, n) # OK because x_flat_tmp is contiguous * info = fff_lapack_dgesdd(&x, s_tmp, U, Vt, work, iwork, Aux ) # <<<<<<<<<<<<<< * fff_vector_memcpy(s, s_tmp) * fffpy_multi_iterator_update(multi) */ __pyx_v_info = fff_lapack_dgesdd((&__pyx_v_x), __pyx_v_s_tmp, __pyx_v_U, __pyx_v_Vt, __pyx_v_work, __pyx_v_iwork, __pyx_v_Aux); /* "nipy/labs/utils/routines.pyx":211 * x = fff_matrix_view(x_flat_tmp.data, m, n, n) # OK because x_flat_tmp is contiguous * info = fff_lapack_dgesdd(&x, s_tmp, U, Vt, work, iwork, Aux ) * fff_vector_memcpy(s, s_tmp) # <<<<<<<<<<<<<< * fffpy_multi_iterator_update(multi) * */ fff_vector_memcpy(__pyx_v_s, __pyx_v_s_tmp); /* "nipy/labs/utils/routines.pyx":212 * info = fff_lapack_dgesdd(&x, s_tmp, U, Vt, work, iwork, Aux ) * fff_vector_memcpy(s, s_tmp) * fffpy_multi_iterator_update(multi) # <<<<<<<<<<<<<< * * # Delete local structures */ fffpy_multi_iterator_update(__pyx_v_multi); } /* "nipy/labs/utils/routines.pyx":215 * * # Delete local structures * fff_vector_delete(work) # <<<<<<<<<<<<<< * fff_vector_delete(x_flat_tmp) * fff_vector_delete(s_tmp) */ fff_vector_delete(__pyx_v_work); /* "nipy/labs/utils/routines.pyx":216 * # Delete local structures * fff_vector_delete(work) * fff_vector_delete(x_flat_tmp) # <<<<<<<<<<<<<< * fff_vector_delete(s_tmp) * fff_array_delete(iwork) */ fff_vector_delete(__pyx_v_x_flat_tmp); /* "nipy/labs/utils/routines.pyx":217 * fff_vector_delete(work) * fff_vector_delete(x_flat_tmp) * fff_vector_delete(s_tmp) # <<<<<<<<<<<<<< * fff_array_delete(iwork) * fff_matrix_delete(Aux) */ fff_vector_delete(__pyx_v_s_tmp); /* "nipy/labs/utils/routines.pyx":218 * fff_vector_delete(x_flat_tmp) * fff_vector_delete(s_tmp) * fff_array_delete(iwork) # <<<<<<<<<<<<<< * fff_matrix_delete(Aux) * fff_matrix_delete(U) */ fff_array_delete(__pyx_v_iwork); /* "nipy/labs/utils/routines.pyx":219 * fff_vector_delete(s_tmp) * fff_array_delete(iwork) * fff_matrix_delete(Aux) # <<<<<<<<<<<<<< * fff_matrix_delete(U) * fff_matrix_delete(Vt) */ fff_matrix_delete(__pyx_v_Aux); /* "nipy/labs/utils/routines.pyx":220 * fff_array_delete(iwork) * fff_matrix_delete(Aux) * fff_matrix_delete(U) # <<<<<<<<<<<<<< * fff_matrix_delete(Vt) * fffpy_multi_iterator_delete(multi) */ fff_matrix_delete(__pyx_v_U); /* "nipy/labs/utils/routines.pyx":221 * fff_matrix_delete(Aux) * fff_matrix_delete(U) * fff_matrix_delete(Vt) # <<<<<<<<<<<<<< * fffpy_multi_iterator_delete(multi) * */ fff_matrix_delete(__pyx_v_Vt); /* "nipy/labs/utils/routines.pyx":222 * fff_matrix_delete(U) * fff_matrix_delete(Vt) * fffpy_multi_iterator_delete(multi) # <<<<<<<<<<<<<< * * # Return */ fffpy_multi_iterator_delete(__pyx_v_multi); /* 
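 * Illustrative usage sketch for svd (not part of routines.pyx; assumes the
 * compiled nipy.labs.utils.routines extension is importable):
 *
 *     import numpy as np
 *     from nipy.labs.utils import routines
 *     X = np.random.randn(4, 3, 7)   # (m, n, K)
 *     S = routines.svd(X)            # singular values, shape (min(m, n), K) == (3, 7)
 *     # roughly np.linalg.svd(X[:, :, k], compute_uv=False) for each k
 *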
"nipy/labs/utils/routines.pyx":225 * * # Return * return S # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_S); __pyx_r = __pyx_v_S; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("nipy.labs.utils.routines.svd", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_endims); __Pyx_XDECREF(__pyx_v_S); __Pyx_XDECREF(__pyx_v_X_flat); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_5utils_8routines_9permutations(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_4labs_5utils_8routines_8permutations[] = "\n P = permutations(n, m=1, magic=0).\n Generate m permutations from [0..n[.\n "; static PyMethodDef __pyx_mdef_4nipy_4labs_5utils_8routines_9permutations = {__Pyx_NAMESTR("permutations"), (PyCFunction)__pyx_pw_4nipy_4labs_5utils_8routines_9permutations, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4nipy_4labs_5utils_8routines_8permutations)}; static PyObject *__pyx_pw_4nipy_4labs_5utils_8routines_9permutations(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { unsigned int __pyx_v_n; unsigned int __pyx_v_m; unsigned long __pyx_v_magic; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("permutations (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__n,&__pyx_n_s__m,&__pyx_n_s__magic,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__n)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__m); if (value) { values[1] = value; kw_args--; } } case 2: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__magic); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "permutations") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 228; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_n = __Pyx_PyInt_AsUnsignedInt(values[0]); if (unlikely((__pyx_v_n == (unsigned int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 228; __pyx_clineno = __LINE__; goto __pyx_L3_error;} if (values[1]) { __pyx_v_m = __Pyx_PyInt_AsUnsignedInt(values[1]); if (unlikely((__pyx_v_m == (unsigned int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 228; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else { __pyx_v_m = ((unsigned int)1); } if (values[2]) { __pyx_v_magic = __Pyx_PyInt_AsUnsignedLong(values[2]); if (unlikely((__pyx_v_magic == (unsigned long)-1) && 
PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 228; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else { __pyx_v_magic = ((unsigned long)0); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("permutations", 0, 1, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 228; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.labs.utils.routines.permutations", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4nipy_4labs_5utils_8routines_8permutations(__pyx_self, __pyx_v_n, __pyx_v_m, __pyx_v_magic); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/labs/utils/routines.pyx":228 * * * def permutations(unsigned int n, unsigned int m=1, unsigned long magic=0): # <<<<<<<<<<<<<< * """ * P = permutations(n, m=1, magic=0). */ static PyObject *__pyx_pf_4nipy_4labs_5utils_8routines_8permutations(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_n, unsigned int __pyx_v_m, unsigned long __pyx_v_magic) { fff_array *__pyx_v_p; fff_array *__pyx_v_pi; fff_array __pyx_v_pi_view; unsigned int __pyx_v_i; PyArrayObject *__pyx_v_P = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations unsigned int __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("permutations", 0); /* "nipy/labs/utils/routines.pyx":236 * cdef fff_array pi_view * cdef unsigned int i * p = fff_array_new2d(FFF_UINT, n, m) # <<<<<<<<<<<<<< * pi = fff_array_new1d(FFF_UINT, n) ## contiguous, dims=(n,1,1,1) * */ __pyx_v_p = fff_array_new2d(FFF_UINT, __pyx_v_n, __pyx_v_m); /* "nipy/labs/utils/routines.pyx":237 * cdef unsigned int i * p = fff_array_new2d(FFF_UINT, n, m) * pi = fff_array_new1d(FFF_UINT, n) ## contiguous, dims=(n,1,1,1) # <<<<<<<<<<<<<< * * for i from 0 <= i < m: */ __pyx_v_pi = fff_array_new1d(FFF_UINT, __pyx_v_n); /* "nipy/labs/utils/routines.pyx":239 * pi = fff_array_new1d(FFF_UINT, n) ## contiguous, dims=(n,1,1,1) * * for i from 0 <= i < m: # <<<<<<<<<<<<<< * fff_permutation(pi.data, n, magic+i) * pi_view = fff_array_get_block2d(p, 0, n-1, 1, i, i, 1) ## dims=(n,1,1,1) */ __pyx_t_1 = __pyx_v_m; for (__pyx_v_i = 0; __pyx_v_i < __pyx_t_1; __pyx_v_i++) { /* "nipy/labs/utils/routines.pyx":240 * * for i from 0 <= i < m: * fff_permutation(pi.data, n, magic+i) # <<<<<<<<<<<<<< * pi_view = fff_array_get_block2d(p, 0, n-1, 1, i, i, 1) ## dims=(n,1,1,1) * fff_array_copy(&pi_view, pi) */ fff_permutation(((unsigned int *)__pyx_v_pi->data), __pyx_v_n, (__pyx_v_magic + __pyx_v_i)); /* "nipy/labs/utils/routines.pyx":241 * for i from 0 <= i < m: * fff_permutation(pi.data, n, magic+i) * pi_view = fff_array_get_block2d(p, 0, n-1, 1, i, i, 1) ## dims=(n,1,1,1) # <<<<<<<<<<<<<< * fff_array_copy(&pi_view, pi) * */ __pyx_v_pi_view = fff_array_get_block2d(__pyx_v_p, 0, (__pyx_v_n - 1), 1, __pyx_v_i, __pyx_v_i, 1); /* "nipy/labs/utils/routines.pyx":242 * fff_permutation(pi.data, n, magic+i) * pi_view = fff_array_get_block2d(p, 0, n-1, 1, i, i, 1) ## dims=(n,1,1,1) * fff_array_copy(&pi_view, pi) # <<<<<<<<<<<<<< * * P = fff_array_toPyArray(p) */ fff_array_copy((&__pyx_v_pi_view), __pyx_v_pi); } /* "nipy/labs/utils/routines.pyx":244 * fff_array_copy(&pi_view, pi) * * P = fff_array_toPyArray(p) # <<<<<<<<<<<<<< * return P * */ __pyx_t_2 = ((PyObject *)fff_array_toPyArray(__pyx_v_p)); if (unlikely(!__pyx_t_2)) {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 244; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_v_P = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "nipy/labs/utils/routines.pyx":245 * * P = fff_array_toPyArray(p) * return P # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_P)); __pyx_r = ((PyObject *)__pyx_v_P); goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("nipy.labs.utils.routines.permutations", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_P); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_5utils_8routines_11combinations(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_4nipy_4labs_5utils_8routines_10combinations[] = "\n P = combinations(k, n, m=1, magic=0).\n Generate m combinations of k elements from [0..n[.\n "; static PyMethodDef __pyx_mdef_4nipy_4labs_5utils_8routines_11combinations = {__Pyx_NAMESTR("combinations"), (PyCFunction)__pyx_pw_4nipy_4labs_5utils_8routines_11combinations, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_4nipy_4labs_5utils_8routines_10combinations)}; static PyObject *__pyx_pw_4nipy_4labs_5utils_8routines_11combinations(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { unsigned int __pyx_v_k; unsigned int __pyx_v_n; unsigned int __pyx_v_m; unsigned long __pyx_v_magic; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("combinations (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__k,&__pyx_n_s__n,&__pyx_n_s__m,&__pyx_n_s__magic,0}; PyObject* values[4] = {0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__k)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__n)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("combinations", 0, 2, 4, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 248; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__m); if (value) { values[2] = value; kw_args--; } } case 3: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__magic); if (value) { values[3] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "combinations") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 248; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_k = __Pyx_PyInt_AsUnsignedInt(values[0]); if (unlikely((__pyx_v_k == 
(unsigned int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 248; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_n = __Pyx_PyInt_AsUnsignedInt(values[1]); if (unlikely((__pyx_v_n == (unsigned int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 248; __pyx_clineno = __LINE__; goto __pyx_L3_error;} if (values[2]) { __pyx_v_m = __Pyx_PyInt_AsUnsignedInt(values[2]); if (unlikely((__pyx_v_m == (unsigned int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 248; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else { __pyx_v_m = ((unsigned int)1); } if (values[3]) { __pyx_v_magic = __Pyx_PyInt_AsUnsignedLong(values[3]); if (unlikely((__pyx_v_magic == (unsigned long)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 248; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else { __pyx_v_magic = ((unsigned long)0); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("combinations", 0, 2, 4, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 248; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("nipy.labs.utils.routines.combinations", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4nipy_4labs_5utils_8routines_10combinations(__pyx_self, __pyx_v_k, __pyx_v_n, __pyx_v_m, __pyx_v_magic); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/labs/utils/routines.pyx":248 * * * def combinations(unsigned int k, unsigned int n, unsigned int m=1, unsigned long magic=0): # <<<<<<<<<<<<<< * """ * P = combinations(k, n, m=1, magic=0). */ static PyObject *__pyx_pf_4nipy_4labs_5utils_8routines_10combinations(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_k, unsigned int __pyx_v_n, unsigned int __pyx_v_m, unsigned long __pyx_v_magic) { fff_array *__pyx_v_p; fff_array *__pyx_v_pi; fff_array __pyx_v_pi_view; unsigned int __pyx_v_i; PyArrayObject *__pyx_v_C = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations unsigned int __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("combinations", 0); /* "nipy/labs/utils/routines.pyx":256 * cdef fff_array pi_view * cdef unsigned int i * p = fff_array_new2d(FFF_UINT, k, m) # <<<<<<<<<<<<<< * pi = fff_array_new1d(FFF_UINT, k) ## contiguous, dims=(n,1,1,1) * */ __pyx_v_p = fff_array_new2d(FFF_UINT, __pyx_v_k, __pyx_v_m); /* "nipy/labs/utils/routines.pyx":257 * cdef unsigned int i * p = fff_array_new2d(FFF_UINT, k, m) * pi = fff_array_new1d(FFF_UINT, k) ## contiguous, dims=(n,1,1,1) # <<<<<<<<<<<<<< * * for i from 0 <= i < m: */ __pyx_v_pi = fff_array_new1d(FFF_UINT, __pyx_v_k); /* "nipy/labs/utils/routines.pyx":259 * pi = fff_array_new1d(FFF_UINT, k) ## contiguous, dims=(n,1,1,1) * * for i from 0 <= i < m: # <<<<<<<<<<<<<< * fff_combination(pi.data, k, n, magic+i) * pi_view = fff_array_get_block2d(p, 0, k-1, 1, i, i, 1) ## dims=(k,1,1,1) */ __pyx_t_1 = __pyx_v_m; for (__pyx_v_i = 0; __pyx_v_i < __pyx_t_1; __pyx_v_i++) { /* "nipy/labs/utils/routines.pyx":260 * * for i from 0 <= i < m: * fff_combination(pi.data, k, n, magic+i) # <<<<<<<<<<<<<< * pi_view = fff_array_get_block2d(p, 0, k-1, 1, i, i, 1) ## dims=(k,1,1,1) * fff_array_copy(&pi_view, pi) */ fff_combination(((unsigned int *)__pyx_v_pi->data), __pyx_v_k, __pyx_v_n, (__pyx_v_magic + __pyx_v_i)); /* 
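 * Illustrative usage sketch for permutations/combinations (not part of
 * routines.pyx; assumes the compiled nipy.labs.utils.routines extension is
 * importable):
 *
 *     from nipy.labs.utils import routines
 *     P = routines.permutations(5, m=3)     # shape (5, 3): one permutation of 0..4 per column
 *     C = routines.combinations(2, 5, m=4)  # shape (2, 4): one 2-subset of 0..4 per column
 *     # `magic` appears to offset the drawn sequence; it is passed on as magic+i
 *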
"nipy/labs/utils/routines.pyx":261 * for i from 0 <= i < m: * fff_combination(pi.data, k, n, magic+i) * pi_view = fff_array_get_block2d(p, 0, k-1, 1, i, i, 1) ## dims=(k,1,1,1) # <<<<<<<<<<<<<< * fff_array_copy(&pi_view, pi) * */ __pyx_v_pi_view = fff_array_get_block2d(__pyx_v_p, 0, (__pyx_v_k - 1), 1, __pyx_v_i, __pyx_v_i, 1); /* "nipy/labs/utils/routines.pyx":262 * fff_combination(pi.data, k, n, magic+i) * pi_view = fff_array_get_block2d(p, 0, k-1, 1, i, i, 1) ## dims=(k,1,1,1) * fff_array_copy(&pi_view, pi) # <<<<<<<<<<<<<< * * C = fff_array_toPyArray(p) */ fff_array_copy((&__pyx_v_pi_view), __pyx_v_pi); } /* "nipy/labs/utils/routines.pyx":264 * fff_array_copy(&pi_view, pi) * * C = fff_array_toPyArray(p) # <<<<<<<<<<<<<< * return C * */ __pyx_t_2 = ((PyObject *)fff_array_toPyArray(__pyx_v_p)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 264; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_v_C = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "nipy/labs/utils/routines.pyx":265 * * C = fff_array_toPyArray(p) * return C # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_C)); __pyx_r = ((PyObject *)__pyx_v_C); goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("nipy.labs.utils.routines.combinations", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_C); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_5utils_8routines_13gamln(PyObject *__pyx_self, PyObject *__pyx_arg_x); /*proto*/ static char __pyx_doc_4nipy_4labs_5utils_8routines_12gamln[] = " Python bindings to log gamma. Do not use, this is there only for\n testing. Use scipy.special.gammaln.\n "; static PyMethodDef __pyx_mdef_4nipy_4labs_5utils_8routines_13gamln = {__Pyx_NAMESTR("gamln"), (PyCFunction)__pyx_pw_4nipy_4labs_5utils_8routines_13gamln, METH_O, __Pyx_DOCSTR(__pyx_doc_4nipy_4labs_5utils_8routines_12gamln)}; static PyObject *__pyx_pw_4nipy_4labs_5utils_8routines_13gamln(PyObject *__pyx_self, PyObject *__pyx_arg_x) { double __pyx_v_x; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("gamln (wrapper)", 0); assert(__pyx_arg_x); { __pyx_v_x = __pyx_PyFloat_AsDouble(__pyx_arg_x); if (unlikely((__pyx_v_x == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 268; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; __Pyx_AddTraceback("nipy.labs.utils.routines.gamln", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4nipy_4labs_5utils_8routines_12gamln(__pyx_self, ((double)__pyx_v_x)); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/labs/utils/routines.pyx":268 * * * def gamln(double x): # <<<<<<<<<<<<<< * """ Python bindings to log gamma. Do not use, this is there only for * testing. Use scipy.special.gammaln. 
*/ static PyObject *__pyx_pf_4nipy_4labs_5utils_8routines_12gamln(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_x) { double __pyx_v_y; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("gamln", 0); /* "nipy/labs/utils/routines.pyx":273 * """ * cdef double y * y = fff_gamln(x) # <<<<<<<<<<<<<< * return y * */ __pyx_v_y = fff_gamln(__pyx_v_x); /* "nipy/labs/utils/routines.pyx":274 * cdef double y * y = fff_gamln(x) * return y # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyFloat_FromDouble(__pyx_v_y); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 274; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("nipy.labs.utils.routines.gamln", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_4nipy_4labs_5utils_8routines_15psi(PyObject *__pyx_self, PyObject *__pyx_arg_x); /*proto*/ static char __pyx_doc_4nipy_4labs_5utils_8routines_14psi[] = " Python bindings to psi (d gamln(x)/dx. Do not use, this is there only \n for testing. Use scipy.special.psi.\n "; static PyMethodDef __pyx_mdef_4nipy_4labs_5utils_8routines_15psi = {__Pyx_NAMESTR("psi"), (PyCFunction)__pyx_pw_4nipy_4labs_5utils_8routines_15psi, METH_O, __Pyx_DOCSTR(__pyx_doc_4nipy_4labs_5utils_8routines_14psi)}; static PyObject *__pyx_pw_4nipy_4labs_5utils_8routines_15psi(PyObject *__pyx_self, PyObject *__pyx_arg_x) { double __pyx_v_x; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("psi (wrapper)", 0); assert(__pyx_arg_x); { __pyx_v_x = __pyx_PyFloat_AsDouble(__pyx_arg_x); if (unlikely((__pyx_v_x == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 277; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; __Pyx_AddTraceback("nipy.labs.utils.routines.psi", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4nipy_4labs_5utils_8routines_14psi(__pyx_self, ((double)__pyx_v_x)); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "nipy/labs/utils/routines.pyx":277 * * * def psi(double x): # <<<<<<<<<<<<<< * """ Python bindings to psi (d gamln(x)/dx. Do not use, this is there only * for testing. Use scipy.special.psi. 
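 *
 * A minimal usage sketch, under the same assumption that the compiled
 * extension is importable (scipy.special.psi is what real code should use).
 * psi is the digamma function, so psi(1.0) is minus the Euler-Mascheroni
 * constant:
 *
 *     >>> from nipy.labs.utils.routines import psi
 *     >>> round(psi(1.0), 4)
 *     -0.5772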
*/ static PyObject *__pyx_pf_4nipy_4labs_5utils_8routines_14psi(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_x) { double __pyx_v_y; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("psi", 0); /* "nipy/labs/utils/routines.pyx":282 * """ * cdef double y * y = fff_psi(x) # <<<<<<<<<<<<<< * return y * */ __pyx_v_y = fff_psi(__pyx_v_x); /* "nipy/labs/utils/routines.pyx":283 * cdef double y * y = fff_psi(x) * return y # <<<<<<<<<<<<<< * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyFloat_FromDouble(__pyx_v_y); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 283; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("nipy.labs.utils.routines.psi", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_pf_5numpy_7ndarray___getbuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":194 * # experimental exception made for __getbuffer__ and __releasebuffer__ * # -- the details of this may change. * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< * # This implementation of getbuffer is geared towards Cython * # requirements, and does not yet fullfill the PEP. 
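 *
 * The routine below fills a Py_buffer (PEP 3118) with the ndarray's data
 * pointer, ndim, shape, strides, itemsize and format string so that Cython
 * buffer access works. For orientation only, this is roughly the metadata a
 * Python-level memoryview of an ndarray reports through NumPy's own buffer
 * support (the 'i' format assumes a platform where C int is 32 bits):
 *
 *     >>> import numpy as np
 *     >>> m = memoryview(np.arange(6, dtype=np.int32).reshape(2, 3))
 *     >>> m.format, m.shape, m.strides
 *     ('i', (2, 3), (12, 4))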
*/ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_v_copy_shape; int __pyx_v_i; int __pyx_v_ndim; int __pyx_v_endian_detector; int __pyx_v_little_endian; int __pyx_v_t; char *__pyx_v_f; PyArray_Descr *__pyx_v_descr = 0; int __pyx_v_offset; int __pyx_v_hasfields; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; char *__pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getbuffer__", 0); if (__pyx_v_info != NULL) { __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); } /* "numpy.pxd":200 * # of flags * * if info == NULL: return # <<<<<<<<<<<<<< * * cdef int copy_shape, i, ndim */ __pyx_t_1 = (__pyx_v_info == NULL); if (__pyx_t_1) { __pyx_r = 0; goto __pyx_L0; goto __pyx_L3; } __pyx_L3:; /* "numpy.pxd":203 * * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((&endian_detector)[0] != 0) * */ __pyx_v_endian_detector = 1; /* "numpy.pxd":204 * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * * ndim = PyArray_NDIM(self) */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "numpy.pxd":206 * cdef bint little_endian = ((&endian_detector)[0] != 0) * * ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<< * * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_v_ndim = PyArray_NDIM(__pyx_v_self); /* "numpy.pxd":208 * ndim = PyArray_NDIM(self) * * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * copy_shape = 1 * else: */ __pyx_t_1 = ((sizeof(npy_intp)) != (sizeof(Py_ssize_t))); if (__pyx_t_1) { /* "numpy.pxd":209 * * if sizeof(npy_intp) != sizeof(Py_ssize_t): * copy_shape = 1 # <<<<<<<<<<<<<< * else: * copy_shape = 0 */ __pyx_v_copy_shape = 1; goto __pyx_L4; } /*else*/ { /* "numpy.pxd":211 * copy_shape = 1 * else: * copy_shape = 0 # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) */ __pyx_v_copy_shape = 0; } __pyx_L4:; /* "numpy.pxd":213 * copy_shape = 0 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS); if (__pyx_t_1) { /* "numpy.pxd":214 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not C contiguous") * */ __pyx_t_2 = (!PyArray_CHKFLAGS(__pyx_v_self, NPY_C_CONTIGUOUS)); __pyx_t_3 = __pyx_t_2; } else { __pyx_t_3 = __pyx_t_1; } if (__pyx_t_3) { /* "numpy.pxd":215 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_2), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; 
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L5; } __pyx_L5:; /* "numpy.pxd":217 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ __pyx_t_3 = ((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS); if (__pyx_t_3) { /* "numpy.pxd":218 * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not Fortran contiguous") * */ __pyx_t_1 = (!PyArray_CHKFLAGS(__pyx_v_self, NPY_F_CONTIGUOUS)); __pyx_t_2 = __pyx_t_1; } else { __pyx_t_2 = __pyx_t_3; } if (__pyx_t_2) { /* "numpy.pxd":219 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_4), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L6; } __pyx_L6:; /* "numpy.pxd":221 * raise ValueError(u"ndarray is not Fortran contiguous") * * info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<< * info.ndim = ndim * if copy_shape: */ __pyx_v_info->buf = PyArray_DATA(__pyx_v_self); /* "numpy.pxd":222 * * info.buf = PyArray_DATA(self) * info.ndim = ndim # <<<<<<<<<<<<<< * if copy_shape: * # Allocate new buffer for strides and shape info. */ __pyx_v_info->ndim = __pyx_v_ndim; /* "numpy.pxd":223 * info.buf = PyArray_DATA(self) * info.ndim = ndim * if copy_shape: # <<<<<<<<<<<<<< * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. */ if (__pyx_v_copy_shape) { /* "numpy.pxd":226 * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. * info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) # <<<<<<<<<<<<<< * info.shape = info.strides + ndim * for i in range(ndim): */ __pyx_v_info->strides = ((Py_ssize_t *)malloc((((sizeof(Py_ssize_t)) * ((size_t)__pyx_v_ndim)) * 2))); /* "numpy.pxd":227 * # This is allocated as one block, strides first. 
* info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) * info.shape = info.strides + ndim # <<<<<<<<<<<<<< * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] */ __pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim); /* "numpy.pxd":228 * info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) * info.shape = info.strides + ndim * for i in range(ndim): # <<<<<<<<<<<<<< * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] */ __pyx_t_5 = __pyx_v_ndim; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "numpy.pxd":229 * info.shape = info.strides + ndim * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<< * info.shape[i] = PyArray_DIMS(self)[i] * else: */ (__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]); /* "numpy.pxd":230 * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<< * else: * info.strides = PyArray_STRIDES(self) */ (__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]); } goto __pyx_L7; } /*else*/ { /* "numpy.pxd":232 * info.shape[i] = PyArray_DIMS(self)[i] * else: * info.strides = PyArray_STRIDES(self) # <<<<<<<<<<<<<< * info.shape = PyArray_DIMS(self) * info.suboffsets = NULL */ __pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self)); /* "numpy.pxd":233 * else: * info.strides = PyArray_STRIDES(self) * info.shape = PyArray_DIMS(self) # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) */ __pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(__pyx_v_self)); } __pyx_L7:; /* "numpy.pxd":234 * info.strides = PyArray_STRIDES(self) * info.shape = PyArray_DIMS(self) * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) */ __pyx_v_info->suboffsets = NULL; /* "numpy.pxd":235 * info.shape = PyArray_DIMS(self) * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<< * info.readonly = not PyArray_ISWRITEABLE(self) * */ __pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self); /* "numpy.pxd":236 * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<< * * cdef int t */ __pyx_v_info->readonly = (!PyArray_ISWRITEABLE(__pyx_v_self)); /* "numpy.pxd":239 * * cdef int t * cdef char* f = NULL # <<<<<<<<<<<<<< * cdef dtype descr = self.descr * cdef list stack */ __pyx_v_f = NULL; /* "numpy.pxd":240 * cdef int t * cdef char* f = NULL * cdef dtype descr = self.descr # <<<<<<<<<<<<<< * cdef list stack * cdef int offset */ __pyx_t_4 = ((PyObject *)__pyx_v_self->descr); __Pyx_INCREF(__pyx_t_4); __pyx_v_descr = ((PyArray_Descr *)__pyx_t_4); __pyx_t_4 = 0; /* "numpy.pxd":244 * cdef int offset * * cdef bint hasfields = PyDataType_HASFIELDS(descr) # <<<<<<<<<<<<<< * * if not hasfields and not copy_shape: */ __pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr); /* "numpy.pxd":246 * cdef bint hasfields = PyDataType_HASFIELDS(descr) * * if not hasfields and not copy_shape: # <<<<<<<<<<<<<< * # do not call releasebuffer * info.obj = None */ __pyx_t_2 = (!__pyx_v_hasfields); if (__pyx_t_2) { __pyx_t_3 = (!__pyx_v_copy_shape); __pyx_t_1 = __pyx_t_3; } else { __pyx_t_1 = __pyx_t_2; } if (__pyx_t_1) { /* "numpy.pxd":248 * if not hasfields and not copy_shape: * # do not call releasebuffer * info.obj = None # <<<<<<<<<<<<<< * else: * # need to call 
releasebuffer */ __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = Py_None; goto __pyx_L10; } /*else*/ { /* "numpy.pxd":251 * else: * # need to call releasebuffer * info.obj = self # <<<<<<<<<<<<<< * * if not hasfields: */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); } __pyx_L10:; /* "numpy.pxd":253 * info.obj = self * * if not hasfields: # <<<<<<<<<<<<<< * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or */ __pyx_t_1 = (!__pyx_v_hasfields); if (__pyx_t_1) { /* "numpy.pxd":254 * * if not hasfields: * t = descr.type_num # <<<<<<<<<<<<<< * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): */ __pyx_t_5 = __pyx_v_descr->type_num; __pyx_v_t = __pyx_t_5; /* "numpy.pxd":255 * if not hasfields: * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_1 = (__pyx_v_descr->byteorder == '>'); if (__pyx_t_1) { __pyx_t_2 = __pyx_v_little_endian; } else { __pyx_t_2 = __pyx_t_1; } if (!__pyx_t_2) { /* "numpy.pxd":256 * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" */ __pyx_t_1 = (__pyx_v_descr->byteorder == '<'); if (__pyx_t_1) { __pyx_t_3 = (!__pyx_v_little_endian); __pyx_t_7 = __pyx_t_3; } else { __pyx_t_7 = __pyx_t_1; } __pyx_t_1 = __pyx_t_7; } else { __pyx_t_1 = __pyx_t_2; } if (__pyx_t_1) { /* "numpy.pxd":257 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_6), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L12; } __pyx_L12:; /* "numpy.pxd":258 * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" */ __pyx_t_1 = (__pyx_v_t == NPY_BYTE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__b; goto __pyx_L13; } /* "numpy.pxd":259 * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" */ __pyx_t_1 = (__pyx_v_t == NPY_UBYTE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__B; goto __pyx_L13; } /* "numpy.pxd":260 * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" */ __pyx_t_1 = (__pyx_v_t == NPY_SHORT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__h; goto __pyx_L13; } /* "numpy.pxd":261 * elif t == NPY_UBYTE: f = "B" * elif t == 
NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" */ __pyx_t_1 = (__pyx_v_t == NPY_USHORT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__H; goto __pyx_L13; } /* "numpy.pxd":262 * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" */ __pyx_t_1 = (__pyx_v_t == NPY_INT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__i; goto __pyx_L13; } /* "numpy.pxd":263 * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" */ __pyx_t_1 = (__pyx_v_t == NPY_UINT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__I; goto __pyx_L13; } /* "numpy.pxd":264 * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" */ __pyx_t_1 = (__pyx_v_t == NPY_LONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__l; goto __pyx_L13; } /* "numpy.pxd":265 * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" */ __pyx_t_1 = (__pyx_v_t == NPY_ULONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__L; goto __pyx_L13; } /* "numpy.pxd":266 * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" */ __pyx_t_1 = (__pyx_v_t == NPY_LONGLONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__q; goto __pyx_L13; } /* "numpy.pxd":267 * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" */ __pyx_t_1 = (__pyx_v_t == NPY_ULONGLONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Q; goto __pyx_L13; } /* "numpy.pxd":268 * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" */ __pyx_t_1 = (__pyx_v_t == NPY_FLOAT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__f; goto __pyx_L13; } /* "numpy.pxd":269 * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" */ __pyx_t_1 = (__pyx_v_t == NPY_DOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__d; goto __pyx_L13; } /* "numpy.pxd":270 * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" */ __pyx_t_1 = (__pyx_v_t == NPY_LONGDOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__g; goto __pyx_L13; } /* "numpy.pxd":271 * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" */ __pyx_t_1 = (__pyx_v_t == NPY_CFLOAT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Zf; goto __pyx_L13; } /* "numpy.pxd":272 * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" */ __pyx_t_1 = (__pyx_v_t == NPY_CDOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Zd; goto __pyx_L13; } /* "numpy.pxd":273 * elif t 
== NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f = "O" * else: */ __pyx_t_1 = (__pyx_v_t == NPY_CLONGDOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Zg; goto __pyx_L13; } /* "numpy.pxd":274 * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ __pyx_t_1 = (__pyx_v_t == NPY_OBJECT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__O; goto __pyx_L13; } /*else*/ { /* "numpy.pxd":276 * elif t == NPY_OBJECT: f = "O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * info.format = f * return */ __pyx_t_4 = PyInt_FromLong(__pyx_v_t); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_8 = PyNumber_Remainder(((PyObject *)__pyx_kp_u_7), __pyx_t_4); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_8)); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_t_8)); __Pyx_GIVEREF(((PyObject *)__pyx_t_8)); __pyx_t_8 = 0; __pyx_t_8 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_8, 0, 0, 0); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L13:; /* "numpy.pxd":277 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f # <<<<<<<<<<<<<< * return * else: */ __pyx_v_info->format = __pyx_v_f; /* "numpy.pxd":278 * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f * return # <<<<<<<<<<<<<< * else: * info.format = stdlib.malloc(_buffer_format_string_len) */ __pyx_r = 0; goto __pyx_L0; goto __pyx_L11; } /*else*/ { /* "numpy.pxd":280 * return * else: * info.format = stdlib.malloc(_buffer_format_string_len) # <<<<<<<<<<<<<< * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 */ __pyx_v_info->format = ((char *)malloc(255)); /* "numpy.pxd":281 * else: * info.format = stdlib.malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment # <<<<<<<<<<<<<< * offset = 0 * f = _util_dtypestring(descr, info.format + 1, */ (__pyx_v_info->format[0]) = '^'; /* "numpy.pxd":282 * info.format = stdlib.malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 # <<<<<<<<<<<<<< * f = _util_dtypestring(descr, info.format + 1, * info.format + _buffer_format_string_len, */ __pyx_v_offset = 0; /* "numpy.pxd":285 * f = _util_dtypestring(descr, info.format + 1, * info.format + _buffer_format_string_len, * &offset) # <<<<<<<<<<<<<< * f[0] = c'\0' # Terminate format string * */ __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 255), (&__pyx_v_offset)); if (unlikely(__pyx_t_9 == NULL)) 
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 283; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_f = __pyx_t_9; /* "numpy.pxd":286 * info.format + _buffer_format_string_len, * &offset) * f[0] = c'\0' # Terminate format string # <<<<<<<<<<<<<< * * def __releasebuffer__(ndarray self, Py_buffer* info): */ (__pyx_v_f[0]) = '\x00'; } __pyx_L11:; __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { __Pyx_GOTREF(Py_None); __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; } __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_descr); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__releasebuffer__ (wrapper)", 0); __pyx_pf_5numpy_7ndarray_2__releasebuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info)); __Pyx_RefNannyFinishContext(); } /* "numpy.pxd":288 * f[0] = c'\0' # Terminate format string * * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< * if PyArray_HASFIELDS(self): * stdlib.free(info.format) */ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__releasebuffer__", 0); /* "numpy.pxd":289 * * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_t_1 = PyArray_HASFIELDS(__pyx_v_self); if (__pyx_t_1) { /* "numpy.pxd":290 * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): * stdlib.free(info.format) # <<<<<<<<<<<<<< * if sizeof(npy_intp) != sizeof(Py_ssize_t): * stdlib.free(info.strides) */ free(__pyx_v_info->format); goto __pyx_L3; } __pyx_L3:; /* "numpy.pxd":291 * if PyArray_HASFIELDS(self): * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * stdlib.free(info.strides) * # info.shape was stored after info.strides in the same block */ __pyx_t_1 = ((sizeof(npy_intp)) != (sizeof(Py_ssize_t))); if (__pyx_t_1) { /* "numpy.pxd":292 * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): * stdlib.free(info.strides) # <<<<<<<<<<<<<< * # info.shape was stored after info.strides in the same block * */ free(__pyx_v_info->strides); goto __pyx_L4; } __pyx_L4:; __Pyx_RefNannyFinishContext(); } /* "numpy.pxd":768 * ctypedef npy_cdouble complex_t * * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(1, a) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0); /* "numpy.pxd":769 * * cdef inline object PyArray_MultiIterNew1(a): * 
return PyArray_MultiIterNew(1, a) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew2(a, b): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 769; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":771 * return PyArray_MultiIterNew(1, a) * * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(2, a, b) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0); /* "numpy.pxd":772 * * cdef inline object PyArray_MultiIterNew2(a, b): * return PyArray_MultiIterNew(2, a, b) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew3(a, b, c): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 772; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":774 * return PyArray_MultiIterNew(2, a, b) * * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(3, a, b, c) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0); /* "numpy.pxd":775 * * cdef inline object PyArray_MultiIterNew3(a, b, c): * return PyArray_MultiIterNew(3, a, b, c) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 775; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":777 * return PyArray_MultiIterNew(3, a, b, c) * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(4, a, b, c, d) * */ static CYTHON_INLINE PyObject 
*__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0); /* "numpy.pxd":778 * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): * return PyArray_MultiIterNew(4, a, b, c, d) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 778; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":780 * return PyArray_MultiIterNew(4, a, b, c, d) * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(5, a, b, c, d, e) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0); /* "numpy.pxd":781 * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): * return PyArray_MultiIterNew(5, a, b, c, d, e) # <<<<<<<<<<<<<< * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 781; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":783 * return PyArray_MultiIterNew(5, a, b, c, d, e) * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< * # Recursive utility function used in __getbuffer__ to get format * # string. The new location in the format string is returned. 
*/ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) { PyArray_Descr *__pyx_v_child = 0; int __pyx_v_endian_detector; int __pyx_v_little_endian; PyObject *__pyx_v_fields = 0; PyObject *__pyx_v_childname = NULL; PyObject *__pyx_v_new_offset = NULL; PyObject *__pyx_v_t = NULL; char *__pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *(*__pyx_t_6)(PyObject *); int __pyx_t_7; int __pyx_t_8; int __pyx_t_9; int __pyx_t_10; long __pyx_t_11; char *__pyx_t_12; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_util_dtypestring", 0); /* "numpy.pxd":790 * cdef int delta_offset * cdef tuple i * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((&endian_detector)[0] != 0) * cdef tuple fields */ __pyx_v_endian_detector = 1; /* "numpy.pxd":791 * cdef tuple i * cdef int endian_detector = 1 * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * cdef tuple fields * */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "numpy.pxd":794 * cdef tuple fields * * for childname in descr.names: # <<<<<<<<<<<<<< * fields = descr.fields[childname] * child, new_offset = fields */ if (unlikely(((PyObject *)__pyx_v_descr->names) == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_1 = ((PyObject *)__pyx_v_descr->names); __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; for (;;) { if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif __Pyx_XDECREF(__pyx_v_childname); __pyx_v_childname = __pyx_t_3; __pyx_t_3 = 0; /* "numpy.pxd":795 * * for childname in descr.names: * fields = descr.fields[childname] # <<<<<<<<<<<<<< * child, new_offset = fields * */ __pyx_t_3 = PyObject_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (!__pyx_t_3) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected tuple, got %.200s", Py_TYPE(__pyx_t_3)->tp_name), 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_XDECREF(((PyObject *)__pyx_v_fields)); __pyx_v_fields = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "numpy.pxd":796 * for childname in descr.names: * fields = descr.fields[childname] * child, new_offset = fields # <<<<<<<<<<<<<< * * if (end - f) - (new_offset - offset[0]) < 15: */ if (likely(PyTuple_CheckExact(((PyObject *)__pyx_v_fields)))) { PyObject* sequence = ((PyObject *)__pyx_v_fields); #if CYTHON_COMPILING_IN_CPYTHON Py_ssize_t size = Py_SIZE(sequence); #else Py_ssize_t size = PySequence_Size(sequence); #endif if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else 
if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); #else __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else if (1) { __Pyx_RaiseNoneNotIterableError(); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } else { Py_ssize_t index = -1; __pyx_t_5 = PyObject_GetIter(((PyObject *)__pyx_v_fields)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = Py_TYPE(__pyx_t_5)->tp_iternext; index = 0; __pyx_t_3 = __pyx_t_6(__pyx_t_5); if (unlikely(!__pyx_t_3)) goto __pyx_L5_unpacking_failed; __Pyx_GOTREF(__pyx_t_3); index = 1; __pyx_t_4 = __pyx_t_6(__pyx_t_5); if (unlikely(!__pyx_t_4)) goto __pyx_L5_unpacking_failed; __Pyx_GOTREF(__pyx_t_4); if (__Pyx_IternextUnpackEndCheck(__pyx_t_6(__pyx_t_5), 2) < 0) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_6 = NULL; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L6_unpacking_done; __pyx_L5_unpacking_failed:; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_6 = NULL; if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_L6_unpacking_done:; } if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_XDECREF(((PyObject *)__pyx_v_child)); __pyx_v_child = ((PyArray_Descr *)__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_v_new_offset); __pyx_v_new_offset = __pyx_t_4; __pyx_t_4 = 0; /* "numpy.pxd":798 * child, new_offset = fields * * if (end - f) - (new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * */ __pyx_t_4 = PyInt_FromLong((__pyx_v_end - __pyx_v_f)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyInt_FromLong((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_3); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyNumber_Subtract(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyObject_RichCompare(__pyx_t_3, __pyx_int_15, Py_LT); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = 
__pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { /* "numpy.pxd":799 * * if (end - f) - (new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_t_5 = PyObject_Call(__pyx_builtin_RuntimeError, ((PyObject *)__pyx_k_tuple_9), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L7; } __pyx_L7:; /* "numpy.pxd":801 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_7 = (__pyx_v_child->byteorder == '>'); if (__pyx_t_7) { __pyx_t_8 = __pyx_v_little_endian; } else { __pyx_t_8 = __pyx_t_7; } if (!__pyx_t_8) { /* "numpy.pxd":802 * * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * # One could encode it in the format string and have Cython */ __pyx_t_7 = (__pyx_v_child->byteorder == '<'); if (__pyx_t_7) { __pyx_t_9 = (!__pyx_v_little_endian); __pyx_t_10 = __pyx_t_9; } else { __pyx_t_10 = __pyx_t_7; } __pyx_t_7 = __pyx_t_10; } else { __pyx_t_7 = __pyx_t_8; } if (__pyx_t_7) { /* "numpy.pxd":803 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_10), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L8; } __pyx_L8:; /* "numpy.pxd":813 * * # Output padding bytes * while offset[0] < new_offset: # <<<<<<<<<<<<<< * f[0] = 120 # "x"; pad byte * f += 1 */ while (1) { __pyx_t_5 = PyInt_FromLong((__pyx_v_offset[0])); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_t_5, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if 
(!__pyx_t_7) break; /* "numpy.pxd":814 * # Output padding bytes * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<< * f += 1 * offset[0] += 1 */ (__pyx_v_f[0]) = 120; /* "numpy.pxd":815 * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte * f += 1 # <<<<<<<<<<<<<< * offset[0] += 1 * */ __pyx_v_f = (__pyx_v_f + 1); /* "numpy.pxd":816 * f[0] = 120 # "x"; pad byte * f += 1 * offset[0] += 1 # <<<<<<<<<<<<<< * * offset[0] += child.itemsize */ __pyx_t_11 = 0; (__pyx_v_offset[__pyx_t_11]) = ((__pyx_v_offset[__pyx_t_11]) + 1); } /* "numpy.pxd":818 * offset[0] += 1 * * offset[0] += child.itemsize # <<<<<<<<<<<<<< * * if not PyDataType_HASFIELDS(child): */ __pyx_t_11 = 0; (__pyx_v_offset[__pyx_t_11]) = ((__pyx_v_offset[__pyx_t_11]) + __pyx_v_child->elsize); /* "numpy.pxd":820 * offset[0] += child.itemsize * * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< * t = child.type_num * if end - f < 5: */ __pyx_t_7 = (!PyDataType_HASFIELDS(__pyx_v_child)); if (__pyx_t_7) { /* "numpy.pxd":821 * * if not PyDataType_HASFIELDS(child): * t = child.type_num # <<<<<<<<<<<<<< * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") */ __pyx_t_3 = PyInt_FromLong(__pyx_v_child->type_num); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 821; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_XDECREF(__pyx_v_t); __pyx_v_t = __pyx_t_3; __pyx_t_3 = 0; /* "numpy.pxd":822 * if not PyDataType_HASFIELDS(child): * t = child.type_num * if end - f < 5: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short.") * */ __pyx_t_7 = ((__pyx_v_end - __pyx_v_f) < 5); if (__pyx_t_7) { /* "numpy.pxd":823 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_t_3 = PyObject_Call(__pyx_builtin_RuntimeError, ((PyObject *)__pyx_k_tuple_12), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L12; } __pyx_L12:; /* "numpy.pxd":826 * * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" */ __pyx_t_3 = PyInt_FromLong(NPY_BYTE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 98; goto __pyx_L13; } /* "numpy.pxd":827 * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" */ __pyx_t_5 = PyInt_FromLong(NPY_UBYTE); 
if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 66; goto __pyx_L13; } /* "numpy.pxd":828 * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" */ __pyx_t_3 = PyInt_FromLong(NPY_SHORT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 104; goto __pyx_L13; } /* "numpy.pxd":829 * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" */ __pyx_t_5 = PyInt_FromLong(NPY_USHORT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 72; goto __pyx_L13; } /* "numpy.pxd":830 * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" */ __pyx_t_3 = PyInt_FromLong(NPY_INT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 105; goto __pyx_L13; } /* "numpy.pxd":831 * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t 
== NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" */ __pyx_t_5 = PyInt_FromLong(NPY_UINT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 73; goto __pyx_L13; } /* "numpy.pxd":832 * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" */ __pyx_t_3 = PyInt_FromLong(NPY_LONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 108; goto __pyx_L13; } /* "numpy.pxd":833 * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" */ __pyx_t_5 = PyInt_FromLong(NPY_ULONG); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 76; goto __pyx_L13; } /* "numpy.pxd":834 * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" */ __pyx_t_3 = PyInt_FromLong(NPY_LONGLONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; 
__pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 113; goto __pyx_L13; } /* "numpy.pxd":835 * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" */ __pyx_t_5 = PyInt_FromLong(NPY_ULONGLONG); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 81; goto __pyx_L13; } /* "numpy.pxd":836 * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" */ __pyx_t_3 = PyInt_FromLong(NPY_FLOAT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 102; goto __pyx_L13; } /* "numpy.pxd":837 * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf */ __pyx_t_5 = PyInt_FromLong(NPY_DOUBLE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 100; goto __pyx_L13; } /* "numpy.pxd":838 * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd */ __pyx_t_3 = PyInt_FromLong(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = 
PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 103; goto __pyx_L13; } /* "numpy.pxd":839 * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg */ __pyx_t_5 = PyInt_FromLong(NPY_CFLOAT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 102; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L13; } /* "numpy.pxd":840 * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" */ __pyx_t_3 = PyInt_FromLong(NPY_CDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 100; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L13; } /* "numpy.pxd":841 * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: */ __pyx_t_5 = PyInt_FromLong(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 103; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L13; } /* "numpy.pxd":842 * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ __pyx_t_3 = PyInt_FromLong(NPY_OBJECT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_7) { (__pyx_v_f[0]) = 79; goto __pyx_L13; } /*else*/ { /* "numpy.pxd":844 * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * f += 1 * else: */ __pyx_t_5 = PyNumber_Remainder(((PyObject *)__pyx_kp_u_7), __pyx_v_t); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_5)); __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_t_5)); __Pyx_GIVEREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L13:; /* "numpy.pxd":845 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * f += 1 # <<<<<<<<<<<<<< * else: * # Cython ignores struct boundary information ("T{...}"), */ __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L11; } /*else*/ { /* "numpy.pxd":849 * # Cython ignores struct boundary information ("T{...}"), * # so don't output it * f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<< * return f * */ __pyx_t_12 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_12 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 849; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_f = __pyx_t_12; } __pyx_L11:; } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "numpy.pxd":850 * # so don't output it * f = _util_dtypestring(child, f, end, offset) * return f # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_f; goto __pyx_L0; __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_child); __Pyx_XDECREF(__pyx_v_fields); 
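/* [Editor's note -- illustrative sketch only, not part of the Cython-generated
 * module.]  The long PyObject_RichCompare chain that ends just above is the
 * generated form of the numpy.pxd mapping from NumPy type numbers to the
 * single-character buffer format codes written by _util_dtypestring.  A
 * hand-written equivalent of the scalar cases visible here would look roughly
 * like the switch below; it is wrapped in #if 0 so it cannot affect
 * compilation of this generated file. */
#if 0
static char npy_typenum_to_format_char(int t)
{
    switch (t) {
        case NPY_UINT:       return 'I';  /* 73  */
        case NPY_LONG:       return 'l';  /* 108 */
        case NPY_ULONG:      return 'L';  /* 76  */
        case NPY_LONGLONG:   return 'q';  /* 113 */
        case NPY_ULONGLONG:  return 'Q';  /* 81  */
        case NPY_FLOAT:      return 'f';  /* 102 */
        case NPY_DOUBLE:     return 'd';  /* 100 */
        case NPY_LONGDOUBLE: return 'g';  /* 103 */
        case NPY_OBJECT:     return 'O';  /* 79  */
        default:             return 0;    /* complex types emit a "Z" prefix
                                             ("Zf", "Zd", "Zg"); any other code
                                             raises ValueError("unknown dtype
                                             code in numpy.pxd (%d)") in the
                                             generated code above */
    }
}
#endif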
__Pyx_XDECREF(__pyx_v_childname); __Pyx_XDECREF(__pyx_v_new_offset); __Pyx_XDECREF(__pyx_v_t); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":965 * * * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< * cdef PyObject* baseptr * if base is None: */ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) { PyObject *__pyx_v_baseptr; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("set_array_base", 0); /* "numpy.pxd":967 * cdef inline void set_array_base(ndarray arr, object base): * cdef PyObject* baseptr * if base is None: # <<<<<<<<<<<<<< * baseptr = NULL * else: */ __pyx_t_1 = (__pyx_v_base == Py_None); if (__pyx_t_1) { /* "numpy.pxd":968 * cdef PyObject* baseptr * if base is None: * baseptr = NULL # <<<<<<<<<<<<<< * else: * Py_INCREF(base) # important to do this before decref below! */ __pyx_v_baseptr = NULL; goto __pyx_L3; } /*else*/ { /* "numpy.pxd":970 * baseptr = NULL * else: * Py_INCREF(base) # important to do this before decref below! # <<<<<<<<<<<<<< * baseptr = base * Py_XDECREF(arr.base) */ Py_INCREF(__pyx_v_base); /* "numpy.pxd":971 * else: * Py_INCREF(base) # important to do this before decref below! * baseptr = base # <<<<<<<<<<<<<< * Py_XDECREF(arr.base) * arr.base = baseptr */ __pyx_v_baseptr = ((PyObject *)__pyx_v_base); } __pyx_L3:; /* "numpy.pxd":972 * Py_INCREF(base) # important to do this before decref below! * baseptr = base * Py_XDECREF(arr.base) # <<<<<<<<<<<<<< * arr.base = baseptr * */ Py_XDECREF(__pyx_v_arr->base); /* "numpy.pxd":973 * baseptr = base * Py_XDECREF(arr.base) * arr.base = baseptr # <<<<<<<<<<<<<< * * cdef inline object get_array_base(ndarray arr): */ __pyx_v_arr->base = __pyx_v_baseptr; __Pyx_RefNannyFinishContext(); } /* "numpy.pxd":975 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("get_array_base", 0); /* "numpy.pxd":976 * * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: # <<<<<<<<<<<<<< * return None * else: */ __pyx_t_1 = (__pyx_v_arr->base == NULL); if (__pyx_t_1) { /* "numpy.pxd":977 * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: * return None # <<<<<<<<<<<<<< * else: * return arr.base */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_None); __pyx_r = Py_None; goto __pyx_L0; goto __pyx_L3; } /*else*/ { /* "numpy.pxd":979 * return None * else: * return arr.base # <<<<<<<<<<<<<< */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_arr->base)); __pyx_r = ((PyObject *)__pyx_v_arr->base); goto __pyx_L0; } __pyx_L3:; __pyx_r = Py_None; __Pyx_INCREF(Py_None); __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyMethodDef __pyx_methods[] = { {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 static struct PyModuleDef __pyx_moduledef = { PyModuleDef_HEAD_INIT, __Pyx_NAMESTR("routines"), __Pyx_DOCSTR(__pyx_k_13), /* m_doc */ -1, /* m_size */ __pyx_methods /* m_methods */, NULL, /* m_reload */ NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_kp_u_1, __pyx_k_1, sizeof(__pyx_k_1), 0, 1, 0, 0}, {&__pyx_kp_u_11, __pyx_k_11, sizeof(__pyx_k_11), 0, 1, 0, 0}, {&__pyx_kp_s_14, __pyx_k_14, sizeof(__pyx_k_14), 0, 0, 1, 0}, 
{&__pyx_kp_s_17, __pyx_k_17, sizeof(__pyx_k_17), 0, 0, 1, 0}, {&__pyx_n_s_18, __pyx_k_18, sizeof(__pyx_k_18), 0, 0, 1, 1}, {&__pyx_kp_u_3, __pyx_k_3, sizeof(__pyx_k_3), 0, 1, 0, 0}, {&__pyx_kp_u_5, __pyx_k_5, sizeof(__pyx_k_5), 0, 1, 0, 0}, {&__pyx_kp_u_7, __pyx_k_7, sizeof(__pyx_k_7), 0, 1, 0, 0}, {&__pyx_kp_u_8, __pyx_k_8, sizeof(__pyx_k_8), 0, 1, 0, 0}, {&__pyx_n_s__Aux, __pyx_k__Aux, sizeof(__pyx_k__Aux), 0, 0, 1, 1}, {&__pyx_n_s__C, __pyx_k__C, sizeof(__pyx_k__C), 0, 0, 1, 1}, {&__pyx_n_s__D2, __pyx_k__D2, sizeof(__pyx_k__D2), 0, 0, 1, 1}, {&__pyx_n_s__P, __pyx_k__P, sizeof(__pyx_k__P), 0, 0, 1, 1}, {&__pyx_n_s__RuntimeError, __pyx_k__RuntimeError, sizeof(__pyx_k__RuntimeError), 0, 0, 1, 1}, {&__pyx_n_s__S, __pyx_k__S, sizeof(__pyx_k__S), 0, 0, 1, 1}, {&__pyx_n_s__Sx, __pyx_k__Sx, sizeof(__pyx_k__Sx), 0, 0, 1, 1}, {&__pyx_n_s__Sx_tmp, __pyx_k__Sx_tmp, sizeof(__pyx_k__Sx_tmp), 0, 0, 1, 1}, {&__pyx_n_s__U, __pyx_k__U, sizeof(__pyx_k__U), 0, 0, 1, 1}, {&__pyx_n_s__VX, __pyx_k__VX, sizeof(__pyx_k__VX), 0, 0, 1, 1}, {&__pyx_n_s__VX_flat, __pyx_k__VX_flat, sizeof(__pyx_k__VX_flat), 0, 0, 1, 1}, {&__pyx_n_s__ValueError, __pyx_k__ValueError, sizeof(__pyx_k__ValueError), 0, 0, 1, 1}, {&__pyx_n_s__Vt, __pyx_k__Vt, sizeof(__pyx_k__Vt), 0, 0, 1, 1}, {&__pyx_n_s__X, __pyx_k__X, sizeof(__pyx_k__X), 0, 0, 1, 1}, {&__pyx_n_s__X_flat, __pyx_k__X_flat, sizeof(__pyx_k__X_flat), 0, 0, 1, 1}, {&__pyx_n_s__Y, __pyx_k__Y, sizeof(__pyx_k__Y), 0, 0, 1, 1}, {&__pyx_n_s____main__, __pyx_k____main__, sizeof(__pyx_k____main__), 0, 0, 1, 1}, {&__pyx_n_s____test__, __pyx_k____test__, sizeof(__pyx_k____test__), 0, 0, 1, 1}, {&__pyx_n_s____version__, __pyx_k____version__, sizeof(__pyx_k____version__), 0, 0, 1, 1}, {&__pyx_n_s__axis, __pyx_k__axis, sizeof(__pyx_k__axis), 0, 0, 1, 1}, {&__pyx_n_s__combinations, __pyx_k__combinations, sizeof(__pyx_k__combinations), 0, 0, 1, 1}, {&__pyx_n_s__d2, __pyx_k__d2, sizeof(__pyx_k__d2), 0, 0, 1, 1}, {&__pyx_n_s__dim, __pyx_k__dim, sizeof(__pyx_k__dim), 0, 0, 1, 1}, {&__pyx_n_s__dims, __pyx_k__dims, sizeof(__pyx_k__dims), 0, 0, 1, 1}, {&__pyx_n_s__dmax, __pyx_k__dmax, sizeof(__pyx_k__dmax), 0, 0, 1, 1}, {&__pyx_n_s__dmin, __pyx_k__dmin, sizeof(__pyx_k__dmin), 0, 0, 1, 1}, {&__pyx_n_s__endims, __pyx_k__endims, sizeof(__pyx_k__endims), 0, 0, 1, 1}, {&__pyx_n_s__gamln, __pyx_k__gamln, sizeof(__pyx_k__gamln), 0, 0, 1, 1}, {&__pyx_n_s__i, __pyx_k__i, sizeof(__pyx_k__i), 0, 0, 1, 1}, {&__pyx_n_s__info, __pyx_k__info, sizeof(__pyx_k__info), 0, 0, 1, 1}, {&__pyx_n_s__interp, __pyx_k__interp, sizeof(__pyx_k__interp), 0, 0, 1, 1}, {&__pyx_n_s__iwork, __pyx_k__iwork, sizeof(__pyx_k__iwork), 0, 0, 1, 1}, {&__pyx_n_s__k, __pyx_k__k, sizeof(__pyx_k__k), 0, 0, 1, 1}, {&__pyx_n_s__liwork, __pyx_k__liwork, sizeof(__pyx_k__liwork), 0, 0, 1, 1}, {&__pyx_n_s__lwork, __pyx_k__lwork, sizeof(__pyx_k__lwork), 0, 0, 1, 1}, {&__pyx_n_s__m, __pyx_k__m, sizeof(__pyx_k__m), 0, 0, 1, 1}, {&__pyx_n_s__magic, __pyx_k__magic, sizeof(__pyx_k__magic), 0, 0, 1, 1}, {&__pyx_n_s__mahalanobis, __pyx_k__mahalanobis, sizeof(__pyx_k__mahalanobis), 0, 0, 1, 1}, {&__pyx_n_s__median, __pyx_k__median, sizeof(__pyx_k__median), 0, 0, 1, 1}, {&__pyx_n_s__multi, __pyx_k__multi, sizeof(__pyx_k__multi), 0, 0, 1, 1}, {&__pyx_n_s__n, __pyx_k__n, sizeof(__pyx_k__n), 0, 0, 1, 1}, {&__pyx_n_s__np, __pyx_k__np, sizeof(__pyx_k__np), 0, 0, 1, 1}, {&__pyx_n_s__numpy, __pyx_k__numpy, sizeof(__pyx_k__numpy), 0, 0, 1, 1}, {&__pyx_n_s__p, __pyx_k__p, sizeof(__pyx_k__p), 0, 0, 1, 1}, {&__pyx_n_s__permutations, __pyx_k__permutations, 
sizeof(__pyx_k__permutations), 0, 0, 1, 1}, {&__pyx_n_s__pi, __pyx_k__pi, sizeof(__pyx_k__pi), 0, 0, 1, 1}, {&__pyx_n_s__pi_view, __pyx_k__pi_view, sizeof(__pyx_k__pi_view), 0, 0, 1, 1}, {&__pyx_n_s__psi, __pyx_k__psi, sizeof(__pyx_k__psi), 0, 0, 1, 1}, {&__pyx_n_s__quantile, __pyx_k__quantile, sizeof(__pyx_k__quantile), 0, 0, 1, 1}, {&__pyx_n_s__range, __pyx_k__range, sizeof(__pyx_k__range), 0, 0, 1, 1}, {&__pyx_n_s__ratio, __pyx_k__ratio, sizeof(__pyx_k__ratio), 0, 0, 1, 1}, {&__pyx_n_s__reshape, __pyx_k__reshape, sizeof(__pyx_k__reshape), 0, 0, 1, 1}, {&__pyx_n_s__s, __pyx_k__s, sizeof(__pyx_k__s), 0, 0, 1, 1}, {&__pyx_n_s__s_tmp, __pyx_k__s_tmp, sizeof(__pyx_k__s_tmp), 0, 0, 1, 1}, {&__pyx_n_s__shape, __pyx_k__shape, sizeof(__pyx_k__shape), 0, 0, 1, 1}, {&__pyx_n_s__svd, __pyx_k__svd, sizeof(__pyx_k__svd), 0, 0, 1, 1}, {&__pyx_n_s__vx, __pyx_k__vx, sizeof(__pyx_k__vx), 0, 0, 1, 1}, {&__pyx_n_s__vx_tmp, __pyx_k__vx_tmp, sizeof(__pyx_k__vx_tmp), 0, 0, 1, 1}, {&__pyx_n_s__work, __pyx_k__work, sizeof(__pyx_k__work), 0, 0, 1, 1}, {&__pyx_n_s__x, __pyx_k__x, sizeof(__pyx_k__x), 0, 0, 1, 1}, {&__pyx_n_s__x_flat, __pyx_k__x_flat, sizeof(__pyx_k__x_flat), 0, 0, 1, 1}, {&__pyx_n_s__x_flat_tmp, __pyx_k__x_flat_tmp, sizeof(__pyx_k__x_flat_tmp), 0, 0, 1, 1}, {&__pyx_n_s__x_tmp, __pyx_k__x_tmp, sizeof(__pyx_k__x_tmp), 0, 0, 1, 1}, {&__pyx_n_s__y, __pyx_k__y, sizeof(__pyx_k__y), 0, 0, 1, 1}, {&__pyx_n_s__zeros, __pyx_k__zeros, sizeof(__pyx_k__zeros), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_ValueError = __Pyx_GetName(__pyx_b, __pyx_n_s__ValueError); if (!__pyx_builtin_ValueError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_range = __Pyx_GetName(__pyx_b, __pyx_n_s__range); if (!__pyx_builtin_range) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 228; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_RuntimeError = __Pyx_GetName(__pyx_b, __pyx_n_s__RuntimeError); if (!__pyx_builtin_RuntimeError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} return 0; __pyx_L1_error:; return -1; } static int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "numpy.pxd":215 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_k_tuple_2 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_2); __Pyx_INCREF(((PyObject *)__pyx_kp_u_1)); PyTuple_SET_ITEM(__pyx_k_tuple_2, 0, ((PyObject *)__pyx_kp_u_1)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_1)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_2)); /* "numpy.pxd":219 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_k_tuple_4 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_4); __Pyx_INCREF(((PyObject *)__pyx_kp_u_3)); PyTuple_SET_ITEM(__pyx_k_tuple_4, 0, ((PyObject *)__pyx_kp_u_3)); 
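/* [Editor's note -- explanatory sketch, not generated code.]  Each
 * __pyx_k_tuple_* built in __Pyx_InitCachedConstants is a one-element argument
 * tuple holding a constant exception message; the raise sites earlier in this
 * file then call ValueError/RuntimeError with the cached tuple instead of
 * rebuilding the arguments on every raise.  The uncached equivalent of the
 * first cached case ("ndarray is not C contiguous", numpy.pxd:215) would
 * simply be the standard C-API call below (kept in #if 0 so it is not
 * compiled): */
#if 0
PyErr_SetString(PyExc_ValueError, "ndarray is not C contiguous");
#endif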
__Pyx_GIVEREF(((PyObject *)__pyx_kp_u_3)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_4)); /* "numpy.pxd":257 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_k_tuple_6 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_6); __Pyx_INCREF(((PyObject *)__pyx_kp_u_5)); PyTuple_SET_ITEM(__pyx_k_tuple_6, 0, ((PyObject *)__pyx_kp_u_5)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_5)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_6)); /* "numpy.pxd":799 * * if (end - f) - (new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_k_tuple_9 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_9)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_9); __Pyx_INCREF(((PyObject *)__pyx_kp_u_8)); PyTuple_SET_ITEM(__pyx_k_tuple_9, 0, ((PyObject *)__pyx_kp_u_8)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_8)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_9)); /* "numpy.pxd":803 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_k_tuple_10 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_10)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_10); __Pyx_INCREF(((PyObject *)__pyx_kp_u_5)); PyTuple_SET_ITEM(__pyx_k_tuple_10, 0, ((PyObject *)__pyx_kp_u_5)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_5)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_10)); /* "numpy.pxd":823 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_k_tuple_12 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_12)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_12); __Pyx_INCREF(((PyObject *)__pyx_kp_u_11)); PyTuple_SET_ITEM(__pyx_k_tuple_12, 0, ((PyObject *)__pyx_kp_u_11)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_11)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_12)); /* "nipy/labs/utils/routines.pyx":45 * # This is faster than scipy.stats.scoreatpercentile due to partial * # sorting * def quantile(X, double ratio, int interp=False, int axis=0): # <<<<<<<<<<<<<< * """ * q = quantile(data, ratio, interp=False, axis=0). 
*/ __pyx_k_tuple_15 = PyTuple_New(9); if (unlikely(!__pyx_k_tuple_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 45; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_15); __Pyx_INCREF(((PyObject *)__pyx_n_s__X)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 0, ((PyObject *)__pyx_n_s__X)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__X)); __Pyx_INCREF(((PyObject *)__pyx_n_s__ratio)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 1, ((PyObject *)__pyx_n_s__ratio)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__ratio)); __Pyx_INCREF(((PyObject *)__pyx_n_s__interp)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 2, ((PyObject *)__pyx_n_s__interp)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__interp)); __Pyx_INCREF(((PyObject *)__pyx_n_s__axis)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 3, ((PyObject *)__pyx_n_s__axis)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__axis)); __Pyx_INCREF(((PyObject *)__pyx_n_s__x)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 4, ((PyObject *)__pyx_n_s__x)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__x)); __Pyx_INCREF(((PyObject *)__pyx_n_s__y)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 5, ((PyObject *)__pyx_n_s__y)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__y)); __Pyx_INCREF(((PyObject *)__pyx_n_s__multi)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 6, ((PyObject *)__pyx_n_s__multi)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__multi)); __Pyx_INCREF(((PyObject *)__pyx_n_s__dims)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 7, ((PyObject *)__pyx_n_s__dims)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__dims)); __Pyx_INCREF(((PyObject *)__pyx_n_s__Y)); PyTuple_SET_ITEM(__pyx_k_tuple_15, 8, ((PyObject *)__pyx_n_s__Y)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Y)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_15)); __pyx_k_codeobj_16 = (PyObject*)__Pyx_PyCode_New(4, 0, 9, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_15, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_17, __pyx_n_s__quantile, 45, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_16)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 45; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/utils/routines.pyx":78 * # due to the underlying algorithm that relies on * # partial sorting as opposed to full sorting. * def median(x, axis=0): # <<<<<<<<<<<<<< * """ * median(x, axis=0). */ __pyx_k_tuple_19 = PyTuple_New(2); if (unlikely(!__pyx_k_tuple_19)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_19); __Pyx_INCREF(((PyObject *)__pyx_n_s__x)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 0, ((PyObject *)__pyx_n_s__x)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__x)); __Pyx_INCREF(((PyObject *)__pyx_n_s__axis)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 1, ((PyObject *)__pyx_n_s__axis)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__axis)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_19)); __pyx_k_codeobj_20 = (PyObject*)__Pyx_PyCode_New(2, 0, 2, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_19, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_17, __pyx_n_s__median, 78, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_20)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/utils/routines.pyx":86 * * * def mahalanobis(X, VX): # <<<<<<<<<<<<<< * """ * d2 = mahalanobis(X, VX). 
*/ __pyx_k_tuple_21 = PyTuple_New(16); if (unlikely(!__pyx_k_tuple_21)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 86; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_21); __Pyx_INCREF(((PyObject *)__pyx_n_s__X)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 0, ((PyObject *)__pyx_n_s__X)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__X)); __Pyx_INCREF(((PyObject *)__pyx_n_s__VX)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 1, ((PyObject *)__pyx_n_s__VX)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__VX)); __Pyx_INCREF(((PyObject *)__pyx_n_s__x)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 2, ((PyObject *)__pyx_n_s__x)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__x)); __Pyx_INCREF(((PyObject *)__pyx_n_s__vx)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 3, ((PyObject *)__pyx_n_s__vx)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__vx)); __Pyx_INCREF(((PyObject *)__pyx_n_s__x_tmp)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 4, ((PyObject *)__pyx_n_s__x_tmp)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__x_tmp)); __Pyx_INCREF(((PyObject *)__pyx_n_s__vx_tmp)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 5, ((PyObject *)__pyx_n_s__vx_tmp)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__vx_tmp)); __Pyx_INCREF(((PyObject *)__pyx_n_s__d2)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 6, ((PyObject *)__pyx_n_s__d2)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__d2)); __Pyx_INCREF(((PyObject *)__pyx_n_s__Sx)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 7, ((PyObject *)__pyx_n_s__Sx)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Sx)); __Pyx_INCREF(((PyObject *)__pyx_n_s__Sx_tmp)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 8, ((PyObject *)__pyx_n_s__Sx_tmp)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Sx_tmp)); __Pyx_INCREF(((PyObject *)__pyx_n_s__multi)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 9, ((PyObject *)__pyx_n_s__multi)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__multi)); __Pyx_INCREF(((PyObject *)__pyx_n_s__axis)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 10, ((PyObject *)__pyx_n_s__axis)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__axis)); __Pyx_INCREF(((PyObject *)__pyx_n_s__n)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 11, ((PyObject *)__pyx_n_s__n)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__n)); __Pyx_INCREF(((PyObject *)__pyx_n_s__dims)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 12, ((PyObject *)__pyx_n_s__dims)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__dims)); __Pyx_INCREF(((PyObject *)__pyx_n_s__dim)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 13, ((PyObject *)__pyx_n_s__dim)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__dim)); __Pyx_INCREF(((PyObject *)__pyx_n_s__D2)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 14, ((PyObject *)__pyx_n_s__D2)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__D2)); __Pyx_INCREF(((PyObject *)__pyx_n_s__VX_flat)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 15, ((PyObject *)__pyx_n_s__VX_flat)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__VX_flat)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_21)); __pyx_k_codeobj_22 = (PyObject*)__Pyx_PyCode_New(2, 0, 16, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_21, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_17, __pyx_n_s__mahalanobis, 86, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_22)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 86; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/utils/routines.pyx":144 * * * def svd(X): # <<<<<<<<<<<<<< * """ Singular value decomposition of array `X` * */ __pyx_k_tuple_23 = PyTuple_New(23); if (unlikely(!__pyx_k_tuple_23)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 144; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_23); __Pyx_INCREF(((PyObject *)__pyx_n_s__X)); 
PyTuple_SET_ITEM(__pyx_k_tuple_23, 0, ((PyObject *)__pyx_n_s__X)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__X)); __Pyx_INCREF(((PyObject *)__pyx_n_s__axis)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 1, ((PyObject *)__pyx_n_s__axis)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__axis)); __Pyx_INCREF(((PyObject *)__pyx_n_s__m)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 2, ((PyObject *)__pyx_n_s__m)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__m)); __Pyx_INCREF(((PyObject *)__pyx_n_s__n)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 3, ((PyObject *)__pyx_n_s__n)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__n)); __Pyx_INCREF(((PyObject *)__pyx_n_s__dmin)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 4, ((PyObject *)__pyx_n_s__dmin)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__dmin)); __Pyx_INCREF(((PyObject *)__pyx_n_s__dmax)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 5, ((PyObject *)__pyx_n_s__dmax)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__dmax)); __Pyx_INCREF(((PyObject *)__pyx_n_s__lwork)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 6, ((PyObject *)__pyx_n_s__lwork)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__lwork)); __Pyx_INCREF(((PyObject *)__pyx_n_s__liwork)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 7, ((PyObject *)__pyx_n_s__liwork)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__liwork)); __Pyx_INCREF(((PyObject *)__pyx_n_s__info)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 8, ((PyObject *)__pyx_n_s__info)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__info)); __Pyx_INCREF(((PyObject *)__pyx_n_s__work)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 9, ((PyObject *)__pyx_n_s__work)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__work)); __Pyx_INCREF(((PyObject *)__pyx_n_s__x_flat)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 10, ((PyObject *)__pyx_n_s__x_flat)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__x_flat)); __Pyx_INCREF(((PyObject *)__pyx_n_s__x_flat_tmp)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 11, ((PyObject *)__pyx_n_s__x_flat_tmp)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__x_flat_tmp)); __Pyx_INCREF(((PyObject *)__pyx_n_s__s)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 12, ((PyObject *)__pyx_n_s__s)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__s)); __Pyx_INCREF(((PyObject *)__pyx_n_s__s_tmp)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 13, ((PyObject *)__pyx_n_s__s_tmp)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__s_tmp)); __Pyx_INCREF(((PyObject *)__pyx_n_s__x)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 14, ((PyObject *)__pyx_n_s__x)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__x)); __Pyx_INCREF(((PyObject *)__pyx_n_s__iwork)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 15, ((PyObject *)__pyx_n_s__iwork)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__iwork)); __Pyx_INCREF(((PyObject *)__pyx_n_s__Aux)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 16, ((PyObject *)__pyx_n_s__Aux)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Aux)); __Pyx_INCREF(((PyObject *)__pyx_n_s__U)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 17, ((PyObject *)__pyx_n_s__U)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__U)); __Pyx_INCREF(((PyObject *)__pyx_n_s__Vt)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 18, ((PyObject *)__pyx_n_s__Vt)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Vt)); __Pyx_INCREF(((PyObject *)__pyx_n_s__multi)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 19, ((PyObject *)__pyx_n_s__multi)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__multi)); __Pyx_INCREF(((PyObject *)__pyx_n_s__endims)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 20, ((PyObject *)__pyx_n_s__endims)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__endims)); __Pyx_INCREF(((PyObject *)__pyx_n_s__S)); PyTuple_SET_ITEM(__pyx_k_tuple_23, 21, ((PyObject *)__pyx_n_s__S)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__S)); __Pyx_INCREF(((PyObject *)__pyx_n_s__X_flat)); 
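/* [Editor's note -- explanatory sketch, not generated code.]  Each
 * __Pyx_PyCode_New call in this constants block builds a placeholder code
 * object (variable-name tuple, the file-name constant __pyx_kp_s_17, and the
 * first line number in routines.pyx: 45, 78, 86, 144, 228, 248, 268, 277) so
 * that tracebacks from quantile, median, mahalanobis, svd, permutations,
 * combinations, gamln and psi point back to the original .pyx source.  The
 * functions themselves are registered as ordinary module attributes in the
 * module init further below; the #if 0 block is a hypothetical caller-side
 * sketch (the helper name and the argument value 3.5 are the editor's own)
 * showing how one of them could be reached through the standard C API: */
#if 0
#include <Python.h>

static int example_call_gamln(void)  /* hypothetical helper, illustration only */
{
    PyObject *mod, *res;
    mod = PyImport_ImportModule("nipy.labs.utils.routines");
    if (mod == NULL)
        return -1;
    /* Call gamln(3.5); gamln takes one double, per the "def gamln(double x)"
       signature quoted from routines.pyx elsewhere in this file. */
    res = PyObject_CallMethod(mod, "gamln", "d", 3.5);
    Py_DECREF(mod);
    if (res == NULL)
        return -1;
    Py_DECREF(res);
    return 0;
}
#endif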
PyTuple_SET_ITEM(__pyx_k_tuple_23, 22, ((PyObject *)__pyx_n_s__X_flat)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__X_flat)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_23)); __pyx_k_codeobj_24 = (PyObject*)__Pyx_PyCode_New(1, 0, 23, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_23, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_17, __pyx_n_s__svd, 144, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_24)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 144; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/utils/routines.pyx":228 * * * def permutations(unsigned int n, unsigned int m=1, unsigned long magic=0): # <<<<<<<<<<<<<< * """ * P = permutations(n, m=1, magic=0). */ __pyx_k_tuple_25 = PyTuple_New(8); if (unlikely(!__pyx_k_tuple_25)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 228; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_25); __Pyx_INCREF(((PyObject *)__pyx_n_s__n)); PyTuple_SET_ITEM(__pyx_k_tuple_25, 0, ((PyObject *)__pyx_n_s__n)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__n)); __Pyx_INCREF(((PyObject *)__pyx_n_s__m)); PyTuple_SET_ITEM(__pyx_k_tuple_25, 1, ((PyObject *)__pyx_n_s__m)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__m)); __Pyx_INCREF(((PyObject *)__pyx_n_s__magic)); PyTuple_SET_ITEM(__pyx_k_tuple_25, 2, ((PyObject *)__pyx_n_s__magic)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__magic)); __Pyx_INCREF(((PyObject *)__pyx_n_s__p)); PyTuple_SET_ITEM(__pyx_k_tuple_25, 3, ((PyObject *)__pyx_n_s__p)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__p)); __Pyx_INCREF(((PyObject *)__pyx_n_s__pi)); PyTuple_SET_ITEM(__pyx_k_tuple_25, 4, ((PyObject *)__pyx_n_s__pi)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__pi)); __Pyx_INCREF(((PyObject *)__pyx_n_s__pi_view)); PyTuple_SET_ITEM(__pyx_k_tuple_25, 5, ((PyObject *)__pyx_n_s__pi_view)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__pi_view)); __Pyx_INCREF(((PyObject *)__pyx_n_s__i)); PyTuple_SET_ITEM(__pyx_k_tuple_25, 6, ((PyObject *)__pyx_n_s__i)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__i)); __Pyx_INCREF(((PyObject *)__pyx_n_s__P)); PyTuple_SET_ITEM(__pyx_k_tuple_25, 7, ((PyObject *)__pyx_n_s__P)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__P)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_25)); __pyx_k_codeobj_26 = (PyObject*)__Pyx_PyCode_New(3, 0, 8, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_25, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_17, __pyx_n_s__permutations, 228, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_26)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 228; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/utils/routines.pyx":248 * * * def combinations(unsigned int k, unsigned int n, unsigned int m=1, unsigned long magic=0): # <<<<<<<<<<<<<< * """ * P = combinations(k, n, m=1, magic=0). 
*/ __pyx_k_tuple_27 = PyTuple_New(9); if (unlikely(!__pyx_k_tuple_27)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 248; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_27); __Pyx_INCREF(((PyObject *)__pyx_n_s__k)); PyTuple_SET_ITEM(__pyx_k_tuple_27, 0, ((PyObject *)__pyx_n_s__k)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__k)); __Pyx_INCREF(((PyObject *)__pyx_n_s__n)); PyTuple_SET_ITEM(__pyx_k_tuple_27, 1, ((PyObject *)__pyx_n_s__n)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__n)); __Pyx_INCREF(((PyObject *)__pyx_n_s__m)); PyTuple_SET_ITEM(__pyx_k_tuple_27, 2, ((PyObject *)__pyx_n_s__m)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__m)); __Pyx_INCREF(((PyObject *)__pyx_n_s__magic)); PyTuple_SET_ITEM(__pyx_k_tuple_27, 3, ((PyObject *)__pyx_n_s__magic)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__magic)); __Pyx_INCREF(((PyObject *)__pyx_n_s__p)); PyTuple_SET_ITEM(__pyx_k_tuple_27, 4, ((PyObject *)__pyx_n_s__p)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__p)); __Pyx_INCREF(((PyObject *)__pyx_n_s__pi)); PyTuple_SET_ITEM(__pyx_k_tuple_27, 5, ((PyObject *)__pyx_n_s__pi)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__pi)); __Pyx_INCREF(((PyObject *)__pyx_n_s__pi_view)); PyTuple_SET_ITEM(__pyx_k_tuple_27, 6, ((PyObject *)__pyx_n_s__pi_view)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__pi_view)); __Pyx_INCREF(((PyObject *)__pyx_n_s__i)); PyTuple_SET_ITEM(__pyx_k_tuple_27, 7, ((PyObject *)__pyx_n_s__i)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__i)); __Pyx_INCREF(((PyObject *)__pyx_n_s__C)); PyTuple_SET_ITEM(__pyx_k_tuple_27, 8, ((PyObject *)__pyx_n_s__C)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__C)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_27)); __pyx_k_codeobj_28 = (PyObject*)__Pyx_PyCode_New(4, 0, 9, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_27, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_17, __pyx_n_s__combinations, 248, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_28)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 248; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/utils/routines.pyx":268 * * * def gamln(double x): # <<<<<<<<<<<<<< * """ Python bindings to log gamma. Do not use, this is there only for * testing. Use scipy.special.gammaln. */ __pyx_k_tuple_29 = PyTuple_New(3); if (unlikely(!__pyx_k_tuple_29)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 268; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_29); __Pyx_INCREF(((PyObject *)__pyx_n_s__x)); PyTuple_SET_ITEM(__pyx_k_tuple_29, 0, ((PyObject *)__pyx_n_s__x)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__x)); __Pyx_INCREF(((PyObject *)__pyx_n_s__x)); PyTuple_SET_ITEM(__pyx_k_tuple_29, 1, ((PyObject *)__pyx_n_s__x)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__x)); __Pyx_INCREF(((PyObject *)__pyx_n_s__y)); PyTuple_SET_ITEM(__pyx_k_tuple_29, 2, ((PyObject *)__pyx_n_s__y)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__y)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_29)); __pyx_k_codeobj_30 = (PyObject*)__Pyx_PyCode_New(1, 0, 3, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_29, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_17, __pyx_n_s__gamln, 268, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_30)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 268; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/utils/routines.pyx":277 * * * def psi(double x): # <<<<<<<<<<<<<< * """ Python bindings to psi (d gamln(x)/dx. Do not use, this is there only * for testing. Use scipy.special.psi. 
*/ __pyx_k_tuple_31 = PyTuple_New(3); if (unlikely(!__pyx_k_tuple_31)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 277; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_31); __Pyx_INCREF(((PyObject *)__pyx_n_s__x)); PyTuple_SET_ITEM(__pyx_k_tuple_31, 0, ((PyObject *)__pyx_n_s__x)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__x)); __Pyx_INCREF(((PyObject *)__pyx_n_s__x)); PyTuple_SET_ITEM(__pyx_k_tuple_31, 1, ((PyObject *)__pyx_n_s__x)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__x)); __Pyx_INCREF(((PyObject *)__pyx_n_s__y)); PyTuple_SET_ITEM(__pyx_k_tuple_31, 2, ((PyObject *)__pyx_n_s__y)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__y)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_31)); __pyx_k_codeobj_32 = (PyObject*)__Pyx_PyCode_New(1, 0, 3, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_31, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_17, __pyx_n_s__psi, 277, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_32)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 277; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_InitGlobals(void) { if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __pyx_int_15 = PyInt_FromLong(15); if (unlikely(!__pyx_int_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; return 0; __pyx_L1_error:; return -1; } #if PY_MAJOR_VERSION < 3 PyMODINIT_FUNC initroutines(void); /*proto*/ PyMODINIT_FUNC initroutines(void) #else PyMODINIT_FUNC PyInit_routines(void); /*proto*/ PyMODINIT_FUNC PyInit_routines(void) #endif { PyObject *__pyx_t_1 = NULL; __Pyx_RefNannyDeclarations #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit_routines(void)", 0); if ( __Pyx_check_binary_version() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #ifdef __Pyx_CyFunction_USED if (__Pyx_CyFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if 
defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? */ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4(__Pyx_NAMESTR("routines"), __pyx_methods, __Pyx_DOCSTR(__pyx_k_13), 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (!PyDict_GetItemString(modules, "nipy.labs.utils.routines")) { if (unlikely(PyDict_SetItemString(modules, "nipy.labs.utils.routines", __pyx_m) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } } #endif __pyx_b = PyImport_AddModule(__Pyx_NAMESTR(__Pyx_BUILTIN_MODULE_NAME)); if (unlikely(!__pyx_b)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #if CYTHON_COMPILING_IN_PYPY Py_INCREF(__pyx_b); #endif if (__Pyx_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; /*--- Initialize various global constants etc. ---*/ if (unlikely(__Pyx_InitGlobals() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (__pyx_module_is_main_nipy__labs__utils__routines) { if (__Pyx_SetAttrString(__pyx_m, "__name__", __pyx_n_s____main__) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; } /*--- Builtin init code ---*/ if (unlikely(__Pyx_InitCachedBuiltins() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Constants init code ---*/ if (unlikely(__Pyx_InitCachedConstants() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Global init code ---*/ /*--- Variable export code ---*/ /*--- Function export code ---*/ /*--- Type init code ---*/ /*--- Type import code ---*/ __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "type", #if CYTHON_COMPILING_IN_PYPY sizeof(PyTypeObject), #else sizeof(PyHeapTypeObject), #endif 0); if (unlikely(!__pyx_ptype_7cpython_4type_type)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_5numpy_dtype)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 155; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 169; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 178; __pyx_clineno = 
__LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 861; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Variable import code ---*/ /*--- Function import code ---*/ /*--- Execution code ---*/ /* "nipy/labs/utils/routines.pyx":9 * """ * * __version__ = '0.1' # <<<<<<<<<<<<<< * * # Includes */ if (PyObject_SetAttr(__pyx_m, __pyx_n_s____version__, ((PyObject *)__pyx_kp_s_14)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "nipy/labs/utils/routines.pyx":39 * * # Initialize numpy * fffpy_import_array() # <<<<<<<<<<<<<< * cnp.import_array() * import numpy as np */ fffpy_import_array(); /* "nipy/labs/utils/routines.pyx":40 * # Initialize numpy * fffpy_import_array() * cnp.import_array() # <<<<<<<<<<<<<< * import numpy as np * */ import_array(); /* "nipy/labs/utils/routines.pyx":41 * fffpy_import_array() * cnp.import_array() * import numpy as np # <<<<<<<<<<<<<< * * # This is faster than scipy.stats.scoreatpercentile due to partial */ __pyx_t_1 = __Pyx_Import(((PyObject *)__pyx_n_s__numpy), 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 41; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__np, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 41; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/utils/routines.pyx":45 * # This is faster than scipy.stats.scoreatpercentile due to partial * # sorting * def quantile(X, double ratio, int interp=False, int axis=0): # <<<<<<<<<<<<<< * """ * q = quantile(data, ratio, interp=False, axis=0). */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_5utils_8routines_1quantile, NULL, __pyx_n_s_18); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 45; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__quantile, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 45; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/utils/routines.pyx":78 * # due to the underlying algorithm that relies on * # partial sorting as opposed to full sorting. * def median(x, axis=0): # <<<<<<<<<<<<<< * """ * median(x, axis=0). */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_5utils_8routines_3median, NULL, __pyx_n_s_18); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__median, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/utils/routines.pyx":86 * * * def mahalanobis(X, VX): # <<<<<<<<<<<<<< * """ * d2 = mahalanobis(X, VX). 
*/ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_5utils_8routines_5mahalanobis, NULL, __pyx_n_s_18); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 86; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__mahalanobis, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 86; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/utils/routines.pyx":144 * * * def svd(X): # <<<<<<<<<<<<<< * """ Singular value decomposition of array `X` * */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_5utils_8routines_7svd, NULL, __pyx_n_s_18); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 144; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__svd, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 144; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/utils/routines.pyx":228 * * * def permutations(unsigned int n, unsigned int m=1, unsigned long magic=0): # <<<<<<<<<<<<<< * """ * P = permutations(n, m=1, magic=0). */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_5utils_8routines_9permutations, NULL, __pyx_n_s_18); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 228; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__permutations, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 228; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/utils/routines.pyx":248 * * * def combinations(unsigned int k, unsigned int n, unsigned int m=1, unsigned long magic=0): # <<<<<<<<<<<<<< * """ * P = combinations(k, n, m=1, magic=0). */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_5utils_8routines_11combinations, NULL, __pyx_n_s_18); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 248; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__combinations, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 248; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/utils/routines.pyx":268 * * * def gamln(double x): # <<<<<<<<<<<<<< * """ Python bindings to log gamma. Do not use, this is there only for * testing. Use scipy.special.gammaln. */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_5utils_8routines_13gamln, NULL, __pyx_n_s_18); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 268; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__gamln, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 268; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/utils/routines.pyx":277 * * * def psi(double x): # <<<<<<<<<<<<<< * """ Python bindings to psi (d gamln(x)/dx. Do not use, this is there only * for testing. Use scipy.special.psi. 
*/ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4nipy_4labs_5utils_8routines_15psi, NULL, __pyx_n_s_18); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 277; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__psi, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 277; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nipy/labs/utils/routines.pyx":1 * # -*- Mode: Python -*- Not really, but the syntax is close enough # <<<<<<<<<<<<<< * * """ */ __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_1)); if (PyObject_SetAttr(__pyx_m, __pyx_n_s____test__, ((PyObject *)__pyx_t_1)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; /* "numpy.pxd":975 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); if (__pyx_m) { __Pyx_AddTraceback("init nipy.labs.utils.routines", __pyx_clineno, __pyx_lineno, __pyx_filename); Py_DECREF(__pyx_m); __pyx_m = 0; } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init nipy.labs.utils.routines"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if PY_MAJOR_VERSION < 3 return; #else return __pyx_m; #endif } /* Runtime support code */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule((char *)modname); if (!m) goto end; p = PyObject_GetAttrString(m, (char *)"RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* CYTHON_REFNANNY */ static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%s() takes %s %" CYTHON_FORMAT_SSIZE_T "d positional argument%s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? 
"" : "s", num_found); } static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 
1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%s() got an unexpected keyword argument '%s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name) { PyObject *result; result = PyObject_GetAttr(dict, name); if (!result) { if (dict != __pyx_b) { PyErr_Clear(); result = PyObject_GetAttr(__pyx_b, name); } if (!result) { PyErr_SetObject(PyExc_NameError, name); } } return result; } static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb) { #if CYTHON_COMPILING_IN_CPYTHON PyObject *tmp_type, *tmp_value, *tmp_tb; PyThreadState *tstate = PyThreadState_GET(); tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_Restore(type, value, tb); #endif } static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb) { #if CYTHON_COMPILING_IN_CPYTHON PyThreadState *tstate = PyThreadState_GET(); *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(type, value, tb); #endif } #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } #if PY_VERSION_HEX < 0x02050000 if (PyClass_Check(type)) { #else if (PyType_Check(type)) { #endif #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; #if PY_VERSION_HEX < 0x02050000 if (PyInstance_Check(type)) { type = (PyObject*) ((PyInstanceObject*)type)->in_class; Py_INCREF(type); } else { type = 0; PyErr_SetString(PyExc_TypeError, "raise: exception must be an old-style class or instance"); goto raise_error; } #else type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } #endif } __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else /* Python 3+ */ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; 
} else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyEval_CallObject(type, args); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } if (cause && cause != Py_None) { PyObject *fixed_cause; if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { PyThreadState *tstate = PyThreadState_GET(); PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } } bad: Py_XDECREF(owned_instance); return; } #endif static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); } static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more than %" CYTHON_FORMAT_SSIZE_T "d value%s to unpack", index, (index == 1) ? 
"" : "s"); } static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); } static CYTHON_INLINE int __Pyx_IterFinish(void) { #if CYTHON_COMPILING_IN_CPYTHON PyThreadState *tstate = PyThreadState_GET(); PyObject* exc_type = tstate->curexc_type; if (unlikely(exc_type)) { if (likely(exc_type == PyExc_StopIteration) || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration)) { PyObject *exc_value, *exc_tb; exc_value = tstate->curexc_value; exc_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; Py_DECREF(exc_type); Py_XDECREF(exc_value); Py_XDECREF(exc_tb); return 0; } else { return -1; } } return 0; #else if (unlikely(PyErr_Occurred())) { if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) { PyErr_Clear(); return 0; } else { return -1; } } return 0; #endif } static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected) { if (unlikely(retval)) { Py_DECREF(retval); __Pyx_RaiseTooManyValuesError(expected); return -1; } else { return __Pyx_IterFinish(); } return 0; } static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { if (unlikely(!type)) { PyErr_Format(PyExc_SystemError, "Missing type object"); return 0; } if (likely(PyObject_TypeCheck(obj, type))) return 1; PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", Py_TYPE(obj)->tp_name, type->tp_name); return 0; } static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level) { PyObject *py_import = 0; PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; py_import = __Pyx_GetAttrString(__pyx_b, "__import__"); if (!py_import) goto bad; if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; #if PY_VERSION_HEX >= 0x02050000 { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if (strchr(__Pyx_MODULE_NAME, '.')) { /* try package relative import first */ PyObject *py_level = PyInt_FromLong(1); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; /* try absolute import on failure */ } #endif if (!module) { PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); } } #else if (level>0) { PyErr_SetString(PyExc_RuntimeError, "Relative import is not supported for Python <=2.4."); goto bad; } module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, NULL); #endif bad: Py_XDECREF(empty_list); Py_XDECREF(py_import); Py_XDECREF(empty_dict); return module; } #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return ::std::complex< float >(x, y); } #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return x + y*(__pyx_t_float_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { __pyx_t_float_complex z; z.real = x; z.imag = y; return z; } #endif #if 
CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex a, __pyx_t_float_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float denom = b.real * b.real + b.imag * b.imag; z.real = (a.real * b.real + a.imag * b.imag) / denom; z.imag = (a.imag * b.real - a.real * b.imag) / denom; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrtf(z.real*z.real + z.imag*z.imag); #else return hypotf(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { float denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(a, a); case 3: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(z, a); case 4: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } r = a.real; theta = 0; } else { r = __Pyx_c_absf(a); theta = atan2f(a.imag, a.real); } lnr = logf(r); z_r = expf(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cosf(z_theta); z.imag = z_r * sinf(z_theta); return z; } #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return ::std::complex< double >(x, y); } #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return x + y*(__pyx_t_double_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { __pyx_t_double_complex z; z.real = x; z.imag = y; return z; } #endif #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex a, __pyx_t_double_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } 
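/* Double-precision counterparts of the float complex helpers above; like them, they are only compiled when CYTHON_CCOMPLEX is unset, i.e. when neither C99 _Complex nor C++ std::complex is available. */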
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double denom = b.real * b.real + b.imag * b.imag; z.real = (a.real * b.real + a.imag * b.imag) / denom; z.imag = (a.imag * b.real - a.real * b.imag) / denom; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrt(z.real*z.real + z.imag*z.imag); #else return hypot(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { double denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(a, a); case 3: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(z, a); case 4: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } r = a.real; theta = 0; } else { r = __Pyx_c_abs(a); theta = atan2(a.imag, a.real); } lnr = log(r); z_r = exp(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cos(z_theta); z.imag = z_r * sin(z_theta); return z; } #endif #endif static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject* x) { const unsigned char neg_one = (unsigned char)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(unsigned char) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(unsigned char)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to unsigned char" : "value too large to convert to unsigned char"); } return (unsigned char)-1; } return (unsigned char)val; } return (unsigned char)__Pyx_PyInt_AsUnsignedLong(x); } static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject* x) { const unsigned short neg_one = (unsigned short)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(unsigned short) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(unsigned short)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? 
"can't convert negative value to unsigned short" : "value too large to convert to unsigned short"); } return (unsigned short)-1; } return (unsigned short)val; } return (unsigned short)__Pyx_PyInt_AsUnsignedLong(x); } static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject* x) { const unsigned int neg_one = (unsigned int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(unsigned int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(unsigned int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to unsigned int" : "value too large to convert to unsigned int"); } return (unsigned int)-1; } return (unsigned int)val; } return (unsigned int)__Pyx_PyInt_AsUnsignedLong(x); } static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject* x) { const char neg_one = (char)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(char) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(char)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to char" : "value too large to convert to char"); } return (char)-1; } return (char)val; } return (char)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject* x) { const short neg_one = (short)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(short) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(short)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to short" : "value too large to convert to short"); } return (short)-1; } return (short)val; } return (short)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject* x) { const int neg_one = (int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to int" : "value too large to convert to int"); } return (int)-1; } return (int)val; } return (int)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject* x) { const signed char neg_one = (signed char)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(signed char) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(signed char)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to signed char" : "value too large to convert to signed char"); } return (signed char)-1; } return (signed char)val; } return (signed char)__Pyx_PyInt_AsSignedLong(x); } static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject* x) { const signed short neg_one = (signed short)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(signed short) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(signed short)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? 
"can't convert negative value to signed short" : "value too large to convert to signed short"); } return (signed short)-1; } return (signed short)val; } return (signed short)__Pyx_PyInt_AsSignedLong(x); } static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject* x) { const signed int neg_one = (signed int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(signed int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(signed int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to signed int" : "value too large to convert to signed int"); } return (signed int)-1; } return (signed int)val; } return (signed int)__Pyx_PyInt_AsSignedLong(x); } static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject* x) { const int neg_one = (int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to int" : "value too large to convert to int"); } return (int)-1; } return (int)val; } return (int)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject* x) { const unsigned long neg_one = (unsigned long)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned long"); return (unsigned long)-1; } return (unsigned long)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned long"); return (unsigned long)-1; } return (unsigned long)PyLong_AsUnsignedLong(x); } else { return (unsigned long)PyLong_AsLong(x); } } else { unsigned long val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (unsigned long)-1; val = __Pyx_PyInt_AsUnsignedLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject* x) { const unsigned PY_LONG_LONG neg_one = (unsigned PY_LONG_LONG)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned PY_LONG_LONG"); return (unsigned PY_LONG_LONG)-1; } return (unsigned PY_LONG_LONG)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned PY_LONG_LONG"); return (unsigned PY_LONG_LONG)-1; } return (unsigned PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); } else { return (unsigned PY_LONG_LONG)PyLong_AsLongLong(x); } } else { unsigned PY_LONG_LONG val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (unsigned PY_LONG_LONG)-1; val = __Pyx_PyInt_AsUnsignedLongLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject* x) { const long neg_one = (long)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if 
(likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long)-1; } return (long)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long)-1; } return (long)PyLong_AsUnsignedLong(x); } else { return (long)PyLong_AsLong(x); } } else { long val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (long)-1; val = __Pyx_PyInt_AsLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject* x) { const PY_LONG_LONG neg_one = (PY_LONG_LONG)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to PY_LONG_LONG"); return (PY_LONG_LONG)-1; } return (PY_LONG_LONG)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to PY_LONG_LONG"); return (PY_LONG_LONG)-1; } return (PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); } else { return (PY_LONG_LONG)PyLong_AsLongLong(x); } } else { PY_LONG_LONG val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (PY_LONG_LONG)-1; val = __Pyx_PyInt_AsLongLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject* x) { const signed long neg_one = (signed long)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed long"); return (signed long)-1; } return (signed long)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed long"); return (signed long)-1; } return (signed long)PyLong_AsUnsignedLong(x); } else { return (signed long)PyLong_AsLong(x); } } else { signed long val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (signed long)-1; val = __Pyx_PyInt_AsSignedLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject* x) { const signed PY_LONG_LONG neg_one = (signed PY_LONG_LONG)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed PY_LONG_LONG"); return (signed PY_LONG_LONG)-1; } return (signed PY_LONG_LONG)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed PY_LONG_LONG"); return (signed PY_LONG_LONG)-1; } return (signed PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); } else { return (signed PY_LONG_LONG)PyLong_AsLongLong(x); } } else { signed PY_LONG_LONG val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (signed PY_LONG_LONG)-1; val = __Pyx_PyInt_AsSignedLongLong(tmp); Py_DECREF(tmp); return val; } } static int 
__Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); #if PY_VERSION_HEX < 0x02050000 return PyErr_Warn(NULL, message); #else return PyErr_WarnEx(NULL, message, 1); #endif } return 0; } #ifndef __PYX_HAVE_RT_ImportModule #define __PYX_HAVE_RT_ImportModule static PyObject *__Pyx_ImportModule(const char *name) { PyObject *py_name = 0; PyObject *py_module = 0; py_name = __Pyx_PyIdentifier_FromString(name); if (!py_name) goto bad; py_module = PyImport_Import(py_name); Py_DECREF(py_name); return py_module; bad: Py_XDECREF(py_name); return 0; } #endif #ifndef __PYX_HAVE_RT_ImportType #define __PYX_HAVE_RT_ImportType static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict) { PyObject *py_module = 0; PyObject *result = 0; PyObject *py_name = 0; char warning[200]; py_module = __Pyx_ImportModule(module_name); if (!py_module) goto bad; py_name = __Pyx_PyIdentifier_FromString(class_name); if (!py_name) goto bad; result = PyObject_GetAttr(py_module, py_name); Py_DECREF(py_name); py_name = 0; Py_DECREF(py_module); py_module = 0; if (!result) goto bad; if (!PyType_Check(result)) { PyErr_Format(PyExc_TypeError, "%s.%s is not a type object", module_name, class_name); goto bad; } if (!strict && (size_t)((PyTypeObject *)result)->tp_basicsize > size) { PyOS_snprintf(warning, sizeof(warning), "%s.%s size changed, may indicate binary incompatibility", module_name, class_name); #if PY_VERSION_HEX < 0x02050000 if (PyErr_Warn(NULL, warning) < 0) goto bad; #else if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; #endif } else if ((size_t)((PyTypeObject *)result)->tp_basicsize != size) { PyErr_Format(PyExc_ValueError, "%s.%s has the wrong size, try recompiling", module_name, class_name); goto bad; } return (PyTypeObject *)result; bad: Py_XDECREF(py_module); Py_XDECREF(result); return NULL; } #endif static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = (start + end) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); 
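/* First insertion into an empty cache: try to allocate a 64-entry table; on success the new code object goes into slot 0, otherwise caching is simply skipped. */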
if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, new_max*sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, /*int argcount,*/ 0, /*int kwonlyargcount,*/ 0, /*int nlocals,*/ 0, /*int stacksize,*/ 0, /*int flags,*/ __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, /*int firstlineno,*/ __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_globals = 0; PyFrameObject *py_frame = 0; py_code = __pyx_find_code_object(c_line ? c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? 
c_line : py_line, py_code); } py_globals = PyModule_GetDict(__pyx_m); if (!py_globals) goto bad; py_frame = PyFrame_New( PyThreadState_GET(), /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ py_globals, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; py_frame->f_lineno = py_line; PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else /* Python 3+ has unicode identifiers */ if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; ++t; } return 0; } /* Type Conversion Functions */ static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x) { PyNumberMethods *m; const char *name = NULL; PyObject *res = NULL; #if PY_VERSION_HEX < 0x03000000 if (PyInt_Check(x) || PyLong_Check(x)) #else if (PyLong_Check(x)) #endif return Py_INCREF(x), x; m = Py_TYPE(x)->tp_as_number; #if PY_VERSION_HEX < 0x03000000 if (m && m->nb_int) { name = "int"; res = PyNumber_Int(x); } else if (m && m->nb_long) { name = "long"; res = PyNumber_Long(x); } #else if (m && m->nb_int) { name = "int"; res = PyNumber_Long(x); } #endif if (res) { #if PY_VERSION_HEX < 0x03000000 if (!PyInt_Check(res) && !PyLong_Check(res)) { #else if (!PyLong_Check(res)) { #endif PyErr_Format(PyExc_TypeError, "__%s__ returned non-%s (type %.200s)", name, name, Py_TYPE(res)->tp_name); Py_DECREF(res); return NULL; } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject* x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { #if PY_VERSION_HEX < 0x02050000 if (ival <= LONG_MAX) return PyInt_FromLong((long)ival); else { unsigned char *bytes = (unsigned char *) &ival; int one = 1; int little = (int)*(unsigned char*)&one; return _PyLong_FromByteArray(bytes, sizeof(size_t), little, 0); } #else return PyInt_FromSize_t(ival); #endif } static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject* x) { unsigned PY_LONG_LONG val = __Pyx_PyInt_AsUnsignedLongLong(x); if (unlikely(val == (unsigned PY_LONG_LONG)-1 && PyErr_Occurred())) { return (size_t)-1; } else if (unlikely(val != (unsigned PY_LONG_LONG)(size_t)val)) { PyErr_SetString(PyExc_OverflowError, "value too large to convert to size_t"); return (size_t)-1; } return (size_t)val; } #endif /* Py_PYTHON_H */ nipy-0.3.0/nipy/labs/utils/routines.pyx000066400000000000000000000165271210344137400201550ustar00rootroot00000000000000# -*- Mode: Python -*- Not really, but the syntax is close enough """ Miscellaneous fff routines. Author: Alexis Roche, 2008. 
""" __version__ = '0.1' # Includes from fff cimport * cimport numpy as cnp # Exports from fff_gen_stats.h cdef extern from "fff_gen_stats.h": double fff_mahalanobis(fff_vector* x, fff_matrix* S, fff_matrix* Saux) void fff_permutation(unsigned int* x, unsigned int n, unsigned long int magic) void fff_combination(unsigned int* x, unsigned int k, unsigned int n, unsigned long magic) # Exports from fff_specfun.h cdef extern from "fff_specfun.h": extern double fff_gamln(double x) extern double fff_psi(double x) # Exports from fff_lapack.h cdef extern from "fff_lapack.h": extern int fff_lapack_dgesdd(fff_matrix* A, fff_vector* s, fff_matrix* U, fff_matrix* Vt, fff_vector* work, fff_array* iwork, fff_matrix* Aux) # Initialize numpy fffpy_import_array() cnp.import_array() import numpy as np # This is faster than scipy.stats.scoreatpercentile due to partial # sorting def quantile(X, double ratio, int interp=False, int axis=0): """ q = quantile(data, ratio, interp=False, axis=0). Partial sorting algorithm, very fast!!! """ cdef fff_vector *x, *y cdef fffpy_multi_iterator* multi # Allocate output array Y dims = list(X.shape) dims[axis] = 1 Y = np.zeros(dims) # Create a new array iterator multi = fffpy_multi_iterator_new(2, axis, X, Y) # Create vector views on both X and Y x = multi.vector[0] y = multi.vector[1] # Loop while(multi.index < multi.size): y.data[0] = fff_vector_quantile(x, ratio, interp) fffpy_multi_iterator_update(multi) # Delete local structures fffpy_multi_iterator_delete(multi) return Y # This is faster than numpy.stats # due to the underlying algorithm that relies on # partial sorting as opposed to full sorting. def median(x, axis=0): """ median(x, axis=0). Equivalent to: quantile(x, ratio=0.5, interp=True, axis=axis). """ return quantile(x, axis=axis, ratio=0.5, interp=True) def mahalanobis(X, VX): """ d2 = mahalanobis(X, VX). ufunc-like function to compute Mahalanobis squared distances x'*inv(Vx)*x. axis == 0 assumed. If X is shaped (d,K), VX must be shaped (d,d,K). """ cdef fff_vector *x, *vx, *x_tmp, *vx_tmp, *d2 cdef fff_matrix Sx cdef fff_matrix *Sx_tmp cdef fffpy_multi_iterator* multi cdef int axis=0, n # Allocate output array dims = list(X.shape) dim = dims[0] dims[0] = 1 D2 = np.zeros(dims) # Flatten input variance array VX_flat = VX.reshape( [dim*dim]+list(VX.shape[2:]) ) # Create a new array iterator multi = fffpy_multi_iterator_new(3, axis, X, VX_flat, D2) # Allocate local structures n = X.shape[axis] x_tmp = fff_vector_new(n) vx_tmp = fff_vector_new(n*n) Sx_tmp = fff_matrix_new(n, n) # Create vector views on X, VX_flat and D2 x = multi.vector[0] vx = multi.vector[1] d2 = multi.vector[2] # Loop while(multi.index < multi.size): fff_vector_memcpy(x_tmp, x) fff_vector_memcpy(vx_tmp, vx) Sx = fff_matrix_view(vx_tmp.data, n, n, n) # OK because vx_tmp is contiguous d2.data[0] = fff_mahalanobis(x_tmp, &Sx, Sx_tmp) fffpy_multi_iterator_update(multi) # Delete local structs and views fff_vector_delete(x_tmp) fff_vector_delete(vx_tmp) fff_matrix_delete(Sx_tmp) fffpy_multi_iterator_delete(multi) # Return D2 = D2.reshape(VX.shape[2:]) return D2 def svd(X): """ Singular value decomposition of array `X` Y = svd(X) ufunc-like svd. Given an array X (m, n, K), perform an SV decomposition. 
Parameters ---------- X : 2D array Returns ------- S : (min(m,n), K) """ cdef int axis=0 cdef int m, n, dmin, dmax, lwork, liwork, info cdef fff_vector *work, *x_flat, *x_flat_tmp, *s, *s_tmp cdef fff_matrix x cdef fff_array *iwork cdef fff_matrix *Aux, *U, *Vt cdef fffpy_multi_iterator* multi # Shape of matrices m = X.shape[0] n = X.shape[1] if m > n: dmin = n dmax = m else: dmin = m dmax = n # Create auxiliary arrays lwork = 4*dmin*(dmin+1) if dmax > lwork: lwork = dmax lwork = 2*(3*dmin*dmin + lwork) liwork = 8*dmin work = fff_vector_new(lwork) iwork = fff_array_new1d(FFF_INT, liwork) Aux = fff_matrix_new(dmax, dmax) U = fff_matrix_new(m, m) Vt = fff_matrix_new(n, n) x_flat_tmp = fff_vector_new(m*n) s_tmp = fff_vector_new(dmin) # Allocate output array endims = list(X.shape[2:]) S = np.zeros([dmin]+endims) # Flatten input array X_flat = X.reshape([m*n]+endims) # Create a new array iterator multi = fffpy_multi_iterator_new(2, axis, X_flat, S) # Create vector views x_flat = multi.vector[0] s = multi.vector[1] # Loop while(multi.index < multi.size): fff_vector_memcpy(x_flat_tmp, x_flat) fff_vector_memcpy(s_tmp, s) x = fff_matrix_view(x_flat_tmp.data, m, n, n) # OK because x_flat_tmp is contiguous info = fff_lapack_dgesdd(&x, s_tmp, U, Vt, work, iwork, Aux ) fff_vector_memcpy(s, s_tmp) fffpy_multi_iterator_update(multi) # Delete local structures fff_vector_delete(work) fff_vector_delete(x_flat_tmp) fff_vector_delete(s_tmp) fff_array_delete(iwork) fff_matrix_delete(Aux) fff_matrix_delete(U) fff_matrix_delete(Vt) fffpy_multi_iterator_delete(multi) # Return return S def permutations(unsigned int n, unsigned int m=1, unsigned long magic=0): """ P = permutations(n, m=1, magic=0). Generate m permutations from [0..n[. """ cdef fff_array *p, *pi cdef fff_array pi_view cdef unsigned int i p = fff_array_new2d(FFF_UINT, n, m) pi = fff_array_new1d(FFF_UINT, n) ## contiguous, dims=(n,1,1,1) for i from 0 <= i < m: fff_permutation(pi.data, n, magic+i) pi_view = fff_array_get_block2d(p, 0, n-1, 1, i, i, 1) ## dims=(n,1,1,1) fff_array_copy(&pi_view, pi) P = fff_array_toPyArray(p) return P def combinations(unsigned int k, unsigned int n, unsigned int m=1, unsigned long magic=0): """ P = combinations(k, n, m=1, magic=0). Generate m combinations of k elements from [0..n[. """ cdef fff_array *p, *pi cdef fff_array pi_view cdef unsigned int i p = fff_array_new2d(FFF_UINT, k, m) pi = fff_array_new1d(FFF_UINT, k) ## contiguous, dims=(n,1,1,1) for i from 0 <= i < m: fff_combination(pi.data, k, n, magic+i) pi_view = fff_array_get_block2d(p, 0, k-1, 1, i, i, 1) ## dims=(k,1,1,1) fff_array_copy(&pi_view, pi) C = fff_array_toPyArray(p) return C def gamln(double x): """ Python bindings to log gamma. Do not use, this is there only for testing. Use scipy.special.gammaln. """ cdef double y y = fff_gamln(x) return y def psi(double x): """ Python bindings to psi (d gamln(x)/dx. Do not use, this is there only for testing. Use scipy.special.psi. """ cdef double y y = fff_psi(x) return y nipy-0.3.0/nipy/labs/utils/setup.py000066400000000000000000000017331210344137400172460ustar00rootroot00000000000000#!/usr/bin/env python def configuration(parent_package='', top_path=None): from numpy.distutils.misc_util import Configuration # We need this because libcstat.a is linked to lapack, which can # be a fortran library, and the linker needs this information. 
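# The Cython module above (routines.pyx) exposes small fff-based helpers.
# A minimal usage sketch, assuming the extension has been built and is
# importable as nipy.labs.utils.routines; shapes and keyword arguments
# follow the docstrings above, and the sketch is kept as comments so the
# build configuration itself is unaffected:
#
#   import numpy as np
#   from nipy.labs.utils.routines import median, quantile, mahalanobis, svd
#
#   x = np.random.rand(100)
#   med = median(x)                        # partial-sort median, ~ np.median(x)
#   q75 = quantile(x, ratio=0.75, interp=True)
#
#   A = np.random.rand(100, 100) / 100.
#   A = np.dot(A.T, A) + np.eye(100)       # symmetric positive definite
#   d2 = mahalanobis(x, A)                 # x' * inv(A) * x
#
#   S = svd(np.random.rand(5, 3))          # singular values, shape (3,)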
from numpy.distutils.system_info import get_info lapack_info = get_info('lapack_opt', 0) if 'libraries' not in lapack_info: # But on OSX that may not give us what we need, so try with 'lapack' # instead. NOTE: scipy.linalg uses lapack_opt, not 'lapack'... lapack_info = get_info('lapack', 0) config = Configuration('utils', parent_package, top_path) config.add_subpackage('tests') config.add_extension( 'routines', sources=['routines.pyx'], libraries=['cstat'], extra_info=lapack_info, ) return config if __name__ == '__main__': print('This is the wrong setup.py file to run') nipy-0.3.0/nipy/labs/utils/simul_multisubject_fmri_dataset.py000066400000000000000000000250241210344137400245520ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This module conatins a function to produce a dataset which simulates a collection of 2D images This dataset is saved as a 3D image (each slice being a subject) and a 3D array Author : Bertrand Thirion, 2008-2010 """ import numpy as np import scipy.ndimage as nd from nibabel import save, Nifti1Image # definition of the maxima at the group level pos = np.array([[6, 7], [10, 10], [15, 10]]) ampli = np.array([3, 4, 4]) def _cone2d(shape, ij, pos, ampli, width): """Define a cone of the proposed grid """ temp = np.zeros(shape) pos = np.reshape(pos, (1, 2)) dist = np.sqrt(np.sum((ij - pos) ** 2, axis=1)) codi = (width - dist) * (dist < width) / width temp[ij[:, 0], ij[:, 1]] = codi * ampli return temp def _cone3d(shape, ij, pos, ampli, width): """Define a cone of the proposed grid """ temp = np.zeros(shape) pos = np.reshape(pos, (1, 3)) dist = np.sqrt(np.sum((ij - pos) ** 2, axis=1)) codi = (width - dist) * (dist < width) / width temp[ij[:, 0], ij[:, 1], ij[:, 2]] = codi * ampli return temp def surrogate_2d_dataset(n_subj=10, shape=(30, 30), sk=1.0, noise_level=1.0, pos=pos, ampli=ampli, spatial_jitter=1.0, signal_jitter=1.0, width=5.0, width_jitter=0, out_text_file=None, out_image_file=None, seed=False): """ Create surrogate (simulated) 2D activation data with spatial noise Parameters ----------- n_subj: integer, optionnal The number of subjects, ie the number of different maps generated. shape=(30,30): tuple of integers, the shape of each image sk: float, optionnal Amount of spatial noise smoothness. noise_level: float, optionnal Amplitude of the spatial noise. amplitude=noise_level) pos: 2D ndarray of integers, optionnal x, y positions of the various simulated activations. ampli: 1D ndarray of floats, optionnal Respective amplitude of each activation spatial_jitter: float, optionnal Random spatial jitter added to the position of each activation, in pixel. signal_jitter: float, optionnal Random amplitude fluctuation for each activation, added to the amplitude specified by `ampli` width: float or ndarray, optionnal Width of the activations width_jitter: float Relative width jitter of the blobs out_text_file: string or None, optionnal If not None, the resulting array is saved as a text file with the given file name out_image_file: string or None, optionnal If not None, the resulting is saved as a nifti file with the given file name. 
seed=False: int, optionnal If seed is not False, the random number generator is initialized at a certain value Returns ------- dataset: 3D ndarray The surrogate activation map, with dimensions ``(n_subj,) + shape`` """ if seed: nr = np.random.RandomState([seed]) else: import numpy.random as nr ij = np.array(np.where(np.ones(shape))).T dataset = [] for s in range(n_subj): # make the signal data = np.zeros(shape) lpos = pos + spatial_jitter * nr.randn(1, 2) lampli = ampli + signal_jitter * nr.randn(np.size(ampli)) this_width = width * (1 - width_jitter * nr.randn(np.size(ampli))) for k in range(np.size(lampli)): data = np.maximum(data, _cone2d(shape, ij, lpos[k], lampli[k], this_width[k])) # make some noise noise = nr.randn(*shape) # smooth the noise noise = nd.gaussian_filter(noise, sk) noise = np.reshape(noise, ( - 1, 1)) noise *= noise_level / np.std(noise) #make the mixture data += np.reshape(noise, shape) dataset.append(data) dataset = np.array(dataset) if out_text_file is not None: dataset.tofile(out_text_file) if out_image_file is not None: save(Nifti1Image(dataset, np.eye(4)), out_image_file) return dataset def surrogate_3d_dataset(n_subj=1, shape=(20, 20, 20), mask=None, sk=1.0, noise_level=1.0, pos=None, ampli=None, spatial_jitter=1.0, signal_jitter=1.0, width=5.0, out_text_file=None, out_image_file=None, seed=False): """Create surrogate (simulated) 3D activation data with spatial noise. Parameters ----------- n_subj: integer, optionnal The number of subjects, ie the number of different maps generated. shape=(20,20,20): tuple of 3 integers, the shape of each image mask=None: Nifti1Image instance, referential- and mask- defining image (overrides shape) sk: float, optionnal Amount of spatial noise smoothness. noise_level: float, optionnal Amplitude of the spatial noise. amplitude=noise_level) pos: 2D ndarray of integers, optionnal x, y positions of the various simulated activations. ampli: 1D ndarray of floats, optionnal Respective amplitude of each activation spatial_jitter: float, optionnal Random spatial jitter added to the position of each activation, in pixel. signal_jitter: float, optionnal Random amplitude fluctuation for each activation, added to the amplitude specified by ampli width: float or ndarray, optionnal Width of the activations out_text_file: string or None, optionnal If not None, the resulting array is saved as a text file with the given file name out_image_file: string or None, optionnal If not None, the resulting is saved as a nifti file with the given file name. 
seed=False: int, optionnal If seed is not False, the random number generator is initialized at a certain value Returns ------- dataset: 3D ndarray The surrogate activation map, with dimensions ``(n_subj,) + shape`` """ if seed: nr = np.random.RandomState([seed]) else: import numpy.random as nr if mask is not None: shape = mask.shape mask_data = mask.get_data() else: mask_data = np.ones(shape) ijk = np.array(np.where(mask_data)).T dataset = [] # make the signal for s in range(n_subj): data = np.zeros(shape) lampli = [] if pos is not None: if len(pos) != len(ampli): raise ValueError('ampli and pos do not have the same len') lpos = pos + spatial_jitter * nr.randn(1, 3) lampli = ampli + signal_jitter * nr.randn(np.size(ampli)) for k in range(np.size(lampli)): data = np.maximum(data, _cone3d(shape, ijk, lpos[k], lampli[k], width)) # make some noise noise = nr.randn(shape[0], shape[1], shape[2]) # smooth the noise noise = nd.gaussian_filter(noise, sk) noise *= noise_level / np.std(noise) # make the mixture data += noise data[mask_data == 0] = 0 dataset.append(data) dataset = np.array(dataset) if n_subj == 1: dataset = dataset[0] if out_text_file is not None: dataset.tofile(out_text_file) if out_image_file is not None: save(Nifti1Image(dataset, np.eye(4)), out_image_file) return dataset def surrogate_4d_dataset(shape=(20, 20, 20), mask=None, n_scans=1, n_sess=1, dmtx=None, sk=1.0, noise_level=1.0, signal_level=1.0, out_image_file=None, seed=False): """ Create surrogate (simulated) 3D activation data with spatial noise. Parameters ----------- shape = (20, 20, 20): tuple of integers, the shape of each image mask=None: brifti image instance, referential- and mask- defining image (overrides shape) n_scans: int, optional, number of scans to be simlulated overrided by the design matrix n_sess: int, optional, the number of simulated sessions dmtx: array of shape(n_scans, n_rows), the design matrix sk: float, optionnal Amount of spatial noise smoothness. noise_level: float, optionnal Amplitude of the spatial noise. 
amplitude=noise_level) signal_level: float, optional, Amplitude of the signal out_image_file: string or list of strings or None, optionnal If not None, the resulting is saved as (set of) nifti file(s) with the given file path(s) seed=False: int, optionnal If seed is not False, the random number generator is initialized at a certain value Returns ------- dataset: a list of n_sess ndarray of shape (shape[0], shape[1], shape[2], n_scans) The surrogate activation map """ if seed: nr = np.random.RandomState([seed]) else: import numpy.random as nr if mask is not None: shape = mask.shape affine = mask.get_affine() mask_data = mask.get_data().astype('bool') else: affine = np.eye(4) mask_data = np.ones(shape).astype('bool') if dmtx is not None: n_scans = dmtx.shape[0] if (out_image_file is not None) and isinstance(out_image_file, basestring): out_image_file = [out_image_file] shape_4d = shape + (n_scans,) output_images = [] if dmtx is not None: beta = [] for r in range(dmtx.shape[1]): betar = nd.gaussian_filter(nr.randn(*shape), sk) betar /= np.std(betar) beta.append(signal_level * betar) beta = np.rollaxis(np.array(beta), 0, 4) for ns in range(n_sess): data = np.zeros(shape_4d) # make the signal if dmtx is not None: data[mask_data] += np.dot(beta[mask_data], dmtx.T) for s in range(n_scans): # make some noise noise = nr.randn(*shape) # smooth the noise noise = nd.gaussian_filter(noise, sk) noise *= noise_level / np.std(noise) # make the mixture data[:, :, :, s] += noise data[:, :, :, s] += 100 * mask_data wim = Nifti1Image(data, affine) output_images.append(wim) if out_image_file is not None: save(wim, out_image_file[ns]) return output_images nipy-0.3.0/nipy/labs/utils/tests/000077500000000000000000000000001210344137400166725ustar00rootroot00000000000000nipy-0.3.0/nipy/labs/utils/tests/__init__.py000066400000000000000000000000001210344137400207710ustar00rootroot00000000000000nipy-0.3.0/nipy/labs/utils/tests/test_misc.py000066400000000000000000000035451210344137400212450ustar00rootroot00000000000000#!/usr/bin/env python import numpy as np from scipy import special from ..routines import median, mahalanobis, gamln, psi from nose.tools import assert_true from numpy.testing import assert_almost_equal, assert_equal, TestCase class TestAll(TestCase): def test_median(self): x = np.random.rand(100) assert_almost_equal(median(x), np.median(x)) def test_median2(self): x = np.random.rand(101) assert_equal(median(x), np.median(x)) def test_median3(self): x = np.random.rand(10, 30, 11) assert_almost_equal(np.squeeze(median(x,axis=1)), np.median(x,axis=1)) def test_mahalanobis(self): x = np.random.rand(100) / 100 A = np.random.rand(100, 100) / 100 A = np.dot(A.transpose(), A) + np.eye(100) mah = np.dot(x, np.dot(np.linalg.inv(A), x)) assert_almost_equal(mah, mahalanobis(x, A), decimal=1) def test_mahalanobis2(self): x = np.random.rand(100,3,4) Aa = np.zeros([100,100,3,4]) for i in range(3): for j in range(4): A = np.random.rand(100,100) A = np.dot(A.T, A) Aa[:,:,i,j] = A i = np.random.randint(3) j = np.random.randint(4) mah = np.dot(x[:,i,j], np.dot(np.linalg.inv(Aa[:,:,i,j]), x[:,i,j])) f_mah = (mahalanobis(x, Aa))[i,j] assert_true(np.allclose(mah, f_mah)) def test_gamln(self): for x in (0.01+100*np.random.random(50)): scipy_gamln = special.gammaln(x) my_gamln = gamln(x) assert_almost_equal(scipy_gamln, my_gamln) def test_psi(self): for x in (0.01+100*np.random.random(50)): scipy_psi = special.psi(x) my_psi = psi(x) assert_almost_equal(scipy_psi, my_psi) if __name__ == "__main__": import nose nose.run(argv=['', 
__file__]) nipy-0.3.0/nipy/labs/utils/tests/test_random_threshold.py000066400000000000000000000067161210344137400236510ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import unittest import numpy as np import scipy.stats as st from ..random_threshold import randthresh_main def make_data(n=10, dim=20, r=5, mdim=15, maskdim=20, amplitude=10, noise=1, jitter=None, activation=False): XYZvol = np.zeros((dim,dim,dim),int) XYZ = np.array(np.where(XYZvol==0)) p = XYZ.shape[1] #mask = np.arange(p) XYZvol[XYZ[0],XYZ[1],XYZ[2]] = np.arange(p) o = np.array([dim/2,dim/2,dim/2]) I = XYZvol[(dim-mdim)/2:(dim+mdim)/2,(dim-mdim)/2:(dim+mdim)/2,(dim-mdim)/2:(dim+mdim)/2].ravel() mask = XYZvol[ (dim-maskdim)/2 : (dim+maskdim)/2, (dim-maskdim)/2 : (dim+maskdim)/2, (dim-maskdim)/2 : (dim+maskdim)/2 ].ravel() q = len(mask) maskvol = np.zeros((dim,dim,dim),int) maskvol[XYZ[0,mask],XYZ[1,mask],XYZ[2,mask]] = np.arange(q) Isignal = maskvol[dim/2-r:dim/2+r,dim/2-r:dim/2+r,dim/2-r:dim/2+r].ravel() signal = np.zeros(q,float) signal[Isignal] += amplitude X = np.zeros((n,p),float) + np.nan data = np.zeros((n,p),float) + np.nan vardata = np.zeros((n,p),float) + np.nan for i in xrange(n): X[i,I] = np.random.randn(len(I)) if activation: o = np.array([dim/2,dim/2,dim/2]) if jitter!=None: # numpy 2 casting rules do no allow in place addition of ints # and floats - hence not in-place here o = o + np.round(np.random.randn(3)*jitter).clip(r-mdim/2,mdim/2-r) #print o Ii = XYZvol[o[0]-r:o[0]+r,o[1]-r:o[1]+r,o[2]-r:o[2]+r].ravel() X[i,Ii] += amplitude vardata[i,I] = np.square(np.random.randn(len(I)))*noise**2 data[i,I] = X[i,I] + np.random.randn(len(I))*np.sqrt(vardata[i,I]) return data, XYZ, mask, XYZvol, vardata, signal class TestRandomThreshold(unittest.TestCase): def test_random_threshold(self): # Just run all random threshold functions on toy data # for smoke testing data, XYZ, mask, XYZvol, vardata, signal = make_data(n=1, dim=20, r=3, mdim=20, maskdim=20, amplitude=4, noise=0, jitter=0, activation=True) Y = data[0] X = np.clip(-np.log(1 - st.chi2.cdf(Y**2, 1, 0)), 0, 1e10) K = (signal == 0).sum() - 100 verbose=False randthresh_main(X, K, XYZ=None, p=np.inf, varwind=True, knownull=True, stop=False, verbose=verbose) randthresh_main(Y, K, XYZ=None, p=np.inf, varwind=True, knownull=False, stop=True, verbose=verbose) randthresh_main(X, K, XYZ=None, p=np.inf, varwind=False, knownull=True, stop=True, verbose=verbose) randthresh_main(Y, K, XYZ=None, p=np.inf, varwind=False, knownull=False, stop=False, verbose=verbose) randthresh_main(X, K, XYZ=XYZ, p=np.inf, varwind=True, knownull=True, stop=False, verbose=verbose) randthresh_main(Y, K, XYZ=XYZ, p=np.inf, varwind=True, knownull=False, stop=False, verbose=verbose) randthresh_main(X, K, XYZ=XYZ, p=np.inf, varwind=False, knownull=True, stop=True, verbose=verbose) randthresh_main(Y, K, XYZ=XYZ, p=np.inf, varwind=False, knownull=False, stop=True, verbose=verbose) if __name__ == "__main__": unittest.main() nipy-0.3.0/nipy/labs/utils/tests/test_repro.py000066400000000000000000000076061210344137400214430ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Test the design_matrix utilities. 
Note that the tests just looks whether the data produces has correct dimension, not whether it is exact """ import numpy as np from ..simul_multisubject_fmri_dataset import surrogate_2d_dataset from ..reproducibility_measures import (voxel_reproducibility, cluster_reproducibility, peak_reproducibility) def make_dataset(ampli_factor=1.0, n_subj=10): """ Generate a standard multi-subject as a set of multi-subject 2D maps if null, no activation is added """ n_subj = 10 shape = (40, 40) pos = 2 * np.array([[ 6, 7], [10, 10], [15, 10]]) ampli = ampli_factor * np.array([5, 6, 7]) dataset = surrogate_2d_dataset(n_subj=n_subj, shape=shape, pos=pos, ampli=ampli, width=5.0, seed=1) return dataset def apply_repro_analysis(dataset, thresholds=[3.0], method = 'crfx'): """ perform the reproducibility analysis according to the """ from nipy.labs.spatial_models.discrete_domain import \ grid_domain_from_binary_array n_subj, dimx, dimy = dataset.shape func = np.reshape(dataset,(n_subj, dimx * dimy)).T var = np.ones((dimx * dimy, n_subj)) domain = grid_domain_from_binary_array(np.ones((dimx, dimy, 1))) ngroups = 5 sigma = 2.0 csize = 10 niter = 5 verbose = 0 swap = False kap, clt, pkd = [], [], [] for threshold in thresholds: kappa, cls, pks = [], [], [] kwargs = {'threshold':threshold, 'csize':csize} for i in range(niter): k = voxel_reproducibility(func, var, domain, ngroups, method, swap, verbose, **kwargs) kappa.append(k) cld = cluster_reproducibility(func, var, domain, ngroups, sigma, method, swap, verbose, **kwargs) cls.append(cld) pk = peak_reproducibility(func, var, domain, ngroups, sigma, method, swap, verbose, **kwargs) pks.append(pk) kap.append(np.array(kappa)) clt.append(np.array(cls)) pkd.append(np.array(pks)) kap = np.array(kap) clt = np.array(clt) pkd = np.array(pkd) return kap, clt, pkd def test_repro1(): """ Test on the kappa values for a standard dataset using bootstrap """ dataset = make_dataset() kap, clt, pks = apply_repro_analysis(dataset) assert ((kap.mean() > 0.3) & (kap.mean() < 0.9)) assert (pks.mean() > 0.4) def test_repro2(): """ Test on the cluster reproducibility values for a standard dataset using cluster-level rfx, bootstrap """ dataset = make_dataset() kap, clt, pks = apply_repro_analysis(dataset, thresholds=[5.0]) assert (clt.mean()>0.5) def test_repro3(): """ Test on the kappa values for a null dataset using cluster-level rfx, bootstrap """ dataset = make_dataset(ampli_factor=0) kap, clt, pks = apply_repro_analysis(dataset, thresholds=[4.0]) assert (kap.mean(1) < 0.3) assert (clt.mean(1) < 0.3) def test_repro5(): """ Test on the kappa values for a non-null dataset using cluster-level mfx, bootstrap """ dataset = make_dataset() kap, clt, pks = apply_repro_analysis(dataset, method='cmfx') assert (kap.mean(1) > 0.5) assert (clt.mean(1) > 0.5) def test_repro7(): """ Test on the kappa values for a standard dataset using jacknife subsampling """ dataset = make_dataset(n_subj = 101) kap, clt, pks = apply_repro_analysis(dataset, thresholds=[5.0]) assert ((kap.mean() > 0.4)) assert ((clt.mean() > 0.5)) if __name__ == "__main__": import nose nose.run(argv=['', __file__]) nipy-0.3.0/nipy/labs/utils/tests/test_simul_multisubject_fmri_dataset.py000066400000000000000000000106521210344137400267540ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Test surrogate data generation. 
""" import numpy as np from nose.tools import assert_true from nibabel import Nifti1Image from ..simul_multisubject_fmri_dataset import \ surrogate_2d_dataset, surrogate_3d_dataset, surrogate_4d_dataset def test_surrogate_array(): """ Check that with no noise, the surrogate activation correspond to the ones that we specify. 2D version """ # We can't use random positions, as the positions have to be # far-enough not to overlap. pos = np.array([[ 2, 10], [10, 4], [19, 15], [15, 19], [5, 18]]) ampli = np.random.random(5) data = surrogate_2d_dataset(n_subj=1, noise_level=0, spatial_jitter=0, signal_jitter=0, pos=pos, shape=(20,20), ampli=ampli).squeeze() x, y = pos.T np.testing.assert_array_equal(data[x, y], ampli) def test_surrogate_array_3d(): """ Check that with no noise, the surrogate activation correspond to the ones that we specify. 3D version """ # We can't use random positions, as the positions have to be # far-enough not to overlap. pos = np.array([[ 2, 10, 2], [10, 4, 4], [18, 13, 18], [13, 18, 5], [5, 18, 18]]) ampli = np.random.random(5) data = surrogate_3d_dataset(n_subj=1, noise_level=0, spatial_jitter=0, signal_jitter=0, pos=pos, shape=(20,20,20), ampli=ampli).squeeze() x, y, z = pos.T np.testing.assert_array_equal(data[x, y, z], ampli) def test_surrogate_array_3d_write(): """ Check that 3D version spits files when required """ from os import path from tempfile import mkdtemp write_path = path.join(mkdtemp(), 'img.nii') shape = (5, 6, 7) data = surrogate_3d_dataset(shape=shape, out_image_file=write_path) assert_true(path.isfile(write_path)) def test_surrogate_array_3d_mask(): """ Check that 3D version works when a mask is provided """ shape = (5, 6, 7) mask = np.random.rand(*shape) > 0.5 mask_img = Nifti1Image(mask.astype(np.uint8), np.eye(4)) img = surrogate_3d_dataset(mask=mask_img) mean_image = img[mask].mean() assert_true((img[mask == 0] == 0).all()) def test_surrogate_array_4d_shape(): """Run the 4D datageneration; check the output shape and length """ shape = (5, 6, 7) out_shape = shape + (1,) imgs = surrogate_4d_dataset(shape) assert_true(not np.any(np.asarray(imgs[0].shape) - np.asarray(out_shape))) n_sess = 3 imgs = surrogate_4d_dataset(shape, n_sess=n_sess) assert_true(imgs[0].shape == out_shape) assert_true(len(imgs) == n_sess) n_scans = 5 out_shape = shape + (n_scans,) imgs = surrogate_4d_dataset(shape, n_scans=n_scans) assert_true(imgs[0].shape == (out_shape)) def test_surrogate_array_4d_write(): """Run the 4D data generation; check that output images are written """ from os import path from tempfile import mkdtemp n_sess = 3 write_paths = [path.join(mkdtemp(), 'img_%d.nii' % i) for i in range(n_sess)] shape = (5, 6, 7) imgs = surrogate_4d_dataset(shape, out_image_file=write_paths[0]) assert_true(path.isfile(write_paths[0])) imgs = surrogate_4d_dataset(shape, n_sess=n_sess, out_image_file=write_paths) for wp in write_paths: assert_true(path.isfile(wp)) def test_surrogate_array_4d_mask(): """Run the 4D version, with masking """ shape = (5, 5, 5) mask = np.random.rand(*shape) > 0.5 mask_img = Nifti1Image(mask.astype(np.uint8), np.eye(4)) imgs = surrogate_4d_dataset(mask=mask_img) mean_image = imgs[0].get_data()[mask].mean() assert_true((imgs[0].get_data()[mask == 0] < mean_image / 2).all()) def test_surrogate_array_4d_dmtx(): """Run the 4D version, with design_matrix provided """ shape = (5, 5, 5) n_scans = 25 out_shape = shape + (n_scans,) dmtx = np.random.randn(n_scans, 3) imgs = surrogate_4d_dataset(shape, dmtx=dmtx) assert_true(not np.any(np.asarray(imgs[0].shape) 
- np.asarray(out_shape))) if __name__ == "__main__": import nose nose.run(argv=['', __file__]) nipy-0.3.0/nipy/labs/utils/zscore.py000066400000000000000000000005601210344137400174100ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import numpy as np import scipy.stats TINY = 1e-15 def zscore(pvalue): """ Return the z-score corresponding to a given p-value. """ pvalue = np.minimum(np.maximum(pvalue, TINY), 1. - TINY) z = scipy.stats.norm.isf(pvalue) return z nipy-0.3.0/nipy/labs/viz.py000066400000000000000000000006331210344137400155540ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Entry point for doing 2D visualization with NiPy. """ from .viz_tools import cm from .viz_tools.activation_maps import plot_map, plot_anat, demo_plot_map # XXX: These should die from .viz_tools.coord_tools import coord_transform, find_cut_coords from .viz_tools.anat_cache import mni_sform nipy-0.3.0/nipy/labs/viz3d.py000066400000000000000000000004001210344137400157730ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ 3D plotting of neuroimaging volumes. """ from .viz_tools.maps_3d import affine_img_src, plot_map_3d, plot_anat_3d,\ demo_plot_map_3d nipy-0.3.0/nipy/labs/viz_tools/000077500000000000000000000000001210344137400164205ustar00rootroot00000000000000nipy-0.3.0/nipy/labs/viz_tools/__init__.py000066400000000000000000000000001210344137400205170ustar00rootroot00000000000000nipy-0.3.0/nipy/labs/viz_tools/activation_maps.py000066400000000000000000000342631210344137400221630ustar00rootroot00000000000000#!/usr/bin/env python """ Functions to do automatic visualization of activation-like maps. For 2D-only visualization, only matplotlib is required. For 3D visualization, Mayavi, version 3.0 or greater, is required. For a demo, see the 'demo_plot_map' function. """ # Author: Gael Varoquaux # License: BSD # Standard library imports import warnings import operator # Standard scientific libraries imports (more specific imports are # delayed, so that the part module can be used without them). import numpy as np # Import pylab from nipy.utils.skip_test import skip_if_running_nose try: import pylab as pl except ImportError: skip_if_running_nose('Could not import matplotlib') from .anat_cache import mni_sform, mni_sform_inv, _AnatCache from .coord_tools import coord_transform from .slicers import SLICERS, _xyz_order from edge_detect import _fast_abs_percentile ################################################################################ # Helper functions for 2D plotting of activation maps ################################################################################ def plot_map(map, affine, cut_coords=None, anat=None, anat_affine=None, slicer='ortho', figure=None, axes=None, title=None, threshold=None, annotate=True, draw_cross=True, do3d=False, threshold_3d=None, view_3d=(38.5, 70.5, 300, (-2.7, -12, 9.1)), black_bg=False, **kwargs): """ Plot three cuts of a given activation map (Frontal, Axial, and Lateral) Parameters ---------- map : 3D ndarray The activation map, as a 3D image. affine : 4x4 ndarray The affine matrix going from image voxel space to MNI space. cut_coords: None, or a tuple of floats The MNI coordinates of the point where the cut is performed, in MNI coordinates and order. 
If slicer is 'ortho', this should be a 3-tuple: (x, y, z) For slicer == 'x', 'y', or 'z', these are the coordinates of each cut in the corresponding direction. If None is given, the cuts are calculated automatically. anat : 3D ndarray or False, optional The anatomical image to be used as a background. If None, the MNI152 T1 1mm template is used. If False, no anat is displayed. anat_affine : 4x4 ndarray, optional The affine matrix going from the anatomical image voxel space to MNI space. This parameter is not used when the default anatomical is used, but it is compulsory when using an explicit anatomical image. slicer: {'ortho', 'x', 'y', 'z'} Choose the direction of the cuts. With 'ortho' three cuts are performed in orthogonal directions. figure : integer or matplotlib figure, optional Matplotlib figure used or its number. If None is given, a new figure is created. axes : matplotlib axes or 4 tuple of float: (xmin, ymin, width, height), optional The axes, or the coordinates, in matplotlib figure space, of the axes used to display the plot. If None, the complete figure is used. title : string, optional The title displayed on the figure. threshold : a number, None, or 'auto' If None is given, the maps are not thresholded. If a number is given, it is used to threshold the maps: values below the threshold are plotted as transparent. If auto is given, the threshold is determined magically by analysis of the map. annotate: boolean, optional If annotate is True, positions and left/right annotation are added to the plot. draw_cross: boolean, optional If draw_cross is True, a cross is drawn on the plot to indicate the cut position. do3d: {True, False or 'interactive'}, optional If True, Mayavi is used to plot a 3D view of the map in addition to the slicing. If 'interactive', the 3D visualization is displayed in an additional interactive window. threshold_3d: The threshold to use for the 3D view (if any). Defaults to the same threshold as that used for the 2D view. view_3d: tuple, The view used to take the screenshot: azimuth, elevation, distance and focalpoint, see the docstring of mlab.view. black_bg: boolean, optional If True, the background of the image is set to be black. If you wish to save figures with a black background, you will need to pass "facecolor='k', edgecolor='k'" to pylab's savefig. kwargs: extra keyword arguments, optional Extra keyword arguments passed to pylab.imshow Notes ----- Arrays should be passed in numpy convention: (x, y, z) ordered.
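For illustration, a minimal call (assuming `map` is a 3D ndarray and `affine` its 4x4 voxel-to-world matrix) could look like: plot_map(map, affine, threshold='auto', title='my map').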
Use masked arrays to create transparency: import numpy as np map = np.ma.masked_less(map, 0.5) plot_map(map, affine) """ map, affine = _xyz_order(map, affine) nan_mask = np.isnan(np.asarray(map)) if np.any(nan_mask): map = map.copy() map[nan_mask] = 0 del nan_mask # Deal with automatic settings of plot parameters if threshold == 'auto': # Threshold epsilon above a percentile value, to be sure that some # voxels are indeed threshold threshold = _fast_abs_percentile(map) + 1e-5 if do3d: try: try: from mayavi import version except ImportError: from enthought.mayavi import version if not int(version.version[0]) > 2: raise ImportError except ImportError: warnings.warn('Mayavi > 3.x not installed, plotting only 2D') do3d = False slicer = SLICERS[slicer].init_with_figure(data=map, affine=affine, threshold=threshold, cut_coords=cut_coords, figure=figure, axes=axes, black_bg=black_bg, leave_space=do3d) # Use Mayavi for the 3D plotting if do3d: from .maps_3d import plot_map_3d, m2screenshot try: from tvtk.api import tvtk except ImportError: from enthought.tvtk.api import tvtk version = tvtk.Version() offscreen = True if (version.vtk_major_version, version.vtk_minor_version) < (5, 2): offscreen = False if do3d == 'interactive': offscreen = False cmap = kwargs.get('cmap', pl.cm.cmap_d[pl.rcParams['image.cmap']]) # Computing vmin and vmax is costly in time, and is needed # later, so we compute them now, and store them for future # use vmin = kwargs.get('vmin', map.min()) kwargs['vmin'] = vmin vmax = kwargs.get('vmax', map.max()) kwargs['vmax'] = vmax try: from mayavi import mlab except ImportError: from enthought.mayavi import mlab if threshold_3d is None: threshold_3d = threshold plot_map_3d(np.asarray(map), affine, cut_coords=cut_coords, anat=anat, anat_affine=anat_affine, offscreen=offscreen, cmap=cmap, threshold=threshold_3d, view=view_3d, vmin=vmin, vmax=vmax) ax = slicer.axes.values()[0].ax.figure.add_axes((0.001, 0, 0.29, 1)) ax.axis('off') m2screenshot(mpl_axes=ax) if offscreen: # Clean up, so that the offscreen engine doesn't become the # default mlab.clf() engine = mlab.get_engine() try: from mayavi.core.registry import registry except: from enthought.mayavi.core.registry import registry for key, value in registry.engines.iteritems(): if value is engine: registry.engines.pop(key) break if threshold: map = np.ma.masked_inside(map, -threshold, threshold, copy=False) _plot_anat(slicer, anat, anat_affine, title=title, annotate=annotate, draw_cross=draw_cross) slicer.plot_map(map, affine, **kwargs) return slicer def _plot_anat(slicer, anat, anat_affine, title=None, annotate=True, draw_cross=True, dim=False, cmap=pl.cm.gray): """ Internal function used to plot anatomy """ canonical_anat = False if anat is None: try: anat, anat_affine, vmax_anat = _AnatCache.get_anat() canonical_anat = True except OSError, e: anat = False warnings.warn(repr(e)) black_bg = slicer._black_bg # XXX: Check that we should indeed plot an anat: we have one, and the # cut_coords are in its range if anat is not False: if canonical_anat: # We special-case the 'canonical anat', as we don't need # to do a few transforms to it. 
vmin = 0 vmax = vmax_anat elif dim: vmin = anat.min() vmax = anat.max() else: vmin = None vmax = None anat, anat_affine = _xyz_order(anat, anat_affine) if dim: vmean = .5*(vmin + vmax) ptp = .5*(vmax - vmin) if not operator.isNumberType(dim): dim = .6 if black_bg: vmax = vmean + (1+dim)*ptp else: vmin = vmean - (1+dim)*ptp slicer.plot_map(anat, anat_affine, cmap=cmap, vmin=vmin, vmax=vmax) if annotate: slicer.annotate() if draw_cross: slicer.draw_cross() if black_bg: # To have a black background in PDF, we need to create a # patch in black for the background for ax in slicer.axes.values(): ax.ax.imshow(np.zeros((2, 2, 3)), extent=[-5000, 5000, -5000, 5000], zorder=-500) if title is not None and not title == '': slicer.title(title) return slicer def plot_anat(anat=None, anat_affine=None, cut_coords=None, slicer='ortho', figure=None, axes=None, title=None, annotate=True, draw_cross=True, black_bg=False, dim=False, cmap=pl.cm.gray): """ Plot three cuts of an anatomical image (Frontal, Axial, and Lateral) Parameters ---------- anat : 3D ndarray, optional The anatomical image to be used as a background. If None is given, nipy tries to find a T1 template. anat_affine : 4x4 ndarray, optional The affine matrix going from the anatomical image voxel space to MNI space. This parameter is not used when the default anatomical is used, but it is compulsory when using an explicite anatomical image. figure : integer or matplotlib figure, optional Matplotlib figure used or its number. If None is given, a new figure is created. cut_coords: None, or a tuple of floats The MNI coordinates of the point where the cut is performed, in MNI coordinates and order. If slicer is 'ortho', this should be a 3-tuple: (x, y, z) For slicer == 'x', 'y', or 'z', then these are the coordinates of each cut in the corresponding direction. If None is given, the cuts is calculated automaticaly. slicer: {'ortho', 'x', 'y', 'z'} Choose the direction of the cuts. With 'ortho' three cuts are performed in orthogonal directions figure : integer or matplotlib figure, optional Matplotlib figure used or its number. If None is given, a new figure is created. axes : matplotlib axes or 4 tuple of float: (xmin, ymin, width, height), optional The axes, or the coordinates, in matplotlib figure space, of the axes used to display the plot. If None, the complete figure is used. title : string, optional The title dispayed on the figure. annotate: boolean, optional If annotate is True, positions and left/right annotation are added to the plot. draw_cross: boolean, optional If draw_cross is True, a cross is drawn on the plot to indicate the cut plosition. black_bg: boolean, optional If True, the background of the image is set to be black. If you whish to save figures with a black background, you will need to pass "facecolor='k', edgecolor='k'" to pylab's savefig. cmap: matplotlib colormap, optional The colormap for the anat Notes ----- Arrays should be passed in numpy convention: (x, y, z) ordered. """ slicer = SLICERS[slicer].init_with_figure(data=anat, affine=anat_affine, threshold=0, cut_coords=cut_coords, figure=figure, axes=axes, black_bg=black_bg) _plot_anat(slicer, anat, anat_affine, title=title, annotate=annotate, draw_cross=draw_cross, dim=dim, cmap=cmap) return slicer def demo_plot_map(do3d=False, **kwargs): """ Demo activation map plotting. 
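Meant to be called interactively, for example demo_plot_map() or demo_plot_map(do3d=True); extra keyword arguments are forwarded to plot_map, so e.g. demo_plot_map(black_bg=True) also works (an illustrative sketch of the call pattern).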
""" map = np.zeros((182, 218, 182)) # Color a asymetric rectangle around Broca area: x, y, z = -52, 10, 22 x_map, y_map, z_map = coord_transform(x, y, z, mni_sform_inv) # Compare to values obtained using fslview. We need to add one as # voxels do not start at 0 in fslview. assert x_map == 142 assert y_map +1 == 137 assert z_map +1 == 95 map[x_map-5:x_map+5, y_map-3:y_map+3, z_map-10:z_map+10] = 1 return plot_map(map, mni_sform, threshold='auto', title="Broca's area", do3d=do3d, **kwargs) nipy-0.3.0/nipy/labs/viz_tools/anat_cache.py000066400000000000000000000064641210344137400210520ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ 3D visualization of activation maps using Mayavi """ # Author: Gael Varoquaux # License: BSD # Standard library imports import os # Standard scientific libraries imports (more specific imports are # delayed, so that the part module can be used without them). import numpy as np from scipy import ndimage from nibabel import load # The sform for MNI templates mni_sform = np.array([[-1, 0, 0, 90], [ 0, 1, 0, -126], [ 0, 0, 1, -72], [ 0, 0, 0, 1]]) mni_sform_inv = np.linalg.inv(mni_sform) def find_mni_template(): """ Try to find an MNI template on the disk. """ from nipy.utils import templates, DataError try: filename = templates.get_filename( 'ICBM152', '1mm', 'T1_brain.nii.gz') if os.path.exists(filename): return filename except DataError: pass possible_paths = [ ('', 'usr', 'share', 'fsl', 'data', 'standard', 'avg152T1_brain.nii.gz'), ('', 'usr', 'share', 'data', 'fsl-mni152-templates', 'avg152T1_brain.nii.gz'), ('', 'usr', 'local', 'share', 'fsl', 'data', 'standard', 'avg152T1_brain.nii.gz'), ] if 'FSLDIR' in os.environ: fsl_path = os.environ['FSLDIR'].split(os.sep) fsl_path.extend(('data', 'standard', 'avg152T1_brain.nii.gz')) possible_paths.append(fsl_path) for path in possible_paths: filename = os.sep.join((path)) if os.path.exists(filename): return filename ################################################################################ # Caching of the MNI template. ################################################################################ class _AnatCache(object): """ Class to store the anat array in cache, to avoid reloading it each time. """ anat = None anat_sform = None blurred = None @classmethod def get_anat(cls): filename = find_mni_template() if cls.anat is None: if filename is None: raise OSError('Cannot find template file T1_brain.nii.gz ' 'required to plot anatomy, see the nipy documentation ' 'installaton section for how to install template files.') anat_im = load(filename) anat = anat_im.get_data() anat = anat.astype(np.float) anat_mask = ndimage.morphology.binary_fill_holes(anat > 0) anat = np.ma.masked_array(anat, np.logical_not(anat_mask)) cls.anat_sform = anat_im.get_affine() cls.anat = anat cls.anat_max = anat.max() return cls.anat, cls.anat_sform, cls.anat_max @classmethod def get_blurred(cls): if cls.blurred is not None: return cls.blurred anat, _, _ = cls.get_anat() cls.blurred = ndimage.gaussian_filter( (ndimage.morphology.binary_fill_holes( ndimage.gaussian_filter( (anat > 4800).astype(np.float), 6) > 0.5 )).astype(np.float), 2).T.ravel() return cls.blurred nipy-0.3.0/nipy/labs/viz_tools/cm.py000066400000000000000000000240611210344137400173740ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Matplotlib colormaps useful for neuroimaging. 
""" import numpy as _np from nipy.utils.skip_test import skip_if_running_nose try: from matplotlib import cm as _cm from matplotlib import colors as _colors except ImportError: skip_if_running_nose('Could not import matplotlib') ################################################################################ # Custom colormaps for two-tailed symmetric statistics ################################################################################ ################################################################################ # Helper functions def _rotate_cmap(cmap, swap_order=('green', 'red', 'blue')): """ Utility function to swap the colors of a colormap. """ orig_cdict = cmap._segmentdata.copy() cdict = dict() cdict['green'] = [(p, c1, c2) for (p, c1, c2) in orig_cdict[swap_order[0]]] cdict['blue'] = [(p, c1, c2) for (p, c1, c2) in orig_cdict[swap_order[1]]] cdict['red'] = [(p, c1, c2) for (p, c1, c2) in orig_cdict[swap_order[2]]] return cdict def _pigtailed_cmap(cmap, swap_order=('green', 'red', 'blue')): """ Utility function to make a new colormap by concatenating a colormap with its reverse. """ orig_cdict = cmap._segmentdata.copy() cdict = dict() cdict['green'] = [(0.5*(1-p), c1, c2) for (p, c1, c2) in reversed(orig_cdict[swap_order[0]])] cdict['blue'] = [(0.5*(1-p), c1, c2) for (p, c1, c2) in reversed(orig_cdict[swap_order[1]])] cdict['red'] = [(0.5*(1-p), c1, c2) for (p, c1, c2) in reversed(orig_cdict[swap_order[2]])] for color in ('red', 'green', 'blue'): cdict[color].extend([(0.5*(1+p), c1, c2) for (p, c1, c2) in orig_cdict[color]]) return cdict def _concat_cmap(cmap1, cmap2): """ Utility function to make a new colormap by concatenating two colormaps. """ cdict = dict() cdict1 = cmap1._segmentdata.copy() cdict2 = cmap2._segmentdata.copy() if not hasattr(cdict1['red'], '__call__'): for c in ['red', 'green', 'blue']: cdict[c] = [(0.5*p, c1, c2) for (p, c1, c2) in cdict1[c]] else: for c in ['red', 'green', 'blue']: cdict[c] = [] ps = _np.linspace(0, 1, 10) colors = cmap1(ps) for p, (r, g, b, a) in zip(ps, colors): cdict['red'].append((.5*p, r, r)) cdict['green'].append((.5*p, g, g)) cdict['blue'].append((.5*p, b, b)) if not hasattr(cdict2['red'], '__call__'): for c in ['red', 'green', 'blue']: cdict[c].extend([(0.5*(1+p), c1, c2) for (p, c1, c2) in cdict2[c]]) else: ps = _np.linspace(0, 1, 10) colors = cmap2(ps) for p, (r, g, b, a) in zip(ps, colors): cdict['red'].append((.5*(1+p), r, r)) cdict['green'].append((.5*(1+p), g, g)) cdict['blue'].append((.5*(1+p), b, b)) return cdict def alpha_cmap(color, name=''): """ Return a colormap with the given color, and alpha going from zero to 1. 
Parameters ---------- color: (r, g, b), or a string A triplet of floats ranging from 0 to 1, or a matplotlib color string """ red, green, blue = _colors.colorConverter.to_rgb(color) if name == '' and hasattr(color, 'startswith'): name = color cmapspec = [(red, green, blue, 0.), (red, green, blue, 1.), ] cmap = _colors.LinearSegmentedColormap.from_list( '%s_transparent' % name, cmapspec, _cm.LUTSIZE) cmap._init() cmap._lut[:, -1] = _np.linspace(.5, 1.0, cmap._lut.shape[0]) cmap._lut[-1, -1] = 0 return cmap ################################################################################ # Our colormaps definition _cmaps_data = dict( cold_hot = _pigtailed_cmap(_cm.hot), brown_blue = _pigtailed_cmap(_cm.bone), cyan_copper = _pigtailed_cmap(_cm.copper), cyan_orange = _pigtailed_cmap(_cm.YlOrBr_r), blue_red = _pigtailed_cmap(_cm.Reds_r), brown_cyan = _pigtailed_cmap(_cm.Blues_r), purple_green = _pigtailed_cmap(_cm.Greens_r, swap_order=('red', 'blue', 'green')), purple_blue = _pigtailed_cmap(_cm.Blues_r, swap_order=('red', 'blue', 'green')), blue_orange = _pigtailed_cmap(_cm.Oranges_r, swap_order=('green', 'red', 'blue')), black_blue = _rotate_cmap(_cm.hot), black_purple = _rotate_cmap(_cm.hot, swap_order=('blue', 'red', 'green')), black_pink = _rotate_cmap(_cm.hot, swap_order=('blue', 'green', 'red')), black_green = _rotate_cmap(_cm.hot, swap_order=('red', 'blue', 'green')), black_red = _cm.hot._segmentdata.copy(), ) if hasattr(_cm, 'ocean'): # MPL 0.99 doesn't have Ocean _cmaps_data['ocean_hot'] = _concat_cmap(_cm.ocean, _cm.hot_r) if hasattr(_cm, 'afmhot'): # or afmhot _cmaps_data['hot_white_bone'] = _concat_cmap(_cm.afmhot, _cm.bone_r) _cmaps_data['hot_black_bone'] = _concat_cmap(_cm.afmhot_r, _cm.bone) ################################################################################ # Build colormaps and their reverse. _cmap_d = dict() for _cmapname in _cmaps_data.keys(): _cmapname_r = _cmapname + '_r' _cmapspec = _cmaps_data[_cmapname] if 'red' in _cmapspec: _cmaps_data[_cmapname_r] = _cm.revcmap(_cmapspec) _cmap_d[_cmapname] = _colors.LinearSegmentedColormap( _cmapname, _cmapspec, _cm.LUTSIZE) _cmap_d[_cmapname_r] = _colors.LinearSegmentedColormap( _cmapname_r, _cmaps_data[_cmapname_r], _cm.LUTSIZE) else: _revspec = list(reversed(_cmapspec)) if len(_revspec[0]) == 2: # e.g., (1, (1.0, 0.0, 1.0)) _revspec = [(1.0 - a, b) for a, b in _revspec] _cmaps_data[_cmapname_r] = _revspec _cmap_d[_cmapname] = _colors.LinearSegmentedColormap.from_list( _cmapname, _cmapspec, _cm.LUTSIZE) _cmap_d[_cmapname_r] = _colors.LinearSegmentedColormap.from_list( _cmapname_r, _revspec, _cm.LUTSIZE) ################################################################################ # A few transparent colormaps for color, name in (((1, 0, 0), 'red'), ((0, 1, 0), 'blue'), ((0, 0, 1), 'green'), ): _cmap_d['%s_transparent' % name] = alpha_cmap(color, name=name) locals().update(_cmap_d) ################################################################################ # Utility to replace a colormap by another in an interval ################################################################################ def dim_cmap(cmap, factor=.3, to_white=True): """ Dim a colormap to white, or to black. """ assert factor >= 0 and factor <=1, ValueError( 'Dimming factor must be larger than 0 and smaller than 1, %s was passed.' 
% factor) if to_white: dimmer = lambda c: 1 - factor*(1-c) else: dimmer = lambda c: factor*c cdict = cmap._segmentdata.copy() for c_index, color in enumerate(('red', 'green', 'blue')): color_lst = list() for value, c1, c2 in cdict[color]: color_lst.append((value, dimmer(c1), dimmer(c2))) cdict[color] = color_lst return _colors.LinearSegmentedColormap( '%s_dimmed' % cmap.name, cdict, _cm.LUTSIZE) def replace_inside(outer_cmap, inner_cmap, vmin, vmax): """ Replace a colormap by another inside a pair of values. """ assert vmin < vmax, ValueError('vmin must be smaller than vmax') assert vmin >= 0, ValueError('vmin must be larger than 0, %s was passed.' % vmin) assert vmax <= 1, ValueError('vmax must be smaller than 1, %s was passed.' % vmax) outer_cdict = outer_cmap._segmentdata.copy() inner_cdict = inner_cmap._segmentdata.copy() cdict = dict() for this_cdict, cmap in [(outer_cdict, outer_cmap), (inner_cdict, inner_cmap)]: if hasattr(this_cdict['red'], '__call__'): ps = _np.linspace(0, 1, 25) colors = cmap(ps) this_cdict['red'] = list() this_cdict['green'] = list() this_cdict['blue'] = list() for p, (r, g, b, a) in zip(ps, colors): this_cdict['red'].append((p, r, r)) this_cdict['green'].append((p, g, g)) this_cdict['blue'].append((p, b, b)) for c_index, color in enumerate(('red', 'green', 'blue')): color_lst = list() for value, c1, c2 in outer_cdict[color]: if value >= vmin: break color_lst.append((value, c1, c2)) color_lst.append((vmin, outer_cmap(vmin)[c_index], inner_cmap(vmin)[c_index])) for value, c1, c2 in inner_cdict[color]: if value <= vmin: continue if value >= vmax: break color_lst.append((value, c1, c2)) color_lst.append((vmax, inner_cmap(vmax)[c_index], outer_cmap(vmax)[c_index])) for value, c1, c2 in outer_cdict[color]: if value <= vmax: continue color_lst.append((value, c1, c2)) cdict[color] = color_lst return _colors.LinearSegmentedColormap( '%s_inside_%s' % (inner_cmap.name, outer_cmap.name), cdict, _cm.LUTSIZE) nipy-0.3.0/nipy/labs/viz_tools/coord_tools.py000066400000000000000000000122161210344137400213220ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Misc tools to find activations and cut on maps """ # Author: Gael Varoquaux # License: BSD # Standard scientific libraries imports (more specific imports are # delayed, so that the part module can be used without them). import numpy as np from scipy import stats, ndimage # Local imports from ..mask import largest_cc from ..datasets.transforms.affine_utils import get_bounds ################################################################################ # Functions for automatic choice of cuts coordinates ################################################################################ def coord_transform(x, y, z, affine): """ Convert the x, y, z coordinates from one image space to another space. Parameters ---------- x : number or ndarray The x coordinates in the input space y : number or ndarray The y coordinates in the input space z : number or ndarray The z coordinates in the input space affine : 2D 4x4 ndarray affine that maps from input to output space. Returns ------- x : number or ndarray The x coordinates in the output space y : number or ndarray The y coordinates in the output space z : number or ndarray The z coordinates in the output space Warning: The x, y and z have their Talairach ordering, not 3D numy image ordering. 
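For example (illustrative, matching the use made of this function elsewhere in the package), mapping an MNI millimetre position to voxel indices: x_vox, y_vox, z_vox = coord_transform(-52, 10, 22, np.linalg.inv(affine))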
""" coords = np.c_[np.atleast_1d(x).flat, np.atleast_1d(y).flat, np.atleast_1d(z).flat, np.ones_like(np.atleast_1d(z).flat)].T x, y, z, _ = np.dot(affine, coords) return x.squeeze(), y.squeeze(), z.squeeze() def find_cut_coords(map, mask=None, activation_threshold=None): """ Find the center of the largest activation connect component. Parameters ----------- map : 3D ndarray The activation map, as a 3D image. mask : 3D ndarray, boolean, optional An optional brain mask. activation_threshold : float, optional The lower threshold to the positive activation. If None, the activation threshold is computed using find_activation. Returns ------- x: float the x coordinate in voxels. y: float the y coordinate in voxels. z: float the z coordinate in voxels. """ # To speed up computations, we work with partial views of the array, # and keep track of the offset offset = np.zeros(3) # Deal with masked arrays: if hasattr(map, 'mask'): not_mask = np.logical_not(map.mask) if mask is None: mask = not_mask else: mask *= not_mask map = np.asarray(map) my_map = map.copy() if mask is not None: slice_x, slice_y, slice_z = ndimage.find_objects(mask)[0] my_map = my_map[slice_x, slice_y, slice_z] mask = mask[slice_x, slice_y, slice_z] my_map *= mask offset += [slice_x.start, slice_y.start, slice_z.start] # Testing min and max is faster than np.all(my_map == 0) if (my_map.max() == 0) and (my_map.min() == 0): return .5*np.array(map.shape) if activation_threshold is None: activation_threshold = stats.scoreatpercentile( np.abs(my_map[my_map !=0]).ravel(), 80) mask = np.abs(my_map) > activation_threshold-1.e-15 mask = largest_cc(mask) slice_x, slice_y, slice_z = ndimage.find_objects(mask)[0] my_map = my_map[slice_x, slice_y, slice_z] mask = mask[slice_x, slice_y, slice_z] my_map *= mask offset += [slice_x.start, slice_y.start, slice_z.start] # For the second threshold, we use a mean, as it is much faster, # althought it is less robust second_threshold = np.abs(np.mean(my_map[mask])) second_mask = (np.abs(my_map)>second_threshold) if second_mask.sum() > 50: my_map *= largest_cc(second_mask) cut_coords = ndimage.center_of_mass(np.abs(my_map)) return cut_coords + offset ################################################################################ def get_mask_bounds(mask, affine): """ Return the world-space bounds occupied by a mask given an affine. Notes ----- The mask should have only one connect component. The affine should be diagonal or diagonal-permuted. 
""" (xmin, xmax), (ymin, ymax), (zmin, zmax) = get_bounds(mask.shape, affine) x_slice, y_slice, z_slice = ndimage.find_objects(mask)[0] x_width, y_width, z_width = mask.shape xmin, xmax = (xmin + x_slice.start*(xmax - xmin)/x_width, xmin + x_slice.stop *(xmax - xmin)/x_width) ymin, ymax = (ymin + y_slice.start*(ymax - ymin)/y_width, ymin + y_slice.stop *(ymax - ymin)/y_width) zmin, zmax = (zmin + z_slice.start*(zmax - zmin)/z_width, zmin + z_slice.stop *(zmax - zmin)/z_width) return xmin, xmax, ymin, ymax, zmin, zmax nipy-0.3.0/nipy/labs/viz_tools/edge_detect.py000066400000000000000000000120321210344137400212240ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Edget detection routines """ import warnings import numpy as np from scipy import ndimage, signal try: # Protect this import as it is compiled code from nipy.algorithms.statistics import quantile except ImportError, e: warnings.warn('Could not import fast quantile function: %s' % e) quantile = None ################################################################################ # Edge detection def _fast_abs_percentile(map, percentile=80): """ A fast version of the percentile of the absolute value. """ if hasattr(map, 'mask'): map = np.asarray(map[np.logical_not(map.mask)]) map = np.abs(map) map = map.ravel() if quantile is not None: return quantile(map, .01*percentile) map.sort() nb = map.size return map[.01*percentile*nb] def _orientation_kernel(t): """ structure elements for calculating the value of neighbors in several directions """ sin = np.sin pi = np.pi t = pi*t arr = np.array([[sin(t), sin(t+.5*pi), sin(t+pi) ], [sin(t+1.5*pi), 0, sin(t+1.5*pi)], [sin(t+pi), sin(t+.5*pi), sin(t) ]]) return np.round(.5*((1+arr))**2).astype(np.bool) def _edge_detect(image, high_threshold=.75, low_threshold=.4): """ Edge detection for 2D images based on Canny filtering. Parameters ========== image: 2D array The image on which edge detection is applied high_threshold: float, optional The quantile defining the upper threshold of the hysteries thresholding: decrease this to keep more edges low_threshold: float, optional The quantile defining the lower threshold of the hysteries thresholding: decrease this to extract wider edges Returns ======== grad_mag: 2D array of floats The magnitude of the gradient edge_mask: 2D array of booleans A mask of where have edges been detected Notes ====== This function is based on a Canny filter, however it has been taylored to visualization purposes on brain images: don't use it in the general case. It computes the norm of the gradient, extracts the ridge by keeping only local maximum in each direction, and performs hysteresis filtering to keep only edges with high gradient magnitude. """ # This code is loosely based on code by Stefan van der Waalt # Convert to floats to avoid overflows np_err = np.seterr(all='ignore') img = signal.wiener(image.astype(np.float)) np.seterr(**np_err) # Where the noise variance is 0, Wiener can create nans img[np.isnan(img)] = image[np.isnan(img)] img /= img.max() grad_x = ndimage.sobel(img, mode='constant', axis=0) grad_y = ndimage.sobel(img, mode='constant', axis=1) grad_mag = np.sqrt(grad_x**2 + grad_y**2) grad_angle = np.arctan2(grad_y, grad_x) # Scale the angles in the range [0, 2] grad_angle = (grad_angle + np.pi) / np.pi # Non-maximal suppression: an edge pixel is only good if its magnitude is # greater than its neighbors normal to the edge direction. 
thinner = np.zeros(grad_mag.shape, dtype=np.bool) for angle in np.arange(0, 2, .25): thinner = thinner | ( (grad_mag > .85*ndimage.maximum_filter(grad_mag, footprint=_orientation_kernel(angle))) & (((grad_angle - angle) % 2) < .75) ) # Remove the edges next to the side of the image: they are not reliable thinner[0] = 0 thinner[-1] = 0 thinner[:, 0] = 0 thinner[:, -1] = 0 thinned_grad = thinner * grad_mag # Hysteresis thresholding: find seeds above a high threshold, then # expand out until we go below the low threshold grad_values = thinned_grad[thinner] high = thinned_grad > _fast_abs_percentile(grad_values, 100*high_threshold) low = thinned_grad > _fast_abs_percentile(grad_values, 100*low_threshold) edge_mask = ndimage.binary_dilation(high, structure=np.ones((3, 3)), iterations=-1, mask=low) return grad_mag, edge_mask def _edge_map(image): """ Return a maps of edges suitable for visualization. Parameters ========== image: 2D array The image that the edges are extracted from. Returns ======== edge_mask: 2D masked array A mask of the edge as a masked array with parts without edges masked and the large extents detected with lower coefficients. """ edge_mask = _edge_detect(image)[-1] edge_mask = edge_mask.astype(np.float) edge_mask = -np.sqrt(ndimage.distance_transform_cdt(edge_mask)) edge_mask[edge_mask != 0] -= -.05+edge_mask.min() edge_mask = np.ma.masked_less(edge_mask, .01) return edge_mask nipy-0.3.0/nipy/labs/viz_tools/maps_3d.py000066400000000000000000000342561210344137400203320ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ 3D visualization of activation maps using Mayavi """ # Author: Gael Varoquaux # License: BSD import os import tempfile # Standard scientific libraries imports (more specific imports are # delayed, so that the part module can be used without them). import numpy as np from scipy import stats # Local imports from .anat_cache import mni_sform, mni_sform_inv, _AnatCache from .coord_tools import coord_transform # A module global to avoid creating multiple time an offscreen engine. off_screen_engine = None ################################################################################ # Helper functions def affine_img_src(data, affine, scale=1, name='AffineImage', reverse_x=False): """ Make a Mayavi source defined by a 3D array and an affine, for wich the voxel of the 3D array are mapped by the affine. Parameters ----------- data: 3D ndarray The data arrays affine: (4 x 4) ndarray The (4 x 4) affine matrix relating voxels to world coordinates. scale: float, optional An optional addition scaling factor. name: string, optional The name of the Mayavi source created. reverse_x: boolean, optional Reverse the x (lateral) axis. Useful to compared with images in radiologic convention. Notes ------ The affine should be diagonal. """ # Late import to avoid triggering wx imports before needed. 
try: from mayavi.sources.api import ArraySource except ImportError: # Try out old install of Mayavi, with namespace packages from enthought.mayavi.sources.api import ArraySource center = np.r_[0, 0, 0, 1] spacing = np.diag(affine)[:3] origin = np.dot(affine, center)[:3] if reverse_x: # Radiologic convention spacing[0] *= -1 origin[0] *= -1 src = ArraySource(scalar_data=np.asarray(data, dtype=np.float), name=name, spacing=scale*spacing, origin=scale*origin) return src ################################################################################ # Mayavi helpers def autocrop_img(img, bg_color): red, green, blue = bg_color outline = ( (img[..., 0] != red) +(img[..., 1] != green) +(img[..., 2] != blue) ) outline_x = outline.sum(axis=0) outline_y = outline.sum(axis=1) outline_x = np.where(outline_x)[0] outline_y = np.where(outline_y)[0] if len(outline_x) == 0: return img else: x_min = outline_x.min() x_max = outline_x.max() if len(outline_y) == 0: return img else: y_min = outline_y.min() y_max = outline_y.max() return img[y_min:y_max, x_min:x_max] def m2screenshot(mayavi_fig=None, mpl_axes=None, autocrop=True): """ Capture a screeshot of the Mayavi figure and display it in the matplotlib axes. """ import pylab as pl # Late import to avoid triggering wx imports before needed. try: from mayavi import mlab except ImportError: # Try out old install of Mayavi, with namespace packages from enthought.mayavi import mlab if mayavi_fig is None: mayavi_fig = mlab.gcf() else: mlab.figure(mayavi_fig) if mpl_axes is not None: pl.axes(mpl_axes) filename = tempfile.mktemp('.png') mlab.savefig(filename, figure=mayavi_fig) image3d = pl.imread(filename) if autocrop: bg_color = mayavi_fig.scene.background image3d = autocrop_img(image3d, bg_color) pl.imshow(image3d) pl.axis('off') os.unlink(filename) # XXX: Should switch back to previous MPL axes: we have a side effect # here. ################################################################################ # Anatomy outline ################################################################################ def plot_anat_3d(anat=None, anat_affine=None, scale=1, sulci_opacity=0.5, gyri_opacity=0.3, opacity=None, skull_percentile=78, wm_percentile=79, outline_color=None): """ 3D anatomical display Parameters ---------- skull_percentile : float, optional The percentile of the values in the image that delimit the skull from the outside of the brain. The smaller the fraction of you field of view is occupied by the brain, the larger this value should be. wm_percentile : float, optional The percentile of the values in the image that delimit the white matter from the grey matter. Typical this is skull_percentile + 1 """ # Late import to avoid triggering wx imports before needed. try: from mayavi import mlab except ImportError: # Try out old install of Mayavi, with namespace packages from enthought.mayavi import mlab fig = mlab.gcf() disable_render = fig.scene.disable_render fig.scene.disable_render = True if anat is None: anat, anat_affine, anat_max = _AnatCache.get_anat() anat_blurred = _AnatCache.get_blurred() skull_threshold = 4800 inner_threshold = 5000 upper_threshold = 7227.8 else: from scipy import ndimage # XXX: This should be in a separate function voxel_size = np.sqrt((anat_affine[:3, :3]**2).sum()/3.) 
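# voxel_size is the root-mean-square voxel edge length in the units of the affine (typically mm);
# it is used below so that the Gaussian smoothing widths are expressed in physical units rather than voxels.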
skull_threshold = stats.scoreatpercentile(anat.ravel(), skull_percentile) inner_threshold = stats.scoreatpercentile(anat.ravel(), wm_percentile) upper_threshold = anat.max() anat_blurred = ndimage.gaussian_filter( (ndimage.morphology.binary_fill_holes( ndimage.gaussian_filter( (anat > skull_threshold).astype(np.float), 6./voxel_size) > 0.5 )).astype(np.float), 2./voxel_size).T.ravel() if opacity is None: try: from tvtk.api import tvtk except ImportError: # Try out old install of Mayavi, with namespace packages from enthought.tvtk.api import tvtk version = tvtk.Version() if (version.vtk_major_version, version.vtk_minor_version) < (5, 2): opacity = .99 else: opacity = 1 ########################################################################### # Display the cortical surface (flattenned) anat_src = affine_img_src(anat, anat_affine, scale=scale, name='Anat') anat_src.image_data.point_data.add_array(anat_blurred) anat_src.image_data.point_data.get_array(1).name = 'blurred' anat_src.image_data.point_data.update() anat_blurred = mlab.pipeline.set_active_attribute( anat_src, point_scalars='blurred') anat_blurred.update_pipeline() # anat_blurred = anat_src cortex_surf = mlab.pipeline.set_active_attribute( mlab.pipeline.contour(anat_blurred), point_scalars='scalar') # XXX: the choice in vmin and vmax should be tuned to show the # sulci better cortex = mlab.pipeline.surface(cortex_surf, colormap='copper', opacity=opacity, vmin=skull_threshold, vmax=inner_threshold) cortex.enable_contours = True cortex.contour.filled_contours = True cortex.contour.auto_contours = False cortex.contour.contours = [0, inner_threshold, upper_threshold] #cortex.actor.property.backface_culling = True # XXX: Why do we do 'frontface_culling' to see the front. cortex.actor.property.frontface_culling = True cortex.actor.mapper.interpolate_scalars_before_mapping = True cortex.actor.property.interpolation = 'flat' # Add opacity variation to the colormap cmap = cortex.module_manager.scalar_lut_manager.lut.table.to_array() cmap[128:, -1] = gyri_opacity*255 cmap[:128, -1] = sulci_opacity*255 cortex.module_manager.scalar_lut_manager.lut.table = cmap if outline_color is not None: outline = mlab.pipeline.iso_surface( anat_blurred, contours=[0.4], color=outline_color, opacity=.9) outline.actor.property.backface_culling = True fig.scene.disable_render = disable_render return cortex ################################################################################ # Maps ################################################################################ def plot_map_3d(map, affine, cut_coords=None, anat=None, anat_affine=None, threshold=None, offscreen=False, vmin=None, vmax=None, cmap=None, view=(38.5, 70.5, 300, (-2.7, -12, 9.1)), ): """ Plot a 3D volume rendering view of the activation, with an outline of the brain. Parameters ---------- map : 3D ndarray The activation map, as a 3D image. affine : 4x4 ndarray The affine matrix going from image voxel space to MNI space. cut_coords: 3-tuple of floats, optional The MNI coordinates of a 3D cursor to indicate a feature or a cut, in MNI coordinates and order. anat : 3D ndarray, optional The anatomical image to be used as a background. If None, the MNI152 T1 1mm template is used. If False, no anatomical image is used. anat_affine : 4x4 ndarray, optional The affine matrix going from the anatomical image voxel space to MNI space. This parameter is not used when the default anatomical is used, but it is compulsory when using an explicite anatomical image. 
threshold : float, optional The lower threshold of the positive activation. This parameter is used to threshold the activation map. offscreen: boolean, optional If True, Mayavi attempts to plot offscreen. Will work only with VTK >= 5.2. vmin : float, optional The minimal value, for the colormap vmax : float, optional The maximum value, for the colormap cmap : a callable, or a pylab colormap A callable returning a (n, 4) array for n values between 0 and 1 for the colors. This can be for instance a pylab colormap. Notes ----- If you are using a VTK version below 5.2, there is no way to avoid opening a window during the rendering under Linux. This is necessary to use the graphics card for the rendering. You must maintain this window on top of others and on the screen. """ # Late import to avoid triggering wx imports before needed. try: from mayavi import mlab except ImportError: # Try out old install of Mayavi, with namespace packages from enthought.mayavi import mlab if offscreen: global off_screen_engine if off_screen_engine is None: try: from mayavi.core.off_screen_engine import OffScreenEngine except ImportError: # Try out old install of Mayavi, with namespace packages from enthought.mayavi.core.off_screen_engine import OffScreenEngine off_screen_engine = OffScreenEngine() off_screen_engine.start() fig = mlab.figure('__private_plot_map_3d__', bgcolor=(1, 1, 1), fgcolor=(0, 0, 0), size=(400, 330), engine=off_screen_engine) mlab.clf(figure=fig) else: fig = mlab.gcf() fig = mlab.figure(fig, bgcolor=(1, 1, 1), fgcolor=(0, 0, 0), size=(400, 350)) disable_render = fig.scene.disable_render fig.scene.disable_render = True if threshold is None: threshold = stats.scoreatpercentile( np.abs(map).ravel(), 80) contours = [] lower_map = map[map <= -threshold] if np.any(lower_map): contours.append(lower_map.max()) upper_map = map[map >= threshold] if np.any(upper_map): contours.append(map[map > threshold].min()) ########################################################################### # Display the map using iso-surfaces if len(contours) > 0: map_src = affine_img_src(map, affine) module = mlab.pipeline.iso_surface(map_src, contours=contours, vmin=vmin, vmax=vmax) if hasattr(cmap, '__call__'): # Stick the colormap in mayavi module.module_manager.scalar_lut_manager.lut.table \ = (255*cmap(np.linspace(0, 1, 256))).astype(np.int) else: module = None if not anat is False: plot_anat_3d(anat=anat, anat_affine=anat_affine, scale=1.05, outline_color=(.9, .9, .9), gyri_opacity=.2) ########################################################################### # Draw the cursor if cut_coords is not None: x0, y0, z0 = cut_coords mlab.plot3d((-90, 90), (y0, y0), (z0, z0), color=(.5, .5, .5), tube_radius=0.25) mlab.plot3d((x0, x0), (-126, 91), (z0, z0), color=(.5, .5, .5), tube_radius=0.25) mlab.plot3d((x0, x0), (y0, y0), (-72, 109), color=(.5, .5, .5), tube_radius=0.25) mlab.view(*view) fig.scene.disable_render = disable_render return module def demo_plot_map_3d(): map = np.zeros((182, 218, 182)) # Color a asymetric rectangle around Broca area: x, y, z = -52, 10, 22 x_map, y_map, z_map = coord_transform(x, y, z, mni_sform_inv) map[x_map-5:x_map+5, y_map-3:y_map+3, z_map-10:z_map+10] = 1 plot_map_3d(map, mni_sform, cut_coords=(x, y, z)) nipy-0.3.0/nipy/labs/viz_tools/setup.py000066400000000000000000000007161210344137400201360ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: def configuration(parent_package='',top_path=None): 
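# numpy.distutils configuration hook: declares the 'viz_tools' package and registers its 'test'
# subpackage with the parent build.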
from numpy.distutils.misc_util import Configuration config = Configuration('viz_tools', parent_package, top_path) config.add_subpackage('test') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) nipy-0.3.0/nipy/labs/viz_tools/slicers.py000066400000000000000000000632561210344137400204520ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ The Slicer classes. The main purpose of these classes is to have auto adjust of axes size to the data with different layout of cuts. """ import operator import numpy as np from nipy.utils.skip_test import skip_if_running_nose try: import matplotlib as mpl import pylab as pl from matplotlib import transforms except ImportError: skip_if_running_nose('Could not import matplotlib') # Local imports from .coord_tools import coord_transform, get_bounds, get_mask_bounds, \ find_cut_coords from .edge_detect import _edge_map from . import cm from ..datasets import VolumeImg ################################################################################ # Bugware to have transparency work OK with MPL < .99.1 if mpl.__version__ < '0.99.1': # We wrap the lut as a callable and replace its evalution to put # alpha to zero where the mask is true. This is what is done in # MPL >= .99.1 from matplotlib import colors class CMapProxy(colors.Colormap): def __init__(self, lut): self.__lut = lut def __call__(self, arr, *args, **kwargs): results = self.__lut(arr, *args, **kwargs) if not isinstance(arr, np.ma.MaskedArray): return results else: results[arr.mask, -1] = 0 return results def __getattr__(self, attr): # Dark magic: we are delegating any call to the lut instance # we wrap return self.__dict__.get(attr, getattr(self.__lut, attr)) def _xyz_order(map, affine): img = VolumeImg(map, affine=affine, world_space='mine') img = img.xyz_ordered(resample=True, copy=False) map = img.get_data() affine = img.affine return map, affine ################################################################################ # class CutAxes ################################################################################ class CutAxes(object): """ An MPL axis-like object that displays a cut of 3D volumes """ def __init__(self, ax, direction, coord): """ An MPL axis-like object that displays a cut of 3D volumes Parameters ========== ax: a MPL axes instance The axes in which the plots will be drawn direction: {'x', 'y', 'z'} The directions of the cut coord: float The coordinnate along the direction of the cut """ self.ax = ax self.direction = direction self.coord = coord self._object_bounds = list() def do_cut(self, map, affine): """ Cut the 3D volume into a 2D slice Parameters ========== map: 3D ndarray The 3D volume to cut affine: 4x4 ndarray The affine of the volume """ coords = [0, 0, 0] coords['xyz'.index(self.direction)] = self.coord x_map, y_map, z_map = [int(np.round(c)) for c in coord_transform(coords[0], coords[1], coords[2], np.linalg.inv(affine))] if self.direction == 'y': cut = np.rot90(map[:, y_map, :]) elif self.direction == 'x': cut = np.rot90(map[x_map, :, :]) elif self.direction == 'z': cut = np.rot90(map[:, :, z_map]) else: raise ValueError('Invalid value for direction %s' % self.direction) return cut def draw_cut(self, cut, data_bounds, bounding_box, type='imshow', **kwargs): # kwargs massaging kwargs['origin'] = 'upper' if mpl.__version__ < '0.99.1': cmap = kwargs.get('cmap', 
pl.cm.cmap_d[pl.rcParams['image.cmap']]) kwargs['cmap'] = CMapProxy(cmap) if self.direction == 'y': (xmin, xmax), (_, _), (zmin, zmax) = data_bounds (xmin_, xmax_), (_, _), (zmin_, zmax_) = bounding_box elif self.direction == 'x': (_, _), (xmin, xmax), (zmin, zmax) = data_bounds (_, _), (xmin_, xmax_), (zmin_, zmax_) = bounding_box elif self.direction == 'z': (xmin, xmax), (zmin, zmax), (_, _) = data_bounds (xmin_, xmax_), (zmin_, zmax_), (_, _) = bounding_box else: raise ValueError('Invalid value for direction %s' % self.direction) ax = self.ax getattr(ax, type)(cut, extent=(xmin, xmax, zmin, zmax), **kwargs) self._object_bounds.append((xmin_, xmax_, zmin_, zmax_)) ax.axis(self.get_object_bounds()) def get_object_bounds(self): """ Return the bounds of the objects on this axes. """ if len(self._object_bounds) == 0: # Nothing plotted yet return -.01, .01, -.01, .01 xmins, xmaxs, ymins, ymaxs = np.array(self._object_bounds).T xmax = max(xmaxs.max(), xmins.max()) xmin = min(xmins.min(), xmaxs.min()) ymax = max(ymaxs.max(), ymins.max()) ymin = min(ymins.min(), ymaxs.min()) return xmin, xmax, ymin, ymax def draw_left_right(self, size, bg_color, **kwargs): if self.direction == 'x': return ax = self.ax ax.text(.1, .95, 'L', transform=ax.transAxes, horizontalalignment='left', verticalalignment='top', size=size, bbox=dict(boxstyle="square,pad=0", ec=bg_color, fc=bg_color, alpha=1), **kwargs) ax.text(.9, .95, 'R', transform=ax.transAxes, horizontalalignment='right', verticalalignment='top', size=size, bbox=dict(boxstyle="square,pad=0", ec=bg_color, fc=bg_color, alpha=1), **kwargs) def draw_position(self, size, bg_color, **kwargs): ax = self.ax ax.text(0, 0, '%s=%i' % (self.direction, self.coord), transform=ax.transAxes, horizontalalignment='left', verticalalignment='bottom', size=size, bbox=dict(boxstyle="square,pad=0", ec=bg_color, fc=bg_color, alpha=1), **kwargs) ################################################################################ # class BaseSlicer ################################################################################ class BaseSlicer(object): """ The main purpose of these class is to have auto adjust of axes size to the data with different layout of cuts. """ # This actually encodes the figsize for only one axe _default_figsize = [2.2, 2.6] def __init__(self, cut_coords, axes=None, black_bg=False): """ Create 3 linked axes for plotting orthogonal cuts. Parameters ---------- cut_coords: 3 tuple of ints The cut position, in world space. axes: matplotlib axes object, optional The axes that will be subdivided in 3. black_bg: boolean, optional If True, the background of the figure will be put to black. If you whish to save figures with a black background, you will need to pass "facecolor='k', edgecolor='k'" to pylab's savefig. 
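In practice a concrete subclass is used, either directly, e.g. OrthoSlicer(cut_coords=(0, 0, 0)) as in demo_ortho_slicer below, or indirectly through init_with_figure, which is how plot_map and plot_anat create their slicers.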
""" self._cut_coords = cut_coords if axes is None: axes = pl.axes((0., 0., 1., 1.)) axes.axis('off') self.frame_axes = axes axes.set_zorder(1) bb = axes.get_position() self.rect = (bb.x0, bb.y0, bb.x1, bb.y1) self._black_bg = black_bg self._init_axes() @staticmethod def find_cut_coords(data=None, affine=None, threshold=None, cut_coords=None): # Implement this as a staticmethod or a classmethod when # subclassing raise NotImplementedError @classmethod def init_with_figure(cls, data=None, affine=None, threshold=None, cut_coords=None, figure=None, axes=None, black_bg=False, leave_space=False): cut_coords = cls.find_cut_coords(data, affine, threshold, cut_coords) if isinstance(axes, pl.Axes) and figure is None: figure = axes.figure if not isinstance(figure, pl.Figure): # Make sure that we have a figure figsize = cls._default_figsize[:] # Adjust for the number of axes figsize[0] *= len(cut_coords) facecolor = 'k' if black_bg else 'w' if leave_space: figsize[0] += 3.4 figure = pl.figure(figure, figsize=figsize, facecolor=facecolor) else: if isinstance(axes, pl.Axes): assert axes.figure is figure, ("The axes passed are not " "in the figure") if axes is None: axes = [0., 0., 1., 1.] if leave_space: axes = [0.3, 0, .7, 1.] if operator.isSequenceType(axes): axes = figure.add_axes(axes) # People forget to turn their axis off, or to set the zorder, and # then they cannot see their slicer axes.axis('off') return cls(cut_coords, axes, black_bg) def title(self, text, x=0.01, y=0.99, size=15, color=None, bgcolor=None, alpha=1, **kwargs): """ Write a title to the view. Parameters ---------- text: string The text of the title x: float, optional The horizontal position of the title on the frame in fraction of the frame width. y: float, optional The vertical position of the title on the frame in fraction of the frame height. size: integer, optional The size of the title text. color: matplotlib color specifier, optional The color of the font of the title. bgcolor: matplotlib color specifier, optional The color of the background of the title. alpha: float, optional The alpha value for the background. kwargs: Extra keyword arguments are passed to matplotlib's text function. """ if color is None: color = 'k' if self._black_bg else 'w' if bgcolor is None: bgcolor = 'w' if self._black_bg else 'k' self.frame_axes.text(x, y, text, transform=self.frame_axes.transAxes, horizontalalignment='left', verticalalignment='top', size=size, color=color, bbox=dict(boxstyle="square,pad=.3", ec=bgcolor, fc=bgcolor, alpha=alpha), **kwargs) def plot_map(self, map, affine, threshold=None, **kwargs): """ Plot a 3D map in all the views. Parameters ----------- map: 3D ndarray The 3D map to be plotted. If it is a masked array, only the non-masked part will be plotted. affine: 4x4 ndarray The affine matrix giving the transformation from voxel indices to world space. threshold : a number, None, or 'auto' If None is given, the maps are not thresholded. If a number is given, it is used to threshold the maps: values below the threshold are plotted as transparent. kwargs: Extra keyword arguments are passed to imshow. """ if threshold is not None: if threshold == 0: map = np.ma.masked_equal(map, 0, copy=False) else: map = np.ma.masked_inside(map, -threshold, threshold, copy=False) self._map_show(map, affine, type='imshow', **kwargs) def contour_map(self, map, affine, **kwargs): """ Contour a 3D map in all the views. Parameters ----------- map: 3D ndarray The 3D map to be plotted. 
If it is a masked array, only the non-masked part will be plotted. affine: 4x4 ndarray The affine matrix giving the transformation from voxel indices to world space. kwargs: Extra keyword arguments are passed to contour. """ self._map_show(map, affine, type='contour', **kwargs) def _map_show(self, map, affine, type='imshow', **kwargs): map, affine = _xyz_order(map, affine) data_bounds = get_bounds(map.shape, affine) (xmin, xmax), (ymin, ymax), (zmin, zmax) = data_bounds xmin_, xmax_, ymin_, ymax_, zmin_, zmax_ = \ xmin, xmax, ymin, ymax, zmin, zmax if hasattr(map, 'mask'): not_mask = np.logical_not(map.mask) xmin_, xmax_, ymin_, ymax_, zmin_, zmax_ = \ get_mask_bounds(not_mask, affine) if kwargs.get('vmin') is None and kwargs.get('vmax') is None: # Avoid dealing with masked arrays: they are slow masked_map = np.asarray(map)[not_mask] if kwargs.get('vmin') is None: kwargs['vmin'] = masked_map.min() if kwargs.get('max') is None: kwargs['vmax'] = masked_map.max() else: if not 'vmin' in kwargs: kwargs['vmin'] = map.min() if not 'vmax' in kwargs: kwargs['vmax'] = map.max() bounding_box = (xmin_, xmax_), (ymin_, ymax_), (zmin_, zmax_) # For each ax, cut the data and plot it for cut_ax in self.axes.itervalues(): try: cut = cut_ax.do_cut(map, affine) except IndexError: # We are cutting outside the indices of the data continue cut_ax.draw_cut(cut, data_bounds, bounding_box, type=type, **kwargs) def edge_map(self, map, affine, color='r'): """ Plot the edges of a 3D map in all the views. Parameters ----------- map: 3D ndarray The 3D map to be plotted. If it is a masked array, only the non-masked part will be plotted. affine: 4x4 ndarray The affine matrix giving the transformation from voxel indices to world space. color: matplotlib color: string or (r, g, b) value The color used to display the edge map """ map, affine = _xyz_order(map, affine) kwargs = dict(cmap=cm.alpha_cmap(color=color)) data_bounds = get_bounds(map.shape, affine) # For each ax, cut the data and plot it for cut_ax in self.axes.itervalues(): try: cut = cut_ax.do_cut(map, affine) edge_mask = _edge_map(cut) except IndexError: # We are cutting outside the indices of the data continue cut_ax.draw_cut(edge_mask, data_bounds, data_bounds, type='imshow', **kwargs) def annotate(self, left_right=True, positions=True, size=12, **kwargs): """ Add annotations to the plot. Parameters ---------- left_right: boolean, optional If left_right is True, annotations indicating which side is left and which side is right are drawn. positions: boolean, optional If positions is True, annotations indicating the positions of the cuts are drawn. size: integer, optional The size of the text used. kwargs: Extra keyword arguments are passed to matplotlib's text function. """ kwargs = kwargs.copy() if not 'color' in kwargs: if self._black_bg: kwargs['color'] = 'w' else: kwargs['color'] = 'k' bg_color = ('k' if self._black_bg else 'w') if left_right: for cut_ax in self.axes.values(): cut_ax.draw_left_right(size=size, bg_color=bg_color, **kwargs) if positions: for cut_ax in self.axes.values(): cut_ax.draw_position(size=size, bg_color=bg_color, **kwargs) ################################################################################ # class OrthoSlicer ################################################################################ class OrthoSlicer(BaseSlicer): """ A class to create 3 linked axes for plotting orthogonal cuts of 3D maps. Attributes ---------- axes: dictionnary of axes The 3 axes used to plot each view. 
frame_axes: axes The axes framing the whole set of views. Notes ----- The extent of the different axes are adjusted to fit the data best in the viewing area. """ @staticmethod def find_cut_coords(data=None, affine=None, threshold=None, cut_coords=None): if cut_coords is None: if data is None or data is False: cut_coords = (0, 0, 0) else: x_map, y_map, z_map = find_cut_coords(data, activation_threshold=threshold) cut_coords = coord_transform(x_map, y_map, z_map, affine) return cut_coords def _init_axes(self): x0, y0, x1, y1 = self.rect # Create our axes: self.axes = dict() for index, direction in enumerate(('y', 'x', 'z')): ax = pl.axes([0.3*index*(x1-x0) + x0, y0, .3*(x1-x0), y1-y0]) ax.axis('off') coord = self._cut_coords['xyz'.index(direction)] cut_ax = CutAxes(ax, direction, coord) self.axes[direction] = cut_ax ax.set_axes_locator(self._locator) def _locator(self, axes, renderer): """ The locator function used by matplotlib to position axes. Here we put the logic used to adjust the size of the axes. """ x0, y0, x1, y1 = self.rect width_dict = dict() cut_ax_dict = self.axes x_ax = cut_ax_dict['x'] y_ax = cut_ax_dict['y'] z_ax = cut_ax_dict['z'] for cut_ax in cut_ax_dict.itervalues(): bounds = cut_ax.get_object_bounds() if not bounds: # This happens if the call to _map_show was not # succesful. As it happens asyncroniously (during a # refresh of the figure) we capture the problem and # ignore it: it only adds a non informative traceback bounds = [0, 1, 0, 1] xmin, xmax, ymin, ymax = bounds width_dict[cut_ax.ax] = (xmax - xmin) total_width = float(sum(width_dict.values())) for ax, width in width_dict.iteritems(): width_dict[ax] = width/total_width*(x1 -x0) left_dict = dict() left_dict[y_ax.ax] = x0 left_dict[x_ax.ax] = x0 + width_dict[y_ax.ax] left_dict[z_ax.ax] = x0 + width_dict[x_ax.ax] + width_dict[y_ax.ax] return transforms.Bbox([[left_dict[axes], y0], [left_dict[axes] + width_dict[axes], y1]]) def draw_cross(self, cut_coords=None, **kwargs): """ Draw a crossbar on the plot to show where the cut is performed. Parameters ---------- cut_coords: 3-tuple of floats, optional The position of the cross to draw. If none is passed, the ortho_slicer's cut coordinnates are used. kwargs: Extra keyword arguments are passed to axhline """ if cut_coords is None: cut_coords = self._cut_coords x, y, z = cut_coords kwargs = kwargs.copy() if not 'color' in kwargs: if self._black_bg: kwargs['color'] = '.8' else: kwargs['color'] = 'k' ax = self.axes['y'].ax ax.axvline(x, ymin=.05, ymax=.95, **kwargs) ax.axhline(z, **kwargs) ax = self.axes['x'].ax ax.axvline(y, ymin=.05, ymax=.95, **kwargs) ax.axhline(z, xmax=.95, **kwargs) ax = self.axes['z'].ax ax.axvline(x, ymin=.05, ymax=.95, **kwargs) ax.axhline(y, **kwargs) def demo_ortho_slicer(): """ A small demo of the OrthoSlicer functionality. """ pl.clf() oslicer = OrthoSlicer(cut_coords=(0, 0, 0)) from anat_cache import _AnatCache map, affine, _ = _AnatCache.get_anat() oslicer.plot_map(map, affine, cmap=pl.cm.gray) return oslicer ################################################################################ # class BaseStackedSlicer ################################################################################ class BaseStackedSlicer(BaseSlicer): """ A class to create linked axes for plotting stacked cuts of 3D maps. Attributes ---------- axes: dictionnary of axes The axes used to plot each view. frame_axes: axes The axes framing the whole set of views. Notes ----- The extent of the different axes are adjusted to fit the data best in the viewing area. 
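Concrete slicers are obtained by subclassing and setting the _direction
attribute; see the XSlicer, YSlicer and ZSlicer classes below.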
""" @classmethod def find_cut_coords(cls, data=None, affine=None, threshold=None, cut_coords=None): if cut_coords is None: if data is None or data is False: bounds = ((-40, 40), (-30, 30), (-30, 75)) else: if hasattr(data, 'mask'): mask = np.logical_not(data.mask) else: # The mask will be anything that is fairly different # from the values in the corners edge_value = float(data[0, 0, 0] + data[0, -1, 0] + data[-1, 0, 0] + data[0, 0, -1] + data[-1, -1, 0] + data[-1, 0, -1] + data[0, -1, -1] + data[-1, -1, -1] ) edge_value /= 6 mask = np.abs(data - edge_value) > .005*data.ptp() xmin, xmax, ymin, ymax, zmin, zmax = \ get_mask_bounds(mask, affine) bounds = (xmin, xmax), (ymin, ymax), (zmin, zmax) lower, upper = bounds['xyz'.index(cls._direction)] cut_coords = np.linspace(lower, upper, 10).tolist() return cut_coords def _init_axes(self): x0, y0, x1, y1 = self.rect # Create our axes: self.axes = dict() fraction = 1./len(self._cut_coords) for index, coord in enumerate(self._cut_coords): coord = float(coord) ax = pl.axes([fraction*index*(x1-x0) + x0, y0, fraction*(x1-x0), y1-y0]) ax.axis('off') cut_ax = CutAxes(ax, self._direction, coord) self.axes[coord] = cut_ax ax.set_axes_locator(self._locator) def _locator(self, axes, renderer): """ The locator function used by matplotlib to position axes. Here we put the logic used to adjust the size of the axes. """ x0, y0, x1, y1 = self.rect width_dict = dict() cut_ax_dict = self.axes for cut_ax in cut_ax_dict.itervalues(): bounds = cut_ax.get_object_bounds() if not bounds: # This happens if the call to _map_show was not # succesful. As it happens asyncroniously (during a # refresh of the figure) we capture the problem and # ignore it: it only adds a non informative traceback bounds = [0, 1, 0, 1] xmin, xmax, ymin, ymax = bounds width_dict[cut_ax.ax] = (xmax - xmin) total_width = float(sum(width_dict.values())) for ax, width in width_dict.iteritems(): width_dict[ax] = width/total_width*(x1 -x0) left_dict = dict() left = float(x0) for coord, cut_ax in sorted(cut_ax_dict.items()): left_dict[cut_ax.ax] = left this_width = width_dict[cut_ax.ax] left += this_width return transforms.Bbox([[left_dict[axes], y0], [left_dict[axes] + width_dict[axes], y1]]) def draw_cross(self, cut_coords=None, **kwargs): """ Draw a crossbar on the plot to show where the cut is performed. Parameters ---------- cut_coords: 3-tuple of floats, optional The position of the cross to draw. If none is passed, the ortho_slicer's cut coordinnates are used. kwargs: Extra keyword arguments are passed to axhline """ return class XSlicer(BaseStackedSlicer): _direction = 'x' _default_figsize = [2.2, 2.3] class YSlicer(BaseStackedSlicer): _direction = 'y' _default_figsize = [2.6, 2.3] class ZSlicer(BaseStackedSlicer): _direction = 'z' SLICERS = dict(ortho=OrthoSlicer, x=XSlicer, y=YSlicer, z=ZSlicer) nipy-0.3.0/nipy/labs/viz_tools/test/000077500000000000000000000000001210344137400173775ustar00rootroot00000000000000nipy-0.3.0/nipy/labs/viz_tools/test/__init__.py000066400000000000000000000000001210344137400214760ustar00rootroot00000000000000nipy-0.3.0/nipy/labs/viz_tools/test/test_activation_maps.py000066400000000000000000000035641210344137400242010ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import tempfile import numpy as np from nose import SkipTest try: import matplotlib as mp # Make really sure that we don't try to open an Xserver connection. 
mp.use('svg', warn=False) import pylab as pl pl.switch_backend('svg') except ImportError: raise SkipTest('Could not import matplotlib') from ..activation_maps import demo_plot_map, plot_anat, plot_map from ..anat_cache import mni_sform, _AnatCache def test_demo_plot_map(): # This is only a smoke test mp.use('svg', warn=False) import pylab as pl pl.switch_backend('svg') demo_plot_map() # Test the black background code path demo_plot_map(black_bg=True) def test_plot_anat(): # This is only a smoke test mp.use('svg', warn=False) import pylab as pl pl.switch_backend('svg') data = np.zeros((20, 20, 20)) data[3:-3, 3:-3, 3:-3] = 1 ortho_slicer = plot_anat(data, mni_sform, dim=True) ortho_slicer = plot_anat(data, mni_sform, cut_coords=(80, -120, -60)) # Saving forces a draw, and thus smoke-tests the axes locators pl.savefig(tempfile.TemporaryFile()) ortho_slicer.edge_map(data, mni_sform, color='c') # Test saving with empty plot z_slicer = plot_anat(anat=False, slicer='z') pl.savefig(tempfile.TemporaryFile()) z_slicer = plot_anat(slicer='z') pl.savefig(tempfile.TemporaryFile()) z_slicer.edge_map(data, mni_sform, color='c') # Smoke test coordinate finder, with and without mask plot_map(np.ma.masked_equal(data, 0), mni_sform, slicer='x') plot_map(data, mni_sform, slicer='y') def test_anat_cache(): # A smoke test, that can work only if the templates are installed try: _AnatCache.get_blurred() except OSError: "The templates are not there" pass nipy-0.3.0/nipy/labs/viz_tools/test/test_cm.py000066400000000000000000000017671210344137400214220ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Smoke testing the cm module """ from nose import SkipTest try: import matplotlib as mp # Make really sure that we don't try to open an Xserver connection. 
mp.use('svg', warn=False) import pylab as pl pl.switch_backend('svg') except ImportError: raise SkipTest('Could not import matplotlib') from ..cm import dim_cmap, replace_inside def test_dim_cmap(): # This is only a smoke test mp.use('svg', warn=False) import pylab as pl dim_cmap(pl.cm.jet) def test_replace_inside(): # This is only a smoke test mp.use('svg', warn=False) import pylab as pl pl.switch_backend('svg') replace_inside(pl.cm.jet, pl.cm.hsv, .2, .8) # We also test with gnuplot, which is defined using function if hasattr(pl.cm, 'gnuplot'): # gnuplot is only in recent version of MPL replace_inside(pl.cm.gnuplot, pl.cm.gnuplot2, .2, .8) nipy-0.3.0/nipy/labs/viz_tools/test/test_coord_tools.py000066400000000000000000000021431210344137400233360ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import numpy as np from ..coord_tools import coord_transform, find_cut_coords def test_coord_transform_trivial(): sform = np.eye(4) x = np.random.random((10,)) y = np.random.random((10,)) z = np.random.random((10,)) x_, y_, z_ = coord_transform(x, y, z, sform) np.testing.assert_array_equal(x, x_) np.testing.assert_array_equal(y, y_) np.testing.assert_array_equal(z, z_) sform[:, -1] = 1 x_, y_, z_ = coord_transform(x, y, z, sform) np.testing.assert_array_equal(x+1, x_) np.testing.assert_array_equal(y+1, y_) np.testing.assert_array_equal(z+1, z_) def test_find_cut_coords(): map = np.zeros((100, 100, 100)) x_map, y_map, z_map = 50, 10, 40 map[x_map-30:x_map+30, y_map-3:y_map+3, z_map-10:z_map+10] = 1 x, y, z = find_cut_coords(map, mask=np.ones(map.shape, np.bool)) np.testing.assert_array_equal( (int(round(x)), int(round(y)), int(round(z))), (x_map, y_map, z_map)) nipy-0.3.0/nipy/labs/viz_tools/test/test_edge_detect.py000066400000000000000000000011461210344137400232460ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import nose import numpy as np from ..edge_detect import _edge_detect, _fast_abs_percentile ################################################################################ def test_fast_abs_percentile(): data = np.arange(1, 100) for p in range(10, 100, 10): yield nose.tools.assert_equal, _fast_abs_percentile(data, p-1), p def test_edge_detect(): img = np.zeros((10, 10)) img[:5] = 1 _, edge_mask = _edge_detect(img) np.testing.assert_almost_equal(img[4], 1) nipy-0.3.0/nipy/labs/viz_tools/test/test_slicers.py000066400000000000000000000016371210344137400224630ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import nose try: import matplotlib as mp # Make really sure that we don't try to open an Xserver connection. 
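# Both mp.use and pl.switch_backend are called here, presumably because
# matplotlib.use has no effect once pylab has already been imported (e.g. by
# an earlier test module), whereas switch_backend changes the backend at
# runtime.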
mp.use('svg', warn=False) import pylab as pl pl.switch_backend('svg') except ImportError: raise nose.SkipTest('Could not import matplotlib') from ..slicers import demo_ortho_slicer from ..anat_cache import find_mni_template ################################################################################ # Some smoke testing for graphics-related code def test_demo_ortho_slicer(): # This is only a smoke test # conditioned on presence of MNI templated if not find_mni_template(): raise nose.SkipTest("MNI Template is absent for the smoke test") mp.use('svg', warn=False) import pylab as pl pl.switch_backend('svg') demo_ortho_slicer() nipy-0.3.0/nipy/modalities/000077500000000000000000000000001210344137400156015ustar00rootroot00000000000000nipy-0.3.0/nipy/modalities/__init__.py000066400000000000000000000004301210344137400177070ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Package containing modality-specific classes. """ __docformat__ = 'restructuredtext' from nipy.testing import Tester test = Tester().test bench = Tester().bench nipy-0.3.0/nipy/modalities/fmri/000077500000000000000000000000001210344137400165365ustar00rootroot00000000000000nipy-0.3.0/nipy/modalities/fmri/__init__.py000066400000000000000000000004101210344137400206420ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ TODO """ __docformat__ = 'restructuredtext' from . import fmristat from nipy.testing import Tester test = Tester().test bench = Tester().bench nipy-0.3.0/nipy/modalities/fmri/api.py000066400000000000000000000002431210344137400176600ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from fmri import FmriImageList, axis0_generator nipy-0.3.0/nipy/modalities/fmri/design.py000066400000000000000000000202451210344137400203640ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Convenience functions for specifying a design in the GLM """ import numpy as np from nipy.algorithms.statistics.utils import combinations from nipy.algorithms.statistics.formula import formulae from nipy.algorithms.statistics.formula.formulae import ( Formula, Factor, Term, make_recarray) from .utils import events, fourier_basis as fourier_basis_sym from .hrf import glover def fourier_basis(t, freq): """ Create a design matrix with columns given by the Fourier basis with a given set of frequencies. Parameters ---------- t : np.ndarray An array of np.float values at which to evaluate the design. Common examples would be the acquisition times of an fMRI image. freq : sequence of float Frequencies for the terms in the Fourier basis. Returns ------- X : np.ndarray Examples -------- >>> t = np.linspace(0,50,101) >>> drift = fourier_basis(t, np.array([4,6,8])) >>> drift.shape (101, 6) """ tval = make_recarray(t, ['t']) f = fourier_basis_sym(freq) return f.design(tval, return_float=True) def natural_spline(tvals, knots=None, order=3, intercept=True): """ Create a design matrix with columns given by a natural spline of a given order and a specified set of knots. Parameters ---------- tvals : np.array Time values knots : None or sequence, optional Sequence of float. Default None (same as empty list) order : int, optional Order of the spline. 
Defaults to a cubic (==3) intercept : bool, optional If True, include a constant function in the natural spline. Default is False Returns ------- X : np.ndarray Examples -------- >>> tvals = np.linspace(0,50,101) >>> drift = natural_spline(tvals, knots=[10,20,30,40]) >>> drift.shape (101, 8) """ tvals = make_recarray(tvals, ['t']) t = Term('t') f = formulae.natural_spline(t, knots=knots, order=order, intercept=intercept) return f.design(tvals, return_float=True) def event_design(event_spec, t, order=2, hrfs=[glover]): """ Create a design matrix for a GLM analysis based on an event specification, evaluating it a sequence of time values. Each column in the design matrix will be convolved with each HRF in hrfs. Parameters ---------- event_spec : np.recarray A recarray having at least a field named 'time' signifying the event time, and all other fields will be treated as factors in an ANOVA-type model. t : np.ndarray An array of np.float values at which to evaluate the design. Common examples would be the acquisition times of an fMRI image. order : int The highest order interaction to be considered in constructing the contrast matrices. hrfs : seq A sequence of (symbolic) HRF that will be convolved with each event. If empty, glover is used. Returns ------- X : np.ndarray The design matrix with X.shape[0] == t.shape[0]. The number of columns will depend on the other fields of event_spec. contrasts : dict Dictionary of contrasts that is expected to be of interest from the event specification. For each interaction / effect up to a given order will be returned. Also, a contrast is generated for each interaction / effect for each HRF specified in hrfs. """ fields = list(event_spec.dtype.names) if 'time' not in fields: raise ValueError('expecting a field called "time"') fields.pop(fields.index('time')) e_factors = [Factor(n, np.unique(event_spec[n])) for n in fields] e_formula = np.product(e_factors) e_contrasts = {} if len(e_factors) > 1: for i in range(1, order+1): for comb in combinations(zip(fields, e_factors), i): names = [c[0] for c in comb] fs = [c[1].main_effect for c in comb] e_contrasts[":".join(names)] = np.product(fs).design(event_spec) e_contrasts['constant'] = formulae.I.design(event_spec) # Design and contrasts in event space # TODO: make it so I don't have to call design twice here # to get both the contrasts and the e_X matrix as a recarray e_X = e_formula.design(event_spec) e_dtype = e_formula.dtype # Now construct the design in time space t_terms = [] t_contrasts = {} for l, h in enumerate(hrfs): t_terms += [events(event_spec['time'], \ amplitudes=e_X[n], f=h) for i, n in enumerate(e_dtype.names)] for n, c in e_contrasts.items(): t_contrasts["%s_%d" % (n, l)] = Formula([ \ events(event_spec['time'], amplitudes=c[nn], f=h) for i, nn in enumerate(c.dtype.names)]) t_formula = Formula(t_terms) tval = make_recarray(t, ['t']) X_t, c_t = t_formula.design(tval, contrasts=t_contrasts) return X_t, c_t def stack2designs(old_X, new_X, old_contrasts={}, new_contrasts={}): """ Add some columns to a design matrix that has contrasts matrices already specified, adding some possibly new contrasts as well. This basically performs an np.hstack of old_X, new_X and makes sure the contrast matrices are dealt with accordingly. If two contrasts have the same name, an exception is raised. 
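stack_designs (defined below) applies this function pairwise to stack an
arbitrary sequence of design / contrast pairs.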
Parameters ---------- old_X : np.ndarray A design matrix new_X : np.ndarray A second design matrix to be stacked with old_X old_contrast : dict Dictionary of contrasts in the old_X column space new_contrasts : dict Dictionary of contrasts in the new_X column space Returns ------- X : np.ndarray A new design matrix: np.hstack([old_X, new_X]) contrasts : dict The new contrast matrices reflecting changes to the columns. """ contrasts = {} if old_X.ndim == 1: old_X = old_X.reshape((old_X.shape[0], 1)) if new_X.ndim == 1: new_X = new_X.reshape((new_X.shape[0], 1)) X = np.hstack([old_X, new_X]) if set(old_contrasts.keys()).intersection(new_contrasts.keys()) != set([]): raise ValueError('old and new contrasts must have different names') for n, c in old_contrasts.items(): if c.ndim > 1: cm = np.zeros((c.shape[0], X.shape[1])) cm[:,:old_X.shape[1]] = c else: cm = np.zeros(X.shape[1]) cm[:old_X.shape[1]] = c contrasts[n] = cm for n, c in new_contrasts.items(): if c.ndim > 1: cm = np.zeros((c.shape[0], X.shape[1])) cm[:,old_X.shape[1]:] = c else: cm = np.zeros(X.shape[1]) cm[old_X.shape[1]:] = c contrasts[n] = cm return X, contrasts def stack_contrasts(contrasts, name, keys): """ Create a new F-contrast matrix called 'name' based on a sequence of keys. The contrast is added to contrasts, in-place. Parameters ---------- contrasts : dict Dictionary of contrast matrices name : str Name of new contrast. Should not already be a key of contrasts. keys : sequence of str Keys of contrasts that are to be stacked. Returns ------- None """ if name in contrasts.keys(): raise ValueError('contrast "%s" already exists' % name) contrasts[name] = np.vstack([contrasts[k] for k in keys]) def stack_designs(*pairs): """ Stack a sequence of design / contrast dictionary pairs. Uses multiple calls to stack2designs Parameters ---------- pairs : sequence filled with (np.ndarray, dict) or np.ndarray Returns ------- X : np.ndarray new design matrix: np.hstack([old_X, new_X]) contrasts : dict The new contrast matrices reflecting changes to the columns. """ p = pairs[0] if len(p) == 1: X = p[0]; contrasts={} else: X, contrasts = p for q in pairs[1:]: if len(q) == 1: new_X = q[0]; new_con = {} else: new_X, new_con = q X, contrasts = stack2designs(X, new_X, contrasts, new_con) return X, contrasts nipy-0.3.0/nipy/modalities/fmri/design_matrix.py000066400000000000000000000351751210344137400217600ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from __future__ import with_statement """ This module implements fMRI Design Matrix creation. The DesignMatrix object is just a container that represents the design matrix. Computations of the different parts of the design matrix are confined to the make_dmtx() function, that instantiates the DesignMatrix object. All the remainder are just ancillary functions. Design matrices contain three different types of regressors: 1. Task-related regressors, that result from the convolution of the experimental paradigm regressors with hemodynamic models 2. User-specified regressors, that represent information available on the data, e.g. motion parameters, physiological data resampled at the acquisition rate, or sinusoidal regressors that model the signal at a frequency of interest. 3. Drift regressors, that represent low_frequency phenomena of no interest in the data; they need to be included to reduce variance estimates. 
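In typical use, a design matrix is built with make_dmtx() from an array of
frame times and an optional paradigm, e.g.
``make_dmtx(frametimes, paradigm, hrf_model='canonical', drift_model='cosine',
hfcut=128)``, written to or read from disk with DesignMatrix.write_csv() /
dmtx_from_csv(), and visualized with DesignMatrix.show(); see the docstrings
below for the full set of options.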
Author: Bertrand Thirion, 2009-2011 """ import numpy as np from warnings import warn from ...utils.compat3 import open4csv from .hemodynamic_models import compute_regressor, _orthogonalize ###################################################################### # Ancillary functions ###################################################################### def _poly_drift(order, frametimes): """Create a polynomial drift matrix Parameters ---------- order, int, number of polynomials in the drift model tmax, float maximal time value used in the sequence this is used to normalize properly the columns Returns ------- pol, array of shape(n_scans, order + 1) all the polynomial drift plus a constant regressor """ order = int(order) pol = np.zeros((np.size(frametimes), order + 1)) tmax = float(frametimes.max()) for k in range(order + 1): pol[:, k] = (frametimes / tmax) ** k pol = _orthogonalize(pol) pol = np.hstack((pol[:, 1:], pol[:, :1])) return pol def _cosine_drift(hfcut, frametimes): """Create a cosine drift matrix Parameters ---------- hfcut, float , cut frequency of the low-pass filter frametimes: array of shape(nscans): the sampling time Returns ------- cdrift: array of shape(n_scans, n_drifts) polynomial drifts plus a constant regressor """ tmax = float(frametimes.max()) tsteps = len(frametimes) order = int(np.floor(2 * float(tmax) / float(hfcut)) + 1) cdrift = np.zeros((tsteps, order)) for k in range(1, order): cdrift[:, k - 1] = np.sqrt(2.0 / tmax) * np.cos( np.pi * (frametimes / tmax + 0.5 / tsteps) * k) cdrift[:, order - 1] = np.ones_like(frametimes) return cdrift def _blank_drift(frametimes): """ Create the blank drift matrix Returns ------- np.ones_like(frametimes) """ return np.reshape(np.ones_like(frametimes), (np.size(frametimes), 1)) def _make_drift(drift_model, frametimes, order=1, hfcut=128.): """Create the drift matrix Parameters ---------- drift_model: string, to be chosen among 'polynomial', 'cosine', 'blank' that specifies the desired drift model frametimes: array of shape(n_scans), list of values representing the desired TRs order: int, optional, order of the drift model (in case it is polynomial) hfcut: float, optional, frequency cut in case of a cosine model Returns ------- drift: array of shape(n_scans, n_drifts), the drift matrix names: list of length(ndrifts), the associated names """ drift_model = drift_model.lower() # for robust comparisons if drift_model == 'polynomial': drift = _poly_drift(order, frametimes) elif drift_model == 'cosine': drift = _cosine_drift(hfcut, frametimes) elif drift_model == 'blank': drift = _blank_drift(frametimes) else: raise NotImplementedError("Unknown drift model %r" % (drift_model)) names = [] for k in range(drift.shape[1] - 1): names.append('drift_%d' % (k + 1)) names.append('constant') return drift, names def _convolve_regressors(paradigm, hrf_model, frametimes, fir_delays=[0]): """ Creation of a matrix that comprises the convolution of the conditions onset with a certain hrf model Parameters ---------- paradigm: paradigm instance hrf_model: string that can be 'canonical', 'canonical with derivative' or 'fir' that specifies the hemodynamic response function frametimes: array of shape(n_scans) the targeted timing for the design matrix fir_delays=[0], optional, array of shape(nb_onsets) or list in case of FIR design, yields the array of delays used in the FIR model Returns ------- rmatrix: array of shape(n_scans, n_regressors), contains the convolved regressors associated with the experimental condition names: list of strings, the condition 
names, that depend on the hrf model used if 'canonical' then this is identical to the input names if 'canonical with derivative', then two names are produced for input name 'name': 'name' and 'name_derivative' """ hnames = [] rmatrix = None if hrf_model == 'fir': oversampling = 1 else: oversampling = 16 for nc in np.unique(paradigm.con_id): onsets = paradigm.onset[paradigm.con_id == nc] nos = np.size(onsets) if paradigm.amplitude is not None: values = paradigm.amplitude[paradigm.con_id == nc] else: values = np.ones(nos) if nos < 1: continue if paradigm.type == 'event': duration = np.zeros_like(onsets) else: duration = paradigm.duration[paradigm.con_id == nc] exp_condition = (onsets, duration, values) reg, names = compute_regressor(exp_condition, hrf_model, frametimes, con_id=nc, fir_delays=fir_delays, oversampling=oversampling) hnames += names if rmatrix == None: rmatrix = reg else: rmatrix = np.hstack((rmatrix, reg)) return rmatrix, hnames def _full_rank(X, cmax=1e15): """ This function possibly adds a scalar matrix to X to guarantee that the condition number is smaller than a given threshold. Parameters ---------- X: array of shape(nrows, ncols) cmax=1.e-15, float tolerance for condition number Returns ------- X: array of shape(nrows, ncols) after regularization cmax=1.e-15, float tolerance for condition number """ U, s, V = np.linalg.svd(X, 0) smax, smin = s.max(), s.min() c = smax / smin if c < cmax: return X, c warn('Matrix is singular at working precision, regularizing...') lda = (smax - cmax * smin) / (cmax - 1) s = s + lda X = np.dot(U, np.dot(np.diag(s), V)) return X, cmax ###################################################################### # Design matrix ###################################################################### class DesignMatrix(): """ This is a container for a light-weight class for design matrices This class is only used to make IO and visualization Class members ------------- matrix: array of shape(n_scans, n_regressors), the numerical specification of the matrix names: list of len (n_regressors); the names associated with the columns frametimes: array of shape(n_scans), optional, the occurrence time of the matrix rows """ def __init__(self, matrix, names, frametimes=None): """ """ matrix_ = np.atleast_2d(matrix) if matrix_.shape[1] != len(names): raise ValueError( 'The number of names should equate the number of columns') if frametimes is not None: if frametimes.size != matrix.shape[0]: raise ValueError( 'The number %d of frametimes is different from the' + \ 'number %d of rows' % (frametimes.size, matrix.shape[0])) self.frametimes = frametimes self.matrix = matrix_ self.names = names def write_csv(self, path): """ write self.matrix as a csv file with appropriate column names Parameters ---------- path: string, path of the resulting csv file Notes ----- The frametimes are not written """ import csv with open4csv(path, "w") as fid: writer = csv.writer(fid) writer.writerow(self.names) writer.writerows(self.matrix) def show(self, rescale=True, ax=None): """Visualization of a design matrix Parameters ---------- rescale: bool, optional rescale columns magnitude for visualization or not ax: axis handle, optional Handle to axis onto which we will draw design matrix Returns ------- ax: axis handle """ import matplotlib.pyplot as plt # normalize the values per column for better visualization x = self.matrix.copy() if rescale: x = x / np.sqrt(np.sum(x ** 2, 0)) if ax is None: plt.figure() ax = plt.subplot(1, 1, 1) ax.imshow(x, interpolation='Nearest', aspect='auto') 
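# Label the axes; when column names are available they are used as x tick
# labels, rotated so that long regressor names remain readable.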
ax.set_label('conditions') ax.set_ylabel('scan number') if self.names is not None: ax.set_xticks(range(len(self.names))) ax.set_xticklabels(self.names, rotation=60, ha='right') return ax def make_dmtx(frametimes, paradigm=None, hrf_model='canonical', drift_model='cosine', hfcut=128, drift_order=1, fir_delays=[0], add_regs=None, add_reg_names=None): """ Generate a design matrix from the input parameters Parameters ---------- frametimes: array of shape(nbframes), the timing of the scans paradigm: Paradigm instance, optional description of the experimental paradigm hrf_model: string, optional, that specifies the hemodynamic response function it can be 'canonical', 'canonical with derivative' or 'fir' drift_model: string, optional specifies the desired drift model, to be chosen among 'polynomial', 'cosine', 'blank' hfcut: float, optional cut frequency of the low-pass filter drift_order: int, optional order of the drift model (in case it is polynomial) fir_delays: array of shape(nb_onsets) or list, optional, in case of FIR design, yields the array of delays used in the FIR model add_regs: array of shape(nbframes, naddreg), optional additional user-supplied regressors add_reg_names: list of (naddreg) regressor names, optional if None, while naddreg>0, these will be termed 'reg_%i',i=0..naddreg-1 Returns ------- DesignMatrix instance """ # check arguments # check that additional regressor specification is correct n_add_regs = 0 if add_regs is not None: if add_regs.shape[0] == np.size(add_regs): add_regs = np.reshape(add_regs, (np.size(add_regs), 1)) n_add_regs = add_regs.shape[1] assert add_regs.shape[0] == np.size(frametimes), \ ValueError( 'incorrect specification of additional regressors: ' 'length of regressors provided: %s, number of ' 'time-frames: %s' % (add_regs.shape[0], np.size(frametimes))) # check that additional regressor names are well specified if add_reg_names == None: add_reg_names = ['reg%d' % k for k in range(n_add_regs)] elif len(add_reg_names) != n_add_regs: raise ValueError( 'Incorrect number of additional regressor names was provided' '(%s provided, %s expected) % (len(add_reg_names),' 'n_add_regs)') # computation of the matrix names = [] matrix = np.zeros((frametimes.size, 0)) # step 1: paradigm-related regressors if paradigm is not None: # create the condition-related regressors matrix, names = _convolve_regressors( paradigm, hrf_model.lower(), frametimes, fir_delays) # step 2: additional regressors if add_regs is not None: # add user-supplied regressors and corresponding names matrix = np.hstack((matrix, add_regs)) names += add_reg_names # setp 3: drifts drift, dnames = _make_drift(drift_model.lower(), frametimes, drift_order, hfcut) matrix = np.hstack((matrix, drift)) names += dnames # step 4: Force the design matrix to be full rank at working precision matrix, _ = _full_rank(matrix) # complete the names with the drift terms return DesignMatrix(matrix, names, frametimes) def dmtx_from_csv(path, frametimes=None): """ Return a DesignMatrix instance from a csv file Parameters ---------- path: string, path of the .csv file Returns ------- A DesignMatrix instance """ import csv with open4csv(path, 'r') as csvfile: dialect = csv.Sniffer().sniff(csvfile.read()) csvfile.seek(0) reader = csv.reader(csvfile, dialect) boolfirst = True design = [] for row in reader: if boolfirst: names = [row[j] for j in range(len(row))] boolfirst = False else: design.append([row[j] for j in range(len(row))]) x = np.array([[float(t) for t in xr] for xr in design]) return(DesignMatrix(x, names, 
frametimes)) def dmtx_light(frametimes, paradigm=None, hrf_model='canonical', drift_model='cosine', hfcut=128, drift_order=1, fir_delays=[0], add_regs=None, add_reg_names=None, path=None): """Make a design matrix while avoiding framework Parameters ---------- see make_dmtx, plus path: string, optional: a path to write the output Returns ------- dmtx array of shape(nreg, nbframes): the sampled design matrix names list of strings of len (nreg) the names of the columns of the design matrix """ dmtx_ = make_dmtx(frametimes, paradigm, hrf_model, drift_model, hfcut, drift_order, fir_delays, add_regs, add_reg_names) if path is not None: dmtx_.write_csv(path) return dmtx_.matrix, dmtx_.names nipy-0.3.0/nipy/modalities/fmri/experimental_paradigm.py000066400000000000000000000203431210344137400234530ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from __future__ import with_statement """ This module implements an object to deal with experimental paradigms. In fMRI data analysis, there are two main types of experimental paradigms: block and event-related paradigms. They correspond to 2 classes EventRelatedParadigm and BlockParadigm. Both are implemented here, together with functions to write paradigms to csv files. Notes ----- Although the Paradigm object have no notion of session or acquisitions (they are assumed to correspond to a sequential acquisition, called 'session' in SPM jargon), the .csv file used to represent paradigm may be multi-session, so it is assumed that the first column of a file yielding a paradigm is in fact a session index Author: Bertrand Thirion, 2009-2011 """ import numpy as np from ...utils.compat3 import open4csv ########################################################## # Paradigm handling ########################################################## class Paradigm(object): """ Simple class to handle the experimental paradigm in one session """ def __init__(self, con_id=None, onset=None, amplitude=None): """ Parameters ---------- con_id: array of shape (n_events), type = string, optional identifier of the events onset: array of shape (n_events), type = float, optional, onset time (in s.) 
of the events amplitude: array of shape (n_events), type = float, optional, amplitude of the events (if applicable) """ self.con_id = con_id self.onset = onset self.amplitude = amplitude self.n_event = 0 if con_id is not None: self.n_events = len(con_id) try: # this is only for backward compatibility: #if con_id were integers, they become a string self.con_id = np.array(['c' + str(int(float(c))) for c in con_id]) except: self.con_id = np.ravel(np.array(con_id)).astype('str') if onset is not None: if len(onset) != self.n_events: raise ValueError( 'inconsistent definition of ids and onsets') self.onset = np.ravel(np.array(onset)).astype(np.float) if amplitude is not None: if len(amplitude) != self.n_events: raise ValueError('inconsistent definition of amplitude') self.amplitude = np.ravel(np.array(amplitude)) self.type = 'event' self.n_conditions = len(np.unique(self.con_id)) def write_to_csv(self, csv_file, session='0'): """ Write the paradigm to a csv file Parameters ---------- csv_file: string, path of the csv file session: string, optional, session identifier """ import csv with open4csv(csv_file, "w") as fid: writer = csv.writer(fid, delimiter=' ') n_pres = np.size(self.con_id) sess = np.repeat(session, n_pres) pdata = np.vstack((sess, self.con_id, self.onset)).T # add the duration information if self.type == 'event': duration = np.zeros(np.size(self.con_id)) else: duration = self.duration pdata = np.hstack((pdata, np.reshape(duration, (n_pres, 1)))) # add the amplitude information if self.amplitude is not None: amplitude = np.reshape(self.amplitude, (n_pres, 1)) pdata = np.hstack((pdata, amplitude)) # write pdata for row in pdata: writer.writerow(row) class EventRelatedParadigm(Paradigm): """ Class to handle event-related paradigms """ def __init__(self, con_id=None, onset=None, amplitude=None): """ Parameters ---------- con_id: array of shape (n_events), type = string, optional id of the events (name of the experimental condition) onset: array of shape (n_events), type = float, optional onset time (in s.) of the events amplitude: array of shape (n_events), type = float, optional, amplitude of the events (if applicable) """ Paradigm.__init__(self, con_id, onset, amplitude) class BlockParadigm(Paradigm): """ Class to handle block paradigms """ def __init__(self, con_id=None, onset=None, duration=None, amplitude=None): """ Parameters ---------- con_id: array of shape (n_events), type = string, optional id of the events (name of the experimental condition) onset: array of shape (n_events), type = float, optional onset time (in s.) 
of the events amplitude: array of shape (n_events), type = float, optional, amplitude of the events (if applicable) """ Paradigm.__init__(self, con_id, onset, amplitude) self.duration = duration self.type = 'block' if duration is not None: if len(duration) != self.n_events: raise ValueError('inconsistent definition of duration') self.duration = np.ravel(np.array(duration)) def load_paradigm_from_csv_file(path, session=None): """ Read a (.csv) paradigm file consisting of values yielding (occurrence time, (duration), event ID, modulation) and returns a paradigm instance or a dictionary of paradigm instances Parameters ---------- path: string, path to a .csv file that describes the paradigm session: string, optional, session identifier by default the output is a dictionary of session-level dictionaries indexed by session Returns ------- paradigm, paradigm instance (if session is provided), or dictionary of paradigm instances otherwise, the resulting session-by-session paradigm Notes ----- It is assumed that the csv file contains the following columns: (session id, condition id, onset), plus possibly (duration) and/or (amplitude). If all the durations are 0, the paradigm will be handled as event-related. Fixme ----- would be much clearer if amplitude was put before duration in the .csv """ import csv with open4csv(path, 'r') as csvfile: dialect = csv.Sniffer().sniff(csvfile.read()) csvfile.seek(0) reader = csv.reader(csvfile, dialect) # load the csv as a paradigm array sess, cid, onset, amplitude, duration = [], [], [], [], [] for row in reader: sess.append(row[0]) cid.append(row[1]) onset.append(float(row[2])) if len(row) > 3: duration.append(float(row[3])) if len(row) > 4: amplitude.append(row[4]) paradigm_info = [np.array(sess), np.array(cid), np.array(onset), np.array(duration), np.array(amplitude)] paradigm_info = paradigm_info[:len(row)] def read_session(paradigm_info, session): """ return a paradigm instance corresponding to session """ ps = (paradigm_info[0] == session) if np.sum(ps) == 0: return None ampli = np.ones(np.sum(ps)) if len(paradigm_info) > 4: _, cid, onset, duration, ampli = [lp[ps] for lp in paradigm_info] if (duration == 0).all(): paradigm = EventRelatedParadigm(cid, onset, ampli) else: paradigm = BlockParadigm(cid, onset, duration, ampli) elif len(paradigm_info) > 3: _, cid, onset, duration = [lp[ps] for lp in paradigm_info] paradigm = BlockParadigm(cid, onset, duration, ampli) else: _, cid, onset = [lp[ps] for lp in paradigm_info] paradigm = EventRelatedParadigm(cid, onset, ampli) return paradigm sessions = np.unique(paradigm_info[0]) if session is None: paradigm = {} for session in sessions: paradigm[session] = read_session(paradigm_info, session) else: paradigm = read_session(paradigm_info, session) return paradigm nipy-0.3.0/nipy/modalities/fmri/fmri.py000066400000000000000000000115511210344137400200500ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import warnings import numpy as np from ...core.api import ImageList class FmriImageList(ImageList): """ Class to implement image list interface for FMRI time series Allows metadata such as volume and slice times """ def __init__(self, images=None, volume_start_times=None, slice_times=None): """ An implementation of an fMRI image as in ImageList Parameters ---------- images : iterable an iterable object whose items are meant to be images; this is checked by asserting that each has a `coordmap` attribute and a ``get_data`` 
method. Note that Image objects are not iterable by default; use the ``from_image`` classmethod or ``iter_axis`` function to convert images to image lists - see examples below for the latter. volume_start_times: None or float or (N,) ndarray start time of each frame. It can be specified either as an ndarray with ``N=len(images)`` elements or as a single float, the TR. None results in ``np.arange(len(images)).astype(np.float)`` slice_times: None or (N,) ndarray specifying offset for each slice of each frame, from the frame start time. See Also -------- nipy.core.image_list.ImageList Examples -------- >>> from nipy.testing import funcfile >>> from nipy.io.api import load_image >>> from nipy.core.api import iter_axis >>> funcim = load_image(funcfile) >>> iterable_img = iter_axis(funcim, 't') >>> fmrilist = FmriImageList(iterable_img) >>> print fmrilist.get_list_data(axis=0).shape (20, 17, 21, 3) >>> print fmrilist[4].shape (17, 21, 3) """ ImageList.__init__(self, images=images) if volume_start_times is None: volume_start_times = 1. v = np.asarray(volume_start_times) length = len(self.list) if v.shape == (length,): self.volume_start_times = volume_start_times else: v = float(volume_start_times) self.volume_start_times = np.arange(length) * v self.slice_times = slice_times def __getitem__(self, index): """ If index is an index, return self.list[index], an Image else return an FmriImageList with images=self.list[index]. """ if type(index) is type(1): return self.list[index] return self.__class__( images=self.list[index], volume_start_times=self.volume_start_times[index], slice_times=self.slice_times) @classmethod def from_image(klass, fourdimage, axis='t', volume_start_times=None, slice_times=None): """Create an FmriImageList from a 4D Image Get images by extracting 3d images along the 't' axis. Parameters ---------- fourdimage : ``Image`` instance A 4D Image volume_start_times: None or float or (N,) ndarray start time of each frame. It can be specified either as an ndarray with ``N=len(images)`` elements or as a single float, the TR. None results in ``np.arange(len(images)).astype(np.float)`` slice_times: None or (N,) ndarray specifying offset for each slice of each frame, from the frame start time. Returns ------- filist : ``FmriImageList`` instance """ if fourdimage.ndim != 4: raise ValueError('expecting a 4-dimensional Image') image_list = ImageList.from_image(fourdimage, axis) return klass(images=image_list.list, volume_start_times=volume_start_times, slice_times=slice_times) def axis0_generator(data, slicers=None): """ Takes array-like `data`, returning slices over axes > 0 This function takes an array-like object `data` and yields tuples of slicing thing and slices like:: [slicer, np.asarray(data)[:,slicer] for slicer in slicer] which in the default (`slicers` is None) case, boils down to:: [i, np.asarray(data)[:,i] for i in range(data.shape[1])] This can be used to get arrays of time series out of an array if the time axis is axis 0. Parameters ---------- data : array-like object such that ``arr = np.asarray(data)`` returns an array of at least 2 dimensions. slicers : None or sequence sequence of objects that can be used to slice into array ``arr`` returned from data. 
If None, default is ``range(data.shape[1])`` """ arr = np.asarray(data) if slicers is None: slicers = range(arr.shape[1]) for slicer in slicers: yield slicer, arr[:,slicer] nipy-0.3.0/nipy/modalities/fmri/fmristat/000077500000000000000000000000001210344137400203675ustar00rootroot00000000000000nipy-0.3.0/nipy/modalities/fmri/fmristat/__init__.py000066400000000000000000000005341210344137400225020ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This module is meant to reproduce the GLM analysis of fmristat. Liao et al. (2002). TODO fix reference here """ __docformat__ = 'restructuredtext' from nipy.testing import Tester test = Tester().test bench = Tester().bench nipy-0.3.0/nipy/modalities/fmri/fmristat/api.py000066400000000000000000000002661210344137400215160ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from .hrf import canonical as delay_hrf from .model import OLS, AR1 nipy-0.3.0/nipy/modalities/fmri/fmristat/delay.py000066400000000000000000000332401210344137400220410ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This module defines a class to output estimates of delays and contrasts of delays. Liao, C.H., Worsley, K.J., Poline, J-B., Aston, J.A.D., Duncan, G.H., Evans, A.C. (2002). \'Estimating the delay of the response in fMRI data.\' NeuroImage, 16:593-606. """ __docformat__ = 'restructuredtext' import os import numpy as np import numpy.linalg as L from nipy.algorithms.utils.matrices import pos_recipr, recipr0 # FIXME: This is broken. Don't know how to fix it #from nipy.fixes.scipy.stats.models.contrast import Contrast, ContrastResults class Contrast(object): """ Empty boggus class to get the docs building. """ # FIXME: This empty class needs to go. # XXX FIXME This is broken # class DelayContrast(Contrast): # """ # Specify a delay contrast. # Delay contrasts are specified by a sequence of functions and weights, the # functions should NOT already be convolved with any HRF. They will be # convolved with self.IRF which is expected to be a filter with a canonical # HRF and its derivative -- defaults to the Glover model. # Weights should have the same number of columns as len(fns), with each row # specifying a different contrast. # """ # Tmin = -100. # Tmax = 100. 
# def _sequence_call(self, time): # """ # :Parameters: # `time` : TODO # TODO # :Returns: ``numpy.ndarray`` # """ # return np.array([fn(time) for fn in self._sequence_fn]) # def __init__(self, fns, weights, formula, IRF=None, name='', rownames=[]): # """ # :Parameters: # `fns` : TODO # TODO # `weights` : TODO # TODO # `formula` : TODO # TODO # `IRF` : TODO # TODO # `name` : string # TODO # `rownames` : [string] # TODO # """ # if IRF is None: # self.IRF = canonical # else: # self.IRF = IRF # self.delayflag = True # self.name = name # self.formula = formula # if type(fns) in [type([]), type(())]: # self._sequence_fn = fns # self._nsequence = len(_sequence_fn) # self.fn = self._sequence_call # else: # self.fn = fns # self.weights = np.asarray(weights) # if self.weights.ndim == 1: # self.weights.shape = (1, self.weights.shape[0]) # ## if len(self._sequence_fn) != self.weights.shape[1]: # ## raise ValueError, 'length of weights does not match number of ' \ # ## 'terms in DelayContrast' # term = ExperimentalQuantitative('%s_delay' % self.name, self.fn) # term.convolve(self.IRF) # Contrast.__init__(self, term, self.formula, name=self.name) # if rownames == []: # if name == '': # raise ValueError, 'if rownames are not specified, name must be specified' # if self.weights.shape[0] > 1: # self.rownames = ['%srow%d' % (name, i) for i in # range(self.weights.shape[0])] # elif self.weights.shape[0] == 1: # self.rownames = [''] # else: # self.rownames = rownames # def compute_matrix(self, time=None): # """ # :Parameters: # `time` : TODO # TODO # :Returns: ``None`` # """ # Contrast.compute_matrix(self, time=time) # cnrow = self.matrix.shape[0] / 2 # self.effectmatrix = self.matrix[0:cnrow] # self.deltamatrix = self.matrix[cnrow:] # self.isestimable(time) # def isestimable(self, t): # """ # To estimate the delay, it is assumed that the response contains # (f ** HRF)(t + delta) # for each delay model time series 'f'. # More specifically, it is assumed that # f(t + delta) = c1 * (f ** HRF)(t) + delta * c2 * (f ** dHRF)(t) # where HRF and dHRF are the HRFs for this delay contrast. # This function checks to ensure that the columns # [(f ** HRF)(t), (f ** dHRF(t))] # are in the column space of the fMRI regression model. # :Parameters: # `t` : TODO # TODO # :Returns: ``None`` # :Raises ValueError: if any of the columns are not in the column space # of the model # """ # D = self.formula(t).T # pinvD = L.pinv(D) # C = self.term(t) # cnrow = C.shape[0] / 2 # effects = C[:cnrow] # deffects = C[cnrow:] # for i in range(self.weights.shape[0]): # for matrix in [effects, deffects]: # col = np.dot(self.weights[i], matrix) # colhat = np.dot(D, np.dot(pinvD, col)) # if not np.allclose(col, colhat): # if self.weights.shape[0] > 1: # name = self.rownames[i] # else: # name = '' # raise ValueError, 'delay contrast %snot estimable' % name # def _extract_effect(self, results): # delay = self.IRF.delay # self.gamma0 = np.dot(self.effectmatrix, results.beta) # self.gamma1 = np.dot(self.deltamatrix, results.beta) # nrow = self.gamma0.shape[0] # self.T0sq = np.zeros(self.gamma0.shape) # for i in range(nrow): # self.T0sq[i] = (self.gamma0[i]**2 * # recipr(results.cov_beta(matrix=self.effectmatrix[i]))) # self.r = self.gamma1 * recipr0(self.gamma0) # self.rC = self.r * self.T0sq / (1. 
+ self.T0sq) # self.deltahat = delay.inverse(self.rC) # self._effect = np.dot(self.weights, self.deltahat) # def _extract_sd(self, results): # delay = self.IRF.delay # self.T1 = np.zeros(self.gamma0.shape) # nrow = self.gamma0.shape[0] # for i in range(nrow): # self.T1[i] = self.gamma1[i] * recipr(np.sqrt(results.cov_beta(matrix=self.deltamatrix[i]))) # a1 = 1 + 1. * recipr(self.T0sq) # gdot = np.array(([(self.r * (a1 - 2.) * # recipr0(self.gamma0 * a1**2)), # recipr0(self.gamma0 * a1)] * # recipr0(delay.dforward(self.deltahat)))) # Cov = results.cov_beta # E = self.effectmatrix # D = self.deltamatrix # nrow = self.effectmatrix.shape[0] # cov = np.zeros((nrow,)*2 + self.T0sq.shape[1:]) # for i in range(nrow): # for j in range(i + 1): # cov[i,j] = (gdot[0,i] * gdot[0,j] * Cov(matrix=E[i], # other=E[j]) + # gdot[0,i] * gdot[1,j] * Cov(matrix=E[i], # other=D[j]) + # gdot[1,i] * gdot[0,j] * Cov(matrix=D[i], # other=E[j]) + # gdot[1,i] * gdot[1,j] * Cov(matrix=D[i], # other=D[j])) # cov[j,i] = cov[i,j] # nout = self.weights.shape[0] # self._sd = np.zeros(self._effect.shape) # for r in range(nout): # var = 0 # for i in range(nrow): # var += cov[i,i] * np.power(self.weights[r,i], 2) # for j in range(i): # var += 2 * cov[i,j] * self.weights[r,i] * self.weights[r,j] # self._sd[r] = np.sqrt(var) # def _extract_t(self): # t = self._effect * recipr(self._sd) # t = np.clip(t, self.Tmin, self.Tmax) # return t # def extract(self, results): # """ # :Parameters: # `results` : TODO # TODO # :Returns: `ContrastResults` # """ # self._extract_effect(results) # self._extract_sd(results) # t = self._extract_t() # return ContrastResults(effect=self._effect, # sd=self._sd, # t=t, df_denom=results.df_resid) # class DelayContrastOutput(TOutput): # """ # TODO # """ # def __init__(self, coordmap, contrast, IRF=None, dt=0.01, delta=None, # subpath='delays', clobber=False, path='.', # ext='.hdr', volume_start_times=[], **kw): # """ # :Parameters: # `coordmap` : TODO # TODO # `contrast` : TODO # TODO # `IRF` : TODO # TODO # `dt` : float # TODO # `delta` : TODO # TODO # `subpath` : string # TODO # `clobber` : bool # TODO # `path` : string # TODO # `ext` : string # TODO # `volume_start_times` : TODO # TODO # `kw` : dict # Passed through to the constructor of `TContrastOutput` # """ # TContrastOutput.__init__(self, coordmap, contrast, subpath=subpath, # clobber=clobber, volume_start_times=volume_start_times, **kw) # self.IRF = IRF # self.dt = dt # if delta is None: # self.delta = np.linspace(-4.5, 4.5, 91) # else: # self.delta = delta # self.path = path # self.subpath = subpath # self.clobber = clobber # self._setup_output_delay(path, clobber, subpath, ext, volume_start_times) # def _setup_contrast(self, time=None): # """ # Setup the contrast for the delay. # """ # self.contrast.compute_matrix(time=time) # def _setup_output_delay(self, path, clobber, subpath, ext, volume_start_times): # """ # Setup the output for contrast, the DelayContrast. One t, sd, and # effect img is output for each row of contrast.weights. Further, # the \'magnitude\' (canonical HRF) contrast matrix and \'magnitude\' # column space are also output to illustrate what contrast this # corresponds to. 
# :Parameters: # `path` : string # TODO # `clobber` : bool # TODO # `subpath` : string # TODO # `ext` : TODO # TODO # `volume_start_times` : TODO # TODO # :Returns: ``None`` # """ # self.timgs = [] # self.sdimgs = [] # self.effectimgs = [] # self.timg_iters = [] # self.sdimg_iters = [] # self.effectimg_iters = [] # nout = self.contrast.weights.shape[0] # for i in range(nout): # rowname = self.contrast.rownames[i] # outdir = os.path.join(path, subpath, rowname) # if not os.path.exists(outdir): # os.makedirs(outdir) # cnrow = self.contrast.matrix.shape[0] / 2 # l = np.zeros(self.contrast.matrix.shape[0]) # l[0:cnrow] = self.contrast.weights[i] # img, it = self._setup_img(clobber, outdir, ext, "t") # self.timgs.append(img) # self.timg_iters.append(it) # img, it = self._setup_img(clobber, outdir, ext, "effect") # self.effectimgs.append(img) # self.effectimg_iters.append(it) # img, it = self._setup_img(clobber, outdir, ext, "sd") # self.sdimgs.append(img) # self.sdimg_iters.append(it) # matrix = np.squeeze(np.dot(l, self.contrast.matrix)) # outname = os.path.join(outdir, 'matrix%s.csv' % rowname) # outfile = open(outname, 'w') # outfile.write(','.join(fpformat.fix(x,4) for x in matrix) + '\n') # outfile.close() # outname = os.path.join(outdir, 'matrix%s.bin' % rowname) # outfile = open(outname, 'w') # matrix = matrix.astype(' 0) # test that we can get several components spectral, approx = spectral_decomposition(hrf.glover, ncomp=5) assert_equal(len(spectral), 5) nipy-0.3.0/nipy/modalities/fmri/fmristat/tests/test_iterables.py000066400000000000000000000055241210344137400251220ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import warnings import numpy as np from numpy.random import standard_normal as noise from nipy.io.api import load_image from nipy.core.image.image import rollimg from nipy.modalities.fmri.api import FmriImageList, axis0_generator from nipy.core.utils.generators import (write_data, parcels, f_generator) from nipy.algorithms.statistics.models.regression import OLSModel from numpy.testing import assert_array_almost_equal from nipy.testing import funcfile def setup(): # Suppress warnings during tests to reduce noise warnings.simplefilter("ignore") def teardown(): # Clear list of warning filters warnings.resetwarnings() # Module globals FIMG = load_image(funcfile) # Put time on first axis FIMG = rollimg(FIMG, 't') FDATA = FIMG.get_data() FIL = FmriImageList.from_image(FIMG) # I think it makes more sense to use FDATA instead of FIL for GLM # purposes -- reduces some noticeable overhead in creating the # array from FmriImageList # create a design matrix, model and contrast matrix DESIGN = noise((FDATA.shape[0],3)) MODEL = OLSModel(DESIGN) CMATRIX = np.array([[1,0,0],[0,1,0]]) # two prototypical functions in a GLM analysis def fit(input): return MODEL.fit(input).resid def contrast(results): return results.Fcontrast(CMATRIX) # generators def result_generator(datag): for i, fdata in datag: yield i, MODEL.fit(fdata) def flatten_generator(ing): for i, r in ing: r = r.reshape((r.shape[0], -1)) yield i, r def unflatten_generator(ing): for i, r in ing: r = r.reshape(FIMG.shape[2:]) yield i, r def contrast_generator(resultg): for i, r in resultg: yield i, np.asarray(contrast(r)) def test_iterate_over_image(): # Fit a model, iterating over the slices of an array # associated to an FmriImage. 
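# The target arrays below are pre-filled with distinct constants (0.5, 2.0,
# 3.0), presumably so that any voxel the generator pipeline fails to
# overwrite keeps a telltale value and breaks the array comparisons at the
# end of the test.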
c = np.zeros(FDATA.shape[1:]) + 0.5 res_gen = result_generator(flatten_generator(axis0_generator(FDATA))) write_data(c, unflatten_generator(contrast_generator(res_gen))) # Fit a model, iterating over the array associated to an # FmriImage, iterating over a list of ROIs defined by binary # regions of the same shape as a frame of FmriImage # this might really be an anatomical image or AR(1) coefficients a = np.asarray(FDATA[0]) p = np.greater(a, a.mean()) d = np.ones(FDATA.shape[1:]) * 2.0 flat_gen = flatten_generator(axis0_generator(FDATA, parcels(p))) write_data(d, contrast_generator(result_generator(flat_gen))) assert_array_almost_equal(d, c) e = np.zeros(FDATA.shape[1:]) + 3.0 flat_gen2 = flatten_generator(axis0_generator(FDATA, parcels(p))) write_data(e, f_generator(contrast, result_generator(flat_gen2))) assert_array_almost_equal(d, e) nipy-0.3.0/nipy/modalities/fmri/fmristat/tests/test_model.py000066400000000000000000000112561210344137400242470ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from __future__ import with_statement import numpy as np from nipy.io.api import load_image from nipy.core.image.image import rollimg from .. import model from ..model import ModelOutputImage, estimateAR from ...api import FmriImageList from nipy.algorithms.statistics.models.regression import ( OLSModel, ar_bias_corrector, ar_bias_correct) from nipy.algorithms.statistics.formula.formulae import( Formula, Term, make_recarray) from nibabel.tmpdirs import InTemporaryDirectory from nose.tools import assert_raises, assert_true, assert_equal from numpy.testing import assert_array_equal, assert_array_almost_equal from nipy.testing import funcfile, anatfile def test_model_out_img(): # Model output image cmap = load_image(anatfile).coordmap shape = (2,3,4) fname = 'myfile.nii' with InTemporaryDirectory(): moi = ModelOutputImage(fname, cmap, shape) for i in range(shape[0]): moi[i] = i for i in range(shape[0]): assert_array_equal(moi[i], i) moi.save() assert_raises(ValueError, moi.__setitem__, 0, 1) assert_raises(ValueError, moi.__getitem__, 0) new_img = load_image(fname) for i in range(shape[0]): assert_array_equal(new_img[i].get_data(), i) del new_img def test_run(): ar1_fname = 'ar1_out.nii' funcim = load_image(funcfile) fmriims = FmriImageList.from_image(funcim, volume_start_times=2.) 
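    # What follows mirrors the standard two-pass fmristat procedure: an OLS
    # pass writes the AR(1) coefficient image (ar1_fname) plus OLS residuals,
    # then an AR1 pass reuses that coefficient image for whitening and writes
    # T, F and residual output images, which are checked below.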
one_vol = fmriims[0] # Formula - with an intercept t = Term('t') f = Formula([t, t**2, t**3, 1]) # Design matrix and contrasts time_vector = make_recarray(fmriims.volume_start_times, 't') con_defs = dict(c=t, c2=t+t**2) desmtx, cmatrices = f.design(time_vector, contrasts=con_defs) # Run with Image and ImageList for inp_img in (rollimg(funcim, 't'), fmriims): with InTemporaryDirectory(): # Run OLS model outputs = [] outputs.append(model.output_AR1(ar1_fname, fmriims)) outputs.append(model.output_resid('resid_OLS_out.nii', fmriims)) ols = model.OLS(fmriims, f, outputs) ols.execute() # Run AR1 model outputs = [] outputs.append( model.output_T('T_out.nii', cmatrices['c'], fmriims)) outputs.append( model.output_F('F_out.nii', cmatrices['c2'], fmriims)) outputs.append( model.output_resid('resid_AR_out.nii', fmriims)) rho = load_image(ar1_fname) ar = model.AR1(fmriims, f, rho, outputs) ar.execute() f_img = load_image('F_out.nii') assert_equal(f_img.shape, one_vol.shape) f_data = f_img.get_data() assert_true(np.all((f_data>=0) & (f_data<30))) resid_img = load_image('resid_AR_out.nii') assert_equal(resid_img.shape, funcim.shape) assert_array_almost_equal(np.mean(resid_img.get_data()), 0, 3) e_img = load_image('T_out_effect.nii') sd_img = load_image('T_out_sd.nii') t_img = load_image('T_out_t.nii') t_data = t_img.get_data() assert_array_almost_equal(t_data, e_img.get_data() / sd_img.get_data()) assert_true(np.all(np.abs(t_data) < 6)) # Need to delete to help windows delete temporary files del rho, resid_img, f_img, e_img, sd_img, t_img, f_data, t_data def test_ar_modeling(): # Compare against standard routines rng = np.random.RandomState(20110903) N = 10 Y = rng.normal(size=(N,1)) * 10 + 100 X = np.c_[np.linspace(-1,1,N), np.ones((N,))] my_model = OLSModel(X) results = my_model.fit(Y) # fmristat wrapper rhos = estimateAR(results.resid, my_model.design, order=2) assert_equal(rhos.shape, (2,)) assert_true(np.all(np.abs(rhos <= 1))) # standard routine rhos2 = ar_bias_correct(results, 2) assert_array_almost_equal(rhos, rhos2, 8) # Make 2D and 3D Y Y = rng.normal(size=(N,4)) * 10 + 100 results = my_model.fit(Y) rhos = estimateAR(results.resid, my_model.design, order=2) assert_equal(rhos.shape, (2,4)) assert_true(np.all(np.abs(rhos <= 1))) rhos2 = ar_bias_correct(results, 2) assert_array_almost_equal(rhos, rhos2, 8) # 3D results.resid = np.reshape(results.resid, (N,2,2)) rhos = estimateAR(results.resid, my_model.design, order=2) assert_equal(rhos.shape, (2,2,2)) assert_true(np.all(np.abs(rhos <= 1))) rhos2 = ar_bias_correct(results, 2) assert_array_almost_equal(rhos, rhos2, 8) nipy-0.3.0/nipy/modalities/fmri/fmristat/tests/test_outputters.py000066400000000000000000000037071210344137400254070ustar00rootroot00000000000000""" Tests for regression module """ import numpy as np from nipy.algorithms.statistics.api import OLSModel from ..outputters import output_T, output_F from nose.tools import assert_true, assert_equal, assert_raises from numpy.testing import (assert_array_almost_equal, assert_array_equal) N = 10 X = np.c_[np.linspace(-1,1,N), np.ones((N,))] RNG = np.random.RandomState(20110901) Y = RNG.normal(size=(10,1)) * 10 + 100 MODEL = OLSModel(X) RESULTS = MODEL.fit(Y) C1 = [1, 0] def test_model(): # Check basics about the model fit # Check we fit the mean assert_array_almost_equal(RESULTS.theta[1], np.mean(Y)) def test_output_T(): # Check we get required outputs res = RESULTS.Tcontrast(C1) # all return values # default is all return values assert_array_almost_equal([res.effect, res.sd, res.t], 
output_T(RESULTS, C1)) assert_array_almost_equal([res.effect, res.sd, res.t], output_T(RESULTS, C1, ('effect', 'sd', 't'))) # Input order determines return order assert_array_almost_equal([res.t, res.effect, res.sd], output_T(RESULTS, C1, ('t', 'effect', 'sd'))) # And can select inputs assert_array_almost_equal([res.t], output_T(RESULTS, C1, ('t',))) assert_array_almost_equal([res.sd], output_T(RESULTS, C1, ('sd',))) assert_array_almost_equal([res.effect], output_T(RESULTS, C1, ('effect',))) def test_output_F(): # Test output_F convenience function rng = np.random.RandomState(ord('F')) Y = rng.normal(size=(10,1)) * 10 + 100 X = np.c_[rng.normal(size=(10,3)), np.ones((N,))] c1 = np.zeros((X.shape[1],)) c1[0] = 1 model = OLSModel(X) results = model.fit(Y) # Check we get required outputs exp_f = results.t(0) **2 assert_array_almost_equal(exp_f, output_F(results, c1)) nipy-0.3.0/nipy/modalities/fmri/glm.py000066400000000000000000000550241210344137400176750ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This module presents an interface to use the glm implemented in nipy.algorithms.statistics.models.regression. It contains the GLM and contrast classes that are meant to be the main objects of fMRI data analyses. It is important to note that the GLM is meant as a one-session General Linear Model. But inference can be performed on multiple sessions by computing fixed effects on contrasts Examples -------- >>> import numpy as np >>> from nipy.modalities.fmri.glm import GeneralLinearModel >>> n, p, q = 100, 80, 10 >>> X, Y = np.random.randn(p, q), np.random.randn(p, n) >>> cval = np.hstack((1, np.zeros(9))) >>> model = GeneralLinearModel(X) >>> model.fit(Y) >>> z_vals = model.contrast(cval).z_score() # z-transformed statistics Example of fixed effects statistics across two contrasts >>> cval_ = cval.copy() >>> np.random.shuffle(cval_) >>> z_ffx = (model.contrast(cval) + model.contrast(cval_)).z_score() """ import numpy as np from warnings import warn import scipy.stats as sps from nibabel import load, Nifti1Image from nipy.labs.mask import compute_mask_sessions from nipy.algorithms.statistics.models.regression import OLSModel, ARModel from nipy.algorithms.statistics.utils import multiple_mahalanobis, z_score from nipy.core.api import is_image from nipy.testing.decorators import skip_doctest_if from nipy.utils import HAVE_EXAMPLE_DATA DEF_TINY = 1e-50 DEF_DOFMAX = 1e10 def data_scaling(Y): """Scaling of the data to have pourcent of baseline change columnwise Parameters ---------- Y: array of shape(n_time_points, n_voxels) the input data Returns ------- Y: array of shape (n_time_points, n_voxels), the data after mean-scaling, de-meaning and multiplication by 100 mean : array of shape (n_voxels,) the data mean """ mean = Y.mean(0) Y = 100 * (Y / mean - 1) return Y, mean class GeneralLinearModel(object): """ This class handles the so-called on General Linear Model Most of what it does in the fit() and contrast() methods fit() performs the standard two-step ('ols' then 'ar1') GLM fitting contrast() returns a contrast instance, yileding statistics and p-values. The link between fit() and constrast is done vis the two class members: glm_results : dictionary of nipy.algorithms.statistics.models. 
regression.RegressionResults instances, describing results of a GLM fit labels : array of shape(n_voxels), labels that associate each voxel with a results key """ def __init__(self, X): """ Parameters ---------- X : array of shape (n_time_points, n_regressors) the design matrix """ self.X = X self.labels_ = None self.results_ = None def fit(self, Y, model='ar1', steps=100): """GLM fitting of a dataset using 'ols' regression or the two-pass Parameters ---------- Y : array of shape(n_time_points, n_samples) the fMRI data model : {'ar1', 'ols'}, optional the temporal variance model. Defaults to 'ar1' steps : int, optional Maximum number of discrete steps for the AR(1) coef histogram """ if model not in ['ar1', 'ols']: raise ValueError('Unknown model') if Y.ndim == 1: Y = Y[:, np.newaxis] if Y.shape[0] != self.X.shape[0]: raise ValueError('Response and predictors are inconsistent') # fit the OLS model ols_result = OLSModel(self.X).fit(Y) # compute and discretize the AR1 coefs ar1 = ((ols_result.resid[1:] * ols_result.resid[:-1]).sum(0) / (ols_result.resid ** 2).sum(0)) ar1 = (ar1 * steps).astype(np.int) * 1. / steps # Fit the AR model acccording to current AR(1) estimates if model == 'ar1': self.results_ = {} self.labels_ = ar1 # fit the model for val in np.unique(self.labels_): m = ARModel(self.X, val) self.results_[val] = m.fit(Y[:, self.labels_ == val]) else: self.labels_ = np.zeros(Y.shape[1]) self.results_ = {0.0: ols_result} def get_beta(self, column_index=None): """Acessor for the best linear unbiased estimated of model parameters Parameters ---------- column_index: int or array-like of int or None, optional The indexed of the columns to be returned. if None (default behaviour), the whole vector is returned Returns ------- beta: array of shape (n_voxels, n_columns) the beta """ # make colum_index a list if it an int if column_index == None: column_index = np.arange(self.X.shape[1]) if not hasattr(column_index, '__iter__'): column_index = [int(column_index)] n_beta = len(column_index) # build the beta array beta = np.zeros((n_beta, self.labels_.size), dtype=np.float) for l in self.results_.keys(): beta[:, self.labels_ == l] = self.results_[l].theta[column_index] return beta def get_mse(self): """Acessor for the mean squared error of the model Returns ------- mse: array of shape (n_voxels) the sum of square error per voxel """ # build the beta array mse = np.zeros(self.labels_.size, dtype=np.float) for l in self.results_.keys(): mse[self.labels_ == l] = self.results_[l].MSE return mse def get_logL(self): """Acessor for the log-likelihood of the model Returns ------- logL: array of shape (n_voxels,) the sum of square error per voxel """ # build the beta array logL = np.zeros(self.labels_.size, dtype=np.float) for l in self.results_.keys(): logL[self.labels_ == l] = self.results_[l].logL return logL def contrast(self, con_val, contrast_type=None): """ Specify and estimate a linear contrast Parameters ---------- con_val : numpy.ndarray of shape (p) or (q, p) where q = number of contrast vectors and p = number of regressors contrast_type : {None, 't', 'F' or 'tmin-conjunction'}, optional type of the contrast. 
If None, then defaults to 't' for 1D `con_val` and 'F' for 2D `con_val` Returns ------- con: Contrast instance """ if self.labels_ == None or self.results_ == None: raise ValueError('The model has not been estimated yet') con_val = np.asarray(con_val) if con_val.ndim == 1: dim = 1 else: dim = con_val.shape[0] if contrast_type is None: if dim == 1: contrast_type = 't' else: contrast_type = 'F' if contrast_type not in ['t', 'F', 'tmin-conjunction']: raise ValueError('Unknown contrast type: %s' % contrast_type) effect_ = np.zeros((dim, self.labels_.size), dtype=np.float) var_ = np.zeros((dim, dim, self.labels_.size), dtype=np.float) if contrast_type == 't': for l in self.results_.keys(): resl = self.results_[l].Tcontrast(con_val) effect_[:, self.labels_ == l] = resl.effect.T var_[:, :, self.labels_ == l] = (resl.sd ** 2).T else: for l in self.results_.keys(): resl = self.results_[l].Fcontrast(con_val) effect_[:, self.labels_ == l] = resl.effect var_[:, :, self.labels_ == l] = resl.covariance dof_ = self.results_[l].df_resid return Contrast(effect=effect_, variance=var_, dof=dof_, contrast_type=contrast_type) class Contrast(object): """ The contrast class handles the estimation of statistical contrasts on a given model: student (t), Fisher (F), conjunction (tmin-conjunction). The important feature is that it supports addition, thus opening the possibility of fixed-effects models. The current implementation is meant to be simple, and could be enhanced in the future on the computational side (high-dimensional F constrasts may lead to memory breakage). Notes ----- The 'tmin-conjunction' test is the valid conjunction test discussed in: Nichols T, Brett M, Andersson J, Wager T, Poline JB. Valid conjunction inference with the minimum statistic. Neuroimage. 2005 Apr 15;25(3):653-60. This test gives the p-value of the z-values under the conjunction null, i.e. the union of the null hypotheses for all terms. 
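    Examples
    --------
    A minimal sketch with arbitrary numbers (a one-dimensional t contrast
    across five voxels); the values themselves are not meaningful:

    >>> import numpy as np
    >>> effect = np.ones((1, 5))
    >>> variance = np.ones((1, 1, 5))
    >>> con = Contrast(effect, variance, dof=20, contrast_type='t')
    >>> con.stat().shape
    (5,)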
""" def __init__(self, effect, variance, dof=DEF_DOFMAX, contrast_type='t', tiny=DEF_TINY, dofmax=DEF_DOFMAX): """ Parameters ========== effect: array of shape (contrast_dim, n_voxels) the effects related to the contrast variance: array of shape (contrast_dim, contrast_dim, n_voxels) the associated variance estimate dof: scalar, the degrees of freedom contrast_type: string to be chosen among 't' and 'F' """ if variance.ndim != 3: raise ValueError('Variance array should have 3 dimensions') if effect.ndim != 2: raise ValueError('Variance array should have 2 dimensions') if variance.shape[0] != variance.shape[1]: raise ValueError('Inconsistent shape for the variance estimate') if ((variance.shape[1] != effect.shape[0]) or (variance.shape[2] != effect.shape[1])): raise ValueError('Effect and variance have inconsistent shape') self.effect = effect self.variance = variance self.dof = float(dof) self.dim = effect.shape[0] if self.dim > 1 and contrast_type is 't': print 'Automatically converted multi-dimensional t to F contrast' contrast_type = 'F' self.contrast_type = contrast_type self.stat_ = None self.p_value_ = None self.baseline = 0 self.tiny = tiny self.dofmax = dofmax def stat(self, baseline=0.0): """ Return the decision statistic associated with the test of the null hypothesis: (H0) 'contrast equals baseline' Parameters ========== baseline: float, optional, Baseline value for the test statistic """ self.baseline = baseline # Case: one-dimensional contrast ==> t or t**2 if self.dim == 1: # avoids division by zero stat = (self.effect - baseline) / np.sqrt( np.maximum(self.variance, self.tiny)) if self.contrast_type == 'F': stat = stat ** 2 # Case: F contrast elif self.contrast_type == 'F': # F = |t|^2/q , |t|^2 = e^t inv(v) e if self.effect.ndim == 1: self.effect = self.effect[np.newaxis] if self.variance.ndim == 1: self.variance = self.variance[np.newaxis, np.newaxis] stat = (multiple_mahalanobis(self.effect - baseline, self.variance) / self.dim) # Case: tmin (conjunctions) elif self.contrast_type == 'tmin-conjunction': vdiag = self.variance.reshape([self.dim ** 2] + list( self.variance.shape[2:]))[:: self.dim + 1] stat = (self.effect - baseline) / np.sqrt( np.maximum(vdiag, self.tiny)) stat = stat.min(0) # Unknwon stat else: raise ValueError('Unknown statistic type') self.stat_ = stat return stat.ravel() def p_value(self, baseline=0.0): """Return a parametric estimate of the p-value associated with the null hypothesis: (H0) 'contrast equals baseline' Parameters ========== baseline: float, optional, Baseline value for the test statistic """ if self.stat_ == None or not self.baseline == baseline: self.stat_ = self.stat(baseline) # Valid conjunction as in Nichols et al, Neuroimage 25, 2005. if self.contrast_type in ['t', 'tmin-conjunction']: p = sps.t.sf(self.stat_, np.minimum(self.dof, self.dofmax)) elif self.contrast_type == 'F': p = sps.f.sf(self.stat_, self.dim, np.minimum( self.dof, self.dofmax)) else: raise ValueError('Unknown statistic type') self.p_value_ = p return p def z_score(self, baseline=0.0): """Return a parametric estimation of the z-score associated with the null hypothesis: (H0) 'contrast equals baseline' Parameters ========== baseline: float, optional, Baseline value for the test statistic """ if self.p_value_ == None or not self.baseline == baseline: self.p_value_ = self.p_value(baseline) # Avoid inf values kindly supplied by scipy. 
self.z_score_ = z_score(self.p_value_) return self.z_score_ def __add__(self, other): """Addition of selfwith others, Yields an new Contrast instance This should be used only on indepndent contrasts""" if self.contrast_type != other.contrast_type: raise ValueError( 'The two contrasts do not have consistant type dimensions') if self.dim != other.dim: raise ValueError( 'The two contrasts do not have compatible dimensions') effect_ = self.effect + other.effect variance_ = self.variance + other.variance dof_ = self.dof + other.dof return Contrast(effect=effect_, variance=variance_, dof=dof_, contrast_type=self.contrast_type) def __rmul__(self, scalar): """Multiplication of the contrast by a scalar""" scalar = float(scalar) effect_ = self.effect * scalar variance_ = self.variance * scalar ** 2 dof_ = self.dof return Contrast(effect=effect_, variance=variance_, dof=dof_, contrast_type=self.contrast_type) __mul__ = __rmul__ def __div__(self, scalar): return self.__rmul__(1 / float(scalar)) class FMRILinearModel(object): """ This class is meant to handle GLMs from a higher-level perspective i.e. by taking images as input and output """ @skip_doctest_if(not HAVE_EXAMPLE_DATA) def __init__(self, fmri_data, design_matrices, mask='compute', m=0.2, M=0.9, threshold=.5): """Load the data Parameters ---------- fmri_data : Image or str or sequence of Images / str fmri images / paths of the (4D) fmri images design_matrices : arrays or str or sequence of arrays / str design matrix arrays / paths of .npz files mask : str or Image or None, optional string can be 'compute' or a path to an image image is an input (assumed binary) mask image(s), if 'compute', the mask is computed if None, no masking will be applied m, M, threshold: float, optional parameters of the masking procedure. Should be within [0, 1] Notes ----- The only computation done here is mask computation (if required) Examples -------- We need the example data package for this example >>> from nipy.utils import example_data >>> from nipy.modalities.fmri.glm import FMRILinearModel >>> fmri_files = [example_data.get_filename('fiac', 'fiac0', run) ... for run in ['run1.nii.gz', 'run2.nii.gz']] >>> design_files = [example_data.get_filename('fiac', 'fiac0', run) ... 
for run in ['run1_design.npz', 'run2_design.npz']] >>> mask = example_data.get_filename('fiac', 'fiac0', 'mask.nii.gz') >>> multi_session_model = FMRILinearModel(fmri_files, design_files, mask) >>> multi_session_model.fit() >>> z_image, = multi_session_model.contrast([np.eye(13)[1]] * 2) The number of voxels with p < 0.001 >>> np.sum(z_image.get_data() > 3.09) 671 """ # manipulate the arguments if isinstance(fmri_data, basestring) or hasattr(fmri_data, 'get_data'): fmri_data = [fmri_data] if isinstance(design_matrices, (basestring, np.ndarray)): design_matrices = [design_matrices] if len(fmri_data) != len(design_matrices): raise ValueError('Incompatible number of fmri runs and ' 'design matrices were provided') self.fmri_data, self.design_matrices = [], [] self.glms, self.means = [], [] # load the fmri data for fmri_run in fmri_data: if isinstance(fmri_run, basestring): self.fmri_data.append(load(fmri_run)) else: self.fmri_data.append(fmri_run) # set self.affine as the affine of the first image self.affine = self.fmri_data[0].get_affine() # load the designs for design_matrix in design_matrices: if isinstance(design_matrix, basestring): loaded = np.load(design_matrix) self.design_matrices.append(loaded[loaded.files[0]]) else: self.design_matrices.append(design_matrix) # load the mask if mask == 'compute': mask = compute_mask_sessions( fmri_data, m=m, M=M, cc=1, threshold=threshold, opening=0) self.mask = Nifti1Image(mask.astype(np.int8), self.affine) elif mask == None: mask = np.ones(self.fmri_data[0].shape[:3]).astype(np.int8) self.mask = Nifti1Image(mask, self.affine) else: if isinstance(mask, basestring): self.mask = load(mask) else: self.mask = mask def fit(self, do_scaling=True, model='ar1', steps=100): """ Load the data, mask the data, scale the data, fit the GLM Parameters ---------- do_scaling : bool, optional if True, the data should be scaled as pourcent of voxel mean model : string, optional, the kind of glm ('ols' or 'ar1') you want to fit to the data steps : int, optional in case of an ar1, discretization of the ar1 parameter """ from nibabel import Nifti1Image # get the mask as an array mask = self.mask.get_data().astype(np.bool) self.glms, self.means = [], [] for fmri, design_matrix in zip(self.fmri_data, self.design_matrices): if do_scaling: # scale the data data, mean = data_scaling(fmri.get_data()[mask].T) else: data, mean = (fmri.get_data()[mask].T, fmri.get_data()[mask].T.mean(0)) mean_data = mask.astype(np.int16) mean_data[mask] = mean self.means.append(Nifti1Image(mean_data, self.affine)) # fit the GLM glm = GeneralLinearModel(design_matrix) glm.fit(data, model, steps) self.glms.append(glm) def contrast(self, contrasts, con_id='', contrast_type=None, output_z=True, output_stat=False, output_effects=False, output_variance=False): """ Estimation of a contrast as fixed effects on all sessions Parameters ---------- contrasts : array or list of arrays of shape (n_col) or (n_dim, n_col) where ``n_col`` is the number of columns of the design matrix, numerical definition of the contrast (one array per run) con_id : str, optional name of the contrast contrast_type : {'t', 'F', 'tmin-conjunction'}, optional type of the contrast output_z : bool, optional Return or not the corresponding z-stat image output_stat : bool, optional Return or not the base (t/F) stat image output_effects : bool, optional Return or not the corresponding effect image output_variance : bool, optional Return or not the corresponding variance image Returns ------- output_images : list of nibabel images The 
desired output images """ if self.glms == []: raise ValueError('first run fit() to estimate the model') if isinstance(contrasts, np.ndarray): contrasts = [contrasts] if len(contrasts) != len(self.glms): raise ValueError( 'contrasts must be a sequence of %d session contrasts' % len(self.glms)) contrast_ = None for i, (glm, con) in enumerate(zip(self.glms, contrasts)): if np.all(con == 0): warn('Contrast for session %d is null' % i) elif contrast_ is None: contrast_ = glm.contrast(con, contrast_type) else: contrast_ = contrast_ + glm.contrast(con, contrast_type) if output_z or output_stat: # compute the contrast and stat contrast_.z_score() # Prepare the returned images mask = self.mask.get_data().astype(np.bool) do_outputs = [output_z, output_stat, output_effects, output_variance] estimates = ['z_score_', 'stat_', 'effect', 'variance'] descrips = ['z statistic', 'Statistical value', 'Estimated effect', 'Estimated variance'] dims = [1, 1, contrast_.dim, contrast_.dim ** 2] n_vox = contrast_.z_score_.size output_images = [] for (do_output, estimate, descrip, dim) in zip( do_outputs, estimates, descrips, dims): if do_output: if dim > 1: result_map = np.tile( mask.astype(np.float)[:, :, :, np.newaxis], dim) result_map[mask] = np.reshape( getattr(contrast_, estimate).T, (n_vox, dim)) else: result_map = mask.astype(np.float) result_map[mask] = np.squeeze( getattr(contrast_, estimate)) output = Nifti1Image(result_map, self.affine) output.get_header()['descrip'] = ( '%s associated with contrast %s' % (descrip, con_id)) output_images.append(output) return output_images nipy-0.3.0/nipy/modalities/fmri/hemodynamic_models.py000066400000000000000000000303441210344137400227540ustar00rootroot00000000000000""" This module is for canonical hrf specification. Here we provide for SPM, Glover hrfs and finite timpulse response (FIR) models. 
This module closely follows SPM implementation Author: Bertrand Thirion, 2011 """ import numpy as np from scipy.stats import gamma def gamma_difference_hrf(tr, oversampling=16, time_length=32., onset=0., delay=6, undershoot=16., dispersion=1., u_dispersion=1., ratio=0.167): """ Compute an hrf as the difference of two gamma functions Parameters ---------- tr: float, scan repeat time, in seconds oversampling: int, temporal oversampling factor, optional time_length: float, hrf kernel length, in seconds onset: float, onset of the hrf Returns ------- hrf: array of shape(length / tr * oversampling, float), hrf sampling on the oversampled time grid """ dt = tr / oversampling time_stamps = np.linspace(0, time_length, float(time_length) / dt) time_stamps -= onset / dt hrf = gamma.pdf(time_stamps, delay / dispersion, dt / dispersion) - \ ratio * gamma.pdf( time_stamps, undershoot / u_dispersion, dt / u_dispersion) hrf /= hrf.sum() return hrf def spm_hrf(tr, oversampling=16, time_length=32., onset=0.): """ Implementation of the SPM hrf model Parameters ---------- tr: float, scan repeat time, in seconds oversampling: int, temporal oversampling factor, optional time_length: float, hrf kernel length, in seconds onset: float, onset of the response Returns ------- hrf: array of shape(length / tr * oversampling, float), hrf sampling on the oversampled time grid """ return gamma_difference_hrf(tr, oversampling, time_length, onset) def glover_hrf(tr, oversampling=16, time_length=32., onset=0.): """ Implementation of the Glover hrf model Parameters ---------- tr: float, scan repeat time, in seconds oversampling: int, temporal oversampling factor, optional time_length: float, hrf kernel length, in seconds onset: float, onset of the response Returns ------- hrf: array of shape(length / tr * oversampling, float), hrf sampling on the oversampled time grid """ return gamma_difference_hrf(tr, oversampling, time_length, onset, delay=6, undershoot=12., dispersion=.9, u_dispersion=.9, ratio=.35) def spm_time_derivative(tr, oversampling=16, time_length=32., onset=0.): """Implementation of the SPM time derivative hrf (dhrf) model Parameters ---------- tr: float, scan repeat time, in seconds oversampling: int, temporal oversampling factor, optional time_length: float, hrf kernel length, in seconds onset: float, onset of the response Returns ------- dhrf: array of shape(length / tr, float), dhrf sampling on the provided grid """ do = .1 dhrf = 1. / do * (spm_hrf(tr, oversampling, time_length, onset + do) - spm_hrf(tr, oversampling, time_length, onset)) return dhrf def glover_time_derivative(tr, oversampling=16, time_length=32., onset=0.): """Implementation of the flover time derivative hrf (dhrf) model Parameters ---------- tr: float, scan repeat time, in seconds oversampling: int, temporal oversampling factor, optional time_length: float, hrf kernel length, in seconds onset: float, onset of the response Returns ------- dhrf: array of shape(length / tr, float), dhrf sampling on the provided grid """ do = .1 dhrf = 1. 
/ do * (glover_hrf(tr, oversampling, time_length, onset + do) - glover_hrf(tr, oversampling, time_length, onset)) return dhrf def spm_dispersion_derivative(tr, oversampling=16, time_length=32., onset=0.): """Implementation of the SPM dispersion derivative hrf model Parameters ---------- tr: float, scan repeat time, in seconds oversampling: int, temporal oversampling factor, optional time_length: float, hrf kernel length, in seconds onset: float, onset of the response Returns ------- dhrf: array of shape(length / tr * oversampling, float), dhrf sampling on the oversampled time grid """ dd = .01 dhrf = 1. / dd * (gamma_difference_hrf(tr, oversampling, time_length, onset, dispersion=1. + dd) - spm_hrf(tr, oversampling, time_length, onset)) return dhrf def sample_condition(exp_condition, frametimes, oversampling=16): """ this function samples the experimental condition at frametimes Parameters ---------- exp_condition: a tuple of 3 arrays of shape n, corresponding to (onsets, duration, value), describing the experimental condition frametimes: array of shape(n) over_sampling: int, over_sampling factor Returns ------- regressor: array of shape(n) """ # generate the oversampled frame times n = frametimes.size if oversampling == 1: hr_frametimes = frametimes else: hr_frametimes = np.linspace(0, frametimes.max() * (1 + 1. / (n - 1)), n * oversampling + 1) # get the regressor information onsets, duration, values = exp_condition onsets, duration, values = np.asanyarray(onsets), np.asanyarray(duration),\ np.asanyarray(values) # generate the regressor time course tmax = len(hr_frametimes) regressor = np.zeros_like(hr_frametimes).astype(np.float) t_onset = np.minimum(np.searchsorted(hr_frametimes, onsets), tmax - 1) regressor[t_onset] += values t_offset = np.minimum(np.searchsorted(hr_frametimes, onsets + duration), tmax - 1) # for event related, shift the offset by 1 for i, to in enumerate(t_offset): if to > 0 and to < tmax - 1 and to == t_onset[i]: t_offset[i] += 1 regressor[t_offset] -= values regressor = np.cumsum(regressor) # normalize the regressor in case of block design #if (duration > 0).any(): # regressor /= oversampling return regressor, hr_frametimes def resample_regressor(hr_regressor, hr_frametimes, frametimes, kind='linear'): """ this function samples the regressors at frametimes Parameters ---------- hr_regressor: array of shape(n), the regressor time course sampled at high temporal resolution hr_frametimes: array of shape(n), the corresponding time stamps frametimes: array of shape(p), the desired time stamps kind: string, optional, the kind of desired interpolation Returns ------- regressor: array of shape(p), the resampled regressor """ from scipy.interpolate import interp1d f = interp1d(hr_frametimes, hr_regressor) return f(frametimes).T def _orthogonalize(X): """ This function orthogonalizes the columns of the design matrix wrt to the first column Parameters ---------- X: array of shape(n, p), the data to be orthogonalized Returns ------- X: after orthogonalization Notes ----- X is changed in place. 
the columns are not normalized """ if X.size == X.shape[0]: return X from numpy.linalg import pinv for i in range(1, X.shape[1]): X[:, i] -= np.dot(X[:, i], np.dot(X[:, :i], pinv(X[:, :i]))) return X def _regressor_names(con_name, hrf_model, fir_delays=None): """ returns a list of regressor names, computed from con-name and hrf type Parameters ---------- con_name: string, identifier of the condition hrf_model: string, identifier of the hrf model Returns ------- names: a list of strings yielding the regressor names """ if hrf_model == 'canonical': return [con_name] elif hrf_model == "canonical with derivative": return [con_name, con_name + "_derivative"] elif hrf_model == 'spm': return [con_name] elif hrf_model == 'spm_time': return [con_name, con_name + "_derivative"] elif hrf_model == 'spm_time_dispersion': return [con_name, con_name + "_derivative", con_name + "_dispersion"] elif hrf_model == 'fir': return [con_name + "_delay_%d" % i for i in fir_delays] def _hrf_kernel(hrf_model, tr, oversampling=16, fir_delays=None): """ Given the specification of the hemodynamic model and time parameters, return the list of matching kernels Parameters ---------- hrf_model: string, identifier of the hrf model tr: the repetition time in seconds oversampling: int, temporal oversampling factor to have a smooth hrf fir_delays: list of for delays Returns ------- hkernel: a list of hrf kernels, depending on the specified hrf model """ if hrf_model == 'spm': hkernel = [spm_hrf(tr, oversampling)] elif hrf_model == 'spm_time': hkernel = [spm_hrf(tr, oversampling), spm_time_derivative(tr, oversampling)] elif hrf_model == 'spm_time_dispersion': hkernel = [spm_hrf(tr, oversampling), spm_time_derivative(tr, oversampling), spm_dispersion_derivative(tr, oversampling)] elif hrf_model == 'canonical': hkernel = [glover_hrf(tr, oversampling)] elif hrf_model == 'canonical with derivative': hkernel = [glover_hrf(tr, oversampling), glover_time_derivative(tr, oversampling)] elif hrf_model == 'fir': hkernel = [np.hstack((np.zeros(f * oversampling), np.ones(oversampling))) for f in fir_delays] else: raise ValueError('Unknown hrf model') return hkernel def compute_regressor(exp_condition, hrf_model, frametimes, con_id='cond', oversampling=16, fir_delays=None): """ This is the main function to convolve regressors with hrf model Parameters ---------- exp_condition: descriptor of an experimental condition hrf_model: string, the hrf model to be used. Can be chosen among: 'spm', 'spm_time', 'spm_time_dispersion', 'canonical', 'canonical_derivative', 'fir' frametimes: array of shape (n):the sought con_id: string, optional identifier of the condition oversampling: int, optional, oversampling factor to perform the convolution fir_delays: array-like of int, onsets corresponding to the fir basis Returns ------- creg: array of shape(n_scans, n_reg): computed regressors sampled at frametimes reg_names: list of strings, corresponding regressor names Notes ----- The different hemodynamic models can be understood as follows: 'spm': this is the hrf model used in spm 'spm_time': this is the spm model plus its time derivative (2 regressors) 'spm_time_dispersion': idem, plus dispersion derivative (3 regressors) 'canonical': this one corresponds to the Glover hrf 'canonical_derivative': the Glover hrf + time derivative (2 regressors) 'fir': finite impulse response basis, a set of delayed dirac models with arbitrary length. This one currently assumes regularly spaced frametimes (i.e. fixed time of repetition). 
It is expected that spm standard and Glover model would not yield large differences in most cases. """ # this is the average tr in this session, not necessarily the true tr tr = float(frametimes.max()) / (np.size(frametimes) - 1) # 1. create the high temporal resolution regressor hr_regressor, hr_frametimes = sample_condition( exp_condition, frametimes, oversampling) # 2. create the hrf model(s) hkernel = _hrf_kernel(hrf_model, tr, oversampling, fir_delays) # 3. convolve the regressor and hrf, and downsample the regressor conv_reg = np.array([np.convolve(hr_regressor, h)[:hr_regressor.size] for h in hkernel]) # 4. temporally resample the regressors creg = resample_regressor(conv_reg, hr_frametimes, frametimes) # 5. ortogonalize the regressors if hrf_model != 'fir': creg = _orthogonalize(creg) # 6 generate regressor names reg_names = _regressor_names(con_id, hrf_model, fir_delays=fir_delays) return creg, reg_names nipy-0.3.0/nipy/modalities/fmri/hrf.py000066400000000000000000000103351210344137400176710ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This module provides definitions of various hemodynamic response functions (hrf). In particular, it provides Gary Glover's canonical HRF, AFNI's default HRF, and a spectral HRF. The Glover HRF is based on: @article{glover1999deconvolution, title={{Deconvolution of impulse response in event-related BOLD fMRI}}, author={Glover, G.H.}, journal={NeuroImage}, volume={9}, number={4}, pages={416--429}, year={1999}, publisher={Orlando, FL: Academic Press, c1992-} } This paramaterization is from fmristat: http://www.math.mcgill.ca/keith/fmristat/ fmristat models the HRF as the difference of two gamma functions, ``g1`` and ``g2``, each defined by the timing of the gamma function peaks (``pk1, pk2``) and the fwhms (``width1, width2``): raw_hrf = g1(pk1, width1) - a2 * g2(pk2, width2) where ``a2`` is the scale factor for the ``g2`` gamma function. The actual hrf is the raw hrf set to have an integral of 1. fmristat used ``pk1, width1, pk2, width2, a2 = (5.4 5.2 10.8 7.35 0.35)``. These are parameters to match Glover's 1 second duration auditory stimulus curves. Glover wrote these as: y(t) = c1 * t**n1 * exp(t/t1) - a2 * c2 * t**n2 * exp(t/t2) with ``n1, t1, n2, t2, a2 = (6.0, 0.9, 12, 0.9, 0.35)``. The difference between Glover's expression and ours is because we (and fmristat) use the peak location and width to characterize the function rather than ``n1, t1``. The values we use are equivalent. Specifically, in our formulation: >>> n1, t1, c1 = gamma_params(5.4, 5.2) >>> np.allclose((n1-1, t1), (6.0, 0.9), rtol=0.02) True >>> n2, t2, c2 = gamma_params(10.8, 7.35) >>> np.allclose((n2-1, t2), (12.0, 0.9), rtol=0.02) True """ import numpy as np import sympy # backwards compatibility with sympy 0.6.x try: sympy_abs = sympy.Abs # 0.7.0 except AttributeError: sympy_abs = sympy.abs from nipy.fixes.sympy.utilities.lambdify import implemented_function from .utils import lambdify_t, T def gamma_params(peak_location, peak_fwhm): """ Parameters for gamma density given peak and width TODO: where does the coef come from again.... 
check fmristat code From a peak location and peak fwhm, determine the parameters (shape, scale) of a Gamma density: f(x) = coef * x**(shape-1) * exp(-x/scale) The coefficient returned ensures that the f has integral 1 over [0,np.inf] Parameters ---------- peak_location : float Location of the peak of the Gamma density peak_fwhm : float FWHM at the peak Returns ------- shape : float Shape parameter in the Gamma density scale : float Scale parameter in the Gamma density coef : float Coefficient needed to ensure the density has integral 1. """ shape_m1 = np.power(peak_location / peak_fwhm, 2) * 8 * np.log(2.0) scale = np.power(peak_fwhm, 2) / peak_location / 8 / np.log(2.0) coef = peak_location**(-shape_m1) * np.exp(peak_location / scale) return shape_m1 + 1, scale, coef def gamma_expr(peak_location, peak_fwhm): shape, scale, coef = gamma_params(peak_location, peak_fwhm) return ( coef * ((T >= 0) * (T+1.0e-14))**(shape-1) * sympy.exp(-(T+1.0e-14)/scale) ) # Glover canonical HRF models # they are both Sympy objects def _getint(f, dt=0.02, t=50): # numerical integral of function lf = lambdify_t(f) tt = np.arange(dt,t+dt,dt) return lf(tt).sum() * dt _gexpr = gamma_expr(5.4, 5.2) - 0.35 * gamma_expr(10.8, 7.35) _gexpr = _gexpr / _getint(_gexpr) _glover = lambdify_t(_gexpr) glover = implemented_function('glover', _glover) glovert = lambdify_t(glover(T)) # Derivative of Glover HRF _dgexpr = _gexpr.diff(T) dpos = sympy.Derivative((T >= 0), T) _dgexpr = _dgexpr.subs(dpos, 0) _dgexpr = _dgexpr / _getint(sympy_abs(_dgexpr)) _dglover = lambdify_t(_dgexpr) dglover = implemented_function('dglover', _dglover) dglovert = lambdify_t(dglover(T)) del(_glover); del(_gexpr); del(dpos); del(_dgexpr); del(_dglover) # AFNI's HRF _aexpr = ((T >= 0) * T)**8.6 * sympy.exp(-T/0.547) _aexpr = _aexpr / _getint(_aexpr) _afni = lambdify_t(_aexpr) afni = implemented_function('afni', _afni) afnit = lambdify_t(afni(T)) nipy-0.3.0/nipy/modalities/fmri/setup.py000066400000000000000000000010721210344137400202500ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('fmri', parent_package, top_path) config.add_subpackage('tests') config.add_data_files('tests/*.npz') config.add_subpackage('fmristat') config.add_subpackage('spm') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) nipy-0.3.0/nipy/modalities/fmri/spm/000077500000000000000000000000001210344137400173355ustar00rootroot00000000000000nipy-0.3.0/nipy/modalities/fmri/spm/__init__.py000066400000000000000000000005671210344137400214560ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ An (approximate) version of SPM's run-level model for fMRI data Consists of an OLS pass through the data, followed by a pooled estimate of a covariance matrix constructed from a series expansion of an AR1 model, expanded in terms of rho. 
""" import model nipy-0.3.0/nipy/modalities/fmri/spm/correlation.py000066400000000000000000000030451210344137400222320ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import numpy as np from numpy.linalg import inv def ARcovariance(rho, n, cor=False, sigma=1.): """ Return covariance matrix of a sample of length n from an AR(p) process with parameters rho. INPUTS: rho -- an array of length p sigma -- standard deviation of the white noise """ rho = np.asarray(rho) p = rho.shape[0] invK = np.identity(n) for i in range(p): invK -= np.diag((rho[i] / sigma) * np.ones(n-i-1), k=-i-1) K = inv(invK) Q = np.dot(K, K.T) if cor: sd = np.sqrt(np.diag(Q)) sdout = np.multiply.outer(sd, sd) Q /= sd return Q def ARcomponents(rho, n, drho=0.05, cor=False, sigma=1): """ Numerically differentiate covariance matrices of AR(p) of length n with respect to AR parameters around the value rho. If drho is a vector, they are treated as steps in the numerical differentiation. """ rho = np.asarray(rho) drho = np.asarray(drho) p = rho.shape[0] value = [] if drho.shape == (): drho = np.ones(p, np.float) * drho drho = np.diag(drho) Q = ARcovariance(rho, n, cor=cor, sigma=sigma) value = [Q] for i in range(p): value.append((ARcovariance(rho + drho[i], n, cor=cor) - Q) / drho[i,i]) return np.asarray(value) if __name__ == "__main__": #print np.diag(ARcovariance([0.3], 100, cor=True), k=0) print len(ARcomponents([0.321],8, drho=0.02)) nipy-0.3.0/nipy/modalities/fmri/spm/example.py000066400000000000000000000006421210344137400213440ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import sys sys.path.insert(0, "..") import model ''' The following line does not match the ``model`` module - is it outdated? ''' #model.run() ## from io import data_path ## study = Study(root=io.data_path) ## subject = Subject(subj, study=study) ## runmodel = Run(subject, run) nipy-0.3.0/nipy/modalities/fmri/spm/model.py000066400000000000000000000075271210344137400210220ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import numpy as np import numpy.linalg as L from scipy.stats import f as FDbn from nipy.algorithms.statistics.models.regression import OLSModel, GLSModel from nipy.core.api import Image from nipy.modalities.fmri.fmristat import model as fmristat from nipy.modalities.fmri.fmristat.model import OLS from . import correlation, reml def Fmask(Fimg, dfnum, dfdenom, pvalue=1.0e-04): """ Create mask for use in estimating pooled covariance based on an F contrast. """ ## TODO check nipy.algorithms.statistics.models.contrast to see if rank is ## correctly set -- I don't think it is right now. print dfnum, dfdenom thresh = FDbn.ppf(pvalue, dfnum, dfdenom) return Image(np.greater(np.asarray(Fimg), thresh), Fimg.grid.copy()) def estimate_pooled_covariance(resid, ARtarget=[0.3], mask=None): """ Use SPM's REML implementation to estimate a pooled covariance matrix. Thresholds an F statistic at a marginal pvalue to estimate covariance matrix. 
""" resid n = resid[:].shape[0] components = correlation.ARcomponents(ARtarget, n) raw_sigma = 0 nvox = 0 for i in range(resid.shape[1]): d = np.asarray(resid[:,i]) d.shape = (d.shape[0], np.product(d.shape[1:])) keep = np.asarray(mask[i]) keep.shape = np.product(keep.shape) d = d.compress(keep, axis=1) raw_sigma += np.dot(d, d.T) nvox += d.shape[1] raw_sigma /= nvox C, h, _ = reml.reml(raw_sigma, components, n=nvox) return C class SecondStage(object): """ Parameters ---------- fmri_image : `FmriImageList` object returning 4D array from ``np.asarray``, having attribute ``volume_start_times`` (if `volume_start_times` is None), and such that ``object[0]`` returns something with attributes ``shape`` formula : :class:`nipy.algorithms.statistics.formula.Formula` sigma : outputs : volume_start_times : """ def __init__(self, fmri_image, formula, sigma, outputs=[], volume_start_times=None): self.fmri_image = fmri_image self.data = np.asarray(fmri_image) self.formula = formula self.outputs = outputs self.sigma = sigma if volume_start_times is None: self.volume_start_times = self.fmri_image.volume_start_times else: self.volume_start_times = volume_start_times def execute(self): def model_params(*args): return (self.sigma,) m = fmristat.model_generator(self.formula, self.data, self.volume_start_times, model_type=GLSModel, model_params=model_params) r = fmristat.results_generator(m) def reshape(i, x): """ To write output, arrays have to be reshaped -- this function does the appropriate reshaping for the two passes of fMRIstat. These passes are i) 'slices through the z-axis' ii) 'parcels of approximately constant AR1 coefficient' """ if len(x.shape) == 2: if type(i) is type(1): x.shape = (x.shape[0],) + self.fmri_image[0].shape[1:] if type(i) not in [type([]), type(())]: i = (i,) else: i = tuple(i) i = (slice(None,None,None),) + tuple(i) else: if type(i) is type(1): x.shape = self.fmri_image[0].shape[1:] return i, x o = fmristat.generate_output(self.outputs, r, reshape=reshape) nipy-0.3.0/nipy/modalities/fmri/spm/reml.py000066400000000000000000000107021210344137400206460ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import numpy as np import numpy.linalg as npl def orth(X, tol=1.0e-07): """ Compute orthonormal basis for the column span of X. Rank is determined by zeroing all singular values, u, less than or equal to tol*u.max(). INPUTS: X -- n-by-p matrix OUTPUTS: B -- n-by-rank(X) matrix with orthonormal columns spanning the column rank of X """ B, u, _ = npl.svd(X, full_matrices=False) nkeep = np.greater(u, tol*u.max()).astype(np.int).sum() return B[:,:nkeep] def reml(sigma, components, design=None, n=1, niter=128, penalty_cov=np.exp(-32), penalty_mean=0): """ Adapted from spm_reml.m ReML estimation of covariance components from sigma using design matrix. INPUTS: sigma -- m-by-m covariance matrix components -- q-by-m-by-m array of variance components mean of sigma is modeled as a some over components[i] design -- m-by-p design matrix whose effect is to be removed for ReML. If None, no effect removed (???) n -- degrees of freedom of sigma penalty_cov -- quadratic penalty to be applied in Fisher algorithm. If the value is a float, f, the penalty is f * identity(m). If the value is a 1d array, this is the diagonal of the penalty. penalty_mean -- mean of quadratic penalty to be applied in Fisher algorithm. If the value is a float, f, the location is f * np.ones(m). 
OUTPUTS: C -- estimated mean of sigma h -- array of length q representing coefficients of variance components cov_h -- estimated covariance matrix of h """ # initialise coefficient, gradient, Hessian Q = components PQ = np.zeros(Q.shape) q = Q.shape[0] m = Q.shape[1] # coefficient h = np.array([np.diag(Q[i]).mean() for i in range(q)]) ## SPM initialization ## h = np.array([np.any(np.diag(Q[i])) for i in range(q)]).astype(np.float) C = np.sum([h[i] * Q[i] for i in range(Q.shape[0])], axis=0) # gradient in Fisher algorithm dFdh = np.zeros(q) # Hessian in Fisher algorithm dFdhh = np.zeros((q,q)) # penalty terms penalty_cov = np.asarray(penalty_cov) if penalty_cov.shape == (): penalty_cov = penalty_cov * np.identity(q) elif penalty_cov.shape == (q,): penalty_cov = np.diag(penalty_cov) penalty_mean = np.asarray(penalty_mean) if penalty_mean.shape == (): penalty_mean = np.ones(q) * penalty_mean # compute orthonormal basis of design space if design is not None: X = orth(design) else: X = None _iter = 0 _F = np.inf while True: # Current estimate of mean parameter iC = npl.inv(C + np.identity(m) / np.exp(32)) # E-step: conditional covariance if X is not None: iCX = np.dot(iC, X) Cq = npl.inv(X.T, iCX) P = iC - np.dot(iCX, np.dot(Cq, iCX)) else: P = iC # M-step: ReML estimate of hyperparameters # Gradient dF/dh (first derivatives) # Expected curvature (second derivatives) U = np.identity(m) - np.dot(P, sigma) / n for i in range(q): PQ[i] = np.dot(P, Q[i]) dFdh[i] = -(PQ[i] * U).sum() * n / 2 for j in range(i+1): dFdhh[i,j] = -(PQ[i]*PQ[j]).sum() * n / 2 dFdhh[j,i] = dFdhh[i,j] # Enforce penalties: dFdh = dFdh - np.dot(penalty_cov, h - penalty_mean) dFdhh = dFdhh - penalty_cov dh = npl.solve(dFdhh, dFdh) h -= dh C = np.sum([h[i] * Q[i] for i in range(Q.shape[0])], axis=0) df = (dFdh * dh).sum() if np.fabs(df) < 1.0e-01: break _iter += 1 if _iter >= niter: break return C, h, -dFdhh if __name__ == "__main__": import numpy.random as R X = R.standard_normal((500,3)) Q = np.array([np.identity(3), np.array([[0,1,0],[1,0,0],[0,0,1]]), np.array([[1,0,0],[0,1,1],[0,1,1]])], np.float) print reml(np.dot(X.T,X), Q), nipy-0.3.0/nipy/modalities/fmri/spm/trace.py000066400000000000000000000040331210344137400210050ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import numpy as np from numpy.linalg import svd from reml import orth def _trace(x): """ Trace of a square 2d array. Does not check shape of x to ensure it's square. """ return np.diag(x).sum() def _frobenius(A, B): """ Frobenius inner product of A and B: Trace(A'B) Does not check shape of x to ensure it's square. """ return (A * B).sum() def trRV(X=None, V=None): """ If V is None it defaults to identity. If X is None, it defaults to the 0-dimensional subspace, i.e. R is the identity. 
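    The returned pair is (trace(RV), trace(RVRV)), with R = I - u u' and u an
    orthonormal basis (via ``orth``) for the column span of X.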
>>> import numpy as np >>> from numpy.random import standard_normal >>> >>> X = standard_normal((100, 4)) >>> np.allclose(trRV(X), (96.0, 96.0)) True >>> V = np.identity(100) >>> np.allclose(trRV(X), (96.0, 96.0)) True >>> >>> X[:,3] = X[:,1] + X[:,2] >>> np.allclose(trRV(X), (97.0, 97.0)) True >>> >>> u = orth(X) >>> V = np.dot(u, u.T) >>> print np.allclose(trRV(X, V), 0) True """ n, p = X.shape if V is None: V = np.identity(n) if X is None: if V is None: trRV = trRVRV = n else: trRV = _trace(V) trRVRV = _frobenius(V, V) else: u = orth(X) if V is None: trRV = trRVRV = n - u.shape[1] else: Vu = np.dot(V, u) utVu = np.dot(u.T, Vu) trRV = _trace(V) - _frobenius(u, Vu) trRVRV = _frobenius(V, V) - 2 * _frobenius(Vu, Vu) + _frobenius(utVu, utVu) return trRV, trRVRV if __name__ == "__main__": from numpy.random import standard_normal X = standard_normal((100, 4)) print trRV(X) # should be (96,96) V = np.identity(100) print trRV(X, V) # should be (96,96) X[:,3] = X[:,1] + X[:,2] print trRV(X, V) # should be (97,97) u = orth(X) V = np.dot(u, u.T) print trRV(X, V) # should be (0,0) nipy-0.3.0/nipy/modalities/fmri/tests/000077500000000000000000000000001210344137400177005ustar00rootroot00000000000000nipy-0.3.0/nipy/modalities/fmri/tests/__init__.py000066400000000000000000000000001210344137400217770ustar00rootroot00000000000000nipy-0.3.0/nipy/modalities/fmri/tests/spm_dmtx.npz000066400000000000000000000151621210344137400222710ustar00rootroot00000000000000PK^"=il arr_1.npyNUMPYF{'descr': 'toR?"<5`˘?}/ļ?Tj#??Ya?GI?TQ?L?uA?9O0?08?2v??v?=?$;?H錀P?,iB5F.jgWG*VnQn}? :??\? ?QR?jZV?Op=R?H?B3斺?Wb?bؒ^?P`?cHJ?1p ?o#?[^7Ek*-OSM*VnQn}? :??\? ?QR?jZV?Op=R?H?B3斺?Wb?bؒ^?P`?cHJ?1p ?o#?[^7E-jcH|'0ŨEAJ +uD'M&g<tI_ָg_toR?"<5`˘?}/ļ?Tj#??Ya?GI?TQ?L?uA?9O0?08?2v??v?=?$;?H錀P?,iB5F.jgWG*VnQn}? :??\? ?QR?jZV?Op=R?H?B3斺?Wb?bؒ^?P`?cHJ?1p ?o#?[^7E-jcH|'0ŨEAJ +uD'M&g<tI_ָg_U A!c$;iЎCGi&SjTB{7Uxqzfe`aLiZN2N@1$p0X^toR?"<5`˘?}/ļ?Tj#??Ya?GI?TQ?L?uA?9O0?08?2v??v?=?$;?H錀P?,iBtoR?"<5`˘?}/ļ?Tj#??Ya?GI?TQ?L??A?HFm5?B9f[?[jX?`xT3?~5lvKy0@k#*-sؾ>2* ?墻?3$)?d[?Ճ? 
T?T?3Q??B3斺?Wb?bؒ^?P`?cHJ?1p ?o#?[^7E-jcH|'0ŨEAJ +uD'M&g<tI_ָg_U A!c$;iЎCGi&SjTB{7Uxqzfe`aLiZN2N@????????????????????????????????????????????????????????????????????????????????????????????????????PK^"=}Wr arr_0.npyNUMPYF{'descr': 'B*s?gJ?16 ??*4]??UЈ(a?~'<,?B?k,_?Wۢ?YH?}Cy{?GI f,]f䏿b?302  c9בLGf]^@gh!'~)^?!o#=4?0??in7?ϦR/?`Jۺ?.?Y*?z[?}Cy{?GI f,]f䏿b?302  c9בLGf]^@gh!'~)^?!o#=4?0??in7?ϦR/?`Jۺ?.?Y*?z[?}Cy{?GI f,]f䏿b?302  c9בLGf]e`(2}1vQcw>TpsfhiLg_qq_uNT KӃ@"'AB*s?gJ?16 ??*4]??UЈ(a?~'<,?B?k,_?Wۢ?YH?}Cy{?GI f,]f䏿b?302  c9בLGf]^@gh!'~)^?!o#=4?0??in7?ϦR/?`Jۺ?.?Y*?z[?}Cy{?GI f,]f䏿b?302  c9בLGf]e`(2}1vQcw>TpsfhiLg_qq_uNT KӃ@"'AB*s?gJ?16 ??*4]??UЈ(a?~'<,?B?k,_?Wۢ?YH?}Cy{?GI f,]f䏿b?302  c9בLGf]B*s?gJ?16 ??*4]??UЈ(a?~'<,?B?k,_?=_ۢ?;?rO?ƾ?4pW΄_?3=Ƙ?J?tTB?qz G?(F?|s^?]^~?gZTpsfhiLg_qq_uNT KӃ@"'A default names frametimes = np.arange(100) f, names = _convolve_regressors(paradigm, 'canonical', frametimes) assert_equal(names, ['c0', 'c1']) def test_dmtx1b(): # idem test_dmtx1, but different test tr = 1.0 frametimes = np.linspace(0, 127 * tr, 128) paradigm = basic_paradigm() hrf_model = 'Canonical' X, names= dmtx_light(frametimes, paradigm, hrf_model=hrf_model, drift_model='polynomial', drift_order=3) print np.shape(X) assert_true(X.shape == (128, 7)) def test_dmtx1c(): # idem test_dmtx1, but different test tr = 1.0 frametimes = np.linspace(0, 127 *tr, 128) paradigm = basic_paradigm() hrf_model = 'Canonical' X,names = dmtx_light(frametimes, paradigm, hrf_model=hrf_model, drift_model='polynomial', drift_order=3) assert_true((X[:, - 1] == 1).all()) def test_dmtx1d(): # idem test_dmtx1, but different test tr = 1.0 frametimes = np.linspace(0, 127 * tr, 128) paradigm = basic_paradigm() hrf_model = 'Canonical' X,names= dmtx_light(frametimes, paradigm, hrf_model=hrf_model, drift_model='polynomial', drift_order=3) assert_true((np.isnan(X) == 0).all()) def test_dmtx2(): # idem test_dmtx1 with a different drift term tr = 1.0 frametimes = np.linspace(0, 127 * tr, 128) paradigm = basic_paradigm() hrf_model = 'Canonical' X, names= dmtx_light(frametimes, paradigm, hrf_model=hrf_model, drift_model='cosine', hfcut=63) assert_true(len(names) == 8) def test_dmtx3(): # idem test_dmtx1 with a different drift term tr = 1.0 frametimes = np.linspace(0, 127 * tr, 128) paradigm = basic_paradigm() hrf_model = 'Canonical' X,names= dmtx_light(frametimes, paradigm, hrf_model=hrf_model, drift_model='blank') print names assert_true(len(names) == 4) def test_dmtx4(): # idem test_dmtx1 with a different hrf model tr = 1.0 frametimes = np.linspace(0, 127 * tr, 128) paradigm = basic_paradigm() hrf_model = 'Canonical With Derivative' X, names= dmtx_light(frametimes, paradigm, hrf_model=hrf_model, drift_model='polynomial', drift_order=3) assert_true(len(names) == 10) def test_dmtx5(): # idem test_dmtx1 with a block paradigm tr = 1.0 frametimes = np.linspace(0, 127 * tr, 128) paradigm = block_paradigm() hrf_model = 'Canonical' X, names= dmtx_light(frametimes, paradigm, hrf_model=hrf_model, drift_model='polynomial', drift_order=3) assert_true(len(names) == 7) def test_dmtx6(): # idem test_dmtx1 with a block paradigm and the hrf derivative tr = 1.0 frametimes = np.linspace(0, 127 * tr, 128) paradigm = block_paradigm() hrf_model = 'Canonical With Derivative' X, names= dmtx_light(frametimes, paradigm, hrf_model=hrf_model, drift_model='polynomial', drift_order=3) assert_true(len(names) == 10) def test_dmtx7(): # idem test_dmtx1, but odd paradigm tr = 1.0 frametimes = np.linspace(0, 127 * tr, 128) conditions = [0, 0, 0, 1, 1, 1, 3, 3, 3] # no 
condition 'c2' onsets = [30, 70, 100, 10, 30, 90, 30, 40, 60] paradigm = EventRelatedParadigm(conditions, onsets) hrf_model = 'Canonical' X, names = dmtx_light(frametimes, paradigm, hrf_model=hrf_model, drift_model='polynomial', drift_order=3) assert_true(len(names) == 7) def test_dmtx8(): # basic test based on basic_paradigm and FIR tr = 1.0 frametimes = np.linspace(0, 127 * tr, 128) paradigm = basic_paradigm() hrf_model = 'FIR' X, names= dmtx_light(frametimes, paradigm, hrf_model=hrf_model, drift_model='polynomial', drift_order=3) assert_true(len(names) == 7) def test_dmtx9(): # basic test based on basic_paradigm and FIR tr = 1.0 frametimes = np.linspace(0, 127 * tr, 128) paradigm = basic_paradigm() hrf_model = 'FIR' X, names = dmtx_light(frametimes, paradigm, hrf_model=hrf_model, drift_model='polynomial', drift_order=3, fir_delays=range(1, 5)) assert_true(len(names) == 16) def test_dmtx10(): # Check that the first column o FIR design matrix is OK tr = 1.0 frametimes = np.linspace(0, 127 * tr, 128) paradigm = basic_paradigm() hrf_model = 'FIR' X, names = dmtx_light(frametimes, paradigm, hrf_model=hrf_model, drift_model='polynomial', drift_order=3, fir_delays=range(1, 5)) onset = paradigm.onset[paradigm.con_id == 'c0'].astype(np.int) assert_true(np.all((X[onset + 1, 0] == 1))) def test_dmtx11(): # check that the second column of the FIR design matrix is OK indeed tr = 1.0 frametimes = np.linspace(0, 127 * tr, 128) paradigm = basic_paradigm() hrf_model = 'FIR' X, names = dmtx_light(frametimes, paradigm, hrf_model=hrf_model, drift_model='polynomial', drift_order=3, fir_delays=range(1, 5)) onset = paradigm.onset[paradigm.con_id == 'c0'].astype(np.int) assert_true(np.all(X[onset + 3, 2] == 1)) def test_dmtx12(): # check that the 11th column of a FIR design matrix is indeed OK tr = 1.0 frametimes = np.linspace(0, 127 * tr,128) paradigm = basic_paradigm() hrf_model = 'FIR' X, names = dmtx_light(frametimes, paradigm, hrf_model=hrf_model, drift_model='polynomial', drift_order=3, fir_delays=range(1, 5)) onset = paradigm.onset[paradigm.con_id == 'c2'].astype(np.int) assert_true(np.all(X[onset + 4, 11] == 1)) def test_dmtx13(): # Check that the fir_duration is well taken into account tr = 1.0 frametimes = np.linspace(0, 127 * tr, 128) paradigm = basic_paradigm() hrf_model = 'FIR' X, names = dmtx_light(frametimes, paradigm, hrf_model=hrf_model, drift_model='polynomial', drift_order=3, fir_delays=range(1, 5)) onset = paradigm.onset[paradigm.con_id == 'c0'].astype(np.int) assert_true(np.all(X[onset + 1, 0] == 1)) def test_dmtx14(): # Check that the first column o FIR design matrix is OK after a 1/2 # time shift tr = 1.0 frametimes = np.linspace(0, 127 * tr, 128) + tr / 2 paradigm = basic_paradigm() hrf_model = 'FIR' X, names = dmtx_light(frametimes, paradigm, hrf_model=hrf_model, drift_model='polynomial', drift_order=3, fir_delays=range(1, 5)) onset = paradigm.onset[paradigm.con_id == 'c0'].astype(np.int) assert_true(np.all(X[onset + 1, 0] == 1)) def test_dmtx15(): # basic test based on basic_paradigm, plus user supplied regressors tr = 1.0 frametimes = np.linspace(0, 127 * tr, 128) paradigm = basic_paradigm() hrf_model = 'Canonical' ax = np.random.randn(128, 4) X, names = dmtx_light(frametimes, paradigm, hrf_model=hrf_model, drift_model='polynomial', drift_order=3, add_regs=ax) assert(len(names) == 11) assert(X.shape[1] == 11) def test_dmtx16(): # Check that additional regressors are put at the right place tr = 1.0 frametimes = np.linspace(0, 127 * tr, 128) paradigm = basic_paradigm() hrf_model = 
'Canonical' ax = np.random.randn(128, 4) X, names = dmtx_light(frametimes, paradigm, hrf_model=hrf_model, drift_model='polynomial', drift_order=3, add_regs=ax) assert_almost_equal(X[:, 3: 7], ax) def test_dmtx17(): # Test the effect of scaling on the events tr = 1.0 frametimes = np.linspace(0, 127 * tr, 128) paradigm = modulated_event_paradigm() hrf_model = 'Canonical' X, names = dmtx_light(frametimes, paradigm, hrf_model=hrf_model, drift_model='polynomial', drift_order=3) ct = paradigm.onset[paradigm.con_id == 'c0'].astype(np.int) + 1 assert((X[ct, 0] > 0).all()) def test_dmtx18(): # Test the effect of scaling on the blocks tr = 1.0 frametimes = np.linspace(0, 127 * tr, 128) paradigm = modulated_block_paradigm() hrf_model = 'Canonical' X, names = dmtx_light(frametimes, paradigm, hrf_model=hrf_model, drift_model='polynomial', drift_order=3) ct = paradigm.onset[paradigm.con_id == 'c0'].astype(np.int) + 3 assert((X[ct, 0] > 0).all()) def test_dmtx19(): # Test the effect of scaling on a FIR model tr = 1.0 frametimes = np.linspace(0, 127 * tr, 128) paradigm = modulated_event_paradigm() hrf_model = 'FIR' X, names = dmtx_light(frametimes, paradigm, hrf_model=hrf_model, drift_model='polynomial', drift_order=3, fir_delays=range(1, 5)) idx = paradigm.onset[paradigm.con_id == 0].astype(np.int) assert_true((X[idx + 1, 0] == X[idx + 2, 1]).all()) def test_dmtx20(): # Test for commit 10662f7 frametimes = np.arange(0, 127) # integers paradigm = modulated_event_paradigm() X, names = dmtx_light(frametimes, paradigm, hrf_model='canonical', drift_model='cosine') # check that the drifts are not constant assert_true(np.all(np.diff(X[:, -2]) != 0)) def test_fir_block(): # tets FIR models on block designs bp = block_paradigm() tr = 1.0 frametimes = np.linspace(0, 127 * tr, 128) X, names = dmtx_light(frametimes, bp, hrf_model='fir', drift_model='blank', fir_delays=range(0, 4)) idx = bp.onset[bp.con_id == 1].astype(np.int) assert_true(X.shape == (128, 13)) assert_true((X[idx, 4] == 1).all()) assert_true((X[idx + 1, 5] == 1).all()) assert_true((X[idx + 2, 6] == 1).all()) assert_true((X[idx + 3, 7] == 1).all()) def test_csv_io(): # test the csv io on design matrices tr = 1.0 frametimes = np.linspace(0, 127 * tr, 128) paradigm = modulated_event_paradigm() DM = make_dmtx(frametimes, paradigm, hrf_model='Canonical', drift_model='polynomial', drift_order=3) path = 'dmtx.csv' with InTemporaryDirectory(): DM.write_csv(path) DM2 = dmtx_from_csv(path) assert_almost_equal(DM.matrix, DM2.matrix) assert_equal(DM.names, DM2.names) def test_spm_1(): # Check that the nipy design matrix is close enough to the SPM one # (it cannot be identical, because the hrf shape is different) tr = 1.0 frametimes = np.linspace(0, 99, 100) conditions = ['c0', 'c0', 'c0', 'c1', 'c1', 'c1', 'c2', 'c2', 'c2'] onsets = [30, 50, 70, 10, 30, 80, 30, 40, 60] hrf_model = 'Canonical' paradigm = EventRelatedParadigm(conditions, onsets) X1 = make_dmtx(frametimes, paradigm, drift_model='blank') spm_dmtx = DMTX['arr_0'] assert ((spm_dmtx - X1.matrix) ** 2).sum() / (spm_dmtx ** 2).sum() < .1 def test_spm_2(): # Check that the nipy design matrix is close enough to the SPM one # (it cannot be identical, because the hrf shape is different) tr = 1.0 frametimes = np.linspace(0, 99, 100) conditions = ['c0', 'c0', 'c0', 'c1', 'c1', 'c1', 'c2', 'c2', 'c2'] onsets = [30, 50, 70, 10, 30, 80, 30, 40, 60] duration = 10 * np.ones(9) hrf_model = 'Canonical' paradigm = BlockParadigm(conditions, onsets, duration) X1 = make_dmtx(frametimes, paradigm, drift_model='blank') 
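    # DMTX (defined at module level, presumably loaded from the bundled SPM
    # reference .npz file) holds the SPM-generated design matrices; 'arr_1'
    # below is the block-design reference that X1 is compared against.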
spm_dmtx = DMTX['arr_1'] assert ((spm_dmtx - X1.matrix) ** 2).sum() / (spm_dmtx ** 2).sum() < .1 if __name__ == "__main__": import nose nose.run(argv=['', __file__]) nipy-0.3.0/nipy/modalities/fmri/tests/test_fmri.py000066400000000000000000000041251210344137400222500ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from __future__ import with_statement import gc import warnings import numpy as np from nipy.modalities.fmri.api import axis0_generator, FmriImageList from nipy.core.api import parcels, Image, AffineTransform as AfT from nipy.io.api import load_image, save_image from nose.tools import assert_equal, assert_true from nibabel.tmpdirs import InTemporaryDirectory from nipy.testing import funcfile def setup(): # Suppress warnings during tests to reduce noise warnings.simplefilter("ignore") def teardown(): # Clear list of warning filters warnings.resetwarnings() def test_write(): fname = 'myfile.nii' img = load_image(funcfile) with InTemporaryDirectory(): save_image(img, fname) test = FmriImageList.from_image(load_image(fname)) assert_equal(test[0].affine.shape, (4,4)) assert_equal(img[0].affine.shape, (5,4)) # Check the affine... A = np.identity(4) A[:3,:3] = img[:,:,:,0].affine[:3,:3] A[:3,-1] = img[:,:,:,0].affine[:3,-1] assert_true(np.allclose(test[0].affine, A)) del test def test_iter(): img = load_image(funcfile) img_shape = img.shape exp_shape = (img_shape[0],) + img_shape[2:] j = 0 for i, d in axis0_generator(img.get_data()): j += 1 assert_equal(d.shape, exp_shape) del(i); gc.collect() assert_equal(j, img_shape[1]) def test_subcoordmap(): img = load_image(funcfile) subcoordmap = img[3].coordmap xform = img.affine[:,1:] assert_true(np.allclose(subcoordmap.affine[1:], xform[1:])) assert_true(np.allclose(subcoordmap.affine[0], [0,0,0,img.coordmap([3,0,0,0])[0]])) def test_labels1(): img = load_image(funcfile) data = img.get_data() parcelmap = Image(img[0].get_data(), AfT('kji', 'zyx', np.eye(4))) parcelmap = (parcelmap.get_data() * 100).astype(np.int32) v = 0 for i, d in axis0_generator(data, parcels(parcelmap)): v += d.shape[1] assert_equal(v, parcelmap.size) nipy-0.3.0/nipy/modalities/fmri/tests/test_glm.py000066400000000000000000000257431210344137400221030ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Test the glm utilities. 
""" from __future__ import with_statement import numpy as np from nibabel import load, Nifti1Image, save from ..glm import GeneralLinearModel, data_scaling, FMRILinearModel from nose.tools import assert_true, assert_equal, assert_raises from numpy.testing import (assert_array_almost_equal, assert_almost_equal, assert_array_equal) from nibabel.tmpdirs import InTemporaryDirectory from nipy.testing import funcfile def write_fake_fmri_data(shapes, rk=3, affine=np.eye(4)): mask_file, fmri_files, design_files = 'mask.nii', [], [] for i, shape in enumerate(shapes): fmri_files.append('fmri_run%d.nii' %i) data = 100 + np.random.randn(*shape) data[0] -= 10 save(Nifti1Image(data, affine), fmri_files[-1]) design_files.append('dmtx_%d.npz' %i) np.savez(design_files[-1], np.random.randn(shape[3], rk)) save(Nifti1Image((np.random.rand(*shape[:3]) > .5).astype(np.int8), affine), mask_file) return mask_file, fmri_files, design_files def generate_fake_fmri_data(shapes, rk=3, affine=np.eye(4)): fmri_data = [] design_matrices = [] for i, shape in enumerate(shapes): data = 100 + np.random.randn(*shape) data[0] -= 10 fmri_data.append(Nifti1Image(data, affine)) design_matrices.append(np.random.randn(shape[3], rk)) mask = Nifti1Image((np.random.rand(*shape[:3]) > .5).astype(np.int8), affine) return mask, fmri_data, design_matrices def test_high_level_glm_with_paths(): shapes, rk = ((5, 6, 4, 20), (5, 6, 4, 19)), 3 with InTemporaryDirectory(): mask_file, fmri_files, design_files = write_fake_fmri_data(shapes, rk) multi_session_model = FMRILinearModel(fmri_files, design_files, mask_file) multi_session_model.fit() z_image, = multi_session_model.contrast([np.eye(rk)[1]] * 2) assert_array_equal(z_image.get_affine(), load(mask_file).get_affine()) assert_true(z_image.get_data().std() < 3.) # Delete objects attached to files to avoid WindowsError when deleting # temporary directory del z_image, fmri_files, multi_session_model def test_high_level_glm_with_data(): shapes, rk = ((7, 6, 5, 20), (7, 6, 5, 19)), 3 mask, fmri_data, design_matrices = write_fake_fmri_data(shapes, rk) # without mask multi_session_model = FMRILinearModel(fmri_data, design_matrices, mask=None) multi_session_model.fit() z_image, = multi_session_model.contrast([np.eye(rk)[1]] * 2) assert_equal(np.sum(z_image.get_data() == 0), 0) # compute the mask multi_session_model = FMRILinearModel(fmri_data, design_matrices, m=0, M=.01, threshold=0.) multi_session_model.fit() z_image, = multi_session_model.contrast([np.eye(rk)[1]] * 2) assert_true(z_image.get_data().std() < 3. ) # with mask multi_session_model = FMRILinearModel(fmri_data, design_matrices, mask) multi_session_model.fit() z_image, effect_image, variance_image= multi_session_model.contrast( [np.eye(rk)[:2]] * 2, output_effects=True, output_variance=True) assert_array_equal(z_image.get_data() == 0., load(mask).get_data() == 0.) assert_true( (variance_image.get_data()[load(mask).get_data() > 0, 0] > .001).all()) # without scaling multi_session_model.fit(do_scaling=False) z_image, = multi_session_model.contrast([np.eye(rk)[1]] * 2) assert_true(z_image.get_data().std() < 3. 
) def test_high_level_glm_contrasts(): shapes, rk = ((5, 6, 7, 20), (5, 6, 7, 19)), 3 mask, fmri_data, design_matrices = write_fake_fmri_data(shapes, rk) multi_session_model = FMRILinearModel(fmri_data, design_matrices, mask=None) multi_session_model.fit() z_image, = multi_session_model.contrast([np.eye(rk)[:2]] * 2, contrast_type='tmin-conjunction') z1, = multi_session_model.contrast([np.eye(rk)[:1]] * 2) z2, = multi_session_model.contrast([np.eye(rk)[1:2]] * 2) assert_true((z_image.get_data() < np.maximum( z1.get_data(), z2.get_data())).all()) def test_high_level_glm_null_contrasts(): shapes, rk = ((5, 6, 7, 20), (5, 6, 7, 19)), 3 mask, fmri_data, design_matrices = generate_fake_fmri_data(shapes, rk) multi_session_model = FMRILinearModel( fmri_data, design_matrices, mask=None) multi_session_model.fit() single_session_model = FMRILinearModel( fmri_data[:1], design_matrices[:1], mask=None) single_session_model.fit() z1, = multi_session_model.contrast([np.eye(rk)[:1], np.zeros((1, rk))]) z2, = single_session_model.contrast([np.eye(rk)[:1]]) np.testing.assert_almost_equal(z1.get_data(), z2.get_data()) def ols_glm(n=100, p=80, q=10): X, Y = np.random.randn(p, q), np.random.randn(p, n) glm = GeneralLinearModel(X) glm.fit(Y, 'ols') return glm, n, p, q def ar1_glm(n=100, p=80, q=10): X, Y = np.random.randn(p, q), np.random.randn(p, n) glm = GeneralLinearModel(X) glm.fit(Y, 'ar1') return glm, n, p, q def test_glm_ols(): mulm, n, p, q = ols_glm() assert_array_equal(mulm.labels_, np.zeros(n)) assert_equal(mulm.results_.keys(), [0.0]) assert_equal(mulm.results_[0.0].theta.shape, (q, n)) assert_almost_equal(mulm.results_[0.0].theta.mean(), 0, 1) assert_almost_equal(mulm.results_[0.0].theta.var(), 1. / p, 1) def test_glm_beta(): mulm, n, p, q = ols_glm() assert_equal(mulm.get_beta().shape, (q, n)) assert_equal(mulm.get_beta([0, -1]).shape, (2, n)) assert_equal(mulm.get_beta(6).shape, (1, n)) def test_glm_mse(): mulm, n, p, q = ols_glm() mse = mulm.get_mse() assert_array_almost_equal(mse, np.ones(n), 0) def test_glm_logL(): mulm, n, p, q = ols_glm() logL = mulm.get_logL() assert_array_almost_equal(logL / n, - p * 1.41 * np.ones(n) / n, 0) def test_glm_ar(): mulm, n, p, q = ar1_glm() assert_equal(len(mulm.labels_), n) assert_true(len(mulm.results_.keys()) > 1) tmp = sum([mulm.results_[key].theta.shape[1] for key in mulm.results_.keys()]) assert_equal(tmp, n) def test_Tcontrast(): mulm, n, p, q = ar1_glm() cval = np.hstack((1, np.ones(9))) z_vals = mulm.contrast(cval).z_score() assert_almost_equal(z_vals.mean(), 0, 0) assert_almost_equal(z_vals.std(), 1, 0) def test_Fcontrast_1d(): mulm, n, p, q = ar1_glm() cval = np.hstack((1, np.ones(9))) con = mulm.contrast(cval, contrast_type='F') z_vals = con.z_score() assert_almost_equal(z_vals.mean(), 0, 0) assert_almost_equal(z_vals.std(), 1, 0) def test_Fcontrast_nd(): mulm, n, p, q = ar1_glm() cval = np.eye(q)[:3] con = mulm.contrast(cval) assert_equal(con.contrast_type, 'F') z_vals = con.z_score() assert_almost_equal(z_vals.mean(), 0, 0) assert_almost_equal(z_vals.std(), 1, 0) def test_Fcontrast_1d_old(): mulm, n, p, q = ols_glm() cval = np.hstack((1, np.ones(9))) con = mulm.contrast(cval, contrast_type='F') z_vals = con.z_score() assert_almost_equal(z_vals.mean(), 0, 0) assert_almost_equal(z_vals.std(), 1, 0) def test_Fcontrast_nd_ols(): mulm, n, p, q = ols_glm() cval = np.eye(q)[:3] con = mulm.contrast(cval) assert_equal(con.contrast_type, 'F') z_vals = con.z_score() assert_almost_equal(z_vals.mean(), 0, 0) assert_almost_equal(z_vals.std(), 1, 0) def 
test_t_contrast_add(): mulm, n, p, q = ols_glm() c1, c2 = np.eye(q)[0], np.eye(q)[1] con = mulm.contrast(c1) + mulm.contrast(c2) z_vals = con.z_score() assert_almost_equal(z_vals.mean(), 0, 0) assert_almost_equal(z_vals.std(), 1, 0) def test_F_contrast_add(): mulm, n, p, q = ar1_glm() # first test with independent contrast c1, c2 = np.eye(q)[:2], np.eye(q)[2:4] con = mulm.contrast(c1) + mulm.contrast(c2) z_vals = con.z_score() assert_almost_equal(z_vals.mean(), 0, 0) assert_almost_equal(z_vals.std(), 1, 0) # first test with dependent contrast con1 = mulm.contrast(c1) con2 = mulm.contrast(c1) + mulm.contrast(c1) assert_almost_equal(con1.effect * 2, con2.effect) assert_almost_equal(con1.variance * 2, con2.variance) assert_almost_equal(con1.stat() * 2, con2.stat()) def test_t_contrast_mul(): mulm, n, p, q = ar1_glm() con1 = mulm.contrast(np.eye(q)[0]) con2 = con1 * 2 assert_almost_equal(con1.z_score(), con2.z_score()) assert_almost_equal(con1.effect * 2, con2.effect) def test_F_contrast_mul(): mulm, n, p, q = ar1_glm() con1 = mulm.contrast(np.eye(q)[:4]) con2 = con1 * 2 assert_almost_equal(con1.z_score(), con2.z_score()) assert_almost_equal(con1.effect * 2, con2.effect) def test_t_contrast_values(): mulm, n, p, q = ar1_glm(n=1) cval = np.eye(q)[0] con = mulm.contrast(cval) t_ref = mulm.results_.values()[0].Tcontrast(cval).t assert_almost_equal(np.ravel(con.stat()), t_ref) def test_F_contrast_calues(): mulm, n, p, q = ar1_glm(n=1) cval = np.eye(q)[:3] con = mulm.contrast(cval) F_ref = mulm.results_.values()[0].Fcontrast(cval).F # Note that the values are not strictly equal, # this seems to be related to a bug in Mahalanobis assert_almost_equal(np.ravel(con.stat()), F_ref, 3) def test_tmin(): mulm, n, p, q = ar1_glm(n=1) c1, c2, c3 = np.eye(q)[0], np.eye(q)[1], np.eye(q)[2] t1, t2, t3 = mulm.contrast(c1).stat(), mulm.contrast(c2).stat(), \ mulm.contrast(c3).stat() tmin = min(t1, t2, t3) con = mulm.contrast(np.eye(q)[:3], 'tmin-conjunction') assert_equal(con.stat(), tmin) def test_scaling(): """Test the scaling function""" shape = (400, 10) u = np.random.randn(*shape) mean = 100 * np.random.rand(shape[1]) Y = u + mean Y, mean_ = data_scaling(Y) assert_almost_equal(Y.mean(0), 0) assert_almost_equal(mean_, mean, 0) assert_true(Y.std() > 1) def test_fmri_inputs(): # Test processing of FMRI inputs func_img = load(funcfile) T = func_img.shape[-1] des = np.ones((T, 1)) des_fname = 'design.npz' with InTemporaryDirectory(): np.savez(des_fname, des) for fi in func_img, funcfile: for d in des, des_fname: fmodel = FMRILinearModel(fi, d, mask='compute') fmodel = FMRILinearModel([fi], d, mask=None) fmodel = FMRILinearModel(fi, [d], mask=None) fmodel = FMRILinearModel([fi], [d], mask=None) fmodel = FMRILinearModel([fi, fi], [d, d], mask=None) fmodel = FMRILinearModel((fi, fi), (d, d), mask=None) assert_raises(ValueError, FMRILinearModel, [fi, fi], d, mask=None) assert_raises(ValueError, FMRILinearModel, fi, [d, d], mask=None) if __name__ == "__main__": import nose nose.run(argv=['', __file__]) nipy-0.3.0/nipy/modalities/fmri/tests/test_hemodynamic_models.py000066400000000000000000000147751210344137400251670ustar00rootroot00000000000000import numpy as np from numpy.testing import assert_almost_equal, assert_equal, TestCase from ..hemodynamic_models import ( spm_hrf, spm_time_derivative, spm_dispersion_derivative, resample_regressor, _orthogonalize, sample_condition, _regressor_names, _hrf_kernel, glover_hrf, glover_time_derivative, compute_regressor) def test_spm_hrf(): """ test that the spm_hrf is correctly 
normalized and has correct length """ h = spm_hrf(2.0) assert_almost_equal(h.sum(), 1) assert len(h) == 256 def test_spm_hrf_derivative(): """ test that the spm_hrf is correctly normalized and has correct length """ h = spm_time_derivative(2.0) assert_almost_equal(h.sum(), 0) assert len(h) == 256 h = spm_dispersion_derivative(2.0) assert_almost_equal(h.sum(), 0) assert len(h) == 256 def test_glover_hrf(): """ test that the spm_hrf is correctly normalized and has correct length """ h = glover_hrf(2.0) assert_almost_equal(h.sum(), 1) assert len(h) == 256 def test_glover_time_derivative(): """ test that the spm_hrf is correctly normalized and has correct length """ h = glover_time_derivative(2.0) assert_almost_equal(h.sum(), 0) assert len(h) == 256 def test_resample_regressor(): """ test regressor resampling on a linear function """ x = np.linspace(0, 1, 200) y = np.linspace(0, 1, 30) z = resample_regressor(x, x, y) assert_almost_equal(z, y) def test_resample_regressor_nl(): """ test regressor resampling on a sine function """ x = np.linspace(0, 10, 1000) y = np.linspace(0, 10, 30) z = resample_regressor(np.cos(x), x, y) assert_almost_equal(z, np.cos(y), decimal=2) def test_orthogonalize(): """ test that the orthogonalization is OK """ X = np.random.randn(100, 5) X = _orthogonalize(X) K = np.dot(X.T, X) K -= np.diag(np.diag(K)) assert (K ** 2).sum() < 1.e-16 def test_orthogonalize_trivial(): """ test that the orthogonalization is OK """ X = np.random.randn(100) Y = X.copy() X = _orthogonalize(X) assert (Y == X).all() def test_sample_condition_1(): """ Test that the experimental condition is correctly sampled """ condition = ([1, 20, 36.5], [0, 0, 0], [1, 1, 1]) frametimes = np.linspace(0, 49, 50) reg, rf = sample_condition(condition, frametimes, oversampling=1) assert reg.sum() == 3 assert reg[1] == 1 assert reg[37] == 1 assert reg[20] ==1 def test_sample_condition_2(): """ Test that the experimental condition is correctly sampled """ condition = ([1, 20, 36.5], [2, 2, 2], [1, 1, 1]) frametimes = np.linspace(0, 49, 50) reg, rf = sample_condition(condition, frametimes, oversampling=1) assert reg.sum() == 6 assert reg[1] == 1 assert reg[38] == 1 assert reg[21] ==1 def test_sample_condition_3(): """ Test that the experimental condition is correctly sampled """ condition = ([1, 20, 36.5], [2, 2, 2], [1, 1, 1]) frametimes = np.linspace(0, 49, 50) reg, rf = sample_condition(condition, frametimes, oversampling=10) assert_almost_equal(reg.sum(), 60.) assert reg[10] == 1 assert reg[380] == 1 assert reg[210] == 1 assert np.sum(reg > 0) == 60 def test_sample_condition_4(): """ Test that the experimental condition is correctly sampled with wrongly placed trials """ condition = ([-3, 1, 20, 36.5, 51], [0, 0, 0, 0, 0], [1, 1, 1, 1, 1]) frametimes = np.linspace(0, 49, 50) reg, rf = sample_condition(condition, frametimes, oversampling=1) assert reg.sum() == 3 assert reg[1] == 1 assert reg[37] == 1 assert reg[20] ==1 def test_sample_condition_5(): """ Test that the experimental condition is correctly sampled """ condition = ([1, 20, 36.5], [2, 2, 2], [1., -1., 5.]) frametimes = np.linspace(0, 49, 50) reg, rf = sample_condition(condition, frametimes, oversampling=1) assert reg.sum() == 10 assert reg[1] == 1. assert reg[20] == -1. assert reg[37] == 5. 
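# A minimal sketch (added for illustration, not part of the original test
# suite): it shows the call pattern the tests above exercise, assuming only
# names already imported at the top of this module (np, compute_regressor).
# The helper name _sketch_compute_regressor is illustrative only.
def _sketch_compute_regressor():
    # one condition: events at 1s, 20s and 36.5s, zero duration, unit amplitude
    condition = ([1., 20., 36.5], [0., 0., 0.], [1., 1., 1.])
    # sampling grid: 70 scans at TR = 1s
    frametimes = np.linspace(0, 69, 70)
    # convolve the event train with the SPM canonical HRF
    reg, reg_names = compute_regressor(condition, 'spm', frametimes)
    return reg, reg_names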
def test_names(): """ Test the regressor naming function """ name = 'con' assert _regressor_names(name, 'spm') == ['con'] assert _regressor_names(name, 'spm_time') == ['con', 'con_derivative'] assert _regressor_names(name, 'spm_time_dispersion') == \ ['con', 'con_derivative', 'con_dispersion'] assert _regressor_names(name, 'canonical') == ['con'] assert _regressor_names(name, 'canonical with derivative') == \ ['con', 'con_derivative'] def test_hkernel(): """ test the hrf computation """ tr = 2.0 h = _hrf_kernel('spm', tr) assert_almost_equal(h[0], spm_hrf(tr)) assert len(h) == 1 h = _hrf_kernel('spm_time', tr) assert_almost_equal(h[1], spm_time_derivative(tr)) assert len(h) == 2 h = _hrf_kernel('spm_time_dispersion', tr) assert_almost_equal(h[2], spm_dispersion_derivative(tr)) assert len(h) == 3 h = _hrf_kernel('canonical', tr) assert_almost_equal(h[0], glover_hrf(tr)) assert len(h) == 1 h = _hrf_kernel('canonical with derivative', tr) assert_almost_equal(h[1], glover_time_derivative(tr)) assert_almost_equal(h[0], glover_hrf(tr)) assert len(h) == 2 h = _hrf_kernel('fir', tr, fir_delays = np.arange(4)) assert len(h) == 4 for dh in h: assert dh.sum() == 16. def test_make_regressor_1(): """ test the generated regressor """ condition = ([1, 20, 36.5], [2, 2, 2], [1, 1, 1]) frametimes = np.linspace(0, 69, 70) hrf_model = 'spm' reg, reg_names = compute_regressor(condition, hrf_model, frametimes) assert_almost_equal(reg.sum(), 6, 1) assert reg_names[0] == 'cond' def test_make_regressor_2(): """ test the generated regressor """ condition = ([1, 20, 36.5], [0, 0, 0], [1, 1, 1]) frametimes = np.linspace(0, 69, 70) hrf_model = 'spm' reg, reg_names = compute_regressor(condition, hrf_model, frametimes) assert_almost_equal(reg.sum() * 16, 3, 1) assert reg_names[0] == 'cond' def test_make_regressor_3(): """ test the generated regressor """ condition = ([1, 20, 36.5], [0, 0, 0], [1, 1, 1]) frametimes = np.linspace(0, 138, 70) hrf_model = 'fir' reg, reg_names = compute_regressor(condition, hrf_model, frametimes, fir_delays=np.arange(4)) assert (np.unique(reg) == np.array([0, 1])).all() assert (np.sum(reg, 0) == np.array([3, 3, 3, 3])).all() assert len(reg_names) == 4 if __name__ == "__main__": import nose nose.run(argv=['', __file__]) nipy-0.3.0/nipy/modalities/fmri/tests/test_hrf.py000066400000000000000000000014011210344137400220640ustar00rootroot00000000000000""" Testing hrf module """ import numpy as np from scipy.stats import gamma from ..hrf import ( gamma_params, gamma_expr, lambdify_t, ) from numpy.testing import assert_array_almost_equal def test_gamma(): t = np.linspace(0, 30, 5000) # make up some numbers pk_t = 5.0 fwhm = 6.0 # get the estimated parameters shape, scale, coef = gamma_params(pk_t, fwhm) # get distribution function g_exp = gamma_expr(pk_t, fwhm) # make matching standard distribution gf = gamma(shape, scale=scale).pdf # get values L1t = gf(t) L2t = lambdify_t(g_exp)(t) # they are the same bar a scaling factor nz = np.abs(L1t) > 1e-15 sf = np.mean(L1t[nz] / L2t[nz]) assert_array_almost_equal(L1t , L2t*sf) nipy-0.3.0/nipy/modalities/fmri/tests/test_iterators.py000066400000000000000000000130551210344137400233310ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: #TODO the iterators are deprecated from nipy.testing import * from nipy.core.api import Image import nipy.core.reference.coordinate_map as coordinate_map from nipy.modalities.fmri.api import FmriImageList """ Comment out since these are 
slated for deletion and currently are broken. Keep for reference until generators are working. class test_Iterators(TestCase): def setUp(self): spacetime = ['time', 'zspace', 'yspace', 'xspace'] im = Image(np.zeros((3,4,5,6)), coordinate_map = coordinate_map.CoordinateMap.identity((3,4,5,6), spacetime)) self.img = FmriImageList(im) def test_fmri_parcel(self): parcelmap = np.zeros(self.img.shape[1:]) parcelmap[0,0,0] = 1 parcelmap[1,1,1] = 1 parcelmap[2,2,2] = 1 parcelmap[1,2,1] = 2 parcelmap[2,3,2] = 2 parcelmap[0,1,0] = 2 parcelseq = (0, 1, 2, 3) expected = [np.product(self.img.shape[1:]) - 6, 3, 3, 0] iterator = parcel_iterator(self.img, parcelmap, parcelseq) for i, slice_ in enumerate(iterator): self.assertEqual((self.img.shape[0], expected[i],), slice_.shape) iterator = parcel_iterator(self.img, parcelmap) for i, slice_ in enumerate(iterator): self.assertEqual((self.img.shape[0], expected[i],), slice_.shape) def test_fmri_parcel_write(self): parcelmap = np.zeros(self.img.shape[1:]) parcelmap[0,0,0] = 1 parcelmap[1,1,1] = 1 parcelmap[2,2,2] = 1 parcelmap[1,2,1] = 2 parcelmap[2,3,2] = 2 parcelmap[0,1,0] = 2 parcelseq = (0, 1, 2, 3) expected = [np.product(self.img.shape[1:]) - 6, 3, 3, 0] iterator = parcel_iterator(self.img, parcelmap, parcelseq, mode='w') for i, slice_ in enumerate(iterator): value = np.asarray([np.arange(expected[i]) for _ in range(self.img.shape[0])]) slice_.set(value) iterator = parcel_iterator(self.img, parcelmap, parcelseq) for i, slice_ in enumerate(iterator): self.assertEqual((self.img.shape[0], expected[i],), slice_.shape) assert_equal(slice_, np.asarray([np.arange(expected[i]) for _ in range(self.img.shape[0])])) iterator = parcel_iterator(self.img, parcelmap, mode='w') for i, slice_ in enumerate(iterator): value = np.asarray([np.arange(expected[i]) for _ in range(self.img.shape[0])]) slice_.set(value) iterator = parcel_iterator(self.img, parcelmap) for i, slice_ in enumerate(iterator): self.assertEqual((self.img.shape[0], expected[i],), slice_.shape) assert_equal(slice_, np.asarray([np.arange(expected[i]) for _ in range(self.img.shape[0])])) def test_fmri_parcel_copy(self): parcelmap = np.zeros(self.img.shape[1:]) parcelmap[0,0,0] = 1 parcelmap[1,1,1] = 1 parcelmap[2,2,2] = 1 parcelmap[1,2,1] = 2 parcelmap[2,3,2] = 2 parcelmap[0,1,0] = 2 parcelseq = (0, 1, 2, 3) expected = [np.product(self.img.shape[1:]) - 6, 3, 3, 0] iterator = parcel_iterator(self.img, parcelmap, parcelseq) tmp = FmriImageList(self.img[:] * 1., self.img.coordmap) new_iterator = iterator.copy(tmp) for i, slice_ in enumerate(new_iterator): self.assertEqual((self.img.shape[0], expected[i],), slice_.shape) iterator = parcel_iterator(self.img, parcelmap) for i, slice_ in enumerate(new_iterator): self.assertEqual((self.img.shape[0], expected[i],), slice_.shape) def test_fmri_sliceparcel(self): parcelmap = np.asarray([[[0,0,0,1,2,2]]*5, [[0,0,1,1,2,2]]*5, [[0,0,0,0,2,2]]*5]) parcelseq = ((1, 2), 0, 2) iterator = slice_parcel_iterator(self.img, parcelmap, parcelseq) for i, slice_ in enumerate(iterator): pm = parcelmap[i] ps = parcelseq[i] try: x = len([n for n in pm.flat if n in ps]) except TypeError: x = len([n for n in pm.flat if n == ps]) self.assertEqual(x, slice_.shape[1]) self.assertEqual(self.img.shape[0], slice_.shape[0]) def test_fmri_sliceparcel_write(self): parcelmap = np.asarray([[[0,0,0,1,2,2]]*5, [[0,0,1,1,2,2]]*5, [[0,0,0,0,2,2]]*5]) parcelseq = ((1, 2), 0, 2) iterator = slice_parcel_iterator(self.img, parcelmap, parcelseq, mode='w') for i, slice_ in enumerate(iterator): pm = 
parcelmap[i] ps = parcelseq[i] try: x = len([n for n in pm.flat if n in ps]) except TypeError: x = len([n for n in pm.flat if n == ps]) value = [i*np.arange(x) for i in range(self.img.shape[0])] slice_.set(value) iterator = slice_parcel_iterator(self.img, parcelmap, parcelseq) for i, slice_ in enumerate(iterator): pm = parcelmap[i] ps = parcelseq[i] try: x = len([n for n in pm.flat if n in ps]) except TypeError: x = len([n for n in pm.flat if n == ps]) value = [i*np.arange(x) for i in range(self.img.shape[0])] self.assertEqual(x, slice_.shape[1]) self.assertEqual(self.img.shape[0], slice_.shape[0]) assert_equal(slice_, value) """ nipy-0.3.0/nipy/modalities/fmri/tests/test_paradigm.py000066400000000000000000000053451210344137400231040ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Test the design_matrix utilities. Note that the tests just look whether the data produced has correct dimension, not whether it is exact. """ import numpy as np from ..experimental_paradigm import (EventRelatedParadigm, BlockParadigm, load_paradigm_from_csv_file) def basic_paradigm(): conditions = ['c0', 'c0', 'c0', 'c1', 'c1', 'c1', 'c2', 'c2', 'c2'] onsets = [30, 70, 100, 10, 30, 90, 30, 40, 60] paradigm = EventRelatedParadigm(conditions, onsets) return paradigm def modulated_block_paradigm(): conditions = ['c0', 'c0', 'c0', 'c1', 'c1', 'c1', 'c2', 'c2', 'c2'] onsets = [30, 70, 100, 10, 30, 90, 30, 40, 60] duration = 5 + 5 * np.random.rand(len(onsets)) values = np.random.rand(len(onsets)) paradigm = BlockParadigm(conditions, onsets, duration, values) return paradigm def modulated_event_paradigm(): conditions = ['c0', 'c0', 'c0', 'c1', 'c1', 'c1', 'c2', 'c2', 'c2'] onsets = [30, 70, 100, 10, 30, 90, 30, 40, 60] values = np.random.rand(len(onsets)) paradigm = EventRelatedParadigm(conditions, onsets, values) return paradigm def block_paradigm(): conditions = ['c0', 'c0', 'c0', 'c1', 'c1', 'c1', 'c2', 'c2', 'c2'] onsets = [30, 70, 100, 10, 30, 90, 30, 40, 60] duration = 5 * np.ones(9) paradigm = BlockParadigm (conditions, onsets, duration) return paradigm def write_paradigm(paradigm, session): """Function to write a paradigm to a file and return the address """ import tempfile csvfile = tempfile.mkdtemp() + '/paradigm.csv' paradigm.write_to_csv(csvfile, session) return csvfile def test_read_paradigm(): """ test that a paradigm is correctly read """ session = 'sess' paradigm = block_paradigm() csvfile = write_paradigm(paradigm, session) read_paradigm = load_paradigm_from_csv_file(csvfile)[session] assert (read_paradigm.onset == paradigm.onset).all() paradigm = modulated_event_paradigm() csvfile = write_paradigm(paradigm, session) read_paradigm = load_paradigm_from_csv_file(csvfile)[session] assert (read_paradigm.onset == paradigm.onset).all() paradigm = modulated_block_paradigm() csvfile = write_paradigm(paradigm, session) read_paradigm = load_paradigm_from_csv_file(csvfile)[session] assert (read_paradigm.onset == paradigm.onset).all() paradigm = basic_paradigm() csvfile = write_paradigm(paradigm, session) read_paradigm = load_paradigm_from_csv_file(csvfile)[session] assert (read_paradigm.onset == paradigm.onset).all() if __name__ == "__main__": import nose nose.run(argv=['', __file__]) nipy-0.3.0/nipy/modalities/fmri/tests/test_utils.py000066400000000000000000000176271210344137400224660ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 
sw=4 et: """ Testing fmri utils """ import re import numpy as np import sympy from sympy import Symbol, Function, DiracDelta from nipy.fixes.sympy.utilities.lambdify import lambdify from nipy.algorithms.statistics.formula import Term from ..utils import ( lambdify_t, define, events, blocks, interp, linear_interp, step_function, convolve_functions, ) from .. import hrf from nose.tools import (assert_equal, assert_false, raises, assert_raises) from numpy.testing import (assert_array_equal, assert_array_almost_equal, assert_almost_equal) t = Term('t') def test_define(): expr = sympy.exp(3*t) assert_equal(str(expr), 'exp(3*t)') newf = define('f', expr) assert_equal(str(newf), 'f(t)') f = lambdify_t(newf) tval = np.random.standard_normal((3,)) assert_almost_equal(np.exp(3*tval), f(tval)) def test_events(): # test events utility function h = Function('hrf') evs = events([3,6,9]) assert_equal(DiracDelta(-9 + t) + DiracDelta(-6 + t) + DiracDelta(-3 + t), evs) evs = events([3,6,9], f=h) assert_equal(h(-3 + t) + h(-6 + t) + h(-9 + t), evs) # make some beta symbols b = [Symbol('b%d' % i, dummy=True) for i in range(3)] a = Symbol('a') p = b[0] + b[1]*a + b[2]*a**2 evs = events([3,6,9], amplitudes=[2,1,-1], g=p) assert_equal((2*b[1] + 4*b[2] + b[0])*DiracDelta(-3 + t) + (-b[1] + b[0] + b[2])*DiracDelta(-9 + t) + (b[0] + b[1] + b[2])*DiracDelta(-6 + t), evs) evs = events([3,6,9], amplitudes=[2,1,-1], g=p, f=h) assert_equal((2*b[1] + 4*b[2] + b[0])*h(-3 + t) + (-b[1] + b[0] + b[2])*h(-9 + t) + (b[0] + b[1] + b[2])*h(-6 + t), evs) # test no error for numpy int arrays onsets = np.array([30, 70, 100], dtype=np.int64) evs = events(onsets, f=hrf.glover) def test_interp(): times = [0,4,5.] values = [2.,4,6] for int_func in (interp, linear_interp): s = int_func(times, values, np.nan) tval = np.array([-0.1,0.1,3.9,4.1,5.1]) res = lambdify(t, s)(tval) assert_array_equal(np.isnan(res), [True, False, False, False, True]) assert_array_almost_equal(res[1:-1], [2.05, 3.95, 4.2]) # default is zero fill s = int_func(times, values) res = lambdify(t, s)(tval) assert_array_almost_equal(res, [0, 2.05, 3.95, 4.2, 0]) # Can be some other value s = int_func(times, values, fill=10) res = lambdify(t, s)(tval) assert_array_almost_equal(res, [10, 2.05, 3.95, 4.2, 10]) # If fill is None, raises error on interpolation outside bounds s = int_func(times, values, fill=None) f = lambdify(t, s) assert_array_almost_equal(f(tval[1:-1]), [2.05, 3.95, 4.2]) assert_raises(ValueError, f, tval[:-1]) # specifying kind as linear is OK s = linear_interp(times, values, kind='linear') # bounds_check should match fill int_func(times, values, bounds_error=False) int_func(times, values, fill=None, bounds_error=True) assert_raises(ValueError, int_func, times, values, bounds_error=True) # fill should match fill value int_func(times, values, fill=10, fill_value=10) int_func(times, values, fill_value=0) assert_raises(ValueError, int_func, times, values, fill=10, fill_value=9) int_func(times, values, fill=np.nan, fill_value=np.nan) assert_raises(ValueError, int_func, times, values, fill=10, fill_value=np.nan) assert_raises(ValueError, int_func, times, values, fill=np.nan, fill_value=0) @raises(ValueError) def test_linear_inter_kind(): linear_interp([0, 1], [1, 2], kind='cubic') def test_step_function(): # test step function # step function is a function of t s = step_function([0,4,5],[2,4,6]) tval = np.array([-0.1,0,3.9,4,4.1,5.1]) lam = lambdify(t, s) assert_array_equal(lam(tval), [0, 2, 2, 4, 4, 6]) s = step_function([0,4,5],[4,2,1]) lam = lambdify(t, s) 
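    # step_function is right-continuous, so at t == 4 the value has already
    # switched to 2 (checked by the assertion that follows)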
assert_array_equal(lam(tval), [0, 4, 4, 2, 2, 1]) # Name default assert_false(re.match(r'step\d+\(t\)$', str(s)) is None) # Name reloaded s = step_function([0,4,5],[4,2,1], name='goodie_goodie_yum_yum') assert_equal(str(s), 'goodie_goodie_yum_yum(t)') def test_blocks(): on_off = [[1,2],[3,4]] tval = np.array([0.4,1.4,2.4,3.4]) b = blocks(on_off) lam = lambdify(t, b) assert_array_equal(lam(tval), [0, 1, 0, 1]) b = blocks(on_off, amplitudes=[3,5]) lam = lambdify(t, b) assert_array_equal(lam(tval), [0, 3, 0, 5]) # Check what happens with names # Default is from step function assert_false(re.match(r'step\d+\(t\)$', str(b)) is None) # Can pass in another b = blocks(on_off, name='funky_chicken') assert_equal(str(b), 'funky_chicken(t)') def numerical_convolve(func1, func2, interval, dt): mni, mxi = interval time = np.arange(mni, mxi, dt) vec1 = func1(time).astype(float) vec2 = func2(time).astype(float) value = np.convolve(vec1, vec2) * dt min_s = min(time.size, value.size) time = time[:min_s] value = value[:min_s] return time, value def test_convolve_functions(): # replicate convolution # This is a square wave on [0,1] f1 = (t > 0) * (t < 1) # ff1 is the numerical implementation of same ff1 = lambdify(t, f1) # Time delta dt = 1e-3 # The convolution of ``f1`` with itself is a triangular wave on # [0, 2], peaking at 1 with height 1 tri = convolve_functions(f1, f1, [0, 2], [0, 2], dt, name='conv') assert_equal(str(tri), 'conv(t)') ftri = lambdify(t, tri) time, value = numerical_convolve(ff1, ff1, [0, 2], dt) y = ftri(time) # numerical convolve about the same as ours assert_array_almost_equal(value, y) # peak is at 1 assert_array_almost_equal(time[np.argmax(y)], 1) # Flip the interval and get the same result for seq1, seq2 in (((0, 2), (2, 0)), ((2, 0), (0, 2)), ((2, 0), (2, 0))): tri = convolve_functions(f1, f1, seq1, seq2, dt) ftri = lambdify(t, tri) y = ftri(time) assert_array_almost_equal(value, y) # offset square wave by 1 - offset triangle by 1 f2 = (t > 1) * (t < 2) tri = convolve_functions(f1, f2, [0, 3], [0, 3], dt) ftri = lambdify(t, tri) o1_time = np.arange(0, 3, dt) z1s = np.zeros((np.round(1./dt))) assert_array_almost_equal(ftri(o1_time), np.r_[z1s, value]) # Same for input function tri = convolve_functions(f2, f1, [0, 3], [0, 3], dt) ftri = lambdify(t, tri) assert_array_almost_equal(ftri(o1_time), np.r_[z1s, value]) # 2 seconds for both tri = convolve_functions(f2, f2, [0, 4], [0, 4], dt) ftri = lambdify(t, tri) o2_time = np.arange(0, 4, dt) assert_array_almost_equal(ftri(o2_time), np.r_[z1s, z1s, value]) # offset by -0.5 - offset triangle by -0.5 f3 = (t > -0.5) * (t < 0.5) tri = convolve_functions(f1, f3, [0, 2], [-0.5, 1.5], dt) ftri = lambdify(t, tri) o1_time = np.arange(-0.5, 1.5, dt) assert_array_almost_equal(ftri(o1_time), value) # Same for input function tri = convolve_functions(f3, f1, [-0.5, 1.5], [0, 2], dt) ftri = lambdify(t, tri) assert_array_almost_equal(ftri(o1_time), value) # -1 second for both tri = convolve_functions(f3, f3, [-0.5, 1.5], [-0.5, 1.5], dt) ftri = lambdify(t, tri) o2_time = np.arange(-1, 1, dt) assert_array_almost_equal(ftri(o2_time), value) # Check it's OK to be off the dt grid tri = convolve_functions(f1, f1, [dt/2, 2 + dt/2], [0, 2], dt, name='conv') ftri = lambdify(t, tri) assert_array_almost_equal(ftri(time), value, 3) nipy-0.3.0/nipy/modalities/fmri/utils.py000066400000000000000000000332361210344137400202570ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: 
""" This module defines some convenience functions of time. interp : an expresion for a interpolated function of time linear_interp : an expression for a linearly interpolated function of time step_function : an expression for a step function of time events : a convenience function to generate sums of events blocks : a convenience function to generate sums of blocks convolve_functions : numerically convolve two functions of time fourier_basis : a convenience function to generate a Fourier basis """ import itertools import numpy as np import numpy.fft as FFT from scipy.interpolate import interp1d import sympy from sympy import DiracDelta, Symbol from nipy.algorithms.statistics.formula.formulae import Term, Formula from nipy.fixes.sympy.utilities.lambdify import implemented_function, lambdify T = Term('t') def lambdify_t(expr): ''' Return sympy function of t `expr` lambdified as function of t Parameters ---------- expr : sympy expr Returns ------- func : callable Numerical implementation of function ''' return lambdify(T, expr, "numpy") def define(name, expr): """ Create function of t expression from arbitrary expression `expr` Take an arbitrarily complicated expression `expr` of 't' and make it an expression that is a simple function of t, of form ``'%s(t)' % name`` such that when it evaluates (via ``lambdify``) it has the right values. Parameters ---------- expr : sympy expression with only 't' as a Symbol name : str Returns ------- nexpr: sympy expression Examples -------- >>> t = Term('t') >>> expr = t**2 + 3*t >>> print expr #doctest: +SYMPY_EQUAL 3*t + t**2 >>> newexpr = define('f', expr) >>> print newexpr f(t) >>> f = lambdify_t(newexpr) >>> f(4) 28 >>> 3*4+4**2 28 """ # make numerical implementation of expression v = lambdify(T, expr, "numpy") # convert numerical implementation to sympy function f = implemented_function(name, v) # Return expression that is function of time return f(T) def fourier_basis(freq): """ sin and cos Formula for Fourier drift The Fourier basis consists of sine and cosine waves of given frequencies. Parameters ---------- freq : sequence of float Frequencies for the terms in the Fourier basis. Returns ------- f : Formula Examples -------- >>> f=fourier_basis([1,2,3]) >>> f.terms array([cos(2*pi*t), sin(2*pi*t), cos(4*pi*t), sin(4*pi*t), cos(6*pi*t), sin(6*pi*t)], dtype=object) >>> f.mean _b0*cos(2*pi*t) + _b1*sin(2*pi*t) + _b2*cos(4*pi*t) + _b3*sin(4*pi*t) + _b4*cos(6*pi*t) + _b5*sin(6*pi*t) """ r = [] for f in freq: r += [sympy.cos((2*sympy.pi*f*T)), sympy.sin((2*sympy.pi*f*T))] return Formula(r) def interp(times, values, fill=0, name=None, **kw): """ Generic interpolation function of t given `times` and `values` Imterpolator such that: f(times[i]) = values[i] if t < times[0] or t > times[-1]: f(t) = fill See ``scipy.interpolate.interp1d`` for details of interpolation types and other keyword arguments. Default is 'kind' is linear, making this function, by default, have the same behavior as ``linear_interp``. Parameters ---------- times : array-like Increasing sequence of times values : array-like Values at the specified times fill : None or float, optional Value on the interval (-np.inf, times[0]). Default 0. If None, raises error outside bounds name : None or str, optional Name of symbolic expression to use. If None, a default is used. \*\*kw : keyword args, optional passed to ``interp1d`` Returns ------- f : sympy expression A Function of t. 
Examples -------- >>> s = interp([0,4,5.],[2.,4,6]) >>> tval = np.array([-0.1,0.1,3.9,4.1,5.1]) >>> res = lambdify_t(s)(tval) 0 outside bounds by default >>> np.allclose(res, [0, 2.05, 3.95, 4.2, 0]) True """ if not fill is None: if kw.get('bounds_error') is True: raise ValueError('fill conflicts with bounds error') fv = kw.get('fill_value') if not (fv is None or fv is fill or fv == fill): # allow for fill=np.nan raise ValueError('fill conflicts with fill_value') kw['bounds_error'] = False kw['fill_value'] = fill interpolator = interp1d(times, values, **kw) # make a new name if none provided if name is None: name = 'interp%d' % interp.counter interp.counter += 1 s = implemented_function(name, interpolator) return s(T) interp.counter = 0 def linear_interp(times, values, fill=0, name=None, **kw): """ Linear interpolation function of t given `times` and `values` Imterpolator such that: f(times[i]) = values[i] if t < times[0] or t > times[-1]: f(t) = fill This version of the function enforces the 'linear' kind of interpolation (argument to ``scipy.interpolate.interp1d``). Parameters ---------- times : array-like Increasing sequence of times values : array-like Values at the specified times fill : None or float, optional Value on the interval (-np.inf, times[0]). Default 0. If None, raises error outside bounds name : None or str, optional Name of symbolic expression to use. If None, a default is used. \*\*kw : keyword args, optional passed to ``interp1d`` Returns ------- f : sympy expression A Function of t. Examples -------- >>> s = linear_interp([0,4,5.],[2.,4,6]) >>> tval = np.array([-0.1,0.1,3.9,4.1,5.1]) >>> res = lambdify_t(s)(tval) 0 outside bounds by default >>> np.allclose(res, [0, 2.05, 3.95, 4.2, 0]) True """ kind = kw.get('kind') if kind is None: kw['kind'] = 'linear' elif kind != 'linear': raise ValueError('Only linear interpolation supported') return interp(times, values, fill, name, **kw) def step_function(times, values, name=None, fill=0): """ Right-continuous step function of time t Function of t such that f(times[i]) = values[i] if t < times[0]: f(t) = fill Parameters ---------- times : (N,) sequence Increasing sequence of times values : (N,) sequence Values at the specified times fill : float Value on the interval (-np.inf, times[0]) name : str Name of symbolic expression to use. If None, a default is used. Returns ------- f_t : sympy expr Sympy expression f(t) where f is a sympy implemented anonymous function of time that implements the step function. To get the numerical version of the function, use ``lambdify_t(f_t)`` Examples -------- >>> s = step_function([0,4,5],[2,4,6]) >>> tval = np.array([-0.1,3.9,4.1,5.1]) >>> lam = lambdify_t(s) >>> lam(tval) array([ 0., 2., 4., 6.]) """ if name is None: name = 'step%d' % step_function.counter step_function.counter += 1 def _imp(x): x = np.asarray(x) f = np.zeros(x.shape) + fill for time, val in zip(times, values): f[x >= time] = val return f s = implemented_function(name, _imp) return s(T) # Initialize counter for step function step_function.counter = 0 def events(times, amplitudes=None, f=DiracDelta, g=Symbol('a')): """ Return a sum of functions based on a sequence of times. Parameters ---------- times : sequence vector of onsets length $N$ amplitudes : None or sequence length $N$, optional Optional sequence of amplitudes. None (default) results in sequence length $N$ of 1s f : sympy.Function, optional Optional function. 
Defaults to DiracDelta, can be replaced with another function, f, in which case the result is the convolution with f. g : sympy.Basic, optional Optional sympy expression function of amplitudes. The amplitudes, should be represented by the symbol 'a', which will be substituted, by the corresponding value in `amplitudes`. Returns ------- sum_expression : Sympy.Add Sympy expression of time $t$, where onsets, as a function of $t$, have been symbolically convolved with function `f`, and any function `g` of corresponding amplitudes. Examples -------- We import some sympy stuff so we can test if we've got what we expected >>> from sympy import DiracDelta, Symbol, Function >>> from nipy.modalities.fmri.utils import T >>> evs = events([3,6,9]) >>> evs == DiracDelta(-9 + T) + DiracDelta(-6 + T) + DiracDelta(-3 + T) True >>> hrf = Function('hrf') >>> evs = events([3,6,9], f=hrf) >>> evs == hrf(-9 + T) + hrf(-6 + T) + hrf(-3 + T) True >>> evs = events([3,6,9], amplitudes=[2,1,-1]) >>> evs == -DiracDelta(-9 + T) + 2*DiracDelta(-3 + T) + DiracDelta(-6 + T) True """ e = 0 asymb = Symbol('a') if amplitudes is None: amplitudes = itertools.cycle([1]) for time, a in zip(times, amplitudes): e = e + g.subs(asymb, a) * f(T-time) return e def blocks(intervals, amplitudes=None, name=None): """ Step function based on a sequence of intervals. Parameters ---------- intervals : (S,) sequence of (2,) sequences Sequence (S0, S1, ... S(N-1)) of sequences, where S0 (etc) are sequences of length 2, giving 'on' and 'off' times of block amplitudes : (S,) sequence of float, optional Optional amplitudes for each block. Defaults to 1. name : None or str, optional Name of the convolved function in the resulting expression. Defaults to one created by ``utils.interp``. Returns ------- b_of_t : sympy expr Sympy expression b(t) where b is a sympy anonymous function of time that implements the block step function Examples -------- >>> on_off = [[1,2],[3,4]] >>> tval = np.array([0.4,1.4,2.4,3.4]) >>> b = blocks(on_off) >>> lam = lambdify_t(b) >>> lam(tval) array([ 0., 1., 0., 1.]) >>> b = blocks(on_off, amplitudes=[3,5]) >>> lam = lambdify_t(b) >>> lam(tval) array([ 0., 3., 0., 5.]) """ t = [-np.inf] v = [0] if amplitudes is None: amplitudes = itertools.cycle([1]) for _t, a in zip(intervals, amplitudes): t += list(_t) v += [a, 0] t.append(np.inf) v.append(0) return step_function(t, v, name=name) def convolve_functions(f, g, f_interval, g_interval, dt, fill=0, name=None, **kwargs): """ Expression containing numerical convolution of `fn1` with `fn2` Parameters ---------- f : sympy expr An expression that is a function of t only. g : sympy expr An expression that is a function of t only. f_interval : (2,) sequence of float The start and end of the interval of t over which to convolve values of f g_interval : (2,) sequence of floats Start and end of the interval of t over to convolve g dt : float Time step for discretization. We use this for creating the interpolator to form the numerical implementation fill : None or float Value to return from sampling output `fg` function outside range. name : None or str, optional Name of the convolved function in the resulting expression. Defaults to one created by ``utils.interp``. \*\*kwargs : keyword args, optional Any other arguments to pass to the ``interp1d`` function in creating the numerical funtion for `fg`. Returns ------- fg : sympy expr An symbolic expression that is a function of t only, and that can be lambdified to produce a function returning the convolved series from an input array. 
Examples -------- >>> import sympy >>> t = sympy.Symbol('t') This is a square wave on [0,1] >>> f1 = (t > 0) * (t < 1) The convolution of ``f1`` with itself is a triangular wave on [0, 2], peaking at 1 with height 1 >>> tri = convolve_functions(f1, f1, [0, 2], [0, 2], 1.0e-3, name='conv') The result is a symbolic function >>> print tri conv(t) Get the numerical values for a time vector >>> ftri = lambdify(t, tri) >>> x = np.arange(0, 2, 0.2) >>> y = ftri(x) The peak is at 1 >>> x[np.argmax(y)] 1.0 """ # Note that - from the doctest above - y is """ array([ -3.90255908e-16, 1.99000000e-01, 3.99000000e-01, 5.99000000e-01, 7.99000000e-01, 9.99000000e-01, 7.99000000e-01, 5.99000000e-01, 3.99000000e-01, 1.99000000e-01, 6.74679706e-16]) """ # - so the peak value is 1-dt - rather than 1 - but we get the same # result from using np.convolve - see tests. real_f = lambdify_t(f) real_g = lambdify_t(g) dt = float(dt) f_mn, f_mx = sorted(f_interval) f_time = np.arange(f_mn, f_mx, dt) # time values with support for f f_vals = real_f(f_time).astype(float) g_mn, g_mx = sorted(g_interval) g_time = np.arange(g_mn, g_mx, dt) # time values with support for g g_vals = real_g(g_time).astype(float) # f and g have been implicitly translated by -f_mn and -g_mn respectively, # because in terms of array indices, they both now start at 0 value = np.convolve(f_vals, g_vals) * dt # Full by default # Translate by f and g offsets fg_time = np.arange(len(value)) * dt + f_mn + g_mn return interp(fg_time, value, fill=fill, name=name, **kwargs) nipy-0.3.0/nipy/modalities/setup.py000066400000000000000000000007171210344137400173200ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('modalities', parent_package, top_path) config.add_subpackage('fmri') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) nipy-0.3.0/nipy/pkg_info.py000066400000000000000000000053151210344137400156210ustar00rootroot00000000000000import os import sys import subprocess from ConfigParser import ConfigParser COMMIT_INFO_FNAME = 'COMMIT_INFO.txt' def pkg_commit_hash(pkg_path): ''' Get short form of commit hash given directory `pkg_path` There should be a file called 'COMMIT_INFO.txt' in `pkg_path`. This is a file in INI file format, with at least one section: ``commit hash``, and two variables ``archive_subst_hash`` and ``install_hash``. The first has a substitution pattern in it which may have been filled by the execution of ``git archive`` if this is an archive generated that way. The second is filled in by the installation, if the installation is from a git archive. 
We get the commit hash from (in order of preference): * A substituted value in ``archive_subst_hash`` * A written commit hash value in ``install_hash` * git's output, if we are in a git repository If all these fail, we return a not-found placeholder tuple Parameters ---------- pkg_path : str directory containing package Returns ------- hash_from : str Where we got the hash from - description hash_str : str short form of hash ''' # Try and get commit from written commit text file pth = os.path.join(pkg_path, COMMIT_INFO_FNAME) if not os.path.isfile(pth): raise IOError('Missing commit info file %s' % pth) cfg_parser = ConfigParser() cfg_parser.read(pth) archive_subst = cfg_parser.get('commit hash', 'archive_subst_hash') if not archive_subst.startswith('$Format'): # it has been substituted return 'archive substitution', archive_subst install_subst = cfg_parser.get('commit hash', 'install_hash') if install_subst != '': return 'installation', install_subst # maybe we are in a repository proc = subprocess.Popen('git rev-parse --short HEAD', stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=pkg_path, shell=True) repo_commit, _ = proc.communicate() if repo_commit: return 'repository', repo_commit.strip() return '(none found)', '' def get_pkg_info(pkg_path): ''' Return dict describing the context of this package Parameters ---------- pkg_path : str path containing __init__.py for package Returns ------- context : dict with named parameters of interest ''' src, hsh = pkg_commit_hash(pkg_path) import numpy return dict( pkg_path=pkg_path, commit_source=src, commit_hash=hsh, sys_version=sys.version, sys_executable=sys.executable, sys_platform=sys.platform, np_version=numpy.__version__) nipy-0.3.0/nipy/setup.py000066400000000000000000000052561210344137400151710ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: try: # python 2 from ConfigParser import ConfigParser except ImportError: # python 3 from configparser import ConfigParser import os NIPY_DEFAULTS = dict() ################################################################################ def get_nipy_info(): """ Reuse numpy's distutils to get and store information about nipy in the site.cfg. 
""" from numpy.distutils.system_info import get_standard_file files = get_standard_file('site.cfg') cp = ConfigParser(NIPY_DEFAULTS) cp.read(files) if not cp.has_section('nipy'): cp.add_section('nipy') info = dict(cp.items('nipy')) for key, value in info.items(): if value.startswith('~'): info[key] = os.path.expanduser(value) # Ugly fix for bug 409269 if 'libraries' in info and isinstance(info['libraries'], basestring): info['libraries'] = [info['libraries']] # End of ugly fix return info ################################################################################ def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration from numpy.distutils.system_info import system_info config = Configuration('nipy', parent_package, top_path) # List all packages to be loaded here config.add_subpackage('algorithms') config.add_subpackage('interfaces') config.add_subpackage('core') config.add_subpackage('fixes') config.add_subpackage('io') config.add_subpackage('modalities') config.add_subpackage('utils') config.add_subpackage('tests') config.add_subpackage('externals') config.add_subpackage('testing') # Note: this is a special subpackage containing that will later be # migrated to whichever parts of the main package they logically # belong in. But initially we are putting everythin under this # subpackage to make the management and migration easier. config.add_subpackage('labs') ##################################################################### # Store the setup information, including the nipy-specific # information in a __config__ file. class nipy_info(system_info): """ We are subclassing numpy.distutils's system_info to insert information in the __config__ file. The class name determines the name of the variable in the __config__ file. """ nipy_info().set_info(**get_nipy_info()) config.make_config_py() return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) nipy-0.3.0/nipy/testing/000077500000000000000000000000001210344137400151245ustar00rootroot00000000000000nipy-0.3.0/nipy/testing/__init__.py000066400000000000000000000024621210344137400172410ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """The testing directory contains a small set of imaging files to be used for doctests only. More thorough tests and example data will be stored in a nipy data packages that you can download separately. .. note: We use the ``nose`` testing framework for tests. Nose is a dependency for the tests, but should not be a dependency for running the algorithms in the NIPY library. This file should import without nose being present on the python path. Examples -------- >>> from nipy.testing import funcfile >>> from nipy.io.api import load_image >>> img = load_image(funcfile) >>> img.shape (17, 21, 3, 20) """ import os #__all__ = ['funcfile', 'anatfile'] # Discover directory path filepath = os.path.abspath(__file__) basedir = os.path.dirname(filepath) funcfile = os.path.join(basedir, 'functional.nii.gz') anatfile = os.path.join(basedir, 'anatomical.nii.gz') from numpy.testing import * # Overwrites numpy.testing.Tester from .nosetester import NipyNoseTester as Tester test = Tester().test bench = Tester().bench from . 
import decorators as dec

# Allow failed import of nose if not now running tests
try:
    from nose.tools import assert_true, assert_false
except ImportError:
    pass
nipy-0.3.0/nipy/testing/anatomical.nii.gz000066400000000000000000001705241210344137400203650ustar00rootroot00000000000000
[unrecoverable binary data: gzip-compressed NIfTI test image anatomical.nii.gz; not representable as text]
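# A minimal usage sketch (an illustration added here, not a file from the
# archive): it uses only names exposed by nipy.testing above -- funcfile,
# anatfile -- together with nipy.io.api.load_image, as in the package
# docstring, and assumes nipy is installed.
from nipy.io.api import load_image
from nipy.testing import funcfile, anatfile

func_img = load_image(funcfile)    # small 4D functional test image
anat_img = load_image(anatfile)    # small 3D anatomical test image
print(func_img.shape)              # (17, 21, 3, 20), per the nipy.testing docstring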
kᢨ ]HeNcp ToB`0B}tV`AĞ+@Ei?NC`\AiY=L HZQd.fI*}tH@=G腙syP۬bXe6- ~tT ,G 5C s刯L;״/|7}Bq| m| @1">)qҐ`VsQtɼ ۴-bV64' Pue0 Α^GCd9}فiӊIY#d Ą m~2M#5!a/cZwmM<4֗4 }3ܕt!ij"Tz͒!`ҊV+@%|VV{^8)^ωg ̉x|pJeEKWN.ىcu FAp!t h ( >'HaĶ澶楞ᩆi%;.[K`s5)qe剥A3`}ԇuyx`Ҩ\2`kit^ݺ)9eOMsD4 A3`9>+Nt᚜Y5G|amׁniGJ7\sP@_ҕ*rB瞷ҞrhWgKE<Κ])×J|Yj ̐끊JGi ؇pťN;6.KMeĪ7aI%gZMM,-\GjeKŽ ZҼ[&;O;x =ƚfzZ:%>RcwsCWGv}qqM1 tͶU62 ]IciZM1˯n{!^aW;뇴/il]ݬ!s防Rn9ט[O,(W;*tzr1mIbe{=kN)O4DlJ<;)@RgzQ\[-VT6{6zP`KfLԆHE2#+P-^BHguQʄljD{2}g0!Te]I&Cy6~EM7Dmbuh^EQ.C&kN"%HGp23Y5O0OFQw5tUK yWlH{0ݶH3^X5٨FU~Fl41Dkmjoޙl=I&_Z]BQ@cic݃yv='0љh^8^aCUxyX"53z'I/"@ RT* Uy-.\Rx&=ҫj}V8,w6Ͱ8ՠSDI/af_+3(I. QݴZnܚ%:#xYt%c)/7+ߨ=»lvDa8||#0ʀHS: P_U$A߀++n#{< 0U߈_[?Jtc6t!V=TTSX4S&4}C:$1jz@T\1{z-8>=՗{7ھY3}ѷ,˗,AF~,}c?dvNִVA*a*@ _y3csb- ~ k">0ిOCϭLexXMCl ȷ[BpzK*ቨWjxgpﲷEkcu5WO zai"BעҨ'®K(+ +OP a;p_ӰM{trx; gp8vkK:^?624X `h-rΒ^8[KXxC8BSV+F2OR q$Zx beL]L-]ܥ\y?+J<(<6K"l$0GNZ$glo}o;o>aZa%h*\gev\j>7D 33;LԱ@Xomh;k鬃x,F2ǂ^nal@ [ G\Gܒ8;q? :gbA=IR()oWQڪ<(N9u45wt{؋ZXO;eFou;RH*Wbbs<[(/URIgM;mS<,l5M=yGDFO0R/5M+ٕe8XHɶOMLm=ܰ?*czN9g[}6=ٕDۼa_ WhofmQ+lg / ţYvo,-L){JQP)p|D4)ytZʙނA)6[Neu"K9fXH]6Ka?D#iIv1=ns,σIxWdK'Q̟5QU[&ZhvkKg3[ G0e3:BTkj U \ug^SLTա-qVʌ*+EFTfsZ^MmN*jMYm;zmbu Ys I?<5aƳrtFvv򷶍u,$[]h?RUH=AI|+uH{ 62zTmDnv92_ I PbEA x6U.^^r> Q=ܟfJCf[,rѹ,&_38F; X,|++w؅3YC4tD .g5ֲѓkS=N<ÅBTmC5'?,C^)4ik>ޛՓxXA~ H!H[_M< +`ӄ=~irT~яo}n41䥳QUpA`-5B\-쭚)ZN]WW$@UO\9˨X`y\- g:%Ǚ-K(ŏxXwdf@g3S\ED&$cFsAƇ´xL7<7{7JlzsB7q#`aM`6C-P`˛[/V`Ɇ}s嶷`S҇X&j}q4%Ԇs3]F څ6"gmS ԇ 5"z$HorD8^ Or$b, b֛ISvcZXHAZw3}0/aql87 Zr pbFziKhvP9P~@cIu<Oy\i\񙍝mH-M"OTHh;:- >}TitdXrhm^`QoA&c&WIUTVuxyE Ҷʦi' !opN􋺕j3y%#q^.enp46YH[fZMҺzYY`  H&P?C HzuxmOrGzl)=hi(p٨(B%䣫3wk zQ"X<kXq8Z*3<,D@b{YUgo.j?ZP<~"TKg`!\<2FJᯃa%xզ)p:~ }52tܒei A{ __e<\JYn@Ru4Cw3wy몹9ucw0W..=$_d?W":ٕi`4T>(ay%d<`o*c`V8ܒɕ3/)6$~RzPV+Dk/+N{(oi:`$0S~./ =}#_ #-e k7rwdx{ z[Blmd>%}=!]k5cAA龬@=|\洙M+#+,%6iU9ҜdI\aQ-yo1tO=|Y1`g-a5XĮ5o/K}D{qsVXsYy IP+QId곴IS]5Mm۽x +W;;sٵ)-?mg[Jkp7BFd|C(\kc94\l,*geiF䓱zE's$H޲j1kځmu'aS(j%k4^6cD4]O=Jk9z;x+^;[ױwj :7^<o3 J$7y{|섾N{6V~7U#.Pbڛ7O t(B;Kh:! i]cm;V)pPfY5Uy)B7Ao]:tcE{ IZW WKyl=q`Smi2ޡ&c֢HpjFjΒbS5'08B7Ӧx5™=J't Ӝ u;xLv ,GuﺠZF)Lx#E[}j zn52<3/N(znlx4ËpihKN2ѐK ;5Z m Np/CZ} [iY4%TAzZ(Z g]wkmNȓkTO3dj% ?*.I08f,{%gHqv'T,7]] o1zC}CX" I#Q_ Y62&uDϑ=$Xx *h7 ڏ,ת9?Sd*z>6\!>X@&ZښAq6D7pIKK{z&CԚJ]ݕwA0zXMv@fV:؛I z9{QiS";$sn  bk ǹ:.cǟ{DwTEڏfG. FVŰ֭ReF2pF6h:NH QvWNx'NFz>JBOS/[:C0HSTNhYڅƥi1·p4jI&%َwcB*eDWB:W2^fuJ;cp Hj=|eGI,ߵ,"\vO2B L [J<8-H!9Ofq&΋C\›: )ճŇ 2FZ6b2C,1Jኰ>u$+G˩VPdsQI|BZ/ Q6x@JT+jU8^ΟݓBu'n8CwvW{+>-B1uQ>8p5q,5Úƹ[>zj ǑDڋ5%W2tܟ\*5D%C̋\E8&y pUNG*j{VeY+85K0&nB݉β{$4ymGMM:!+Fď8m%ܒX*j!qӠr =|Ep(Þ l-.x ǍJB}_q860LK+Xb,6{{clw.Nvh-ˢ"'7p <RLE-;,R;sXz&{:;xC=h9rB72)SR 0TWIvB孻i=pKNh>p5k_Zc0s罜|i뎉Pcce}D2RHiYҚ >>;d:# V b+/ |k%t; RR5xթIny]4.bG/]O(響V{Xn`0x++"3l'5+Z,EޗE^3TZ8pzwURJշ)mO`=D ,C"\ d&tπ#ʓacGb,uM &^]by4:}3uݭ`2(~Dc}hy'ղ>s9J!&s`.[vr6= QQm]" iUmJ5|d\; o:t7Y+ bgD7kڍ{j=GB:ig\r7G;UHK(YF᭬%9_p>XYF:FLy=E] $ߗ-&E- ڍ 5)9o\sl]bNc#ĂZ饜\ nipy-0.3.0/nipy/testing/decorators.py000066400000000000000000000055741210344137400176560ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Extend numpy's decorators to use nipy's gui and data labels. """ from numpy.testing.decorators import * from nipy.utils import templates, example_data, DataError def make_label_dec(label, ds=None): """Factory function to create a decorator that applies one or more labels. Parameters ---------- label : str or sequence One or more labels that will be applied by the decorator to the functions it decorates. 
Labels are attributes of the decorated function with their value set to True. ds : str An optional docstring for the resulting decorator. If not given, a default docstring is auto-generated. Returns ------- ldec : function A decorator. Examples -------- >>> slow = make_label_dec('slow') >>> print slow.__doc__ Labels a test as 'slow' >>> rare = make_label_dec(['slow','hard'], ... "Mix labels 'slow' and 'hard' for rare tests") >>> @rare ... def f(): pass ... >>> >>> f.slow True >>> f.hard True """ if isinstance(label,basestring): labels = [label] else: labels = label # Validate that the given label(s) are OK for use in setattr() by doing a # dry run on a dummy function. tmp = lambda : None for label in labels: setattr(tmp,label,True) # This is the actual decorator we'll return def decor(f): for label in labels: setattr(f,label,True) return f # Apply the user's docstring if ds is None: ds = "Labels a test as %r" % label decor.__doc__ = ds return decor # Nipy specific labels gui = make_label_dec('gui') data = make_label_dec('data') # For tests that need further review def needs_review(msg): """ Skip a test that needs further review. Parameters ---------- msg : string msg regarding the review that needs to be done """ def skip_func(func): return skipif(True, msg)(func) return skip_func # Easier version of the numpy knownfailure def knownfailure(f): return knownfailureif(True)(f) def if_datasource(ds, msg): try: ds.get_filename() except DataError: return skipif(True, msg) return lambda f : f def if_templates(f): return if_datasource(templates, 'Cannot find template data')(f) def if_example_data(f): return if_datasource(example_data, 'Cannot find example data')(f) def skip_doctest_if(condition): """Decorator - mark a function or method for skipping its doctest. This decorator allows you to mark a function whose docstring you wish to omit from testing, while preserving the docstring for introspection, help, etc.""" if not condition: return lambda f : f return make_label_dec('skip_doctest') nipy-0.3.0/nipy/testing/doctester.py000066400000000000000000000155351210344137400175030ustar00rootroot00000000000000""" Custom doctester based on Numpy doctester To run doctests via nose, you'll need ``nosetests nipy/testing/doctester.py --doctest-test``, because this file will be identified as containing tests. """ import re from doctest import register_optionflag import numpy as np from ..fixes.numpy.testing.noseclasses import (NumpyDoctest, NumpyOutputChecker, NumpyDocTestFinder) IGNORE_OUTPUT = register_optionflag('IGNORE_OUTPUT') SYMPY_EQUAL = register_optionflag('SYMPY_EQUAL') STRIP_ARRAY_REPR = register_optionflag('STRIP_ARRAY_REPR') IGNORE_DTYPE = register_optionflag('IGNORE_DTYPE') NOT_EQUAL = register_optionflag('NOT_EQUAL') FP_4DP = register_optionflag('FP_4DP') FP_6DP = register_optionflag('FP_6DP') FP_REG = re.compile(r'(?>> round_numbers('A=0.234, B=12.345', 2) 'A=0.23, B=12.35' Rounds the floating point value as it finds it in the string. This is even true for numbers with exponentials. 
Remember that: >>> '%.3f' % 0.3339e-10 '0.000' This routine will recognize an exponential as something to process, but only works on the decimal part (leaving the exponential part is it is): >>> round_numbers('(0.3339e-10, "string")', 3) '(0.334e-10, "string")' """ fmt = '%%.%df' % precision def dorep(match): gs = match.groups() res = fmt % float(gs[0]) if not gs[1] is None: res+=gs[1] return res return FP_REG.sub(dorep, in_str) ARRAY_REG = re.compile('^\s*array\((.*)\)\s*$', re.DOTALL) DTYPE_REG = re.compile('\s*,\s+dtype=.*', re.DOTALL) def strip_array_repr(in_str): """ Removes array-specific part of repr from string `in_str` This parser only works on lines that contain *only* an array repr (and therefore start with ``array``, and end with a close parenthesis. To remove dtypes in array reprs that may be somewhere within the line, use the ``IGNORE_DTYPE`` doctest option. Parameters ---------- in_str : str String maybe containing a repr for an array Returns ------- out_str : str String from which the array specific parts of the repr have been removed. Examples -------- >>> arr = np.arange(5, dtype='i2') Here's the normal repr: >>> arr array([0, 1, 2, 3, 4], dtype=int16) The repr with the 'array' bits removed: >>> strip_array_repr(repr(arr)) '[0, 1, 2, 3, 4]' """ arr_match = ARRAY_REG.match(in_str) if arr_match is None: return in_str out_str = arr_match.groups()[0] return DTYPE_REG.sub('', out_str) IGNORE_DTYPE_REG = re.compile(',\s+dtype=.*?(?=\))', re.DOTALL) def ignore_dtype(in_str): """ Removes dtype=[dtype] from string `in_str` Parameters ---------- in_str : str String maybe containing dtype specifier Returns ------- out_str : str String from which the dtype specifier has been removed. Examples -------- >>> arr = np.arange(5, dtype='i2') Here's the normal repr: >>> arr array([0, 1, 2, 3, 4], dtype=int16) The repr with the dtype bits removed >>> ignore_dtype(repr(arr)) 'array([0, 1, 2, 3, 4])' >>> ignore_dtype('something(again, dtype=something)') 'something(again)' Even if there are more closed brackets after the dtype >>> ignore_dtype('something(again, dtype=something) (1, 2)') 'something(again) (1, 2)' We need the close brackets to match >>> ignore_dtype('again, dtype=something') 'again, dtype=something' """ return IGNORE_DTYPE_REG.sub('', in_str) class NipyOutputChecker(NumpyOutputChecker): def check_output(self, want, got, optionflags): if IGNORE_OUTPUT & optionflags: return True # When writing tests we sometimes want to assure ourselves that the # results are _not_ equal wanted_tf = not (NOT_EQUAL & optionflags) # Strip dtype if IGNORE_DTYPE & optionflags: want = ignore_dtype(want) got = ignore_dtype(got) # Strip array repr from got and want if requested if STRIP_ARRAY_REPR & optionflags: # STRIP_ARRAY_REPR only matches for a line containing *only* an # array repr. Use IGNORE_DTYPE to ignore a dtype specifier embedded # within a more complex line. want = strip_array_repr(want) got = strip_array_repr(got) # If testing floating point, round to required number of digits if optionflags & (FP_4DP | FP_6DP): if optionflags & FP_4DP: dp = 4 elif optionflags & FP_6DP: dp = 6 want = round_numbers(want, dp) got = round_numbers(got, dp) # Are the strings equal when run through sympy? 
if SYMPY_EQUAL & optionflags: from sympy import sympify res = sympify(want) == sympify(got) return res == wanted_tf # Pass tests through two-pass numpy checker res = NumpyOutputChecker.check_output(self, want, got, optionflags) # Return True if we wanted True and got True, or if we wanted False and # got False return res == wanted_tf class DocTestSkip(object): """Object wrapper for doctests to be skipped.""" def __init__(self,obj): self.obj = obj def __getattribute__(self,key): if key == '__doc__': return None return getattr(object.__getattribute__(self, 'obj'), key) class NipyDocTestFinder(NumpyDocTestFinder): def _find(self, tests, obj, name, module, source_lines, globs, seen): """ Find tests for the given object and any contained objects, and add them to `tests`. Add ability to skip doctest """ if hasattr(obj, "skip_doctest") and obj.skip_doctest: obj = DocTestSkip(obj) NumpyDocTestFinder._find(self,tests, obj, name, module, source_lines, globs, seen) class NipyDoctest(NumpyDoctest): name = 'nipydoctest' # call nosetests with --with-nipydoctest out_check_class = NipyOutputChecker test_finder_class = NipyDocTestFinder def set_test_context(self, test): # set namespace for tests test.globs['np'] = np nipy-0.3.0/nipy/testing/functional.nii.gz000066400000000000000000001212731210344137400204140ustar00rootroot00000000000000ўJ/home/mb312/usr/local/lib/python2.5/site-packages/nipy/testing/functional.niiet[..i.vOw ;eq !Xqq9?QVͱXkԜyىІ38 `j?O? F^FSclqIHnT#6?GWA5!>ܱ{?rb>yqId0Kϻ?:5jm8r@O];Suq+;Ea@ sbؑ90F`R "ToI+]iUܣcUa( \$m4_d:)$MFWRԽq rPنCd,M\;kâ&Zle +eF; Kg;K 7i{NF{He?x/n= 4$&us(irWrbghdX'ȡ*iE8ӻ+M4zApkłî/J#23SWzwUCW<wOX+LeX܅޺xʫױHw \C'WrUbŅW^{~0"$JХs6fxYA_ U3ueߒm9GϪUeq֨GFZp!sq,>lzF%@'Ak0\NFF;!=\gKLm`/|أlsdr B6Ӷ^гiBIaKSA"#vݓB :sMv&zPu/W=`)bMk-(!' hv34$rR27 e#d.H,Пd[ܧ# VڠzJ ,Uu-KA#t@>w'xm;LҾNҋQ@) Ml6n-wj!2ߺ9TPߤ,l YrJWG:.4- ՑIt)^G|b\(;Gb٢dsǔLM{ s <~0#TwWfL"*Lw}ϡǻa| H˲ \ Qo:oiϸ[P[WAIįы}iaS}yJ"ú,嘿== u7i<|#E>T3}00֔CgJ&hI2Ґ#o=<L~wVaOEjPyxh貍 z_o[RcЎ |oxؐxBV=oTO[ڛީ^AfoWйx]?X3>n'1To};1a<3tqC|}_7ObhK Pz|.RylF$Rt@ ߰Cn{ ;c]]?HO`?¬wSݩ#| ЎQq|MU|0y@Lڥr5&(3r[:ao)l'.i#N;e@yHΡ6} (RZنו'+Ȍ̘td>-#mEHߛf7C^⥼⠴%?R 3f3g!bp-SFթ7̝~C]ڴ).>ɛ0*qxx[4p[&5CƌK.6[*P!;U+PNnVz*%\Hy>]: [RcZJ]NIL-./,^q{Y oyx9 u_DVGN^ 7&K=mkdž|+ӬgC4ٜ8bA l lDK GInݗA70w!<[竊ƛ]dve#r[PaQӾkYa)dVxR+,W5imHw=pǰ` d/߰ixf㫃;w1u#}ӎ/S%wuk CFF&Qe*dnВUʧ0@Z--syL-\#Fycz 8̅+|L&0]T*Di!y%03WEKOQBY?ݒudn\(Fۖko^"#wU{tP`7pwۈ۟|jz|+y<+H /bT|pC6XELJ[uWJC'idNHL ط}ƛl:Pkr"|N֐6lT;g40ǎ  s-z K_| OzhG-\L*d19:X"zY(mVgJa)4u5Y y-`sw֝~T>?XE̲l_cʷug/[/vު@wff/P)Ӏ߀z$㯑}S+4^hۑ/D2<P3~ Һ[]LSz\-t2)FҨ"D^C] <;u,`y [fc;3gp$-Wۉӝv4v~P|AlwwNɋvelv ]+Y&+EfBT>ԋ|r C}+*`!'}L΂/ ؿ=Ҕ-~W1K'k@zu2ˉW@NLb9N\Y3RLPo2YǴKY2@v4,^6(! ;UEҌUXl$&źJ]%#p=ؓ5Uz ' TOJ  ;+!82QozNW{\aEk-=~Ma_?u;uDMx1MʊMu~U6 r/SZ͍L vaۏ̾\;YQ9 ?2p'7Pu $Ѳ8uCt(Mp 8ύt55bst zƘc+ЌLtʓ?4Yb3g'Be-VE8oiY :? 
9ЄSnr{~^NaڤPP|S+ !s3L-Ndvo](Ýa;eR.'SQrU%SDFQ]bZTi3a̙~($cq*C,VDIR-~i2DJwS e ǯ;DLfo*-3Jf*ہnbH4c6s-w葡kð"{&*5c^%{?UE xh1u1 M M9NEѼGh/b 3|NeJ5^L{x0AWrܳ1<dG슃c/&u13Ts*J6Dȣc|Q&|u 6\U1ͯA|"O XF G#Nd#clqj$}4XfH ﵼ7" 5s<0Ms7v2!x3MܩS,O Du\ _>4gƉ@r뼤zߧ:}e(tSĿZMu ~S|\Ao$O[qx0 %-N}n.c׃ciGw`/7҉6}Z~SB9^6^oJ )@6:T5Ν=,qL8ɒ}%n ż7gaiJ{XMoo%^@Lt;<9N/C!AQANe*6lÃZ=;:44T#z?5^n8ɲb;e9yUK±6Eʭɷ/w ȶT3 D:]FT ŭ7^\Vb=PrOS^*?'{v֣@C Ȝpv6i.k}j ((nXYRY!PyZ+:TwNrcZB1䡽ypgů*6A73R{ Tp)RT psWw$Q9cwuyhvȖLU"l%LyqS'YۣfkBŚ6ullh9>"›$'AuFQ\Jy6bf:,'\"OGa]e类X]ZU9"Ok*fN",mh̠]<h}QՙSg Ag|6ށտ>%wRx>Ю U*!TRk}fw}ϋ5n5/z;lҞF_t{=]lCe }/E/$у Cw##c(`g,6hIF rL.="yCm(/fXy^jʓ+ N`2ظExr,ېjtZ~cj8έU+|(*fXjwW|GrkcwmڦMm1.g]vabZ`3x:K!W}bp{ɏ0T-g(/Gc^ÛOju\l~ּVna +Ze)Ph2>R_^ă]wȿD-[=u2:0̵> < f7^uIyۘKK(OTWT/,Osw+< u}1GEwO/aY Lċ? ]+=1%. A` =(x?ask\܊e*& OzZSh{f-XnA|SeYdK噺xZ,]{_hN7h>67n{/e˼ >#)>8J8ܚ`" t6VwLvS*'K$[$$śb͙6#UkJk fqg8}ڬT!*s# ^eEСhUy-֢yh} zDPhqt%6^sPjW>vuW`/Y cUQjK_$v|"R`3STjBM rе)j,X5u2T5Y9^6,씤< >0u g?O,*G7|=z='6GznnMwFw > 7c,{-'A~m|$I iOT膸$0(/YJily0qC(*iҠZccItޝBDWu;? hoh꩷I Q#O5j kkt .ɰA CZ>M3H%0,Kt0=aQ^CIDf_mec+QTxnd0Zׅ"NQ {NoŹ,3Dc|=;|8(<.P9  k- nogqS{5\O-q &Ctv/QnG㿏?w tЖ\?f]/ɻ n$#)%֕hY?)V JD<)<"握苹Iak. 1k侎z'aa QZ/"#9ΒoIdfJ,Rj_9[)6M[i r[8o.Fϰ6u&e=';ːt4W41gn?"xW+vǟGFn] g/xUj 1_?gy/buy}nE^Ųc7֖V2ay] iC-;vÜuۍ7vUʵ-yQ%}2vb#[͑x3bmFwOClm\B,hF#M1вۮPD~pA2BjHxl"K*կԋfyVӌdoG]zCq::yã/*|oưn$6)BX<ȼ>KMmlM=]я:s8c =FzɽyܽsCDzfrq6yۦ3%">-Hf YpؾjiʦE!`Y7>rCj-K VsrJk$<ouG_۩)"ˉcv?F:x(RjeMxq+౼ M3:޹vIL>80pN^4 Rbhmt:p?i9tSDsX._2 Z,[нp`|u/.0|%;Sl.ݫ#k0[ԓFgWg Y3s~ENeSuӡTxZBl%2HE&Y8@ pe~!T" zȮPfsO C* Sw)bhG`.We$m 4;>k\\QƇ⫝̸oom4l|@LJ=^(.KWy׫V!WZw`v^['NS![r8`9/ \ޑPn)p,0c(;k*͔6_ xa\bdЧI'NJBk Cdϑ֍hZAȡoh'4*soD^.}/?t$&@r}Ƹ+Ź n h_8۸`'Dm"٭Lnv ǵCg'.Uƶ_Z-_Z\PkVHx:UY9vg2l-^x{Te~aኁўs[FDaI[ӄfh*t G'6arM [hGi6[&<6DfDwU5«\ی'# ֝F ebb{Ʌ(HpAc*fM|t$#6֎Aa>(ɝ]r*/}5\!zAsqW^I6G9W՚Cj-oGQU}Ek4B?NCoB?źzfvŁ\kf(ᜁ2[n;2Y"QxjX q,jïnRNIImžDkJnI*(}D>KL%QTf!sK-*iQS]`?0?]tۮlT2L:K2@\z;'z"]!K.Fm68%D\6P+NgT}$f6mQ{y{yԒQ.aIPZ`=ΘB?"􋸨nipy-0.3.0/nipy/testing/nosepatch.py000066400000000000000000000037651210344137400174750ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Monkeypatch nose to accept any callable as a method. By default, nose's ismethod() fails for static methods. Once this is fixed in upstream nose we can disable it. Note: merely importing this module causes the monkeypatch to be applied.""" import unittest import nose.loader from inspect import ismethod, isfunction def getTestCaseNames(self, testCaseClass): """Override to select with selector, unless config.getTestCaseNamesCompat is True """ if self.config.getTestCaseNamesCompat: return unittest.TestLoader.getTestCaseNames(self, testCaseClass) def wanted(attr, cls=testCaseClass, sel=self.selector): item = getattr(cls, attr, None) # MONKEYPATCH: replace this: #if not ismethod(item): # return False # return sel.wantMethod(item) # With: if ismethod(item): return sel.wantMethod(item) # static method or something. If this is a static method, we # can't get the class information, and we have to treat it # as a function. 
Thus, we will miss things like class # attributes for test selection if isfunction(item): return sel.wantFunction(item) return False # END MONKEYPATCH cases = filter(wanted, dir(testCaseClass)) for base in testCaseClass.__bases__: for case in self.getTestCaseNames(base): if case not in cases: cases.append(case) # add runTest if nothing else picked if not cases and hasattr(testCaseClass, 'runTest'): cases = ['runTest'] if self.sortTestMethodsUsing: cases.sort(self.sortTestMethodsUsing) return cases ########################################################################## # Apply monkeypatch here nose.loader.TestLoader.getTestCaseNames = getTestCaseNames ########################################################################## nipy-0.3.0/nipy/testing/nosetester.py000066400000000000000000000076311210344137400177000ustar00rootroot00000000000000""" Nipy nosetester Sets doctests to run by default Use our own doctest plugin (based on that of numpy) """ from ..fixes.numpy.testing.nosetester import NoseTester, import_nose def fpw_opt_str(): """ Return first-package-wins option string for this version of nose Versions of nose prior to 1.1.0 needed ``=True`` for ``first-package-wins``, versions after won't accept it. changeset: 816:c344a4552d76 http://code.google.com/p/python-nose/issues/detail?id=293 Returns ------- fpw_str : str Either '--first-package-wins' or '--first-package-wins=True' depending on the nose version we are running. """ # protect nose import to provide comprehensible error if missing nose = import_nose() config = nose.config.Config() fpw_str = '--first-package-wins' opt_parser = config.getParser('') opt_def = opt_parser.get_option('--first-package-wins') if opt_def is None: raise RuntimeError('Nose does not accept "first-package-wins"' ' - is this an old nose version?') if opt_def.takes_value(): # the =True variant fpw_str += '=True' return fpw_str def prepare_imports(): """ Prepare any imports for testing run At the moment, we prepare matplotlib by trying to make it use a backend that does not need a display """ try: import matplotlib as mpl except ImportError: pass else: mpl.use('svg') class NipyNoseTester(NoseTester): """ Numpy-like testing class * Removes some numpy-specific excludes * Disables numpy's fierce clearout of module import context for doctests * Run doctests by default """ excludes = [] def _get_custom_doctester(self): """ Use our our own doctester """ import_nose() from .doctester import NipyDoctest return NipyDoctest() def test(self, label='fast', verbose=1, extra_argv=None, doctests=True, coverage=False): """ Run tests for module using nose. As for numpy tester, except enable tests by default. Parameters ---------- label : {'fast', 'full', '', attribute identifier}, optional Identifies the tests to run. This can be a string to pass to directly the nosetests executable with the '-A' option (an attribute identifier), or one of several special values. Special values are: * 'fast' - the default - which corresponds to the ``nosetests -A`` option of 'not slow'. * 'full' - fast (as above) and slow tests as in the 'no -A' option to nosetests - this is the same as ''. * None or '' - run all tests. verbose : int, optional Verbosity value for test outputs, in the range 1-10. Default is 1. extra_argv : list, optional List with any extra arguments to pass to nosetests. doctests : bool, optional If True, run doctests in module. Default is True. coverage : bool, optional If True, report coverage of nipy code. Default is False. (This requires the `coverage module: `_). 
Returns ------- result : object Returns the result of running the tests as a ``nose.result.TextTestResult`` object. Notes ----- Each nipy module should expose `test` in its namespace to run all tests for it. For example, to run all tests for nipy.algorithms: >>> import nipy.algorithms >>> nipy.algorithms.test() #doctest: +SKIP """ prepare_imports() if extra_argv is None: extra_argv = [] extra_argv.append(fpw_opt_str()) return super(NipyNoseTester, self).test(label, verbose, extra_argv, doctests, coverage) nipy-0.3.0/nipy/testing/setup.py000066400000000000000000000007651210344137400166460ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('testing', parent_package, top_path) config.add_subpackage('tests') config.add_data_files('*.nii.gz') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) nipy-0.3.0/nipy/testing/tests/000077500000000000000000000000001210344137400162665ustar00rootroot00000000000000nipy-0.3.0/nipy/testing/tests/__init__.py000066400000000000000000000000271210344137400203760ustar00rootroot00000000000000# Make tests a package nipy-0.3.0/nipy/testing/tests/test_doctester.py000066400000000000000000000037661210344137400217070ustar00rootroot00000000000000""" Testing round numbers utility """ import numpy as np from ..doctester import round_numbers, strip_array_repr from numpy.testing import (assert_array_almost_equal, assert_array_equal) from nose.tools import assert_true, assert_equal, assert_raises def test_strip_array_repr(): # check array repr removal for arr in (np.array(1), np.array(1, dtype=bool), np.arange(12), np.arange(12).reshape((3,4)), np.zeros((3,4), dtype=[('f1', 'f'), ('f2', int)])): expected = arr.tolist() list_repr = strip_array_repr(repr(arr)).replace('\n', '') actual = eval(list_repr) assert_equal(expected, actual) def test_round_numbers(): # Test string floating point tranformation in_out_strs = ( # input, 4DP, 6DP output ('100', '100', '100'), ('A string', 'A string', 'A string'), ('0.25', '0.2500', '0.250000'), ('0.12345', '0.1235', '0.123450'), # round up 4DP ('0.12343', '0.1234', '0.123430'), # round down 4DP ('0.1234567', '0.1235', '0.123457'), # round up 6DP ('0.1234564', '0.1235', '0.123456'), # round down 6DP ('345.1234564', '345.1235', '345.123456'), # round down 6DP ('0.1234564e-10', '0.1235e-10', '0.123456e-10'), # round down 6DP ('a0.1234564', 'a0.1234564','a0.1234564'), ('0.1234564a', '0.1234564a','0.1234564a'), ('_0.1234564', '_0.1234564','_0.1234564'), ('0.1234564_', '0.1234564_','0.1234564_'), ('(0.1234567)', '(0.1235)', '(0.123457)'), # round up 6DP ('(0.1234564)', '(0.1235)', '(0.123456)'), # round down 6DP ('(0.1234564e2)', '(0.1235e2)', '(0.123456e2)'), # round down 6DP ('(0.1234567)\n{0.7654321}', '(0.1235)\n{0.7654}', '(0.123457)\n{0.765432}'), ) for in_str, out_4, out_6 in in_out_strs: assert_equal(round_numbers(in_str, 4), out_4) assert_equal(round_numbers(in_str, 6), out_6) nipy-0.3.0/nipy/testing/tests/test_doctesting.py000066400000000000000000000071301210344137400220430ustar00rootroot00000000000000""" Doctests for Nipy / NumPy-specific nose/doctest modifications """ # try the #random directive on the output line def check_random_directive(): ''' >>> 2+2 #random: may vary on your system ''' # check the implicit "import numpy as np" def 
check_implicit_np(): ''' >>> np.array([1,2,3]) array([1, 2, 3]) ''' # there's some extraneous whitespace around the correct responses def check_whitespace_enabled(): ''' # whitespace after the 3 >>> 1+2 3 # whitespace before the 7 >>> 3+4 7 ''' def check_empty_output(): """ Check that no output does not cause an error. This is related to nose bug 445; the numpy plugin changed the doctest-result-variable default and therefore hit this bug: http://code.google.com/p/python-nose/issues/detail?id=445 >>> a = 10 """ def check_skip(): """ Check skip directive The test below should not run >>> 1/0 #doctest: +SKIP """ def func(): return 1 def check_have_module_context(): """ Check that, unlike numpy, we do have the module namespace >>> func() 1 """ def check_fails(): """ Check inversion directive The directive is mainly for tests >>> 'black' #doctest: +NOT_EQUAL 'white' >>> 'white' #doctest: +NOT_EQUAL 'black' """ def check_ignore_output(): """ Check IGNORE_OUTPUT option works >>> 'The answer' #doctest: +IGNORE_OUTPUT 42 >>> 'The answer' #doctest: +IGNORE_OUTPUT 'The answer' """ def check_sympy_equal(): """ Check SYMPY_EQUAL option >>> from sympy import symbols >>> a, b, c = symbols('a, b, c') >>> a + b #doctest: +SYMPY_EQUAL b + a >>> a + b #doctest: +SYMPY_EQUAL a + b >>> a + b #doctest: +SYMPY_EQUAL +NOT_EQUAL a + c >>> a + b #doctest: +SYMPY_EQUAL +NOT_EQUAL a - b """ def check_fp_equal(): """ Check floating point equal >>> 0.12345678 #doctest: +FP_6DP 0.1234569 >>> 0.12345678 #doctest: +FP_6DP +NOT_EQUAL 0.1234564 >>> 0.12345678 #doctest: +FP_4DP 0.1235 >>> 0.12345678 #doctest: +FP_6DP +NOT_EQUAL 0.1235 """ def check_array_repr(): """ Stripping of array repr >>> arr = np.arange(5, dtype='i2') The test should match with and without the array repr >>> arr #doctest: +STRIP_ARRAY_REPR [0, 1, 2, 3, 4] >>> arr #doctest: +STRIP_ARRAY_REPR array([0, 1, 2, 3, 4], dtype=int16) """ def check_ignore_dtype(): """ Stripping of dtype from array repr >>> arr = np.arange(5, dtype='i2') The test should match with and without the array repr >>> arr #doctest: +IGNORE_DTYPE array([0, 1, 2, 3, 4]) >>> arr #doctest: +IGNORE_DTYPE array([0, 1, 2, 3, 4], dtype=int16) >>> arr #doctest: +IGNORE_DTYPE array([0, 1, 2, 3, 4], dtype=int32) >>> 1, arr, 3 #doctest: +IGNORE_DTYPE (1, array([0, 1, 2, 3, 4], dtype=int32), 3) """ def check_combinations(): """ Check the processing combines as expected >>> 0.33333 #doctest: +SYMPY_EQUAL +NOT_EQUAL 0.3333 >>> 0.33333 #doctest: +SYMPY_EQUAL +FP_4DP 0.3333 >>> arr = np.arange(5, dtype='i2') This next will not sympify unless the array repr is removed >>> arr #doctest: +STRIP_ARRAY_REPR +SYMPY_EQUAL array([0, 1, 2, 3, 4], dtype=int16) """ if __name__ == '__main__': # Run tests outside nipy test rig import sys import nose from nipy.testing.doctester import NipyDoctest argv = [sys.argv[0], __file__, '--with-nipydoctest'] + sys.argv[1:] nose.core.TestProgram(argv=argv, addplugins=[NipyDoctest()]) nipy-0.3.0/nipy/testing/tests/test_images.py000066400000000000000000000011031210344137400211370ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: ''' Test example images ''' from nipy import load_image from nipy.testing import funcfile, anatfile from nose.tools import assert_true, assert_false, assert_equal def test_dims(): fimg = load_image(funcfile) # make sure time dimension is correctly set in affine yield assert_equal, fimg.coordmap.affine[3,3], 2.0 # should follow, but also make sure affine is 
invertible ainv = fimg.coordmap.inverse yield assert_false, ainv is None nipy-0.3.0/nipy/tests/000077500000000000000000000000001210344137400146115ustar00rootroot00000000000000nipy-0.3.0/nipy/tests/__init__.py000066400000000000000000000000251210344137400167170ustar00rootroot00000000000000#init for nipy/tests nipy-0.3.0/nipy/tests/test_scripts.py000066400000000000000000000110001210344137400177010ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Test scripts If we appear to be running from the development directory, use the scripts in the top-level folder ``scripts``. Otherwise try and get the scripts from the path """ from __future__ import with_statement import sys import os from os.path import dirname, join as pjoin, isfile, isdir, abspath, realpath from subprocess import Popen, PIPE from nibabel.tmpdirs import InTemporaryDirectory from nipy import load_image from nose.tools import assert_true, assert_false, assert_equal from ..testing import funcfile from numpy.testing import decorators, assert_almost_equal from nipy.testing.decorators import make_label_dec from nibabel.optpkg import optional_package matplotlib, HAVE_MPL, _ = optional_package('matplotlib') needs_mpl = decorators.skipif(not HAVE_MPL, "Test needs matplotlib") script_test = make_label_dec('script_test') # Need shell to get path to correct executables USE_SHELL = True DEBUG_PRINT = os.environ.get('NIPY_DEBUG_PRINT', False) def local_script_dir(): # Check for presence of scripts in development directory. ``realpath`` # checks for the situation where the development directory has been linked # into the path. below_nipy_dir = realpath(pjoin(dirname(__file__), '..', '..')) devel_script_dir = pjoin(below_nipy_dir, 'scripts') if isfile(pjoin(below_nipy_dir, 'setup.py')) and isdir(devel_script_dir): return devel_script_dir return None LOCAL_SCRIPT_DIR = local_script_dir() def run_command(cmd): if not LOCAL_SCRIPT_DIR is None: # Windows can't run script files without extensions natively so we need # to run local scripts (no extensions) via the Python interpreter. On # Unix, we might have the wrong incantation for the Python interpreter # in the hash bang first line in the source file. 
So, either way, run # the script through the Python interpreter cmd = "%s %s" % (sys.executable, pjoin(LOCAL_SCRIPT_DIR, cmd)) if DEBUG_PRINT: print("Running command '%s'" % cmd) proc = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=USE_SHELL) stdout, stderr = proc.communicate() if proc.poll() == None: proc.terminate() if proc.returncode != 0: raise RuntimeError('Command "%s" failed with stdout\n%s\nstderr\n%s\n' % (cmd, stdout, stderr)) return proc.returncode @needs_mpl @script_test def test_nipy_diagnose(): # Test nipy diagnose script fimg = load_image(funcfile) ncomps = 12 with InTemporaryDirectory() as tmpdir: # Need to quote out path in case it has spaces cmd = 'nipy_diagnose "%s" --ncomponents=%d --out-path="%s"' % ( funcfile, ncomps, tmpdir) run_command(cmd) for out_fname in ('components_functional.png', 'pcnt_var_functional.png', 'tsdiff_functional.png', 'vectors_components_functional.npz'): assert_true(isfile(out_fname)) for out_img in ('max_functional.nii.gz', 'mean_functional.nii.gz', 'min_functional.nii.gz', 'std_functional.nii.gz'): img = load_image(out_img) assert_equal(img.shape, fimg.shape[:-1]) del img pca_img = load_image('pca_functional.nii.gz') assert_equal(pca_img.shape, fimg.shape[:-1] + (ncomps,)) del pca_img @needs_mpl @script_test def test_nipy_tsdiffana(): # Test nipy_tsdiffana script out_png = 'ts_out.png' with InTemporaryDirectory(): # Quotes in case of space in arguments cmd = 'nipy_tsdiffana "%s" --out-file="%s"' % (funcfile, out_png) run_command(cmd) assert_true(isfile(out_png)) @script_test def test_nipy_3_4d(): # Test nipy_3dto4d and nipy_4dto3d fimg = load_image(funcfile) N = fimg.shape[-1] out_4d = 'func4d.nii' with InTemporaryDirectory() as tmpdir: # Quotes in case of space in arguments cmd = 'nipy_4dto3d "%s" --out-path="%s"' % (funcfile, tmpdir) run_command(cmd) imgs_3d = ['functional_%04d.nii' % i for i in range(N)] for iname in imgs_3d: assert_true(isfile(iname)) cmd = 'nipy_3dto4d "%s" --out-4d="%s"' % ('" "'.join(imgs_3d), out_4d) run_command(cmd) fimg_back = load_image(out_4d) assert_almost_equal(fimg.get_data(), fimg_back.get_data()) del fimg_back nipy-0.3.0/nipy/utils/000077500000000000000000000000001210344137400146075ustar00rootroot00000000000000nipy-0.3.0/nipy/utils/__init__.py000066400000000000000000000015571210344137400167300ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ General utilities for code support. These are modules that we (broadly-speaking) wrote; packages that other people wrote, that we ship, go in the nipy.externals tree. 
""" from nibabel.data import make_datasource, DataError, datasource_or_bomber # Module level datasource instances for convenience from ..info import DATA_PKGS templates = datasource_or_bomber(DATA_PKGS['nipy-templates']) example_data = datasource_or_bomber(DATA_PKGS['nipy-data']) try: example_data.get_filename() except DataError: HAVE_EXAMPLE_DATA = False else: HAVE_EXAMPLE_DATA = True try: templates.get_filename() except DataError: HAVE_TEMPLATES = False else: HAVE_TEMPLATES = True from nipy.testing import Tester test = Tester().test bench = Tester().bench nipy-0.3.0/nipy/utils/arrays.py000066400000000000000000000022261210344137400164640ustar00rootroot00000000000000""" Array utilities """ import numpy as np def strides_from(shape, dtype, order='C'): """ Return strides as for continuous array shape `shape` and given `dtype` Parameters ---------- shape : sequence shape of array to calculate strides from dtype : dtype-like dtype specifier for array order : {'C', 'F'}, optional whether array is C or FORTRAN ordered Returns ------- strides : tuple seqence length ``len(shape)`` giving strides for continuous array with given `shape`, `dtype` and `order` Examples -------- >>> strides_from((2,3,4), 'i4') (48, 16, 4) >>> strides_from((3,2), np.float) (16, 8) >>> strides_from((5,4,3), np.bool, order='F') (1, 5, 20) """ dt = np.dtype(dtype) if dt.itemsize == 0: raise ValueError('Empty dtype "%s"' % dt) if order == 'F': strides = np.cumprod([dt.itemsize] + list(shape[:-1])) elif order == 'C': strides = np.cumprod([dt.itemsize] + list(shape)[::-1][:-1]) strides = strides[::-1] else: raise ValueError('Unexpected order "%s"' % order) return tuple(strides) nipy-0.3.0/nipy/utils/compat3.py000066400000000000000000000020201210344137400165210ustar00rootroot00000000000000""" Routines for Python 3 compatibility These are in addition to the nibabel.py3k routines. """ import sys py3 = sys.version_info[0] >= 3 if py3: def to_str(s): """ Convert `s` to string, decoding as latin1 if `s` is bytes """ if isinstance(s, bytes): return s.decode('latin1') return str(s) else: to_str = str def open4csv(fname, mode): """ Open filename `fname` for CSV IO in read or write `mode` Parameters ---------- fname : str filename to open mode : {'r', 'w'} Mode to open file. Don't specify binary or text modes; we need to chose these according to python version. Returns ------- fobj : file object open file object; needs to be closed by the caller """ if mode not in ('r', 'w'): raise ValueError('Only "r" and "w" allowed for mode') if not py3: # Files for csv reading and writing should be binary mode return open(fname, mode + 'b') return open(fname, mode, newline='') nipy-0.3.0/nipy/utils/perlpie.py000066400000000000000000000076141210344137400166310ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Perform a global search and replace on the current directory *recursively*. This a small python wrapper around the `perl -p -i -e` functionality. I **strongly recommend** running `perlpie` on files under source control. In this way it's easy to track your changes and if you discover your regular expression was wrong you can easily revert. I also recommend using `grin` to test your regular expressions before running `perlpie`. Parameters ---------- regex : regular expression Regular expression matching the string you want to replace newstring : string The string you would like to replace the oldstring with. 
Note this is not a regular expression but the exact string. One exception to this rule is the at symbol `@`. This has special meaning in perl, so you need an escape character for this. See Examples below. Requires -------- perl : The underlying language we're using to perform the search and replace. `grin `_ : Grin is a tool written by Robert Kern to wrap `grep` and `find` with python and easier command line options. Examples -------- Replace all occurences of foo with bar:: perlpie foo bar Replace numpy.testing with nipy's testing framework:: perlpie 'from\s+numpy\.testing.*' 'from nipy.testing import *' Replace all @slow decorators in my code with @dec.super_slow. Here we have to escape the @ symbol which has special meaning in perl:: perlpie '\@slow' '\@dec.super_slow' Remove all occurences of importing make_doctest_suite:: perlpie 'from\snipy\.utils\.testutils.*make_doctest_suite' """ # notes on perl-dash-pie # perl -p -i -e 's/oldstring/newstring/g' * # find . -name '*.html' -print0 | xargs -0 perl -pi -e 's/oldstring/newstring/g' from optparse import OptionParser import subprocess usage_doc = "usage: %prog [options] regex newstring" def check_deps(): try: import grin except ImportError: print 'perlpie requires grin to operate.' print 'You can find grin in the python package index:' print ' http://pypi.python.org/pypi/grin/' return False # assume they have perl for now return True def perl_dash_pie(oldstr, newstr, dry_run=None): """Use perl to replace the oldstr with the newstr. Examples -------- # To replace all occurences of 'import numpy as N' with 'import numpy as np' from nipy.utils import perlpie perlpie.perl_dash_pie('import\s+numpy\s+as\s+N', 'import numpy as np') grind | xargs perl -pi -e 's/import\s+numpy\s+as\s+N/import numpy as np/g' """ if dry_run: cmd = "grind | xargs perl -p -e 's/%s/%s/g'" % (oldstr, newstr) else: cmd = "grind | xargs perl -pi -e 's/%s/%s/g'" % (oldstr, newstr) print cmd try: subprocess.check_call(cmd, shell=True) except subprocess.CalledProcessError, err: msg = """ Error while executing perl_dash_pie command: %s Error: %s """ % (cmd, str(err)) raise Exception(msg) def print_extended_help(option, opt_str, value, parser, *args, **kwargs): print __doc__ def main(): description = __doc__.splitlines()[0] usage = usage_doc parser = OptionParser(usage=usage, description=description) parser.add_option('-e', '--extended-help', action='callback', callback=print_extended_help, help='print extended help including examples') parser.add_option('-n', '--dry-run', action="store_true", dest="dry_run", help='send results to stdout without modifying files') (options, args) = parser.parse_args() if not args: parser.print_help() return if check_deps(): oldstr = args[0] newstr = args[1] perl_dash_pie(oldstr, newstr, options.dry_run) nipy-0.3.0/nipy/utils/setup.py000066400000000000000000000007151210344137400163240ustar00rootroot00000000000000# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('utils', parent_package, top_path) config.add_subpackage('tests') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict()) nipy-0.3.0/nipy/utils/skip_test.py000066400000000000000000000015711210344137400171720ustar00rootroot00000000000000""" Utilities to skip tests """ import sys import inspect def skip_if_running_nose(msg=None): """ 
    Raise a SkipTest if we appear to be running the nose test loader.

    Parameters
    ==========
    msg: string, optional
        The message issued when SkipTest is raised
    """
    if not 'nose' in sys.modules:
        return
    try:
        import nose
    except ImportError:
        return
    # Now check that we have the loader in the call stack
    stack = inspect.stack()
    from nose import loader
    loader_file_name = loader.__file__
    if loader_file_name.endswith('.pyc'):
        loader_file_name = loader_file_name[:-1]
    for frame, file_name, line_num, func_name, line, number in stack:
        if file_name == loader_file_name:
            if msg is not None:
                raise nose.SkipTest(msg)
            else:
                raise nose.SkipTest

nipy-0.3.0/nipy/utils/tests/

nipy-0.3.0/nipy/utils/tests/__init__.py

# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Please write tests for all code submitted to the repository. The code will be
used by many people, and will in due course be used in live analyses, so we
need to make sure that we have the best possible defenses against bugs. It
also helps us think about code interfaces, and gives examples of code use
that can be useful for others using the code.

Python's unit testing framework (the U{unittest} module) is used to implement
project tests. We use the convention that each package contains a subpackage
called tests which contains modules defining test cases (subclasses of
U{unittest.TestCase}) for that package.

The nipy.utils.tests package contains an example test case called
L{test_template.TemplateTest} to get you started writing your tests.

Please try to include working test cases for all functions and classes that
you contribute. Often, writing tests for your code before the code is written
helps to frame your thoughts about what the code should look like.
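For illustration only, a minimal test module following this convention might
look like the sketch below (the module name, class name and tests here are
invented for this example and are not actual nipy code):

    # nipy/utils/tests/test_example.py -- hypothetical example module
    import unittest

    import numpy as np
    from numpy.testing import assert_array_equal


    class ExampleTest(unittest.TestCase):
        # unittest collects methods whose names start with 'test'
        def test_addition(self):
            self.assertEqual(1 + 1, 2)
            # numpy.testing helpers compare arrays element by element
            assert_array_equal(np.zeros(3) + 1, np.ones(3))


    if __name__ == '__main__':
        unittest.main()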
""" nipy-0.3.0/nipy/utils/tests/test_arrays.py000066400000000000000000000020231210344137400206600ustar00rootroot00000000000000""" Testing arrays module """ import numpy as np from ..arrays import strides_from from numpy.testing import (assert_array_almost_equal, assert_array_equal) from nose.tools import assert_true, assert_equal, assert_raises def test_strides_from(): for shape in ((3,), (2,3), (2,3,4), (5,4,3,2)): for order in 'FC': for dtype in sum(np.sctypes.values(), []): if dtype is str: dtype = 'S3' elif dtype is unicode: dtype = 'U4' elif dtype is np.void: continue exp = np.empty(shape, dtype=dtype, order=order).strides assert_equal(strides_from(shape, dtype, order), exp) assert_raises(ValueError, strides_from, shape, np.void, order) assert_raises(ValueError, strides_from, shape, str, order) assert_raises(ValueError, strides_from, shape, unicode, order) assert_raises(ValueError, strides_from, (3,2), 'f8', 'G') nipy-0.3.0/nipy/utils/tests/test_compat3.py000066400000000000000000000023341210344137400207320ustar00rootroot00000000000000""" Testing compat3 module """ from __future__ import with_statement from nibabel.py3k import asstr, asbytes from ..compat3 import to_str, open4csv from nose.tools import (assert_true, assert_false, assert_raises, assert_equal, assert_not_equal) from nibabel.tmpdirs import InTemporaryDirectory def test_to_str(): # Test routine to convert to string assert_equal('1', to_str(1)) assert_equal('1.0', to_str(1.0)) assert_equal('from', to_str(asstr('from'))) assert_equal('from', to_str(asbytes('from'))) def test_open4csv(): # Test opening of csv files import csv contents = [['oh', 'my', 'G'], ['L', 'O', 'L'], ['when', 'cleaning', 'windas']] with InTemporaryDirectory(): with open4csv('my.csv', 'w') as fobj: writer = csv.writer(fobj) writer.writerows(contents) with open4csv('my.csv', 'r') as fobj: dialect = csv.Sniffer().sniff(fobj.read()) fobj.seek(0) reader = csv.reader(fobj, dialect) back = list(reader) assert_equal(contents, back) assert_raises(ValueError, open4csv, 'my.csv', 'rb') assert_raises(ValueError, open4csv, 'my.csv', 'wt') nipy-0.3.0/nipy/utils/tests/test_skip_test.py000066400000000000000000000013111210344137400213630ustar00rootroot00000000000000""" Test the test skip utilities. """ import nose from nipy.utils.skip_test import skip_if_running_nose # First we must check that during test loading time, our skip_test does # fire try: skip_if_running_nose() skip_test_raised = False except nose.SkipTest: skip_test_raised = True def test_raise_at_load_time(): """ Check that SkipTest was raised at load time """ nose.tools.assert_true(skip_test_raised) def test_not_raise_at_run_time(): """ Check that SkipTest is not raised at run time """ try: skip_if_running_nose() except nose.SkipTest: # We need to raise another exception, as nose will capture this # one raise AssertionError nipy-0.3.0/scripts/000077500000000000000000000000001210344137400141575ustar00rootroot00000000000000nipy-0.3.0/scripts/nipy_3dto4d000077500000000000000000000044541210344137400162540ustar00rootroot00000000000000#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: DESCRIP = 'Read 3D image files and write a 4D file' EPILOG = \ '''nipy_3dto4d will take a series of 3D nifti images in any format readable by nibabel and concatenate them into a 4D image, and write the image with format guessed from the output image filename. 
You can set the filename with the ``--out-4d`` parameter, or we make a
filename from the input names.
'''
import os
from os.path import join as pjoin

from nipy.externals.argparse import (ArgumentParser,
                                     RawDescriptionHelpFormatter)

import nibabel as nib


def do_3d_to_4d(filenames, check_affines=True):
    imgs = []
    for fname in filenames:
        img = nib.load(fname)
        imgs.append(img)
    return nib.concat_images(imgs, check_affines=check_affines)


def main():
    parser = ArgumentParser(description=DESCRIP,
                            epilog=EPILOG,
                            formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument('in_filenames', type=str, nargs='+',
                        help='3D image filenames')
    parser.add_argument('--out-4d', type=str,
                        help='4D output image name')
    parser.add_argument('--check-affines', type=bool, default=True,
                        help='False if you want to ignore differences '
                             'in affines between the 3D images, True if you '
                             'want to raise an error for significant '
                             'differences (default is True)')
    # parse the command line
    args = parser.parse_args()
    # get input 3ds
    filenames = args.in_filenames
    # affine check
    check_affines = args.check_affines
    # get output name
    out_fname = args.out_4d
    if out_fname is None:
        pth, fname = os.path.split(filenames[0])
        froot, ext = os.path.splitext(fname)
        if ext in ('.gz', '.bz2'):
            gz = ext
            froot, ext = os.path.splitext(froot)
        else:
            gz = ''
        out_fname = pjoin(pth, froot + '_4d' + ext + gz)
    img4d = do_3d_to_4d(filenames, check_affines=check_affines)
    nib.save(img4d, out_fname)


if __name__ == '__main__':
    main()

nipy-0.3.0/scripts/nipy_4dto3d

#!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
DESCRIP = 'Read 4D image file and write 3D nifti file for each volume'
EPILOG = \
'''nipy_4dto3d will generate a series of 3D nifti images for each volume of a
4D image series in any format readable by `nibabel`.
'''
from os.path import splitext, join as pjoin, split as psplit

import nibabel as nib

from nipy.externals.argparse import (ArgumentParser,
                                     RawDescriptionHelpFormatter)


def main():
    parser = ArgumentParser(description=DESCRIP,
                            epilog=EPILOG,
                            formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument('filename', type=str,
                        help='4D image filename')
    parser.add_argument('--out-path', type=str,
                        help='path for output image files')
    args = parser.parse_args()
    out_path = args.out_path
    img = nib.load(args.filename)
    imgs = nib.four_to_three(img)
    froot, ext = splitext(args.filename)
    if ext in ('.gz', '.bz2'):
        froot, ext = splitext(froot)
    if not out_path is None:
        pth, fname = psplit(froot)
        froot = pjoin(out_path, fname)
    for i, img3d in enumerate(imgs):
        fname3d = '%s_%04d.nii' % (froot, i)
        nib.save(img3d, fname3d)


if __name__ == '__main__':
    main()

nipy-0.3.0/scripts/nipy_diagnose

#!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
DESCRIP = 'Calculate and write results for diagnostic screen'
EPILOG = \
'''nipy_diagnose will generate a series of diagnostic images for a 4D fMRI
image volume. The following images will be generated. <ext> is the input
filename extension (e.g. '.nii'):

    * components_